code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package hashsets
// New creates a HashSet seeded with the given values.
func New(values ...interface{}) *HashSet {
	set := HashSet{data: make(map[interface{}]struct{}, len(values))}
	set.Add(values...)
	return &set
}

// HashSet is an unordered collection of unique values backed by a map.
type HashSet struct {
	data map[interface{}]struct{}
}

// Add adds values to the set; duplicates are ignored.
func (s *HashSet) Add(values ...interface{}) {
	for _, value := range values {
		s.data[value] = struct{}{}
	}
}

// Remove removes values from the set; missing values are ignored.
func (s *HashSet) Remove(values ...interface{}) {
	for _, value := range values {
		delete(s.data, value)
	}
}

// Contains reports whether value is in the set.
func (s *HashSet) Contains(value interface{}) bool {
	_, exists := s.data[value]
	return exists
}

// ContainsAll reports whether every one of the given values is in the set.
func (s *HashSet) ContainsAll(values ...interface{}) bool {
	for _, value := range values {
		if !s.Contains(value) {
			return false
		}
	}
	return true
}

// ContainsAny reports whether at least one of the given values is in the set.
func (s *HashSet) ContainsAny(values ...interface{}) bool {
	for _, value := range values {
		if s.Contains(value) {
			return true
		}
	}
	return false
}

// Merge adds every element of the given sets into s.
func (s *HashSet) Merge(sets ...*HashSet) {
	for _, set := range sets {
		// Variadic Add avoids one method call per element.
		s.Add(set.GetValues()...)
	}
}

// Clear removes all elements from the set.
func (s *HashSet) Clear() {
	s.data = make(map[interface{}]struct{})
}

// GetValues returns the elements of the set in unspecified order.
func (s *HashSet) GetValues() []interface{} {
	values := make([]interface{}, 0, s.Size())
	for key := range s.data {
		values = append(values, key)
	}
	return values
}

// IsEmpty reports whether the set has no elements.
func (s *HashSet) IsEmpty() bool {
	return s.Size() == 0
}

// Size returns the number of elements in the set.
func (s *HashSet) Size() int {
	return len(s.data)
}

// Common set functions

// Copy makes an identical, independent copy of the set.
func (s *HashSet) Copy() *HashSet {
	return New(s.GetValues()...)
}

// Union makes a set that has all of the elements in either of two sets.
// (Renamed the result variable: the original shadowed the builtin "new".)
func (s *HashSet) Union(ss *HashSet) *HashSet {
	result := s.Copy()
	result.Merge(ss)
	return result
}

// Intersection makes a set that has only the elements common to both sets.
// Built directly from the common elements instead of copying s and then
// removing the non-common ones, avoiding needless copies.
func (s *HashSet) Intersection(ss *HashSet) *HashSet {
	result := New()
	for _, v := range s.GetValues() {
		if ss.Contains(v) {
			result.Add(v)
		}
	}
	return result
}

// SymmetricDifference makes a set that has elements that are in exactly one
// of the two sets, but not both.
func (s *HashSet) SymmetricDifference(ss *HashSet) *HashSet {
	result := New()
	for _, v := range s.GetValues() {
		if !ss.Contains(v) {
			result.Add(v)
		}
	}
	for _, v := range ss.GetValues() {
		if !s.Contains(v) {
			result.Add(v)
		}
	}
	return result
}

// Subtraction makes a set with the elements that are in the first set, but
// not the second.
func (s *HashSet) Subtraction(ss *HashSet) *HashSet {
	result := s.Copy()
	result.Remove(ss.GetValues()...)
	return result
}
// Package eval provides an expression evaluator.
package eval
import (
"fmt"
"math"
)
// Env is the list of variables (name/value) used during evaluation.
type Env map[Var]float64

// Eval returns the value of the variable from env, or 0 when the variable
// is absent (the map's zero value).
func (v Var) Eval(env Env) float64 {
	return env[v]
}
// Eval returns the value of the literal; the environment is ignored.
func (l literal) Eval(_ Env) float64 {
	return float64(l)
}
// Eval returns the value of the unary expression, applying the operator to
// the evaluated operand. It panics on an unsupported operator.
func (u unary) Eval(env Env) float64 {
	switch u.op {
	case '+':
		return +u.x.Eval(env)
	case '-':
		return -u.x.Eval(env)
	default:
		panic(fmt.Sprintf("unsupported unary operator: %q", u.op))
	}
}
// Eval returns the value of the binary expression, evaluating both operands
// and combining them with the operator. It panics on an unsupported operator.
func (b binary) Eval(env Env) float64 {
	switch b.op {
	case '+':
		return b.x.Eval(env) + b.y.Eval(env)
	case '-':
		return b.x.Eval(env) - b.y.Eval(env)
	case '*':
		return b.x.Eval(env) * b.y.Eval(env)
	case '/':
		return b.x.Eval(env) / b.y.Eval(env)
	default:
		panic(fmt.Sprintf("unsupported binary operator: %q", b.op))
	}
}
// Eval returns the value of the function call. Supported functions are
// pow (two arguments), sin and sqrt (one argument each); anything else panics.
func (c call) Eval(env Env) float64 {
	switch c.fn {
	case "pow":
		return math.Pow(c.args[0].Eval(env), c.args[1].Eval(env))
	case "sin":
		return math.Sin(c.args[0].Eval(env))
	case "sqrt":
		return math.Sqrt(c.args[0].Eval(env))
	default:
		panic(fmt.Sprintf("unsupported function call: %s", c.fn))
	}
}
// Eval returns the value of the set operation
func (s set) Eval(env Env) float64 {
switch s.op {
case "min":
var min float64
if len(s.args) > 0 {
min = s.args[0].Eval(env)
for _, expr := range s.args[1:] {
v := expr.Eval(env)
if v < min {
min = v
}
}
}
return min
case "max":
var max float64
if len(s.args) > 0 {
max = s.args[0].Eval(env)
for _, expr := range s.args[1:] {
v := expr.Eval(env)
if v > max {
max = v
}
}
}
return max
case "sum":
var sum float64
for _, expr := range s.args {
sum += expr.Eval(env)
}
return sum
case "avg":
var sum float64
for _, expr := range s.args {
sum += expr.Eval(env)
}
return sum / float64(len(s.args))
case "count":
return float64(len(s.args))
}
panic(fmt.Sprintf("unsupported set operation: %s", s.op))
} | Chapter-7/Exercice-14/eval/eval.go | 0.748812 | 0.47317 | eval.go | starcoder |
package worker
import (
"fmt"
"github.com/splitio/go-split-commons/v3/dtos"
"github.com/splitio/split-synchronizer/v4/splitio/common"
)
// toImpressionsDTO flattens impressions keyed by feature name into a slice of
// per-feature ImpressionsDTO values. It errors on a nil map.
func toImpressionsDTO(impressionsMap map[string][]dtos.ImpressionDTO) ([]dtos.ImpressionsDTO, error) {
	if impressionsMap == nil {
		return nil, fmt.Errorf("Impressions map cannot be null")
	}
	// Preallocate: exactly one entry per feature.
	toReturn := make([]dtos.ImpressionsDTO, 0, len(impressionsMap))
	for feature, impressions := range impressionsMap {
		toReturn = append(toReturn, dtos.ImpressionsDTO{
			TestName:       feature,
			KeyImpressions: impressions,
		})
	}
	return toReturn, nil
}
// wrapData groups the given impressions into collectedData under their
// metadata and feature name, mutating and returning collectedData.
func wrapData(impressions []dtos.Impression, collectedData map[dtos.Metadata]map[string][]dtos.ImpressionDTO, metadata dtos.Metadata) map[dtos.Metadata]map[string][]dtos.ImpressionDTO {
	for _, impression := range impressions {
		// Lazily create the per-metadata bucket (single lookup via comma-ok).
		forMetadata, ok := collectedData[metadata]
		if !ok {
			forMetadata = make(map[string][]dtos.ImpressionDTO)
			collectedData[metadata] = forMetadata
		}
		// append handles a first-seen feature (nil slice) directly; no
		// explicit existence check or empty-slice initialization needed.
		forMetadata[impression.FeatureName] = append(
			forMetadata[impression.FeatureName],
			dtos.ImpressionDTO{
				BucketingKey: impression.BucketingKey,
				ChangeNumber: impression.ChangeNumber,
				KeyName:      impression.KeyName,
				Label:        impression.Label,
				Time:         impression.Time,
				Treatment:    impression.Treatment,
				Pt:           impression.Pt,
			},
		)
	}
	return collectedData
}
// wrapDataForListener groups the given impressions into collectedData under
// their metadata and feature name, building listener-shaped records.
// It mutates and returns collectedData.
func wrapDataForListener(impressions []dtos.Impression, collectedData map[dtos.Metadata]map[string][]common.ImpressionListener, metadata dtos.Metadata) map[dtos.Metadata]map[string][]common.ImpressionListener {
	for _, impression := range impressions {
		// Lazily create the per-metadata bucket (single lookup via comma-ok).
		forMetadata, ok := collectedData[metadata]
		if !ok {
			forMetadata = make(map[string][]common.ImpressionListener)
			collectedData[metadata] = forMetadata
		}
		// append handles a first-seen feature (nil slice) directly.
		forMetadata[impression.FeatureName] = append(
			forMetadata[impression.FeatureName],
			common.ImpressionListener{
				BucketingKey: impression.BucketingKey,
				ChangeNumber: impression.ChangeNumber,
				KeyName:      impression.KeyName,
				Label:        impression.Label,
				Time:         impression.Time,
				Treatment:    impression.Treatment,
				Pt:           impression.Pt,
			},
		)
	}
	return collectedData
}
// toListenerDTO flattens listener impressions keyed by feature name into a
// slice of per-feature ImpressionsListener values. It errors on a nil map.
func toListenerDTO(impressionsMap map[string][]common.ImpressionListener) ([]common.ImpressionsListener, error) {
	if impressionsMap == nil {
		return nil, fmt.Errorf("Impressions map cannot be null")
	}
	// Preallocate: exactly one entry per feature.
	toReturn := make([]common.ImpressionsListener, 0, len(impressionsMap))
	for feature, impressions := range impressionsMap {
		toReturn = append(toReturn, common.ImpressionsListener{
			TestName:       feature,
			KeyImpressions: impressions,
		})
	}
	return toReturn, nil
}
func wrapDTOListener(collectedData map[dtos.Metadata]map[string][]common.ImpressionListener) map[dtos.Metadata][]common.ImpressionsListener {
var err error
impressions := make(map[dtos.Metadata][]common.ImpressionsListener)
for metadata, impsForMetadata := range collectedData {
impressions[metadata], err = toListenerDTO(impsForMetadata)
if err != nil {
continue
}
}
return impressions
} | splitio/producer/worker/util.go | 0.625438 | 0.488893 | util.go | starcoder |
package block
// FlowerType represents a type of flower.
type FlowerType struct {
	flower
}

type flower uint8

// flowerNames holds the display name of each flower, indexed by its uint8 value.
var flowerNames = [...]string{
	"Dandelion", "Poppy", "Blue Orchid", "Allium", "Azure Bluet", "Red Tulip",
	"Orange Tulip", "White Tulip", "Pink Tulip", "Oxeye Daisy", "Cornflower",
	"Lily of the Valley", "Wither Rose",
}

// flowerIDs holds the string identifier of each flower, indexed by its uint8 value.
var flowerIDs = [...]string{
	"dandelion", "poppy", "orchid", "allium", "houstonia", "tulip_red",
	"tulip_orange", "tulip_white", "tulip_pink", "oxeye", "cornflower",
	"lily_of_the_valley", "wither_rose",
}

// Dandelion is a dandelion flower.
func Dandelion() FlowerType {
	return FlowerType{0}
}

// Poppy is a poppy flower.
func Poppy() FlowerType {
	return FlowerType{1}
}

// BlueOrchid is a blue orchid flower.
func BlueOrchid() FlowerType {
	return FlowerType{2}
}

// Allium is an allium flower.
func Allium() FlowerType {
	return FlowerType{3}
}

// AzureBluet is an azure bluet flower.
func AzureBluet() FlowerType {
	return FlowerType{4}
}

// RedTulip is a red tulip flower.
func RedTulip() FlowerType {
	return FlowerType{5}
}

// OrangeTulip is an orange tulip flower.
func OrangeTulip() FlowerType {
	return FlowerType{6}
}

// WhiteTulip is a white tulip flower.
func WhiteTulip() FlowerType {
	return FlowerType{7}
}

// PinkTulip is a pink tulip flower.
func PinkTulip() FlowerType {
	return FlowerType{8}
}

// OxeyeDaisy is an oxeye daisy flower.
func OxeyeDaisy() FlowerType {
	return FlowerType{9}
}

// Cornflower is a cornflower flower.
func Cornflower() FlowerType {
	return FlowerType{10}
}

// LilyOfTheValley is a lily of the valley flower.
func LilyOfTheValley() FlowerType {
	return FlowerType{11}
}

// WitherRose is a wither rose flower.
func WitherRose() FlowerType {
	return FlowerType{12}
}

// Uint8 returns the flower as a uint8.
func (f flower) Uint8() uint8 {
	return uint8(f)
}

// Name returns the human-readable name of the flower.
// The duplicated 13-way switch is replaced by a table lookup; unknown
// values still panic with the original message.
func (f flower) Name() string {
	if int(f) < len(flowerNames) {
		return flowerNames[f]
	}
	panic("unknown flower type")
}

// String returns the string identifier of the flower.
func (f flower) String() string {
	if int(f) < len(flowerIDs) {
		return flowerIDs[f]
	}
	panic("unknown flower type")
}

// FlowerTypes returns all known flower types in id order.
func FlowerTypes() []FlowerType {
	return []FlowerType{Dandelion(), Poppy(), BlueOrchid(), Allium(), AzureBluet(), RedTulip(), OrangeTulip(), WhiteTulip(), PinkTulip(), OxeyeDaisy(), Cornflower(), LilyOfTheValley(), WitherRose()}
}
package dynamicanalysis
import "fmt"
// PassContext represents information about how an object was passed to a function
type PassContext struct {
Function string // canonical name of function passed to
Keyword string // name of the argument, if it was passed with a keyword, or "*" or "**"
Position int // Position in the argument list (including other keyword args)
}
// Usage represents an object that had zero or more attributes accessed on it while being traced
type Usage struct {
Object *FirstObservation // information about the object from its first observation
Type string // canonical name of the object's type
ReturnedFrom string // canonical name of function that this object was returned from
PassedTo []PassContext // functions that this object was passed to
Attributes []string // attributes that were accessed, in order of access
}
// UsagesFromEvents converts an event stream to a set of usages for each observed object
func UsagesFromEvents(events []Event) ([]*Usage, error) {
// First construct the map from ID to observation
usages := make(map[int64]*Usage)
for _, event := range events {
if event, ok := event.(*FirstObservation); ok {
if _, seen := usages[event.ID]; seen {
return nil, fmt.Errorf("duplicate observations for ID %d", event.ID)
}
usages[event.ID] = &Usage{Object: event}
}
}
// Fill in type info
for _, usage := range usages {
usage.Type = usages[usage.Object.TypeID].Object.CanonicalName
}
// Now process the rest of the events
for _, event := range events {
switch event := event.(type) {
case *Call:
fun := usages[event.FunctionID].Object.CanonicalName
usages[event.ResultID].ReturnedFrom = fun
for i, arg := range event.Arguments {
usages[arg.ValueID].PassedTo = append(usages[arg.ValueID].PassedTo, PassContext{
Position: i,
Function: fun,
Keyword: arg.Name,
})
}
if event.VarargID != 0 {
usages[event.VarargID].PassedTo = append(usages[event.VarargID].PassedTo, PassContext{
Function: fun,
Keyword: "*",
})
}
if event.KwargID != 0 {
usages[event.KwargID].PassedTo = append(usages[event.KwargID].PassedTo, PassContext{
Function: fun,
Keyword: "**",
})
}
case *AttributeLookup:
usages[event.ObjectID].Attributes = append(usages[event.ObjectID].Attributes, event.Attribute)
}
}
// Now flatten the map into a list
var out []*Usage
for _, usage := range usages {
out = append(out, usage)
}
return out, nil
} | kite-go/dynamicanalysis/usage.go | 0.736874 | 0.415492 | usage.go | starcoder |
package image
import (
"context"
"fmt"
"github.com/google/gapid/core/data/protoutil"
"github.com/google/gapid/gapis/database"
)
// Converter is used to convert the image formed from the parameters data,
// width and height into another format. If the conversion succeeds then the
// converted image data is returned, otherwise an error is returned.
type Converter func(data []byte, width int, height int) ([]byte, error)

// srcDstFmt is a map key pairing a source and a destination format key.
type srcDstFmt struct{ src, dst interface{} }

// registeredConverters indexes registered Converters by (src, dst) format key.
var registeredConverters = make(map[srcDstFmt]Converter)
// RegisterConverter registers the Converter for converting from src to dst
// formats. If a converter already exists for converting from src to dst, then
// this function panics.
func RegisterConverter(src, dst *Format, c Converter) {
	key := srcDstFmt{src.Key(), dst.Key()}
	if _, found := registeredConverters[key]; found {
		panic(fmt.Errorf("Converter from %s to %s already registered", src, dst))
	}
	registeredConverters[key] = c
}

// registered reports whether a converter from src to dst has been registered.
func registered(src, dst *Format) bool {
	key := srcDstFmt{src.Key(), dst.Key()}
	_, found := registeredConverters[key]
	return found
}

// converter is the interface implemented by formats that support format
// conversion.
type converter interface {
	// convert converts the image formed from data, width and height to dstFmt.
	// If converter is unable to convert to dstFmt then nil, nil is returned.
	convert(data []byte, width, height int, dstFmt *Format) ([]byte, error)
}
// Convert uses the registered Converters to convert the image formed from
// data, width and height from srcFmt to dstFmt.
// If no direct converter has been registered to convert from srcFmt to dstFmt,
// then Convert may try converting via the intermediate RGBA_U8_NORM format.
func Convert(data []byte, width, height int, srcFmt, dstFmt *Format) ([]byte, error) {
	srcKey, dstKey := srcFmt.Key(), dstFmt.Key()
	if srcKey == dstKey {
		return data, nil // No conversion required.
	}
	if err := srcFmt.Check(data, width, height); err != nil {
		return nil, fmt.Errorf("Source data of format %s is invalid: %s", srcFmt, err)
	}
	// Look for a registered converter.
	if conv, found := registeredConverters[srcDstFmt{srcKey, dstKey}]; found {
		return conv(data, width, height)
	}
	// Check if the source format supports the converter interface.
	if c, ok := protoutil.OneOf(srcFmt.Format).(converter); ok {
		data, err := c.convert(data, width, height, dstFmt)
		if data != nil || err != nil {
			return data, err
		}
	}
	// No direct conversion found. Try going via RGBA_U8_NORM.
	rgbaU8Key := RGBA_U8_NORM.Key()
	if convA, found := registeredConverters[srcDstFmt{srcKey, rgbaU8Key}]; found {
		if convB, found := registeredConverters[srcDstFmt{rgbaU8Key, dstKey}]; found {
			// BUG FIX: the second hop must run only when the first hop
			// succeeded (err == nil). The original inverted this check and
			// fed convB the failed conversion's result.
			if rgba, err := convA(data, width, height); err == nil {
				return convB(rgba, width, height)
			}
		}
	}
	return nil, fmt.Errorf("No converter registered that can convert from format '%s' to '%s'\n",
		srcFmt, dstFmt)
}
// Resolve returns the byte array holding the converted image for the resolve
// request.
// TODO: Can this be moved to the resolve package?
func (r *ConvertResolvable) Resolve(ctx context.Context) (interface{}, error) {
data, err := database.Resolve(ctx, r.Data.ID())
if err != nil {
return nil, err
}
from, to := r.FormatFrom, r.FormatTo
rowLength := from.Size(int(r.Width), 1)
if r.StrideFrom != 0 && r.StrideFrom != uint32(rowLength) {
// Remove any padding from the source image
packed := make([]byte, from.Size(int(r.Width), int(r.Height)))
src, dst := data.([]byte), packed
for y := 0; y < int(r.Height); y++ {
copy(dst, src[:rowLength])
dst, src = dst[rowLength:], src[r.StrideFrom:]
}
data = packed
}
data, err = Convert(data.([]byte), int(r.Width), int(r.Height), from, to)
if err != nil {
return nil, err
}
return data, nil
} | core/image/convert.go | 0.656768 | 0.463444 | convert.go | starcoder |
package main
import (
"bufio"
"flag"
"fmt"
"log"
"math"
"os"
"strconv"
"strings"
)
// point is a coordinate; id distinguishes the original input coordinates
// (grid positions built during the search use the zero id).
type point struct {
	id, x, y int
}

// grid maps points to ints; for the loaded input only the keys matter.
type grid map[point]int

var input = flag.String("input", "input", "Puzzle input file")
var maxTotalDistance = flag.Int("distance", 10000, "Max total distance threshold")

func main() {
	flag.Parse()
	grid := loadGrid(*input)
	fmt.Printf("Size of the largest non-infinite area: %d.\n", grid.largestNonInfiniteArea())
	fmt.Printf("Size of the region containing all locations within total max distance: %d.\n", grid.areaWithin(*maxTotalDistance))
}

// abs returns the absolute value of n.
func abs(n int) int {
	if n < 0 {
		return -n
	}
	return n
}

// distanceTo returns the Manhattan distance between p and p2.
// Computed directly on ints instead of round-tripping through float64
// and math.Abs as the original did.
func (p point) distanceTo(p2 point) int {
	return abs(p.x-p2.x) + abs(p.y-p2.y)
}

// maxPoint returns a point holding the maximum x and y coordinates seen in g.
func (g grid) maxPoint() point {
	maxX, maxY := 0, 0
	for p := range g {
		if p.x > maxX {
			maxX = p.x
		}
		if p.y > maxY {
			maxY = p.y
		}
	}
	return point{x: maxX, y: maxY}
}

// loadGrid reads "x, y" coordinate lines from filename into a grid,
// assigning each coordinate a sequential id. Any read or parse error is
// fatal (the program exits).
func loadGrid(filename string) grid {
	file, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	id := 0
	g := make(grid)
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		coords := strings.Split(scanner.Text(), ", ")
		x, err := strconv.Atoi(coords[0])
		if err != nil {
			log.Fatal(err)
		}
		y, err := strconv.Atoi(coords[1])
		if err != nil {
			log.Fatal(err)
		}
		g[point{id: id, x: x, y: y}] = 0
		id++
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return g
}

// largestNonInfiniteArea returns the size of the largest finite region of
// grid positions that are strictly closest to a single input coordinate.
// Positions tied between two or more coordinates belong to no region, and
// any region touching the bounding-box edge is treated as infinite.
func (g grid) largestNonInfiniteArea() int {
	distGrid := make(grid)
	maxPoint := g.maxPoint()
	// Assign every grid position to its uniquely closest input coordinate.
	for x := 0; x <= maxPoint.x; x++ {
		for y := 0; y <= maxPoint.y; y++ {
			gp := point{x: x, y: y}
			closestDistance := math.MaxInt64
			closestPoints := []point{}
			for p := range g {
				d := gp.distanceTo(p)
				if d < closestDistance {
					closestDistance = d
					closestPoints = []point{p}
				} else if d == closestDistance {
					closestPoints = append(closestPoints, p)
				}
			}
			// Positions equally close to two or more coordinates count for none.
			if len(closestPoints) > 1 {
				continue
			}
			distGrid[gp] = closestPoints[0].id
		}
	}
	// Tally areas per coordinate id; regions touching the edge are infinite,
	// marked with the MaxInt64 sentinel.
	area := map[int]int{}
	for gp, pid := range distGrid {
		if gp.y == 0 || gp.y == maxPoint.y || gp.x == 0 || gp.x == maxPoint.x {
			area[pid] = math.MaxInt64
		} else if area[pid] != math.MaxInt64 {
			area[pid]++
		}
	}
	pointIDWithLargestArea := -1
	for pid, a := range area {
		if a != math.MaxInt64 && (pointIDWithLargestArea == -1 || a > area[pointIDWithLargestArea]) {
			pointIDWithLargestArea = pid
		}
	}
	return area[pointIDWithLargestArea]
}

// areaWithin returns the number of grid positions whose summed Manhattan
// distance to all input coordinates is strictly below threshold.
// FIX: the original made two identical passes over the grid (storing sums in
// a temporary grid, then counting, with a confusingly shadowed "sum"
// variable); a single pass computes the same count.
func (g grid) areaWithin(threshold int) int {
	count := 0
	maxPoint := g.maxPoint()
	for x := 0; x <= maxPoint.x; x++ {
		for y := 0; y <= maxPoint.y; y++ {
			p := point{x: x, y: y}
			total := 0
			for pg := range g {
				total += p.distanceTo(pg)
			}
			if total < threshold {
				count++
			}
		}
	}
	return count
}
package main
import "fmt"
// Heap is a binary min-heap of ints with a fixed maximum capacity.
type Heap struct {
	values  []int
	size    int
	maxsize int
}

// newHeap creates an empty heap that can hold up to maxsize elements.
func newHeap(maxsize int) *Heap {
	return &Heap{
		values:  []int{},
		size:    0,
		maxsize: maxsize,
	}
}

// leaf reports whether index refers to a leaf node.
func (h *Heap) leaf(index int) bool {
	return index >= (h.size/2) && index <= h.size
}

// parent returns the index of index's parent node.
func (h *Heap) parent(index int) int {
	return (index - 1) / 2
}

// leftchild returns the index of index's left child.
func (h *Heap) leftchild(index int) int {
	return 2*index + 1
}

// rightchild returns the index of index's right child.
func (h *Heap) rightchild(index int) int {
	return 2*index + 2
}

// insert adds item to the heap, restoring the heap property.
// It returns an error when the heap is already at capacity.
func (h *Heap) insert(item int) error {
	if h.size >= h.maxsize {
		// FIX: the error message previously read "Heal is ful".
		return fmt.Errorf("heap is full")
	}
	h.values = append(h.values, item)
	h.size++
	h.upHeapify(h.size - 1)
	return nil
}

// swap exchanges the values at the two given indices.
func (h *Heap) swap(first, second int) {
	h.values[first], h.values[second] = h.values[second], h.values[first]
}

// upHeapify bubbles the value at index up toward the root while it is
// smaller than its parent.
// BUG FIX: the original never advanced index after a swap, so a newly
// inserted value could climb at most one level, leaving the heap property
// violated (masked only because main also called buildMinHeap).
func (h *Heap) upHeapify(index int) {
	for index > 0 && h.values[index] < h.values[h.parent(index)] {
		h.swap(index, h.parent(index))
		index = h.parent(index)
	}
}

// downHeapify sinks the value at current down until neither child is
// smaller, restoring the heap property below current.
func (h *Heap) downHeapify(current int) {
	if h.leaf(current) {
		return
	}
	smallest := current
	left := h.leftchild(current)
	right := h.rightchild(current)
	if left < h.size && h.values[left] < h.values[smallest] {
		smallest = left
	}
	if right < h.size && h.values[right] < h.values[smallest] {
		smallest = right
	}
	if smallest != current {
		h.swap(current, smallest)
		h.downHeapify(smallest)
	}
}

// buildMinHeap establishes the heap property over the entire values slice.
func (h *Heap) buildMinHeap() {
	for index := (h.size / 2) - 1; index >= 0; index-- {
		h.downHeapify(index)
	}
}

// remove pops and returns the minimum element.
// It panics when called on an empty heap.
func (h *Heap) remove() int {
	top := h.values[0]
	h.values[0] = h.values[h.size-1]
	h.values = h.values[:h.size-1]
	h.size--
	h.downHeapify(0)
	return top
}
func main() {
s := []int{6, 5, 3, 7, 2, 8}
h := newHeap(len(s))
for i := 0; i < len(s); i++ {
h.insert(s[i])
}
h.buildMinHeap()
for i := 0; i < len(s); i++ {
fmt.Println(h.remove())
}
fmt.Scanln()
} | data_structures/heap/heap.go | 0.658308 | 0.436742 | heap.go | starcoder |
package specs
import (
"testing"
"github.com/go-rel/rel"
"github.com/go-rel/rel/where"
"github.com/stretchr/testify/assert"
)
// createPreloadUser inserts and returns a fixture user with three addresses
// ("primary", "home" and "work") used by the preload specs below.
func createPreloadUser(repo rel.Repository) User {
	var (
		user = User{
			Name: "preload",
			Gender: "male",
			Age: 25,
			Addresses: []Address{
				{Name: "primary"},
				{Name: "home"},
				{Name: "work"},
			},
		}
	)
	repo.MustInsert(ctx, &user)
	return user
}
// PreloadHasMany tests specification for preloading has many association.
func PreloadHasMany(t *testing.T, repo rel.Repository) {
	var (
		result User
		user = createPreloadUser(repo)
	)
	// NOTE(review): waitForReplication presumably lets read replicas catch up
	// with the insert above — confirm against the suite's setup.
	waitForReplication()
	err := repo.Find(ctx, &result, where.Eq("id", user.ID))
	assert.Nil(t, err)
	// Preloading the association should make result equal the inserted user.
	err = repo.Preload(ctx, &result, "addresses")
	assert.Nil(t, err)
	assert.Equal(t, user, result)
}

// PreloadHasManyWithQuery tests specification for preloading has many
// association filtered by an extra query.
func PreloadHasManyWithQuery(t *testing.T, repo rel.Repository) {
	var (
		result User
		user = createPreloadUser(repo)
	)
	waitForReplication()
	err := repo.Find(ctx, &result, where.Eq("id", user.ID))
	assert.Nil(t, err)
	// Only the "primary" address fixture matches the filter.
	err = repo.Preload(ctx, &result, "addresses", where.Eq("name", "primary"))
	assert.Nil(t, err)
	assert.Equal(t, 1, len(result.Addresses))
	assert.Equal(t, user.Addresses[0], result.Addresses[0])
}

// PreloadHasManySlice tests specification for preloading has many association from multiple records.
func PreloadHasManySlice(t *testing.T, repo rel.Repository) {
	var (
		result []User
		users = []User{
			createPreloadUser(repo),
			createPreloadUser(repo),
		}
	)
	waitForReplication()
	err := repo.FindAll(ctx, &result, where.In("id", users[0].ID, users[1].ID))
	assert.Nil(t, err)
	// A single Preload call should populate addresses for every fetched user.
	err = repo.Preload(ctx, &result, "addresses")
	assert.Nil(t, err)
	assert.Equal(t, users, result)
}
// PreloadHasOne tests specification for preloading has one association.
func PreloadHasOne(t *testing.T, repo rel.Repository) {
	var (
		result User
		user = createPreloadUser(repo)
	)
	waitForReplication()
	err := repo.Find(ctx, &result, where.Eq("id", user.ID))
	assert.Nil(t, err)
	err = repo.Preload(ctx, &result, "primary_address")
	assert.Nil(t, err)
	// Only presence is asserted; which address counts as "primary" is
	// determined by the association mapping.
	assert.NotNil(t, result.PrimaryAddress)
}

// PreloadHasOneWithQuery tests specification for preloading has one
// association filtered by an extra query.
func PreloadHasOneWithQuery(t *testing.T, repo rel.Repository) {
	var (
		result User
		user = createPreloadUser(repo)
	)
	waitForReplication()
	err := repo.Find(ctx, &result, where.Eq("id", user.ID))
	assert.Nil(t, err)
	// The filter pins the preloaded record to the "primary" address fixture.
	err = repo.Preload(ctx, &result, "primary_address", where.Eq("name", "primary"))
	assert.Nil(t, err)
	assert.Equal(t, user.Addresses[0], *result.PrimaryAddress)
}

// PreloadHasOneSlice tests specification for preloading has one association from multiple records.
func PreloadHasOneSlice(t *testing.T, repo rel.Repository) {
	var (
		result []User
		users = []User{
			createPreloadUser(repo),
			createPreloadUser(repo),
		}
	)
	waitForReplication()
	err := repo.FindAll(ctx, &result, where.In("id", users[0].ID, users[1].ID))
	assert.Nil(t, err)
	err = repo.Preload(ctx, &result, "primary_address")
	assert.Nil(t, err)
	assert.NotNil(t, result[0].PrimaryAddress)
	assert.NotNil(t, result[1].PrimaryAddress)
}
// PreloadBelongsTo tests specification for preloading belongs to association.
func PreloadBelongsTo(t *testing.T, repo rel.Repository) {
	var (
		result Address
		user = createPreloadUser(repo)
	)
	waitForReplication()
	err := repo.Find(ctx, &result, where.Eq("id", user.Addresses[0].ID))
	assert.Nil(t, err)
	// The expected user carries no addresses: a preloaded belongs-to value is
	// not expected to load its own has-many association in turn.
	user.Addresses = nil
	err = repo.Preload(ctx, &result, "user")
	assert.Nil(t, err)
	assert.Equal(t, user, result.User)
}

// PreloadBelongsToWithQuery tests specification for preloading belongs to
// association when the extra filter matches no record.
func PreloadBelongsToWithQuery(t *testing.T, repo rel.Repository) {
	var (
		result Address
		user = createPreloadUser(repo)
	)
	waitForReplication()
	err := repo.Find(ctx, &result, where.Eq("id", user.Addresses[0].ID))
	assert.Nil(t, err)
	user.Addresses = nil
	// No user is named "not exists", so the association must stay zero-valued.
	err = repo.Preload(ctx, &result, "user", where.Eq("name", "not exists"))
	assert.Nil(t, err)
	assert.Zero(t, result.User)
}
func PreloadBelongsToSlice(t *testing.T, repo rel.Repository) {
var (
user = createPreloadUser(repo)
result = user.Addresses
resultLen = len(result)
)
waitForReplication()
user.Addresses = nil
err := repo.Preload(ctx, &result, "user")
assert.Nil(t, err)
assert.Len(t, result, resultLen)
for i := range result {
assert.Equal(t, user, result[i].User)
}
} | specs/preload.go | 0.525369 | 0.475666 | preload.go | starcoder |
package csvdec
import (
"reflect"
"strconv"
)
// fillSlice populates a slice value of any supported element kind from the
// given string fields, dispatching on the element kind. It panics for
// unsupported element types and returns any parse error.
func fillSlice(value reflect.Value, fields []string) error {
	switch value.Type().Elem().Kind() {
	case reflect.String:
		return fillStringSlice(value, fields)
	case reflect.Int:
		return fillIntSlice(value, fields)
	case reflect.Int8:
		return fillInt8Slice(value, fields)
	case reflect.Int16:
		return fillInt16Slice(value, fields)
	case reflect.Int32:
		return fillInt32Slice(value, fields)
	case reflect.Int64:
		return fillInt64Slice(value, fields)
	case reflect.Uint:
		return fillUintSlice(value, fields)
	case reflect.Uint8:
		return fillUint8Slice(value, fields)
	case reflect.Uint16:
		return fillUint16Slice(value, fields)
	case reflect.Uint32:
		return fillUint32Slice(value, fields)
	case reflect.Uint64:
		return fillUint64Slice(value, fields)
	case reflect.Float32:
		return fillFloat32Slice(value, fields)
	case reflect.Float64:
		return fillFloat64Slice(value, fields)
	default:
		panic("Unsupported type: " + value.Type().String())
	}
}
// Populates the given int slice with values parsed from fields.
// Returns an error if parsing fails.
func fillIntSlice(value reflect.Value, fields []string) error {
parsed := make([]int, len(fields))
for i, field := range fields {
n, err := strconv.ParseInt(field, 0, 0)
if err != nil {
return err
}
parsed[i] = int(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given int8 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillInt8Slice(value reflect.Value, fields []string) error {
parsed := make([]int8, len(fields))
for i, field := range fields {
n, err := strconv.ParseInt(field, 0, 8)
if err != nil {
return err
}
parsed[i] = int8(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given int16 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillInt16Slice(value reflect.Value, fields []string) error {
parsed := make([]int16, len(fields))
for i, field := range fields {
n, err := strconv.ParseInt(field, 0, 16)
if err != nil {
return err
}
parsed[i] = int16(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given int32 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillInt32Slice(value reflect.Value, fields []string) error {
parsed := make([]int32, len(fields))
for i, field := range fields {
n, err := strconv.ParseInt(field, 0, 32)
if err != nil {
return err
}
parsed[i] = int32(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given int64 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillInt64Slice(value reflect.Value, fields []string) error {
parsed := make([]int64, len(fields))
for i, field := range fields {
n, err := strconv.ParseInt(field, 0, 64)
if err != nil {
return err
}
parsed[i] = int64(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given uint slice with values parsed from fields.
// Returns an error if parsing fails.
func fillUintSlice(value reflect.Value, fields []string) error {
parsed := make([]uint, len(fields))
for i, field := range fields {
n, err := strconv.ParseUint(field, 0, 0)
if err != nil {
return err
}
parsed[i] = uint(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given uint8 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillUint8Slice(value reflect.Value, fields []string) error {
parsed := make([]uint8, len(fields))
for i, field := range fields {
n, err := strconv.ParseUint(field, 0, 8)
if err != nil {
return err
}
parsed[i] = uint8(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given uint16 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillUint16Slice(value reflect.Value, fields []string) error {
parsed := make([]uint16, len(fields))
for i, field := range fields {
n, err := strconv.ParseUint(field, 0, 16)
if err != nil {
return err
}
parsed[i] = uint16(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given uint32 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillUint32Slice(value reflect.Value, fields []string) error {
parsed := make([]uint32, len(fields))
for i, field := range fields {
n, err := strconv.ParseUint(field, 0, 32)
if err != nil {
return err
}
parsed[i] = uint32(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given uint64 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillUint64Slice(value reflect.Value, fields []string) error {
parsed := make([]uint64, len(fields))
for i, field := range fields {
n, err := strconv.ParseUint(field, 0, 64)
if err != nil {
return err
}
parsed[i] = uint64(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// fillFloat32Slice parses each field as a float with 32-bit precision
// and stores the resulting []float32 into value.
// Returns the first parse error encountered; value is left untouched on error.
func fillFloat32Slice(value reflect.Value, fields []string) error {
	parsed := make([]float32, len(fields))
	for i, field := range fields {
		n, err := strconv.ParseFloat(field, 32)
		if err != nil {
			return err
		}
		parsed[i] = float32(n)
	}
	value.Set(reflect.ValueOf(parsed))
	return nil
}
// Populates the given float64 slice with values parsed from fields.
// Returns an error if parsing fails.
func fillFloat64Slice(value reflect.Value, fields []string) error {
parsed := make([]float64, len(fields))
for i, field := range fields {
n, err := strconv.ParseFloat(field, 64)
if err != nil {
return err
}
parsed[i] = float64(n)
}
value.Set(reflect.ValueOf(parsed))
return nil
}
// Populates the given string slice with values parsed from fields.
// Returns an error if parsing fails.
func fillStringSlice(value reflect.Value, fields []string) error {
// Fields may be a part of a bigger slice, so copying to allow the big
// slice to get CG'ed.
slice := make([]string, len(fields))
copy(slice, fields)
value.Set(reflect.ValueOf(slice))
return nil
} | csvdec/fillslice.go | 0.870157 | 0.60288 | fillslice.go | starcoder |
package constants
// NewConstantValue010 returns a new ConstantVals populated with the
// default int64, bool and string network constants for this version.
func NewConstantValue010() *ConstantVals {
	return &ConstantVals{
		int64values: map[ConstantName]int64{
			EmissionCurve: 6,
			BlocksPerYear: 5256000,
			IncentiveCurve: 100, // configures incentive pendulum
			OutboundTransactionFee: 2_000000, // A 0.02 Rune fee on all swaps and withdrawals
			NativeTransactionFee: 2_000000, // A 0.02 Rune fee on all on chain txs
			PoolCycle: 43200, // Make a pool available every 3 days
			StagedPoolCost: 10_00000000, // amount of rune to take from a staged pool on every pool cycle
			MinRunePoolDepth: 10000_00000000, // minimum rune pool depth to be an available pool
			MaxAvailablePools: 100, // maximum number of available pools
			MinimumNodesForYggdrasil: 6, // No yggdrasil pools if THORNode have less than 6 active nodes
			MinimumNodesForBFT: 4, // Minimum node count to keep network running. Below this, Ragnarök is performed.
			DesiredValidatorSet: 100, // desired validator set size
			AsgardSize: 40, // desired node operators in an asgard vault
			FundMigrationInterval: 360, // number of blocks THORNode will attempt to move funds from a retiring vault to an active one
			ChurnInterval: 43200, // How many blocks THORNode try to rotate validators
			ChurnRetryInterval: 720, // How many blocks until we retry a churn (only if we haven't had a successful churn in ChurnInterval blocks)
			BadValidatorRedline: 3, // redline multiplier to find a multitude of bad actors
			BadValidatorRate: 43200, // rate to mark a validator to be rotated out for bad behavior
			OldValidatorRate: 43200, // rate to mark a validator to be rotated out for age
			LowBondValidatorRate: 43200, // rate to mark a validator to be rotated out for low bond
			LackOfObservationPenalty: 2, // add two slash point for each block where a node does not observe
			SigningTransactionPeriod: 300, // how many blocks before a request to sign a tx by yggdrasil pool, is counted as delinquent.
			DoubleSignMaxAge: 24, // number of blocks to limit double signing a block
			MinimumBondInRune: 1_000_000_00000000, // 1 million rune
			FailKeygenSlashPoints: 720, // slash for 720 blocks , which equals 1 hour
			FailKeysignSlashPoints: 2, // slash for 2 blocks
			LiquidityLockUpBlocks: 0, // the number of blocks LP can withdraw after their liquidity
			ObserveSlashPoints: 1, // the number of slashpoints for making an observation (redeems later if observation reaches consensus)
			ObservationDelayFlexibility: 10, // number of blocks of flexibility for a validator to get their slash points taken off for making an observation
			YggFundLimit: 50, // percentage of the amount of funds a ygg vault is allowed to have.
			YggFundRetry: 1000, // number of blocks before retrying to fund a yggdrasil vault
			JailTimeKeygen: 720 * 6, // blocks a node account is jailed for failing to keygen. DO NOT drop below tss timeout
			JailTimeKeysign: 60, // blocks a node account is jailed for failing to keysign. DO NOT drop below tss timeout
			NodePauseChainBlocks: 720, // number of blocks that a node can pause/resume a global chain halt
			MinSwapsPerBlock: 10, // process all swaps if queue is less than this number
			MaxSwapsPerBlock: 100, // max swaps to process per block
			VirtualMultSynths: 2, // pool depth multiplier for synthetic swaps
			MaxSynthPerAssetDepth: 3300, // percentage (in basis points) of how many synths are allowed relative to asset depth of the related pool
			MinSlashPointsForBadValidator: 100, // The minimum slash point
			FullImpLossProtectionBlocks: 1440000, // number of blocks before a liquidity provider gets 100% impermanent loss protection
			MinTxOutVolumeThreshold: 1000_00000000, // total txout volume (in rune) a block needs to have to slow outbound transactions
			TxOutDelayRate: 25_00000000, // outbound rune per block rate for scheduled transactions (excluding native assets)
			TxOutDelayMax: 17280, // max number of blocks a transaction can be delayed
			MaxTxOutOffset: 720, // max blocks to offset a txout into a future block
			TNSRegisterFee: 10_00000000, // registration fee for new THORName
			TNSFeeOnSale: 1000, // fee for TNS sale in basis points
			TNSFeePerBlock: 20, // per block cost for TNS, in rune
			PermittedSolvencyGap: 100, // the setting is in basis points
		},
		boolValues: map[ConstantName]bool{
			StrictBondLiquidityRatio: true,
		},
		stringValues: map[ConstantName]string{
			DefaultPoolStatus: "Staged",
		},
	}
}
package packed
// BulkOperationPacked23 is the 23-bits-per-value specialization of
// BulkOperationPacked: efficient sequential read/write of packed integers.
type BulkOperationPacked23 struct {
	*BulkOperationPacked
}
// newBulkOperationPacked23 returns the bulk operation for 23 bits per value.
func newBulkOperationPacked23() BulkOperation {
	return &BulkOperationPacked23{newBulkOperationPacked(23)}
}
// decodeLongToInt unpacks 23-bit values from 64-bit blocks into int32
// values. Each iteration consumes 23 blocks and produces 64 values,
// bit-for-bit identical to the fully unrolled form this replaces.
func (op *BulkOperationPacked23) decodeLongToInt(blocks []int64, values []int32, iterations int) {
	const bitsPerValue = 23
	const mask = int64(1)<<bitsPerValue - 1
	blocksOffset, valuesOffset := 0, 0
	bitsLeft := 64 // unconsumed bits remaining in blocks[blocksOffset]
	for i := 0; i < 64*iterations; i++ {
		bitsLeft -= bitsPerValue
		if bitsLeft < 0 {
			// Value straddles two blocks: the low (bitsPerValue+bitsLeft)
			// bits of the current block, then the top -bitsLeft bits of the next.
			v := (blocks[blocksOffset] & (int64(1)<<uint(bitsPerValue+bitsLeft) - 1)) << uint(-bitsLeft)
			blocksOffset++
			v |= int64(uint64(blocks[blocksOffset]) >> uint(64+bitsLeft))
			values[valuesOffset] = int32(v)
			bitsLeft += 64
		} else {
			values[valuesOffset] = int32(int64(uint64(blocks[blocksOffset])>>uint(bitsLeft)) & mask)
		}
		valuesOffset++
	}
}
// DecodeByteToInt unpacks 23-bit values from byte blocks into int32
// values. Each iteration consumes 23 bytes and produces 8 values,
// bit-for-bit identical to the fully unrolled form this replaces.
func (op *BulkOperationPacked23) DecodeByteToInt(blocks []byte, values []int32, iterations int) {
	const bitsPerValue = 23
	blocksOffset, valuesOffset := 0, 0
	nextValue := int64(0)        // partially accumulated value (high bits filled first)
	bitsLeft := bitsPerValue     // bits still needed to complete nextValue
	for i := 0; i < 23*iterations; i++ {
		b := int64(blocks[blocksOffset])
		blocksOffset++
		if bitsLeft > 8 {
			// Whole byte fits into the value being accumulated.
			bitsLeft -= 8
			nextValue |= b << uint(bitsLeft)
		} else {
			// Byte completes the current value; its low bits seed the next one.
			bits := uint(8 - bitsLeft)
			values[valuesOffset] = int32(nextValue | (b >> bits))
			valuesOffset++
			bitsLeft = bitsPerValue - int(bits)
			nextValue = (b & (int64(1)<<bits - 1)) << uint(bitsLeft)
		}
	}
}
// DecodeLongToLong unpacks 23-bit values from 64-bit blocks into int64
// values. Each iteration consumes 23 blocks and produces 64 values,
// bit-for-bit identical to the fully unrolled form this replaces.
func (op *BulkOperationPacked23) DecodeLongToLong(blocks []int64, values []int64, iterations int) {
	const bitsPerValue = 23
	const mask = int64(1)<<bitsPerValue - 1
	blocksOffset, valuesOffset := 0, 0
	bitsLeft := 64 // unconsumed bits remaining in blocks[blocksOffset]
	for i := 0; i < 64*iterations; i++ {
		bitsLeft -= bitsPerValue
		if bitsLeft < 0 {
			// Value straddles two blocks: the low (bitsPerValue+bitsLeft)
			// bits of the current block, then the top -bitsLeft bits of the next.
			v := (blocks[blocksOffset] & (int64(1)<<uint(bitsPerValue+bitsLeft) - 1)) << uint(-bitsLeft)
			blocksOffset++
			v |= int64(uint64(blocks[blocksOffset]) >> uint(64+bitsLeft))
			values[valuesOffset] = v
			bitsLeft += 64
		} else {
			values[valuesOffset] = int64(uint64(blocks[blocksOffset])>>uint(bitsLeft)) & mask
		}
		valuesOffset++
	}
}
func (op *BulkOperationPacked23) decodeByteToLong(blocks []byte, values []int64, iterations int) {
blocksOffset, valuesOffset := 0, 0
for i := 0; i < iterations; i++ {
byte0 := blocks[blocksOffset]
blocksOffset++
byte1 := blocks[blocksOffset]
blocksOffset++
byte2 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte0) << 15) | (int64(byte1) << 7) | int64(uint8(byte2)>>1))
valuesOffset++
byte3 := blocks[blocksOffset]
blocksOffset++
byte4 := blocks[blocksOffset]
blocksOffset++
byte5 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte2&1) << 22) | (int64(byte3) << 14) | (int64(byte4) << 6) | int64(uint8(byte5)>>2))
valuesOffset++
byte6 := blocks[blocksOffset]
blocksOffset++
byte7 := blocks[blocksOffset]
blocksOffset++
byte8 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte5&3) << 21) | (int64(byte6) << 13) | (int64(byte7) << 5) | int64(uint8(byte8)>>3))
valuesOffset++
byte9 := blocks[blocksOffset]
blocksOffset++
byte10 := blocks[blocksOffset]
blocksOffset++
byte11 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte8&7) << 20) | (int64(byte9) << 12) | (int64(byte10) << 4) | int64(uint8(byte11)>>4))
valuesOffset++
byte12 := blocks[blocksOffset]
blocksOffset++
byte13 := blocks[blocksOffset]
blocksOffset++
byte14 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte11&15) << 19) | (int64(byte12) << 11) | (int64(byte13) << 3) | int64(uint8(byte14)>>5))
valuesOffset++
byte15 := blocks[blocksOffset]
blocksOffset++
byte16 := blocks[blocksOffset]
blocksOffset++
byte17 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte14&31) << 18) | (int64(byte15) << 10) | (int64(byte16) << 2) | int64(uint8(byte17)>>6))
valuesOffset++
byte18 := blocks[blocksOffset]
blocksOffset++
byte19 := blocks[blocksOffset]
blocksOffset++
byte20 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte17&63) << 17) | (int64(byte18) << 9) | (int64(byte19) << 1) | int64(uint8(byte20)>>7))
valuesOffset++
byte21 := blocks[blocksOffset]
blocksOffset++
byte22 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte20&127) << 16) | (int64(byte21) << 8) | int64(byte22))
valuesOffset++
}
} | core/util/packed/bulkOperation23.go | 0.592313 | 0.747155 | bulkOperation23.go | starcoder |
package vector
import (
"gonet/base/containers"
"log"
)
// assert logs a fatal-style message when the condition x is false.
// Note: it only logs via the standard logger; it does not stop execution.
func assert(x bool, y string) {
	if !x {
		log.Printf("\nFatal :{%s}", y)
	}
}
const (
	// VectorBlockSize is the allocation granularity: the backing array
	// grows in multiples of this many elements (see resize).
	VectorBlockSize = 16
)
type (
	// Vector is a growable array-backed sequence of arbitrary values.
	// The backing array grows in VectorBlockSize chunks.
	Vector struct {
		elementCount int           // number of live elements
		arraySize    int           // capacity of the backing array
		array        []interface{} // backing storage; only [0:elementCount] is live
	}
	// IVector is the interface implemented by Vector; it embeds the
	// generic container contract and adds sequence operations plus
	// sort.Interface-style Swap/Less.
	IVector interface {
		containers.Container
		insert(int)
		increment()
		decrement()
		Erase(int)
		PushFront(interface{})
		PushBack(interface{})
		PopFront()
		PopBack()
		Front() interface{}
		Back() interface{}
		Len() int
		Get(int) interface{}
		Swap(i, j int)
		Less(i, j int) bool
	}
)
// insert opens a slot at index by growing the vector by one and shifting
// elements [index, elementCount) up. The slot at index still holds its
// old value until the caller overwrites it.
func (v *Vector) insert(index int) {
	assert(index <= v.elementCount, "Vector<T>::insert - out of bounds index.")
	if v.elementCount == v.arraySize {
		v.resize(v.elementCount + 1)
	} else {
		v.elementCount++
	}
	// Shift elements up to make room at index.
	for i := v.elementCount - 1; i > index; i-- {
		v.array[i] = v.array[i-1]
	}
}
// increment grows the logical length by one, reallocating when the
// backing array is full.
func (v *Vector) increment() {
	if v.elementCount == v.arraySize {
		v.resize(v.elementCount + 1)
	} else {
		v.elementCount++
	}
}
// decrement shrinks the logical length by one; the vacated slot still
// references its old value (it is not cleared).
func (v *Vector) decrement() {
	assert(v.elementCount != 0, "Vector<T>::decrement - cannot decrement zero-length vector.")
	v.elementCount--
}
// resize grows the vector to hold newCount elements, rounding the backing
// array capacity up to a multiple of VectorBlockSize. Existing elements
// are preserved. A newCount <= 0 is a no-op. Always returns true.
func (v *Vector) resize(newCount int) bool {
	if newCount > 0 {
		blocks := newCount / VectorBlockSize
		if newCount%VectorBlockSize != 0 {
			blocks++
		}
		v.elementCount = newCount
		v.arraySize = blocks * VectorBlockSize
		// NOTE(review): the +1 slack slot looks unnecessary — confirm no
		// caller relies on it before removing.
		newArray := make([]interface{}, v.arraySize+1)
		copy(newArray, v.array)
		v.array = newArray
	}
	return true
}
// Erase removes the element at index, shifting later elements down by one.
func (v *Vector) Erase(index int) {
	assert(index < v.elementCount, "Vector<T>::erase - out of bounds index.")
	if index < v.elementCount-1 {
		copy(v.array[index:v.elementCount], v.array[index+1:v.elementCount])
	}
	v.elementCount--
}
// PushFront prepends value, shifting all existing elements up by one.
func (v *Vector) PushFront(value interface{}) {
	v.insert(0)
	v.array[0] = value
}
// PushBack appends value at the end.
func (v *Vector) PushBack(value interface{}) {
	v.increment()
	v.array[v.elementCount-1] = value
}
// PopFront removes the first element.
func (v *Vector) PopFront() {
	assert(v.elementCount != 0, "Vector<T>::pop_front - cannot pop the front of a zero-length vector.")
	v.Erase(0)
}
// PopBack removes the last element. The slot is not cleared, so the old
// value remains referenced by the backing array.
func (v *Vector) PopBack() {
	assert(v.elementCount != 0, "Vector<T>::pop_back - cannot pop the back of a zero-length vector.")
	v.decrement()
}
// Check that the index is within bounds of the list.
// NOTE(review): not called by any method in this file — possibly dead code.
func (v *Vector) withinRange(index int) bool {
	return index >= 0 && index < v.elementCount
}
// Front returns the first element without removing it.
func (v *Vector) Front() interface{} {
	assert(v.elementCount != 0, "Vector<T>::first - Error, no first element of a zero sized array! (const)")
	return v.array[0]
}
// Back returns the last element without removing it.
func (v *Vector) Back() interface{} {
	assert(v.elementCount != 0, "Vector<T>::last - Error, no last element of a zero sized array! (const)")
	return v.array[v.elementCount-1]
}
// Empty reports whether the vector holds no elements.
func (v *Vector) Empty() bool {
	return v.elementCount == 0
}
// Size returns the capacity of the backing array, not the element count.
// NOTE(review): callers expecting the element count should use Len;
// confirm this capacity semantic is intended.
func (v *Vector) Size() int {
	return v.arraySize
}
// Clear resets the logical length to zero. The backing array keeps its
// old values, so previously stored elements are not released.
func (v *Vector) Clear() {
	v.elementCount = 0
}
// Len returns the number of elements currently stored.
func (v *Vector) Len() int {
	return v.elementCount
}
// Get returns the element at index.
func (v *Vector) Get(index int) interface{} {
	assert(index < v.elementCount, "Vector<T>::operator[] - out of bounds array access!")
	return v.array[index]
}
// Values returns the live portion of the backing array. The slice
// aliases internal storage; mutating it mutates the vector.
func (v *Vector) Values() []interface{} {
	return v.array[0:v.elementCount]
}
// Swap exchanges the elements at i and j (sort.Interface support).
func (v *Vector) Swap(i, j int) {
	v.array[i], v.array[j] = v.array[j], v.array[i]
}
// Less always reports true.
// NOTE(review): this stub makes any sort ordering degenerate; confirm
// callers override or ignore it.
func (v *Vector) Less(i, j int) bool {
	return true
}
// NewVector returns an empty vector with no preallocated storage.
func NewVector() *Vector {
	return &Vector{}
}
package vec2
import (
"fmt"
"image"
"github.com/andreas-jonsson/fix16"
)
// Rectangle converts an origin-anchored rectangle to the vector of its
// max corner. Panics if r.Min is not the zero point.
func Rectangle(r image.Rectangle) T {
	if r.Min != image.ZP {
		panic("rectangle min is not zero")
	}
	return Point(r.Max)
}
// Point converts an image.Point to a fixed-point vector.
func Point(pt image.Point) T {
	return Int(pt.X, pt.Y)
}
// Int builds a vector from integer x and y components.
func Int(x, y int) T {
	return T{fix16.Int(x), fix16.Int(y)}
}
// T is a 2D vector of fix16 fixed-point components: [x, y].
type T [2]fix16.T
// String renders the vector as "[x, y]".
func (v T) String() string {
	return fmt.Sprintf("[%s, %s]", v.X().String(), v.Y().String())
}
// X returns the first component.
func (v T) X() fix16.T {
	return v[0]
}
// Y returns the second component.
func (v T) Y() fix16.T {
	return v[1]
}
// XY returns both components in x, y order.
func (v T) XY() (fix16.T, fix16.T) {
	return v.X(), v.Y()
}
// YX returns both components in y, x order.
func (v T) YX() (fix16.T, fix16.T) {
	return v.Y(), v.X()
}
// Add returns the component-wise sum v + b.
func (v T) Add(b T) T {
	return T{v.X().Add(b.X()), v.Y().Add(b.Y())}
}
// Sub returns the component-wise difference v - b.
func (v T) Sub(b T) T {
	return T{v.X().Sub(b.X()), v.Y().Sub(b.Y())}
}
// Mul returns the component-wise product v * b.
func (v T) Mul(b T) T {
	return T{v.X().Mul(b.X()), v.Y().Mul(b.Y())}
}
// Div returns the component-wise quotient v / b.
func (v T) Div(b T) T {
	return T{v.X().Div(b.X()), v.Y().Div(b.Y())}
}
// floorSqrt returns the largest integer r with r*r <= x, computed by
// binary search. Returns 0 for negative x.
//
// The comparison is done as mid <= x/mid rather than mid*mid <= x so the
// function is exact over the whole int64 range (mid*mid can overflow).
func floorSqrt(x int64) int64 {
	if x < 2 {
		if x < 0 {
			return 0
		}
		return x // 0 and 1 are their own square roots
	}
	var (
		start int64 = 1
		end         = x
		res   int64
	)
	for start <= end {
		mid := start + (end-start)/2 // avoids (start+end) overflow
		if mid <= x/mid {            // mid*mid <= x, overflow-safe
			res = mid
			start = mid + 1
		} else {
			end = mid - 1
		}
	}
	return res
}
// Length returns an approximation of the vector magnitude, computed by
// adding the length of the integer parts to the length of the
// fractional parts.
//
// NOTE(review): by the triangle inequality this is not the true Euclidean
// length (it computes |(xi,yi)| + |(xf,yf)|, not |(xi+xf, yi+yf)|).
// Confirm whether callers depend on this approximation before changing it.
func (v T) Length() fix16.T {
	x, xf := v.X().Split()
	y, yf := v.Y().Split()
	xi, yi := x.Int64(), y.Int64()
	il := floorSqrt(yi*yi + xi*xi)
	fl := xf.Mul(xf).Add(yf.Mul(yf)).Sqrt()
	return fix16.Int64(il).Add(fl)
}
// Scale multiplies both components by the scalar s.
func (v T) Scale(s fix16.T) T {
	return T{v.X().Mul(s), v.Y().Mul(s)}
}
// Invert negates both components.
func (v T) Invert() T {
	return T{v.X().Inv(), v.Y().Inv()}
}
// Normalize scales v toward unit length. Vectors whose Length is zero or
// the smallest representable positive value are returned unchanged,
// which also avoids dividing by zero.
func (v T) Normalize() T {
	l := v.Length()
	if l == fix16.Zero || l == fix16.Binary(1) {
		return v
	}
	s := fix16.One.Div(l)
	return v.Scale(s)
}
// Dot returns the dot product of v and b.
func (v T) Dot(b T) fix16.T {
	return v.X().Mul(b.X()).Add(v.Y().Mul(b.Y()))
}
// Point converts the vector to an image.Point, truncating fractional parts.
func (v T) Point() image.Point {
	return image.Pt(v.X().Int(), v.Y().Int())
}
// Rectangle returns the origin-anchored rectangle whose max corner is v,
// truncating fractional parts.
func (v T) Rectangle() image.Rectangle {
	return image.Rect(0, 0, v.X().Int(), v.Y().Int())
}
package pemutil
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
)
var (
	// ErrNoBlocks is returned when the input contains no PEM blocks at all.
	ErrNoBlocks = errors.New("no PEM blocks")
)
// Block is a parsed PEM block: the raw type string and headers, plus the
// decoded object (certificate, CSR or private key) when the block type is
// recognized by parseBlocks; Object is nil for unrecognized types.
type Block struct {
	Type string
	Headers map[string]string
	Object interface{}
}
// LoadBlocks reads the file at path and parses every PEM block in it,
// with no constraint on block count or type.
func LoadBlocks(path string) ([]Block, error) {
	// Pass no expected types: the previous code passed "" as a variadic
	// expectedTypes entry, which forced every block's type to equal the
	// empty string and rejected all valid input.
	return loadBlocks(path, 0)
}
// ParseBlocks parses every PEM block in pemBytes, with no constraint on
// block count or type.
func ParseBlocks(pemBytes []byte) ([]Block, error) {
	// See LoadBlocks: no expected types means "accept any block type".
	return parseBlocks(pemBytes, 0)
}
// loadBlock reads the file at path and parses exactly one PEM block,
// optionally constrained to one of expectedTypes.
func loadBlock(path string, expectedTypes ...string) (*Block, error) {
	blocks, err := loadBlocks(path, 1, expectedTypes...)
	if err != nil {
		return nil, err
	}
	return &blocks[0], nil
}
// loadBlocks reads the file at path and delegates to parseBlocks with
// the given count/type constraints.
func loadBlocks(path string, expectedCount int, expectedTypes ...string) (blocks []Block, err error) {
	pemBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return parseBlocks(pemBytes, expectedCount, expectedTypes...)
}
// parseBlock parses exactly one PEM block from pemBytes, optionally
// constrained to one of expectedTypes.
func parseBlock(pemBytes []byte, expectedTypes ...string) (*Block, error) {
	blocks, err := parseBlocks(pemBytes, 1, expectedTypes...)
	if err != nil {
		return nil, err
	}
	return &blocks[0], nil
}
// parseBlocks decodes every PEM block in pemBytes. If expectedCount > 0,
// exactly that many blocks must be present. If expectedTypes is
// non-empty, every block's type must be one of them. Recognized block
// types are additionally parsed into Block.Object (certificate, CSR, or
// PKCS1/EC/PKCS8 private key); unrecognized types keep Object nil.
// Returns ErrNoBlocks when the input contains no PEM blocks at all.
func parseBlocks(pemBytes []byte, expectedCount int, expectedTypes ...string) (blocks []Block, err error) {
	for blockno := 1; ; blockno++ {
		var pemBlock *pem.Block
		// pem.Decode returns the remaining input, so this walks every block.
		pemBlock, pemBytes = pem.Decode(pemBytes)
		if pemBlock == nil {
			// No more blocks: validate the totals before returning.
			if len(blocks) == 0 {
				return nil, ErrNoBlocks
			}
			if expectedCount > 0 && len(blocks) != expectedCount {
				return nil, fmt.Errorf("expected %d PEM blocks; got %d", expectedCount, len(blocks))
			}
			return blocks, nil
		}
		block := Block{
			Type: pemBlock.Type,
			Headers: pemBlock.Headers,
		}
		if len(expectedTypes) > 0 {
			found := false
			for _, expectedType := range expectedTypes {
				if expectedType == pemBlock.Type {
					found = true
					break
				}
			}
			if !found {
				// Report a single type without brackets, a list otherwise.
				var expectedTypeList interface{} = expectedTypes
				if len(expectedTypes) == 1 {
					expectedTypeList = expectedTypes[0]
				}
				return nil, fmt.Errorf("expected block type %q; got %q", expectedTypeList, pemBlock.Type)
			}
		}
		switch pemBlock.Type {
		case certificateType:
			block.Object, err = x509.ParseCertificate(pemBlock.Bytes)
		case certificateRequestType:
			block.Object, err = x509.ParseCertificateRequest(pemBlock.Bytes)
		case rsaPrivateKeyType:
			block.Object, err = x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
		case ecPrivateKeyType:
			block.Object, err = x509.ParseECPrivateKey(pemBlock.Bytes)
		case privateKeyType:
			block.Object, err = x509.ParsePKCS8PrivateKey(pemBlock.Bytes)
		}
		if err != nil {
			return nil, fmt.Errorf("unable to parse %q PEM block %d: %v", pemBlock.Type, blockno, err)
		}
		blocks = append(blocks, block)
	}
}
package basic
// KeysNumberToNumberTest is the test template for Keys over a map with
// number keys and number values. The embedded error message previously
// contained the typo "acutal_list"; it is fixed to "actual_list" in all
// templates below.
func KeysNumberToNumberTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{1: 1}
	expectedList := []<INPUT_TYPE1>{1}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}

// KeysNumberToStrTest is the test template for number keys / string values.
func KeysNumberToStrTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{1: "1"}
	expectedList := []<INPUT_TYPE1>{1}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}

// KeysStrToNumberTest is the test template for string keys / number values.
func KeysStrToNumberTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{"1": 1}
	expectedList := []<INPUT_TYPE1>{"1"}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}

// KeysStrToBoolTest is the test template for string keys / bool values.
func KeysStrToBoolTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{"1": true}
	expectedList := []<INPUT_TYPE1>{"1"}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}

// KeysBoolToStrTest is the test template for bool keys / string values.
func KeysBoolToStrTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{true: "1"}
	expectedList := []<INPUT_TYPE1>{true}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}

// KeysNumberToBoolTest is the test template for number keys / bool values.
func KeysNumberToBoolTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{1: true}
	expectedList := []<INPUT_TYPE1>{1}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}

// KeysBoolToNumberTest is the test template for bool keys / number values.
func KeysBoolToNumberTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{true: 1}
	expectedList := []<INPUT_TYPE1>{true}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}

// KeysBoolToBoolTest is the test template for bool keys / bool values.
func KeysBoolToBoolTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{true: true}
	expectedList := []<INPUT_TYPE1>{true}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}

// KeysStrToStrTest is the test template for string keys / string values.
func KeysStrToStrTest() string {
	return `
func TestKeys<FINPUT_TYPE1><FINPUT_TYPE2>(t *testing.T) {
	m := map[<INPUT_TYPE1>]<INPUT_TYPE2>{"ram": "ram"}
	expectedList := []<INPUT_TYPE1>{"ram"}
	actualList := Keys<FINPUT_TYPE1><FINPUT_TYPE2>(m)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("Test Keys<FINPUT_TYPE1><FINPUT_TYPE2> failed. actual_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}
package main
import (
"fmt"
"os"
"strings"
"text/tabwriter"
"github.com/bradfitz/slice"
)
// Character is the type representing a role playing character.
type Character struct {
	Name            string                    // display name from the sheet header
	Backgrounds     map[string]Background     // backgrounds keyed by name
	Aptitudes       map[string]Aptitude       // aptitudes keyed by name
	Characteristics map[string]Characteristic // characteristics keyed by name
	Skills          map[string]Skill          // skills keyed by name
	Talents         map[string]Talent         // talents keyed by name
	Gauges          map[string]Gauge          // gauges keyed by name
	Rules           map[string]Rule           // special rules keyed by name
	Spells          map[string]Spell          // spells keyed by name
	Experience      int                       // total experience earned
	Spent           int                       // experience already spent on upgrades
	History         []Upgrade                 // upgrades in the order they were applied
}
// NewCharacter creates a new character from the given sheet and universe.
// It seeds the characteristics from the sheet header, applies each
// background, then replays every session (experience rewards and upgrades)
// in sheet order.
func NewCharacter(universe Universe, sheet Sheet) (*Character, error) {
	// Create a character with every attribute map initialized.
	c := Character{
		Name:            sheet.Header.Name,
		Backgrounds:     make(map[string]Background),
		Aptitudes:       make(map[string]Aptitude),
		Characteristics: make(map[string]Characteristic),
		Skills:          make(map[string]Skill),
		Talents:         make(map[string]Talent),
		Gauges:          make(map[string]Gauge),
		Rules:           make(map[string]Rule),
		Spells:          make(map[string]Spell),
		Experience:      0,
		Spent:           0,
	}
	// The characteristics described in the header of the sheet are parsed as upgrades.
	for _, upgrade := range sheet.Characteristics {
		// Get the characteristic from the universe.
		characteristic, found := universe.FindCharacteristic(upgrade)
		if !found {
			return nil, NewError(UndefinedCharacteristic, upgrade.Line)
		}
		// Check it is not already applied — each characteristic may appear only once.
		_, found = c.Characteristics[characteristic.Name]
		if found {
			return nil, NewError(DuplicateUpgrade, upgrade.Line)
		}
		// Apply the upgrade.
		err := characteristic.Apply(&c, upgrade)
		if err != nil {
			return nil, err
		}
	}
	// Next are the backgrounds, taken from the header metas.
	for typ, metas := range sheet.Header.Metas {
		for _, meta := range metas {
			// Find the background corresponding to the meta.
			background, found := universe.FindBackground(typ, meta.Label)
			if !found {
				return nil, NewError(UndefinedBackground, meta.Line, typ, meta.Label)
			}
			err := background.Apply(&c, universe)
			if err != nil {
				return nil, err
			}
		}
	}
	// Next are the sessions, replayed in order.
	for _, session := range sheet.Sessions {
		// Apply the experience gain if needed.
		if session.Reward != nil {
			c.Experience += *session.Reward
		}
		// Apply each upgrade in order.
		for _, upgrade := range session.Upgrades {
			err := c.ApplyUpgrade(upgrade, universe)
			if err != nil {
				return nil, err
			}
		}
	}
	return &c, nil
}
// Intersect returns how many of the given aptitudes the character
// already possesses.
func (c *Character) Intersect(aptitudes []Aptitude) (n int) {
	for _, apt := range aptitudes {
		if _, ok := c.Aptitudes[string(apt)]; ok {
			n++
		}
	}
	return n
}
// ApplyUpgrade changes the character's attributes according to the given
// upgrade. The cost is taken from the upgrade itself when present,
// otherwise it is computed on the fly from the matching coster.
func (c *Character) ApplyUpgrade(upgrade Upgrade, universe Universe) error {
	// Find the attribute corresponding to the upgrade, and fall back to a
	// fresh rule if there isn't any.
	coster, found := universe.FindCoster(upgrade)
	if !found {
		coster = Rule{
			Name: upgrade.Name,
		}
	}
	// If no cost is defined, compute it on the fly.
	if upgrade.Cost == nil {
		cost, err := coster.Cost(universe, *c)
		if err != nil {
			return err
		}
		upgrade.Cost = &cost
	}
	// Apply the upgrade.
	err := coster.Apply(c, upgrade)
	// NOTE(review): the cost is added to Spent even when Apply fails, while
	// the upgrade is only recorded in History on success — confirm this
	// asymmetry is intentional.
	c.Spent += *upgrade.Cost
	// If there is no error, add the upgrade to the history.
	if err == nil {
		c.History = append(c.History, upgrade)
	}
	return err
}
// Print writes the full character sheet to standard output: identity,
// backgrounds, aptitudes, experience, characteristics, gauges, skills,
// talents, spells and special rules, each section sorted for a stable
// display.
func (c *Character) Print() {
	// Print the name.
	fmt.Printf("%s\t%s\n", theme.Title("Name"), c.Name)
	// Print the backgrounds, sorted by type then name.
	backgrounds := []Background{}
	for _, background := range c.Backgrounds {
		backgrounds = append(backgrounds, background)
	}
	slice.Sort(backgrounds, func(i, j int) bool {
		if backgrounds[i].Type != backgrounds[j].Type {
			return backgrounds[i].Type < backgrounds[j].Type
		}
		return backgrounds[i].Name < backgrounds[j].Name
	})
	for _, background := range backgrounds {
		fmt.Printf("%s\t%s\n", theme.Title(strings.Title(background.Type)), strings.Title(background.Name))
	}
	// Print the aptitudes, sorted alphabetically.
	aptitudes := []Aptitude{}
	for _, aptitude := range c.Aptitudes {
		aptitudes = append(aptitudes, aptitude)
	}
	slice.Sort(aptitudes, func(i, j int) bool {
		return aptitudes[i] < aptitudes[j]
	})
	fmt.Printf("\n%s (%s)\n", theme.Title("Aptitudes"), theme.Value(fmt.Sprintf("%d", len(aptitudes))))
	for _, aptitude := range aptitudes {
		fmt.Printf("%s\n", strings.Title(string(aptitude)))
	}
	// Print the experience.
	fmt.Printf("\n%s\t%d/%d\n", theme.Title("Experience"), c.Spent, c.Experience)
	// Print the characteristics together with their total sum.
	var characteristicSum int
	characteristics := []Characteristic{}
	for _, characteristic := range c.Characteristics {
		characteristicSum += characteristic.Value
		characteristics = append(characteristics, characteristic)
	}
	slice.Sort(characteristics, func(i, j int) bool {
		return characteristics[i].Name < characteristics[j].Name
	})
	fmt.Printf("\n%s (%s)\n", theme.Title("Characteristics"), theme.Value(fmt.Sprintf("%d", characteristicSum)))
	for _, characteristic := range characteristics {
		fmt.Printf("%s\t%s %s\n", characteristic.Name, theme.Value(characteristic.Value), theme.Value(characteristic.Level()))
	}
	// Print the gauges.
	if len(c.Gauges) != 0 {
		fmt.Printf("\n%s\n", theme.Title("Gauges"))
		gauges := []Gauge{}
		for _, gauge := range c.Gauges {
			gauges = append(gauges, gauge)
		}
		slice.Sort(gauges, func(i, j int) bool {
			return gauges[i].Name < gauges[j].Name
		})
		for _, gauge := range gauges {
			fmt.Printf("%s\t%s\n", gauge.Name, theme.Value(gauge.Value))
		}
	}
	// Print the skills using a tabwriter for column alignment.
	if len(c.Skills) != 0 {
		fmt.Printf("\n%s\n", theme.Title("Skills"))
		skills := []Skill{}
		for _, skill := range c.Skills {
			skills = append(skills, skill)
		}
		slice.Sort(skills, func(i, j int) bool {
			return skills[i].FullName() < skills[j].FullName()
		})
		w := tabwriter.NewWriter(os.Stdout, 10, 1, 2, ' ', 0)
		for _, skill := range skills {
			fmt.Fprintf(w, "%s\t+%s\n", strings.Title(skill.FullName()), theme.Value((skill.Tier-1)*10))
		}
		w.Flush()
	}
	// Print the talents; a talent taken more than once shows its count.
	if len(c.Talents) != 0 {
		fmt.Printf("\n%s\n", theme.Title("Talents"))
		talents := []Talent{}
		for _, talent := range c.Talents {
			talents = append(talents, talent)
		}
		slice.Sort(talents, func(i, j int) bool {
			return talents[i].FullName() < talents[j].FullName()
		})
		w := tabwriter.NewWriter(os.Stdout, 10, 1, 2, ' ', 0)
		for _, talent := range talents {
			if talent.Value != 1 {
				fmt.Fprintf(w, "%s (%d)\t%s\n", strings.Title(talent.FullName()), talent.Value, talent.Description)
			} else {
				fmt.Fprintf(w, "%s\t%s\n", strings.Title(talent.FullName()), talent.Description)
			}
		}
		w.Flush()
	}
	// Print the spells.
	if len(c.Spells) != 0 {
		fmt.Printf("\n%s\n", theme.Title("Spells"))
		spells := []Spell{}
		for _, spell := range c.Spells {
			spells = append(spells, spell)
		}
		slice.Sort(spells, func(i, j int) bool {
			return spells[i].Name < spells[j].Name
		})
		w := tabwriter.NewWriter(os.Stdout, 10, 1, 2, ' ', 0)
		for _, spell := range spells {
			fmt.Fprintf(w, "%s\t%s\n", strings.Title(spell.Name), spell.Description)
		}
		w.Flush()
	}
	// Print the special rules.
	if len(c.Rules) != 0 {
		fmt.Printf("\n%s\n", theme.Title("Rules"))
		rules := []Rule{}
		for _, rule := range c.Rules {
			rules = append(rules, rule)
		}
		slice.Sort(rules, func(i, j int) bool {
			return rules[i].Name < rules[j].Name
		})
		w := tabwriter.NewWriter(os.Stdout, 10, 1, 2, ' ', 0)
		for _, rule := range rules {
			// Fix: write through the tabwriter. The previous code printed
			// directly to stdout, so w stayed empty, w.Flush() was a no-op
			// and the rules column was never aligned.
			fmt.Fprintf(w, "%s\t%s\n", strings.Title(rule.Name), rule.Description)
		}
		w.Flush()
	}
}
// PrintHistory displays the history of expenses of the character: the
// name, the spent/earned experience, and every recorded upgrade with its
// cost (0 when no cost was stored).
func (c *Character) PrintHistory() {
	fmt.Printf("%s\t%s\n", theme.Title("Name"), c.Name)
	fmt.Printf("\n%s\t%d/%d\n", theme.Title("Experience"), c.Spent, c.Experience)
	fmt.Printf("\n%s\n", theme.Title("History"))
	w := tabwriter.NewWriter(os.Stdout, 10, 1, 2, ' ', 0)
	for _, upgrade := range c.History {
		cost := 0
		if upgrade.Cost != nil {
			cost = *upgrade.Cost
		}
		fmt.Fprintf(w, "%d\t%s\n", cost, strings.Title(upgrade.Name))
	}
	w.Flush()
}
// Suggest prints the next purchasable upgrades of the character, grouped
// by cost. max caps the suggested cost (0 means "remaining experience"),
// all also lists upgrades the character cannot currently afford, and
// allowSpells includes spells among the candidates.
//
// NOTE(review): coster.Apply is invoked on the live character to test
// applicability, which mutates c while only suggesting — confirm callers
// discard the character afterwards.
func (c *Character) Suggest(universe Universe, max int, all bool, allowSpells bool) {
	// Aggregate each coster into a unique slice of costers.
	costers := []Coster{}
	for _, upgrade := range universe.Characteristics {
		costers = append(costers, upgrade)
	}
	for _, upgrade := range universe.Skills {
		costers = append(costers, upgrade)
	}
	for _, upgrade := range universe.Talents {
		costers = append(costers, upgrade)
	}
	for _, upgrade := range universe.Gauges {
		// Suggest a single point of each gauge.
		upgrade.Value = 1
		costers = append(costers, upgrade)
	}
	if allowSpells {
		for _, upgrade := range universe.Spells {
			costers = append(costers, upgrade)
		}
	}
	// Default max value equals the remaining XP.
	if max == 0 {
		max = c.Experience - c.Spent
	}
	// The slice of appliable upgrades.
	var appliable []Upgrade
	// Attempt to apply each coster once.
	for _, coster := range costers {
		var upgrade Upgrade
		// Don't propose the upgrade if its cost cannot be defined.
		cost, err := coster.Cost(universe, *c)
		if err != nil {
			continue
		}
		// Don't propose the upgrade if it is free.
		if cost == 0 {
			continue
		}
		// Don't propose the upgrade if it is too expensive.
		if !all && max < cost {
			continue
		}
		upgrade.Cost = &cost
		upgrade.Mark = MarkApply
		upgrade.Name = coster.DefaultName()
		err = coster.Apply(c, upgrade)
		if err != nil {
			continue
		}
		appliable = append(appliable, upgrade)
	}
	// Sort by cost then name.
	slice.Sort(appliable, func(i, j int) bool {
		ci, cj := *appliable[i].Cost, *appliable[j].Cost
		if ci == cj {
			return appliable[i].Name < appliable[j].Name
		}
		return ci < cj
	})
	// Print the name.
	fmt.Printf("%s\t%s\n", theme.Title("Name"), c.Name)
	// Print the experience.
	fmt.Printf("\n%s\t%d/%d\n", theme.Title("Experience"), c.Spent, c.Experience)
	// Print the suggestions, with a blank line between cost groups.
	fmt.Printf("\n%s\n", theme.Title("Suggestions"))
	w := tabwriter.NewWriter(os.Stdout, 10, 1, 2, ' ', 0)
	for i, upgrade := range appliable {
		if i > 0 && *appliable[i-1].Cost != *upgrade.Cost {
			fmt.Fprintln(w)
		}
		fmt.Fprintf(w, "%s\t%s\n", theme.Value(*upgrade.Cost), strings.Title(upgrade.Name))
	}
	w.Flush()
}
package sorting
import "math"
/*
A non-empty zero-indexed array A consisting of N integers is given.
The product of triplet (P, Q, R) equates to A[P] * A[Q] * A[R] (0 ≤ P < Q < R < N).
For example, array A such that:
A[0] = -3
A[1] = 1
A[2] = 2
A[3] = -2
A[4] = 5
A[5] = 6
contains the following example triplets:
(0, 1, 2), product is −3 * 1 * 2 = −6
(1, 2, 4), product is 1 * 2 * 5 = 10
(2, 4, 5), product is 2 * 5 * 6 = 60
Your goal is to find the maximal product of any triplet.
Write a function:
func Solution(A []int) int
that, given a non-empty zero-indexed array A, returns the value of the maximal product of any triplet.
For example, given array A such that:
A[0] = -3
A[1] = 1
A[2] = 2
A[3] = -2
A[4] = 5
A[5] = 6
the function should return 60, as the product of triplet (2, 4, 5) is maximal.
Assume that:
N is an integer within the range [3..100,000];
each element of array A is an integer within the range [−1,000..1,000].
Complexity:
expected worst-case time complexity is O(N*log(N));
expected worst-case space complexity is O(1), beyond input storage
(not counting the storage required for input arguments).
*/
// MaxProductOfThree returns the maximal product A[P]*A[Q]*A[R] over all
// triplets 0 <= P < Q < R < len(A), or 0 when fewer than three elements
// are given.
//
// It tracks the three largest elements (a >= b >= c) and the two smallest
// (d <= e) in a single pass. The answer is max(a*b*c, a*d*e): the second
// candidate covers the case where the two smallest elements are negative,
// so their (positive) product times the maximum can exceed a*b*c.
// Runs in O(n) time and O(1) extra space.
func MaxProductOfThree(A []int) int {
	if len(A) < 3 {
		// Not enough elements to form a triplet.
		return 0
	}
	// Sentinels: elements are within [-1000, 1000], so these bounds are safe.
	a, b, c := -1<<31, -1<<31, -1<<31 // three largest, a >= b >= c
	d, e := 1<<31-1, 1<<31-1          // two smallest, d <= e
	for _, v := range A {
		// Update the three maximums.
		switch {
		case v > a:
			a, b, c = v, a, b
		case v > b:
			b, c = v, b
		case v > c:
			c = v
		}
		// Update the two minimums.
		switch {
		case v < d:
			d, e = v, d
		case v < e:
			e = v
		}
	}
	// Products stay below 10^9 in magnitude, so float64 is exact here.
	return int(math.Max(float64(a)*float64(b)*float64(c), float64(a)*float64(d)*float64(e)))
}
package exp
import (
"io"
"strconv"
"strings"
"xelf.org/xelf/ast"
"xelf.org/xelf/cor"
"xelf.org/xelf/knd"
"xelf.org/xelf/lit"
"xelf.org/xelf/typ"
)
// Parse parses str and returns an expression or an error. It is a
// convenience wrapper around Read using an in-memory reader and no name.
func Parse(reg *lit.Reg, str string) (Exp, error) { return Read(reg, strings.NewReader(str), "") }
// Read parses named reader r and returns an expression or an error.
// A nil registry is replaced by a fresh empty one.
func Read(reg *lit.Reg, r io.Reader, name string) (Exp, error) {
	if reg == nil {
		reg = &lit.Reg{}
	}
	a, err := ast.Read(r, name)
	if err != nil {
		return nil, err
	}
	return ParseAst(reg, a)
}
// ParseAst parses a as expression and returns it or an error.
func ParseAst(reg *lit.Reg, a ast.Ast) (Exp, error) {
switch a.Kind {
case knd.Int:
n, err := strconv.ParseInt(a.Raw, 10, 64)
if err != nil {
return nil, ast.ErrInvalid(a, knd.Int, err)
}
return &Lit{Res: typ.Num, Val: lit.Int(n), Src: a.Src}, nil
case knd.Real:
n, err := strconv.ParseFloat(a.Raw, 64)
if err != nil {
return nil, ast.ErrInvalid(a, knd.Real, err)
}
return &Lit{Res: typ.Real, Val: lit.Real(n), Src: a.Src}, nil
case knd.Str:
txt, err := cor.Unquote(a.Raw)
if err != nil {
return nil, ast.ErrInvalid(a, knd.Str, err)
}
return &Lit{Res: typ.Char, Val: lit.Str(txt), Src: a.Src}, nil
case knd.Sym:
switch a.Raw {
case "null":
return &Lit{Res: typ.None, Val: lit.Null{}, Src: a.Src}, nil
case "false", "true":
return &Lit{Res: typ.Bool, Val: lit.Bool(len(a.Raw) == 4), Src: a.Src}, nil
}
return &Sym{Sym: a.Raw, Src: a.Src}, nil
case knd.List:
list := &lit.List{Reg: reg}
if err := list.Parse(a); err != nil {
return nil, err
}
return &Lit{Res: typ.List, Val: list, Src: a.Src}, nil
case knd.Dict:
dict := &lit.Dict{Reg: reg}
if err := dict.Parse(a); err != nil {
return nil, err
}
return &Lit{Res: typ.Keyr, Val: dict, Src: a.Src}, nil
case knd.Tag:
if len(a.Seq) == 0 {
return nil, ast.ErrInvalidTag(a.Tok)
}
t := a.Seq[0]
tag := t.Raw
var err error
if t.Kind == knd.Str {
tag, err = cor.Unquote(a.Raw)
if err != nil {
return nil, ast.ErrInvalid(a, knd.Str, err)
}
}
var e Exp
if len(a.Seq) > 1 {
e, err = ParseAst(reg, a.Seq[1])
if err != nil {
return nil, err
}
}
return &Tag{Tag: tag, Exp: e, Src: a.Src}, nil
case knd.Call:
if len(a.Seq) == 0 {
return &Call{Src: a.Src}, nil
} else if fst := a.Seq[0]; fst.Kind&(knd.Typ|knd.Call) != 0 && len(fst.Seq) == 0 {
return &Call{Src: a.Src}, nil
}
res := &Call{Src: a.Src, Args: make([]Exp, 0, len(a.Seq))}
for _, e := range a.Seq {
el, err := ParseAst(reg, e)
if err != nil {
return nil, err
}
if el.Kind() != knd.Call || len(el.(*Call).Args) != 0 {
res.Args = append(res.Args, el)
}
}
return res, nil
case knd.Typ:
t, err := typ.ParseAst(a)
if err != nil {
return nil, err
}
return &Lit{Res: typ.Typ, Val: t, Src: a.Src}, nil
}
return nil, ast.ErrUnexpected(a)
} | exp/parse.go | 0.556882 | 0.464112 | parse.go | starcoder |
package squareRoot
import (
"github.com/acra5y/go-dilation/internal/eye"
"gonum.org/v1/gonum/mat"
"math"
)
/*
The algorithm to calculate the square root of a positive definite matrix is taken from
"A New Algorithm for Computing the Square Rootof a Matrix"
(https://scholarworks.rit.edu/cgi/viewcontent.cgi?article=10419&context=theses, chapter three):
1. Declare some nonsingular matrix C with dimensions (n, n).
2. Initialize i for number of iterations, S_0 = I and S_1 = C.
3. Initialize Z = C − I.
4. For i iterations or until S_i becomes too ill-conditioned, do S_{i+1} = 2S_i + (Z)(S_{i−1}),
5. After iteration steps stop, find S_{i}^{−1}
.
6. Set n × n matrix Q = S_{i+1}(S_{i}^{−1}) − I.
The step 6 is implemented by solving a linear equation to achieve a better numerical stability:
For a matrix A we denote t(A) as the transposed Matrix
Using the notation from above, we define Q' := Q - I, R := S_{i}, S := S_{i+1}
We have the following equivalency: Q' = S*R^{-1} <=> Q'*R = S <=> t(R)*t(Q') = t(S).
This is a linear equation that we can solve without computing a matrix.
We assume that the matrix c fulfilles all necessary preconditions.
*/
// nextGuess computes one step S_{i+1} = 2*S_i + Z*S_{i-1} of the
// square-root iteration (step 4 of the algorithm above); c is only used
// for its dimensions.
func nextGuess(c, z, prePredecessor, predecessor *mat.Dense) (guess *mat.Dense) {
	n, _ := c.Dims()
	var p *mat.Dense
	guess = mat.NewDense(n, n, nil)
	p = mat.NewDense(n, n, nil)
	guess.Scale(2, predecessor)  // 2*S_i
	p.Product(z, prePredecessor) // Z*S_{i-1}
	guess.Add(guess, p)
	return
}
// isIllConditioned reports whether m has become too ill-conditioned for
// the iteration to continue, using max(|m_ij|)^n / det(m) as a cheap
// surrogate for the condition number. The iteration argument is currently
// unused but kept for call-site stability.
func isIllConditioned(m *mat.Dense, iteration int) bool {
	n, _ := m.Dims()
	// Largest entry by absolute value; avoids allocating a negated copy of
	// the whole matrix as the previous implementation did.
	maxAbs := math.Max(mat.Max(m), -mat.Min(m))
	det := mat.Det(m)
	return math.Pow(maxAbs, float64(n))/det > 1e15
}
// Calculate computes the square root of the (assumed positive definite)
// matrix c via the iteration S_{i+1} = 2*S_i + (C-I)*S_{i-1} described
// above. The final step Q = S_{i+1}*S_i^{-1} - I is performed by solving
// the linear system t(S_i)*t(Q') = t(S_{i+1}) instead of inverting S_i,
// for better numerical stability. The returned error is currently always
// nil.
func Calculate(c *mat.Dense) (sq *mat.Dense, err error) {
	err = nil
	n, _ := c.Dims()
	var m2, m3, eyeN, z *mat.Dense
	eyeN = eye.OfDimension(n)
	sq = mat.NewDense(n, n, nil)
	m2 = mat.NewDense(n, n, nil)
	m3 = mat.NewDense(n, n, nil)
	sq.CloneFrom(eyeN) // S_0 = I
	m2.CloneFrom(c)    // S_1 = C
	z = mat.NewDense(n, n, nil)
	z.Sub(c, eyeN) // Z = C - I
	// Iterate until the guess becomes too ill-conditioned, capped at 100 steps.
	for i := 1; i <= 100; i++ {
		m3 = nextGuess(c, z, sq, m2)
		sq.CloneFrom(m2)
		m2.CloneFrom(m3)
		if (isIllConditioned(m3, i)) {
			break;
		}
	}
	// Solve t(S_i)*X = t(S_{i+1}) and subtract I to obtain the root.
	sq.Solve(sq.T(), m2.T())
	sq.Sub(sq.T(), eyeN)
	return
}
package roots
import (
"math"
"github.com/applied-math-coding/heuristic/common"
"github.com/applied-math-coding/heuristic/meta_opt_pso"
"github.com/applied-math-coding/heuristic/newton"
"github.com/applied-math-coding/heuristic/pso"
"gonum.org/v1/gonum/mat"
)
// Params configures the heuristic root search.
type Params = struct {
	Root_Recognition   float64 // |f(x)| at or below this is recognized as a root by particles
	Location_Precision float64 // min distance to distinguish several roots
	N_particles        int     // based on the dimension and size of interval, one must play with this
	Precision          float64 // aimed precision of the refined root
}

// Segment tracks, for one bisection segment, the dimension index used for
// the last split and the roots already found inside the segment.
type Segment = struct {
	idx   int
	roots []mat.Vector
}
// FindRoots tries to find all roots of f in given multi-dim-interval. The algorithm uses a heuristic search (PSO)
// which imposes no restrictions on f. A recursive bisection procedure ensures the pso-search to provide
// as much as possible independent results.
// In case the system is n*n and a derivative is supplied, it is used for a Newton-method to refine roots.
// If the system is n*n and no derivative is supplied, the derivative will be approximated internally.
func FindRoots(f common.System, D common.Derivative, b_low mat.Vector, b_up mat.Vector,
	params *Params) []mat.Vector {
	// Meta-optimize the PSO parameters for this target, then enforce
	// bounds on iterations and particle count.
	pso_params := meta_opt_pso.Optimize(createTargetFn(f), b_low, b_up)
	pso_params.Max_iter = 100
	pso_params.N_particles = int(math.Max(500.0, float64(params.N_particles)))
	roots := recFindRoots(f, b_low, b_up, params, pso_params, nil)
	// Deduplicate roots closer than Location_Precision to each other.
	res := make([]mat.Vector, 0)
	for _, r := range roots {
		if !isRootContainedInList(params.Location_Precision, r, res) {
			res = append(res, r)
		}
	}
	// Only square systems can be refined with Newton's method.
	m := f(b_low).Len()
	if m == b_low.Len() {
		return RefineRoots(f, D, res, params)
	}
	return res
}
// RefineRoots polishes every root candidate with Newton's method and keeps
// the unrefined candidate whenever the refinement fails.
func RefineRoots(f common.System, D common.Derivative, roots []mat.Vector, params *Params) []mat.Vector {
	newtonParams := &newton.Params{Max_iter: 1000, Precision: params.Precision}
	refined := make([]mat.Vector, 0, len(roots))
	for _, r := range roots {
		better, err := newton.FindRoot(f, D, r, newtonParams)
		if err != nil {
			better = r
		}
		refined = append(refined, better)
	}
	return refined
}
// isRootContainedInList reports whether list already holds a vector whose
// every coordinate is within location_Precision of r.
func isRootContainedInList(location_Precision float64, r mat.Vector, list []mat.Vector) bool {
candidates:
	for _, e := range list {
		for idx := 0; idx < r.Len(); idx++ {
			if math.Abs(r.AtVec(idx)-e.AtVec(idx)) > location_Precision {
				continue candidates
			}
		}
		return true
	}
	return false
}
// isUnderPrecision reports whether the box [b_low, b_up] has shrunk below
// precision along at least one dimension.
func isUnderPrecision(b_low mat.Vector, b_up mat.Vector, precision float64) bool {
	for idx := b_low.Len() - 1; idx >= 0; idx-- {
		if math.Abs(b_low.AtVec(idx)-b_up.AtVec(idx)) < precision {
			return true
		}
	}
	return false
}
// recFindRoots recursively bisects the box [b_low, b_up], running a PSO
// search in each sub-box not yet known to contain a root, and returns all
// roots found. segment carries the roots already located in the current
// box so deeper levels avoid re-finding them; a nil segment (first call)
// creates a fresh one.
func recFindRoots(f common.System, b_low mat.Vector, b_up mat.Vector,
	params *Params, pso_params *pso.Params, segment *Segment) []mat.Vector {
	res := make([]mat.Vector, 0)
	if segment == nil {
		segment = &Segment{idx: 0, roots: make([]mat.Vector, 0)}
	}
	// Stop recursing once the box is thinner than the location precision
	// in any dimension.
	if isUnderPrecision(b_low, b_up, params.Location_Precision) {
		return res
	}
	if len(segment.roots) > 0 {
		// Roots are already known here; only search the sub-boxes.
		res = append(res, findRootsInDeeperLevels(f, b_low, b_up, params, pso_params, segment)...)
	} else {
		root := searchRoot(f, b_low, b_up, pso_params, params)
		if root == nil {
			return res
		} else {
			// Record the new root and keep searching the sub-boxes for more.
			segment.roots = append(segment.roots, root)
			res = append(res, root)
			res = append(res, findRootsInDeeperLevels(f, b_low, b_up, params, pso_params, segment)...)
		}
	}
	return res
}
// findRootsInDeeperLevels splits the current box along the next dimension
// (cycling through dimensions) and recurses into both halves, handing the
// already-known roots to whichever half contains them.
func findRootsInDeeperLevels(f common.System, b_low mat.Vector, b_up mat.Vector,
	params *Params, pso_params *pso.Params, segment *Segment) []mat.Vector {
	res := make([]mat.Vector, 0)
	// Cycle the split dimension: idx = (segment.idx + 1) mod dim.
	idx := int(math.Mod(float64(segment.idx)+1.0, float64(b_low.Len())))
	b_center_up, b_center_low := splitInterval(idx, b_low, b_up)
	// Distribute the known roots between the two halves.
	segment_low_roots := make([]mat.Vector, 0)
	segment_up_roots := make([]mat.Vector, 0)
	for _, r := range segment.roots {
		if isContained(r, b_low, b_center_up) {
			segment_low_roots = append(segment_low_roots, r)
		} else {
			segment_up_roots = append(segment_up_roots, r)
		}
	}
	segment_low := &Segment{idx: idx, roots: segment_low_roots}
	res = append(res, recFindRoots(f, b_low, b_center_up, params, pso_params, segment_low)...)
	segment_up := &Segment{idx: idx, roots: segment_up_roots}
	res = append(res, recFindRoots(f, b_center_low, b_up, params, pso_params, segment_up)...)
	return res
}
// isContained reports whether every coordinate of v lies inside the
// half-open box [b_low, b_up).
//
// Fix: the previous version overwrote the result on every loop iteration,
// so it effectively checked only the LAST coordinate; true containment
// requires all coordinates to lie inside the box.
func isContained(v mat.Vector, b_low mat.Vector, b_up mat.Vector) bool {
	for idx := 0; idx < v.Len(); idx++ {
		if v.AtVec(idx) < b_low.AtVec(idx) || v.AtVec(idx) >= b_up.AtVec(idx) {
			return false
		}
	}
	return true
}
// createTargetFn wraps the system f into a scalar objective: the sum of
// squared components of f(x), which vanishes exactly at the roots of f.
func createTargetFn(f common.System) common.Target {
	return func(x mat.Vector) float64 {
		y := f(x)
		sum := 0.0
		for i := 0; i < y.Len(); i++ {
			sum += math.Pow(y.AtVec(i), 2.0)
		}
		return sum
	}
}
// searchRoot runs one PSO optimization of the squared-residual target and
// returns the found minimizer when every component of f at that point is
// within params.Root_Recognition, or nil otherwise.
func searchRoot(f common.System, b_low mat.Vector, b_up mat.Vector, pso_params *pso.Params, params *Params) mat.Vector {
	x_0 := pso.Optimize(createTargetFn(f), b_low, b_up, pso_params)
	y_0 := f(x_0)
	// Fix: check every residual component. The previous loop ran over
	// x_0.Len(), so for overdetermined systems (more equations than
	// unknowns, which FindRoots supports) the trailing residuals were
	// never checked.
	for i := 0; i < y_0.Len(); i++ {
		if math.Abs(y_0.AtVec(i)) > params.Root_Recognition {
			return nil
		}
	}
	return x_0
}
// splitInterval halves the box along dimension idx and returns the upper
// bound of the lower half (b_center_up) and the lower bound of the upper
// half (b_center_low); both carry the midpoint in dimension idx and are
// otherwise copies of b_up and b_low respectively.
func splitInterval(idx int, b_low mat.Vector, b_up mat.Vector) (mat.Vector, mat.Vector) {
	n := b_low.Len()
	mid := b_low.AtVec(idx) + 0.5*(b_up.AtVec(idx)-b_low.AtVec(idx))
	b_center_up := mat.NewVecDense(n, nil)
	b_center_up.CopyVec(b_up)
	b_center_up.SetVec(idx, mid)
	b_center_low := mat.NewVecDense(n, nil)
	b_center_low.CopyVec(b_low)
	b_center_low.SetVec(idx, mid)
	return b_center_up, b_center_low
}
package stack
import (
"sync"
)
// Stacker is an interface describing the behaviour of a FILO (first in, last out) stack. It allows concurrency-safe
// stacks to be used in the same places as regular stacks, if performance or concurrency safety are specific
// requirements.
type Stacker interface {
	Len() int          // Return the number of elements in the stack.
	Push(interface{})  // Push an object of unknown type onto the stack.
	Pop() interface{}  // Remove the top object from the stack and return it; nil when empty.
	Peek() interface{} // Return the top object without removing it; nil when empty.
}
// Stack is a FILO stack backed by a singly linked list. A linked list (rather than a dynamic array) is used because
// it guarantees every operation completes in O(1) time, disregarding overhead; dynamic arrays may beat it under
// compiler optimisation in some circumstances, but the list gives a more general guarantee.
type Stack struct {
	topPtr *stackElement
	size   int
}

// stackElement is a single node of the stack's internal linked list.
type stackElement struct {
	value interface{}
	next  *stackElement
}

// Len reports how many elements are currently stored.
func (s Stack) Len() int {
	return s.size
}

// Push places v on top of the stack.
func (s *Stack) Push(v interface{}) {
	s.size++
	s.topPtr = &stackElement{value: v, next: s.topPtr}
}

// Pop removes and returns the top element, or nil when the stack is empty.
func (s *Stack) Pop() interface{} {
	if s.topPtr == nil {
		return nil
	}
	top := s.topPtr
	s.topPtr = top.next
	s.size--
	return top.value
}

// Peek returns the top element without removing it, or nil when the stack
// is empty.
func (s Stack) Peek() interface{} {
	if s.topPtr == nil {
		return nil
	}
	return s.topPtr.value
}
// ConcurrentStack is a concurrency-safe implementation of the Stacker interface. It has a slight performance hit when
// compared to the other implementation (Stack), but the trade-off is that ConcurrentStack can be safely used between
// different goroutines, while the object is kept synchronised.
type ConcurrentStack struct {
internalStack Stack
lock sync.RWMutex
}
// Len returns the number of elements in the stack. Unlike a regular Stack, this function operates on the pointer to cs
// so that the mutex is not duplicated.
func (cs *ConcurrentStack) Len() int {
cs.lock.RLock()
defer cs.lock.RUnlock()
return cs.internalStack.size
}
// Push pushes a new element onto the stack.
func (cs *ConcurrentStack) Push(v interface{}) {
cs.lock.Lock()
defer cs.lock.Unlock()
cs.internalStack.Push(v)
}
// Pop removes an element from the top of the stack and returns it. If the stack is empty, it will return nil.
func (cs *ConcurrentStack) Pop() interface{} {
cs.lock.Lock()
defer cs.lock.Unlock()
return cs.internalStack.Pop()
}
// Peek returns a copy of the top element on the stack (the one which will be popped first) without removing it from the
// underlying stack. If the stack is empty, it will return nil.
func (cs *ConcurrentStack) Peek() interface{} {
cs.lock.RLock()
defer cs.lock.RUnlock()
return cs.internalStack.Peek()
} | stack.go | 0.779112 | 0.431045 | stack.go | starcoder |
package cios
import (
"encoding/json"
)
// SinglePoint struct for SinglePoint. Generated from the API schema; Point
// is the single required field.
type SinglePoint struct {
	Point Point `json:"point"`
}
// NewSinglePoint instantiates a new SinglePoint object.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by the API are set, but the set of arguments
// will change when the set of required properties is changed.
func NewSinglePoint(point Point, ) *SinglePoint {
	this := SinglePoint{}
	this.Point = point
	return &this
}

// NewSinglePointWithDefaults instantiates a new SinglePoint object.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by the API are set.
func NewSinglePointWithDefaults() *SinglePoint {
	this := SinglePoint{}
	return &this
}
// GetPoint returns the Point field value.
func (o *SinglePoint) GetPoint() Point {
	if o == nil {
		// Generated nil-receiver guard: return the zero value.
		var ret Point
		return ret
	}
	return o.Point
}

// GetPointOk returns a tuple with the Point field value
// and a boolean to check if the value has been set.
func (o *SinglePoint) GetPointOk() (*Point, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Point, true
}

// SetPoint sets field value.
func (o *SinglePoint) SetPoint(v Point) {
	o.Point = v
}
// MarshalJSON serializes SinglePoint as {"point": ...}.
func (o SinglePoint) MarshalJSON() ([]byte, error) {
	// The generator-emitted `if true` guard was a no-op: Point is a
	// required field and is always serialized.
	toSerialize := map[string]interface{}{
		"point": o.Point,
	}
	return json.Marshal(toSerialize)
}
// NullableSinglePoint wraps a SinglePoint pointer together with an isSet
// flag so that "unset" and "explicit null" can be distinguished in JSON.
type NullableSinglePoint struct {
	value *SinglePoint
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableSinglePoint) Get() *SinglePoint {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableSinglePoint) Set(val *SinglePoint) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableSinglePoint) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the isSet flag.
func (v *NullableSinglePoint) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableSinglePoint returns a wrapper around val that is marked set.
func NewNullableSinglePoint(val *SinglePoint) *NullableSinglePoint {
	return &NullableSinglePoint{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value (a nil value encodes as null).
func (v NullableSinglePoint) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks it set.
func (v *NullableSinglePoint) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package merkle
import (
"bytes"
"errors"
"fmt"
"sort"
)
// MaxUint is the largest value representable by uint. It is passed to
// CalcRoot as the stop layer to mean "climb all the way to the root".
const MaxUint = ^uint(0)
// ValidatePartialTree uses leafIndices, leaves and proof to calculate the merkle root of the tree and then compares it
// to expectedRoot.
func ValidatePartialTree(leafIndices []uint64, leaves, proof [][]byte, expectedRoot []byte,
	hash HashFunc) (bool, error) {
	// Snapshots are not needed for plain validation.
	validator, err := newValidator(leafIndices, leaves, proof, hash, false)
	if err != nil {
		return false, err
	}
	root, _, err := validator.CalcRoot(MaxUint)
	return bytes.Equal(root, expectedRoot), err
}
// ValidatePartialTreeWithParkingSnapshots uses leafIndices, leaves and proof to calculate the merkle root of the tree
// and then compares it to expectedRoot. Additionally, it reconstructs the parked nodes when each proven leaf was
// originally added to the tree and returns a list of snapshots. This method is ~15% slower than ValidatePartialTree.
func ValidatePartialTreeWithParkingSnapshots(leafIndices []uint64, leaves, proof [][]byte, expectedRoot []byte,
	hash HashFunc) (bool, []ParkingSnapshot, error) {
	validator, err := newValidator(leafIndices, leaves, proof, hash, true)
	if err != nil {
		return false, nil, err
	}
	root, snapshots, err := validator.CalcRoot(MaxUint)
	return bytes.Equal(root, expectedRoot), snapshots, err
}
// newValidator sanity-checks the inputs (matching lengths, non-empty,
// sorted and duplicate-free indices) and assembles a Validator.
func newValidator(leafIndices []uint64, leaves, proof [][]byte, hash HashFunc, storeSnapshots bool) (*Validator, error) {
	switch {
	case len(leafIndices) != len(leaves):
		return nil, fmt.Errorf("number of leaves (%d) must equal number of indices (%d)", len(leaves),
			len(leafIndices))
	case len(leaves) == 0:
		return nil, errors.New("at least one leaf is required for validation")
	case !sort.SliceIsSorted(leafIndices, func(i, j int) bool { return leafIndices[i] < leafIndices[j] }):
		return nil, errors.New("leafIndices are not sorted")
	case len(SetOf(leafIndices...)) != len(leafIndices):
		return nil, errors.New("leafIndices contain duplicates")
	}
	return &Validator{
		Leaves:         &LeafIterator{leafIndices, leaves},
		ProofNodes:     &proofIterator{proof},
		Hash:           hash,
		StoreSnapshots: storeSnapshots,
	}, nil
}
// Validator carries the state needed to recompute a merkle root from a
// partial tree: the proven leaves, the supplied proof nodes and the hash
// function used to combine children.
type Validator struct {
	Leaves *LeafIterator
	ProofNodes *proofIterator
	Hash HashFunc
	StoreSnapshots bool // when true, CalcRoot records parking snapshots
}

// ParkingSnapshot is the list of parked nodes at the moment a proven leaf
// was originally added to the tree.
type ParkingSnapshot [][]byte
// CalcRoot consumes leaves and proof nodes to recompute the merkle root,
// climbing until the active node reaches stopAtLayer (pass MaxUint to climb
// all the way to the top). It returns the computed root and, when
// StoreSnapshots is set, the parking snapshots recorded for each proven leaf.
func (v *Validator) CalcRoot(stopAtLayer uint) ([]byte, []ParkingSnapshot, error) {
	activePos, activeNode, err := v.Leaves.next()
	if err != nil {
		return nil, nil, err
	}
	var lChild, rChild, sibling []byte
	var parkingSnapshots, subTreeSnapshots []ParkingSnapshot
	if v.StoreSnapshots {
		parkingSnapshots = []ParkingSnapshot{nil}
	}
	for {
		if activePos.Height == stopAtLayer {
			break
		}
		// The activeNode's sibling should be calculated iff it's an ancestor of the next proven leaf. Otherwise, the
		// sibling is the next node in the proof.
		nextLeafPos, _, err := v.Leaves.peek()
		if err == nil && activePos.sibling().isAncestorOf(nextLeafPos) {
			// Recurse to build the sibling subtree from the upcoming proven
			// leaves, stopping at the current height.
			sibling, subTreeSnapshots, err = v.CalcRoot(activePos.Height)
			if err != nil {
				return nil, nil, err
			}
		} else {
			sibling, err = v.ProofNodes.next()
			// Running out of proof nodes ends the climb: the current active
			// node is the root of everything proven so far.
			// NOTE(review): errors other than noMoreItems are silently
			// ignored here — confirm proofIterator.next can only fail with
			// noMoreItems.
			if err == noMoreItems {
				break
			}
		}
		if activePos.isRightSibling() {
			lChild, rChild = sibling, activeNode
			addToAll(parkingSnapshots, lChild)
		} else {
			lChild, rChild = activeNode, sibling
			// A left child parks nothing at this height; record a nil
			// placeholder so snapshot entries stay aligned by layer.
			addToAll(parkingSnapshots, nil)
			if len(subTreeSnapshots) > 0 {
				parkingSnapshots = append(parkingSnapshots, addToAll(subTreeSnapshots, activeNode)...)
				subTreeSnapshots = nil
			}
		}
		activeNode = v.Hash(lChild, rChild)
		activePos = activePos.parent()
	}
	return activeNode, parkingSnapshots, nil
}
func addToAll(snapshots []ParkingSnapshot, node []byte) []ParkingSnapshot {
for i := 0; i < len(snapshots); i++ {
snapshots[i] = append(snapshots[i], node)
}
return snapshots
} | validation.go | 0.710427 | 0.447038 | validation.go | starcoder |
package waf
import (
"encoding/json"
)
// WafDnsRecord A DNS record A dns record describes an individual piece of DNS functionality in a DNS zone.
type WafDnsRecord struct {
	// The name of the network node to which a zone resource record pertains Use the value \"@\" to denote current root domain name.
	Name *string `json:"name,omitempty"`
	// A zone record's type Zone record types describe the zone record's behavior. For instance, a zone record's type can say that the record is a name to IP address value, a name alias, or which mail exchanger is responsible for the domain. See https://support.stackpath.com/hc/en-us/articles/360001085563-What-DNS-record-types-does-StackPath-support for more information.
	Type *string `json:"type,omitempty"`
	// A zone record's class code This is typically \"IN\" for Internet related resource records.
	Class *string `json:"class,omitempty"`
	// A zone record's time to live A record's TTL is the number of seconds that the record should be cached by DNS resolvers. Use lower TTL values if you expect zone records to change often. Use higher TTL values for records that won't change to prevent extra DNS lookups by clients.
	Ttl *int32 `json:"ttl,omitempty"`
	// A zone record's value Expected data formats can vary depending on the zone record's type.
	Data *string `json:"data,omitempty"`
}

// NewWafDnsRecord instantiates a new WafDnsRecord object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewWafDnsRecord() *WafDnsRecord {
	return &WafDnsRecord{}
}

// NewWafDnsRecordWithDefaults instantiates a new WafDnsRecord object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewWafDnsRecordWithDefaults() *WafDnsRecord {
	return &WafDnsRecord{}
}

// GetName returns the Name field value if set, zero value otherwise.
func (o *WafDnsRecord) GetName() string {
	if o == nil || o.Name == nil {
		return ""
	}
	return *o.Name
}

// GetNameOk returns a tuple with the Name field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *WafDnsRecord) GetNameOk() (*string, bool) {
	if o == nil || o.Name == nil {
		return nil, false
	}
	return o.Name, true
}

// HasName returns a boolean if a field has been set.
func (o *WafDnsRecord) HasName() bool {
	return o != nil && o.Name != nil
}

// SetName gets a reference to the given string and assigns it to the Name field.
func (o *WafDnsRecord) SetName(v string) {
	o.Name = &v
}

// GetType returns the Type field value if set, zero value otherwise.
func (o *WafDnsRecord) GetType() string {
	if o == nil || o.Type == nil {
		return ""
	}
	return *o.Type
}

// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *WafDnsRecord) GetTypeOk() (*string, bool) {
	if o == nil || o.Type == nil {
		return nil, false
	}
	return o.Type, true
}

// HasType returns a boolean if a field has been set.
func (o *WafDnsRecord) HasType() bool {
	return o != nil && o.Type != nil
}

// SetType gets a reference to the given string and assigns it to the Type field.
func (o *WafDnsRecord) SetType(v string) {
	o.Type = &v
}

// GetClass returns the Class field value if set, zero value otherwise.
func (o *WafDnsRecord) GetClass() string {
	if o == nil || o.Class == nil {
		return ""
	}
	return *o.Class
}

// GetClassOk returns a tuple with the Class field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *WafDnsRecord) GetClassOk() (*string, bool) {
	if o == nil || o.Class == nil {
		return nil, false
	}
	return o.Class, true
}

// HasClass returns a boolean if a field has been set.
func (o *WafDnsRecord) HasClass() bool {
	return o != nil && o.Class != nil
}

// SetClass gets a reference to the given string and assigns it to the Class field.
func (o *WafDnsRecord) SetClass(v string) {
	o.Class = &v
}

// GetTtl returns the Ttl field value if set, zero value otherwise.
func (o *WafDnsRecord) GetTtl() int32 {
	if o == nil || o.Ttl == nil {
		return 0
	}
	return *o.Ttl
}

// GetTtlOk returns a tuple with the Ttl field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *WafDnsRecord) GetTtlOk() (*int32, bool) {
	if o == nil || o.Ttl == nil {
		return nil, false
	}
	return o.Ttl, true
}

// HasTtl returns a boolean if a field has been set.
func (o *WafDnsRecord) HasTtl() bool {
	return o != nil && o.Ttl != nil
}

// SetTtl gets a reference to the given int32 and assigns it to the Ttl field.
func (o *WafDnsRecord) SetTtl(v int32) {
	o.Ttl = &v
}

// GetData returns the Data field value if set, zero value otherwise.
func (o *WafDnsRecord) GetData() string {
	if o == nil || o.Data == nil {
		return ""
	}
	return *o.Data
}

// GetDataOk returns a tuple with the Data field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *WafDnsRecord) GetDataOk() (*string, bool) {
	if o == nil || o.Data == nil {
		return nil, false
	}
	return o.Data, true
}

// HasData returns a boolean if a field has been set.
func (o *WafDnsRecord) HasData() bool {
	return o != nil && o.Data != nil
}

// SetData gets a reference to the given string and assigns it to the Data field.
func (o *WafDnsRecord) SetData(v string) {
	o.Data = &v
}

// MarshalJSON serializes only the fields that have been set.
func (o WafDnsRecord) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Name != nil {
		toSerialize["name"] = o.Name
	}
	if o.Type != nil {
		toSerialize["type"] = o.Type
	}
	if o.Class != nil {
		toSerialize["class"] = o.Class
	}
	if o.Ttl != nil {
		toSerialize["ttl"] = o.Ttl
	}
	if o.Data != nil {
		toSerialize["data"] = o.Data
	}
	return json.Marshal(toSerialize)
}
// NullableWafDnsRecord wraps a WafDnsRecord pointer together with an
// explicit "set" flag, so JSON null can be distinguished from an absent value.
type NullableWafDnsRecord struct {
	value *WafDnsRecord
	isSet bool
}

// Get returns the wrapped value; it may be nil.
func (v NullableWafDnsRecord) Get() *WafDnsRecord {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableWafDnsRecord) Set(val *WafDnsRecord) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value has been assigned (including an explicit nil).
func (v NullableWafDnsRecord) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableWafDnsRecord) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableWafDnsRecord returns a wrapper that is already marked as set.
func NewNullableWafDnsRecord(val *WafDnsRecord) *NullableWafDnsRecord {
	return &NullableWafDnsRecord{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableWafDnsRecord) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullableWafDnsRecord) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package main
import (
"fmt"
"math/big"
"math/rand"
"time"
)
// PollardRhoIterator holds the state of one pseudo-random walk used by
// Pollard's rho algorithm for the elliptic-curve discrete logarithm.
// The walk maintains X = a*P + b*Q (coefficients mod n).
type PollardRhoIterator struct {
	point1 *Point // P, the base point
	point2 *Point // Q, the point whose logarithm is sought
	X1 *Point // precomputed step a1*P + b1*Q, added when the walk is in partition 0
	X2 *Point // precomputed step a2*P + b2*Q, added when the walk is in partition 2
	a1 int64
	b1 int64
	a2 int64
	b2 int64
	X *Point // current walk position (nil = point at infinity)
	a int64 // current coefficient of P, reduced mod n
	b int64 // current coefficient of Q, reduced mod n
}
// NewPollardRhoIterator builds an iterator for the walk X = a*P + b*Q.
// It draws two random coefficient pairs (a1,b1) and (a2,b2) in [1, n) and
// precomputes the step points X1 = a1*P + b1*Q and X2 = a2*P + b2*Q.
func NewPollardRhoIterator(P *Point, Q *Point) *PollardRhoIterator {
	iter := new(PollardRhoIterator)
	iter.point1 = P
	iter.point2 = Q

	// A single time-seeded source suffices. The original created four
	// sources seeded with time.Now().UnixNano() back-to-back, which can
	// yield identical seeds (and thus correlated coefficients) when the
	// calls land inside one clock tick.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	// randCoeff draws a coefficient in [1, n); 0 is bumped to 1 so the
	// step points are never the identity, matching the original behavior.
	randCoeff := func() int64 {
		c := r.Int63n(myCurve.n)
		if c == 0 {
			c++
		}
		return c
	}

	iter.a1 = randCoeff()
	iter.b1 = randCoeff()
	iter.X1 = Add(Multiply(iter.a1, P), Multiply(iter.b1, Q))

	iter.a2 = randCoeff()
	iter.b2 = randCoeff()
	iter.X2 = Add(Multiply(iter.a2, P), Multiply(iter.b2, Q))

	// The walk starts at the point at infinity (nil) with a = b = 0.
	iter.X = nil
	iter.a = 0
	iter.b = 0
	return iter
}
// Next advances the walk one step and returns the new position together
// with the current coefficients (a, b), reduced mod n, such that
// X = a*P + b*Q. The partition of the curve containing the current
// x-coordinate selects the update rule:
//   partition 0: X += X1 (a += a1, b += b1)
//   partition 1: X = 2X  (a *= 2,  b *= 2)
//   partition 2: X += X2 (a += a2, b += b2)
// It returns (nil, 0, 0) if the partition index is somehow out of range.
func (iter *PollardRhoIterator) Next() (X *Point, a int64, b int64) {
	var i *big.Int
	// Partition the curve into three segments
	partitionSize := (myCurve.p / 3) + 1
	bigPartitionSize := big.NewInt(partitionSize)
	if (iter.X == nil) {
		// The point at infinity is treated as belonging to partition 0.
		i = big.NewInt(0)
	} else {
		i = new(big.Int)
		i.Div(iter.X.x, bigPartitionSize)
	}
	// Start with 0P, then add 1P to get 1P, where i == 0.
	// Next iteration is 2P (doubling 1P to get 2P), where i == 1.
	// Next iteration is 3P (adding P to 2P), where i == 2.
	if (i.Int64() == 0) {
		iter.a += iter.a1
		iter.b += iter.b1
		iter.X = Add(iter.X, iter.X1)
	} else if (i.Int64() == 1) {
		iter.a *= 2
		iter.b *= 2
		iter.X = Double(iter.X)
	} else if (i.Int64() == 2) {
		iter.a += iter.a2
		iter.b += iter.b2
		iter.X = Add(iter.X, iter.X2)
	} else {
		return nil, 0, 0
	}
	// Take the a, b values mod n
	iter.a = mod(iter.a, myCurve.n)
	iter.b = mod(iter.b, myCurve.n)
	a = iter.a
	b = iter.b
	return iter.X, a, b
}
// pollardRho computes the discrete logarithm x with Q = xP using Pollard's
// rho algorithm with Floyd's cycle detection (tortoise and hare). It
// returns the logarithm and the number of steps taken. On failure (a point
// off the curve, or an unlucky walk where b1 == b2 makes the modular
// inverse undefined) it returns log = 0; callers are expected to retry.
func pollardRho(P *Point, Q *Point) (log int64, numSteps int64) {
	var i int64
	// Ensure points P and Q are both on the curve
	if ((isOnCurve(*P) == false) || (isOnCurve(*Q) == false)) {
		fmt.Printf("Exiting, point P or Q is not on curve!\n")
		return 0, numSteps
	}
	// Initialize numSteps to 0
	numSteps = 0
	tortoise := NewPollardRhoIterator(P, Q)
	if (tortoise == nil) {
		return 0, 0
	}
	// Copy values of tortoise to the hare iterator.
	// NOTE(review): this is a shallow copy — both iterators share the
	// precomputed *Point values. This appears safe because Next replaces
	// iter.X with a freshly computed point rather than mutating in place;
	// confirm Add/Double never mutate their arguments.
	hare := *tortoise
	for i = 0; i < myCurve.n; i++ {
		X1, a1, b1 := tortoise.Next()
		// Hare skips over a step every time
		X2, a2, b2 := hare.Next()
		X2, a2, b2 = hare.Next()
		numSteps++
		// If (x1,y1) == (x2,y2), we've found a match (detected a cycle)
		if ((X1 == nil) && (X2 == nil)) {
			if (b1 == b2) {
				fmt.Printf("Generated random sequence divide by zero, retry\n")
				return 0, numSteps
			}
			// log = (a1 - a2) / (b2 - b1) mod n
			log = (a1 - a2) * modInverse((b2 - b1), myCurve.n)
			log = mod(log, myCurve.n)
			return log, numSteps
		} else if ((X1 == nil) || (X2 == nil)) {
			numSteps++
		} else if ((X1.x.Cmp(X2.x) == 0) && (X1.y.Cmp(X2.y) == 0)) {
			if (b1 == b2) {
				fmt.Printf("Generated random sequence divide by zero, retry\n")
				return 0, numSteps
			}
			log = (a1 - a2) * modInverse((b2 - b1), myCurve.n)
			log = mod(log, myCurve.n)
			return log, numSteps
		} else {
			continue
		}
	}
	return 0, numSteps
}
package templates
import "strings"
var (
	// ReadMe is the README.md template. The literal uses the placeholders
	// BACKTICKS / BACKTICK (expanded by init below) because backticks
	// cannot appear inside a Go raw string literal.
	ReadMe = `# {{.project_name}}
[](https://{{.module_name}}/actions)
[](https://pkg.go.dev/mod/{{.module_name}})
[](https://goreportcard.com/report/{{.module_name}})
{{.project_name}} is a production ready project template for microservices using gRPC - generated by [microgen](https://github.com/pthethanh/microgen)
## Getting Started
These instructions will get you a copy of the project up and running on your local machine for development and testing purposes. See deployment for notes on how to deploy the project on a live system.
### Prerequisites
- Go {{.go_version}}
- Protocol Buffer {{.protobuf_version}}
Install Protocol Buffer
BACKTICKSshell
make install_protobuf
BACKTICKS
### Installing
Clone the code.
BACKTICKSshell
git clone https://{{.module_name}}
BACKTICKS
Gen gRPC, gRPC Gateway, Swagger and verify the code.
BACKTICKSshell
make all
BACKTICKS
Start the service.
BACKTICKSshell
go run main.go
BACKTICKS
Verify the APIs
BACKTICKSshell
curl http://localhost:{{.port}}/internal/readiness
curl http://localhost:{{.port}}/internal/liveness
curl http://localhost:{{.port}}/internal/metrics
BACKTICKS
## Running the tests
Run the test by simply execute: BACKTICKmakeBACKTICK
## Coding style
The code convention should follow [effective-go](https://github.com/golovers/effective-go)
All developers are required to run BACKTICKmakeBACKTICK before pushing the code to remote git.
## Deployment
Build Docker Image
BACKTICKSshell
make docker_build
BACKTICKS
Run With Docker Image
BACKTICKSshell
make docker
BACKTICKS
Run With Docker Compose
BACKTICKSshell
make compose
BACKTICKS
{{if .heroku}}
Deploy to Heroku
BACKTICKSshell
make heroku
BACKTICKS
{{end}}
## Built With
- [micro](https://github.com/pthethanh/micro)- Microservices tool kit.
- [microgen](https://github.com/pthethanh/microgen) - Production ready project template for microservices.
## Contributing
TBU
## Versioning
We use [SemVer](http://semver.org/) for versioning. For the versions available, see the [tags on this repository](https://{{.module_name}}/tags).
## Authors
**<NAME>** - *Initial work* - [<NAME> Github](https://github.com/pthethanh)
See also the list of [contributors](https://{{.module_name}}/contributors) who participated in this project.
## License
This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details
## Acknowledgments
TBU
`
)
// init expands the backtick placeholders in the ReadMe template.
// NOTE: "BACKTICKS" must be replaced before "BACKTICK" — the latter is a
// prefix of the former, so reversing the order would corrupt the template.
func init() {
	ReadMe = strings.ReplaceAll(ReadMe, "BACKTICKS", "```")
	ReadMe = strings.ReplaceAll(ReadMe, "BACKTICK", "`")
}
package hlt
import (
"math"
"sort"
"strconv"
"strings"
)
// Map describes the current state of the game
type Map struct {
	MyID, Width, Height int // this bot's player ID and the board dimensions
	Planets []Planet
	Players []Player
	Entities []Entity // every ship and planet flattened into one list (used for collision checks)
}
// Player has an ID for establishing ownership, and a number of ships
type Player struct {
	ID int
	Ships []Ship
}
// ParsePlayer decodes one player (ID, ship count, then that many ships)
// from a slice of game state tokens and returns the player together with
// the unconsumed remainder of the token stream.
func ParsePlayer(tokens []string) (Player, []string) {
	id, _ := strconv.Atoi(tokens[0])
	// The protocol may encode the count as a float, so parse it as one.
	shipCount, _ := strconv.ParseFloat(tokens[1], 64)
	player := Player{ID: id, Ships: []Ship{}}
	rest := tokens[2:]
	for i := 0; float64(i) < shipCount; i++ {
		var ship Ship
		ship, rest = ParseShip(id, rest)
		player.Ships = append(player.Ships, ship)
	}
	return player, rest
}
// ParseGameString decodes a full game state string (players followed by
// planets) into a Map, also flattening all ships and planets into the
// Entities list.
func ParseGameString(c *Connection, gameString string) Map {
	tokens := strings.Split(gameString, " ")
	numPlayers, _ := strconv.Atoi(tokens[0])
	tokens = tokens[1:]
	gameMap := Map{
		MyID:     c.PlayerTag,
		Width:    c.width,
		Height:   c.height,
		Planets:  make([]Planet, 0),
		Players:  make([]Player, numPlayers),
		Entities: make([]Entity, 0),
	}
	for n := 0; n < numPlayers; n++ {
		var player Player
		player, tokens = ParsePlayer(tokens)
		// Players are indexed by their protocol-assigned ID.
		gameMap.Players[player.ID] = player
		for _, ship := range player.Ships {
			gameMap.Entities = append(gameMap.Entities, ship.Entity)
		}
	}
	numPlanets, _ := strconv.Atoi(tokens[0])
	tokens = tokens[1:]
	for n := 0; n < numPlanets; n++ {
		var planet Planet
		planet, tokens = ParsePlanet(tokens)
		gameMap.Planets = append(gameMap.Planets, planet)
		gameMap.Entities = append(gameMap.Entities, planet.Entity)
	}
	return gameMap
}
// ObstaclesBetween reports whether any entity other than start and end
// blocks the straight segment between them. Each candidate is projected
// onto the segment and counts as an obstacle when its closest point of
// approach falls within the entity's radius plus a safety margin.
func (gameMap Map) ObstaclesBetween(start Entity, end Entity) bool {
	x1 := start.X
	y1 := start.Y
	x2 := end.X
	y2 := end.Y
	dx := x2 - x1
	dy := y2 - y1
	// Squared segment length; the epsilon guards against division by zero
	// when start and end coincide.
	a := dx*dx + dy*dy + 1e-8
	crossterms := x1*x1 - x1*x2 + y1*y1 - y1*y2
	for i := 0; i < len(gameMap.Entities); i++ {
		entity := gameMap.Entities[i]
		if entity.ID == start.ID || entity.ID == end.ID {
			continue
		}
		x0 := entity.X
		y0 := entity.Y
		// Anything sitting essentially on top of the destination blocks it.
		closestDistance := end.CalculateDistanceTo(entity)
		if closestDistance < entity.Radius+1 {
			return true
		}
		// t parameterizes the closest point on the infinite line through
		// start and end; only t strictly inside (0, 1) lies on the segment.
		b := -2 * (crossterms + x0*dx + y0*dy)
		t := -b / (2 * a)
		if t <= 0 || t >= 1 {
			continue
		}
		closestX := start.X + dx*t
		closestY := start.Y + dy*t
		// Euclidean distance from the entity to the closest point of
		// approach. (BUG FIX: the original computed
		// Sqrt(Pow(dx,2) * +Pow(dy,2)) — multiplying the squared terms
		// instead of adding them — which is not a distance at all.)
		closestDistance = math.Hypot(closestX-x0, closestY-y0)
		if closestDistance <= entity.Radius+start.Radius+1 {
			return true
		}
	}
	return false
}
// NearestPlanetsByDistance orders all planets based on their proximity
// to a given ship, from nearest to farthest. Note that it annotates and
// sorts the Map's own Planets slice in place before returning it.
func (gameMap Map) NearestPlanetsByDistance(ship Ship) []Planet {
	planets := gameMap.Planets
	for i := range planets {
		planets[i].Distance = ship.CalculateDistanceTo(planets[i].Entity)
	}
	sort.Sort(byDist(planets))
	return planets
}
// byDist implements sort.Interface for planets, ordering them by the
// precomputed Distance field (ascending).
type byDist []Planet

func (a byDist) Len() int { return len(a) }
func (a byDist) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byDist) Less(i, j int) bool { return a[i].Distance < a[j].Distance }
package metrics
import (
"regexp"
"time"
"github.com/Masterminds/semver"
)
// AllJobs represents a regex that will collect results from all jobs.
var AllJobs = regexp.MustCompile(".*")

// Phase is a phase of an osde2e run.
type Phase string

// Result is the result of a JUnit test.
type Result string

// Known phases and results. The Unknown* values act as catch-alls for data
// produced by versions of osde2e this library does not recognize.
const (
	// Install phase represents tests that were run after the initial installation of the cluster.
	Install Phase = "install"
	// Upgrade phase represents tests that were run after the upgrade of the cluster.
	Upgrade Phase = "upgrade"
	// UnknownPhase represents tests that were run in a phase that is currently unknown to the metrics library.
	UnknownPhase Phase = "unknown"
	// Passed result represents a JUnitResult that passed acceptably.
	Passed Result = "passed"
	// Failed result represents a JUnitResult that failed.
	Failed Result = "failed"
	// Skipped result represents a JUnitResult that was skipped during a run.
	Skipped Result = "skipped"
	// UnknownResult represents a JUnitResult that is currently unknown to the metrics library.
	UnknownResult Result = "unknown"
)
// Event objects that are recorded by osde2e runs. These typically represent occurrences that are of
// some note. For example, cluster provisioning failure, failure to collect Hive logs, etc.
type Event struct {
	// InstallVersion is the starting install version of the cluster that generated this event.
	InstallVersion *semver.Version
	// UpgradeVersion is the upgrade version of the cluster that generated this event. This can be nil.
	UpgradeVersion *semver.Version
	// CloudProvider is the cluster cloud provider that was used when this event was generated.
	CloudProvider string
	// Environment is the environment that the cluster provider was using during the generation of this event.
	Environment string
	// Event is the name of the event that was recorded.
	Event string
	// ClusterID is the cluster ID of the cluster that was provisioned while generating this event.
	ClusterID string
	// JobName is the name of the job that generated this event.
	JobName string
	// JobID is the job ID number that corresponds to the job that generated this event.
	JobID int64
	// Timestamp is the time when this event was recorded.
	Timestamp int64
}

// Equal will return true if two event objects are equal.
// (Rewritten from a nine-branch if-chain into a single conjunction.)
func (e Event) Equal(that Event) bool {
	return versionsEqual(e.InstallVersion, that.InstallVersion) &&
		versionsEqual(e.UpgradeVersion, that.UpgradeVersion) &&
		e.CloudProvider == that.CloudProvider &&
		e.Environment == that.Environment &&
		e.Event == that.Event &&
		e.ClusterID == that.ClusterID &&
		e.JobName == that.JobName &&
		e.JobID == that.JobID &&
		e.Timestamp == that.Timestamp
}

// Events is a list of events, sortable by timestamp via sort.Interface.
type Events []Event

func (e Events) Len() int {
	return len(e)
}

func (e Events) Swap(i, j int) {
	e[i], e[j] = e[j], e[i]
}

func (e Events) Less(i, j int) bool {
	return e[i].Timestamp < e[j].Timestamp
}
// Metadata objects are numerical values associated with metadata calculated by osde2e.
type Metadata struct {
	// InstallVersion is the starting install version of the cluster that generated this metadata.
	InstallVersion *semver.Version
	// UpgradeVersion is the upgrade version of the cluster that generated this metadata. This can be nil.
	UpgradeVersion *semver.Version
	// CloudProvider is the cluster cloud provider that was used when this metadata was generated.
	CloudProvider string
	// Environment is the environment that the cluster provider was using during the generation of this metadata.
	Environment string
	// MetadataName is the name of the metadata that was recorded.
	MetadataName string
	// ClusterID is the cluster ID of the cluster that was provisioned while generating this metadata.
	ClusterID string
	// JobName is the name of the job that generated this metadata.
	JobName string
	// JobID is the job ID number that corresponds to the job that generated this metadata.
	JobID int64
	// Value is the numerical value associated with this metadata.
	Value float64
	// Timestamp is the time when this metadata was recorded.
	Timestamp int64
}

// Equal will return true if two metadata objects are equal.
func (m Metadata) Equal(that Metadata) bool {
	return versionsEqual(m.InstallVersion, that.InstallVersion) &&
		versionsEqual(m.UpgradeVersion, that.UpgradeVersion) &&
		m.CloudProvider == that.CloudProvider &&
		m.Environment == that.Environment &&
		m.MetadataName == that.MetadataName &&
		m.ClusterID == that.ClusterID &&
		m.JobName == that.JobName &&
		m.JobID == that.JobID &&
		m.Value == that.Value &&
		m.Timestamp == that.Timestamp
}

// Metadatas is a list of metadata objects, sortable by timestamp via sort.Interface.
type Metadatas []Metadata

func (m Metadatas) Len() int {
	return len(m)
}

func (m Metadatas) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}

func (m Metadatas) Less(i, j int) bool {
	return m[i].Timestamp < m[j].Timestamp
}
// AddonMetadata is numerical data captured by osde2e runs, similar to Metadata. However, this is customizable and
// focused on addon testing.
type AddonMetadata struct {
	Metadata
	// Phase is the test phase where this metadata was generated.
	Phase Phase
}

// Equal will return true if two addon metadata objects are equal.
func (a AddonMetadata) Equal(that AddonMetadata) bool {
	return a.Metadata.Equal(that.Metadata) && a.Phase == that.Phase
}

// AddonMetadatas is a list of addon metadata objects, sortable by timestamp
// via sort.Interface.
type AddonMetadatas []AddonMetadata

func (a AddonMetadatas) Len() int {
	return len(a)
}

func (a AddonMetadatas) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

func (a AddonMetadatas) Less(i, j int) bool {
	return a[i].Timestamp < a[j].Timestamp
}
// JUnitResult represents an individual test that was run over the course of an osde2e run.
type JUnitResult struct {
	// InstallVersion is the starting install version of the cluster that generated this result.
	InstallVersion *semver.Version
	// UpgradeVersion is the upgrade version of the cluster that generated this result. This can be nil.
	UpgradeVersion *semver.Version
	// CloudProvider is the cluster cloud provider that was used when this result was generated.
	CloudProvider string
	// Environment is the environment that the cluster provider was using during the generation of this result.
	Environment string
	// Suite is the name of the test suite that this test belongs to.
	Suite string
	// TestName is the name of the test that was run.
	TestName string
	// Result is the result of this test.
	Result Result
	// ClusterID is the cluster ID of the cluster that was provisioned while generating this result.
	ClusterID string
	// JobName is the name of the job that generated this result.
	JobName string
	// JobID is the job ID number that corresponds to the job that generated this result.
	JobID int64
	// Phase is the test phase where this result was generated.
	Phase Phase
	// Duration is the length of time that this test took to run.
	Duration time.Duration
	// Timestamp is the timestamp when this result was recorded.
	Timestamp int64
}

// Equal will return true if two JUnitResult objects are equal.
func (j JUnitResult) Equal(that JUnitResult) bool {
	return versionsEqual(j.InstallVersion, that.InstallVersion) &&
		versionsEqual(j.UpgradeVersion, that.UpgradeVersion) &&
		j.CloudProvider == that.CloudProvider &&
		j.Environment == that.Environment &&
		j.Suite == that.Suite &&
		j.TestName == that.TestName &&
		j.Result == that.Result &&
		j.ClusterID == that.ClusterID &&
		j.JobName == that.JobName &&
		j.JobID == that.JobID &&
		j.Phase == that.Phase &&
		j.Duration == that.Duration &&
		j.Timestamp == that.Timestamp
}

// JUnitResults is a list of JUnitResults, sortable by timestamp via sort.Interface.
type JUnitResults []JUnitResult

func (jr JUnitResults) Len() int {
	return len(jr)
}

func (jr JUnitResults) Swap(i, j int) {
	jr[i], jr[j] = jr[j], jr[i]
}

func (jr JUnitResults) Less(i, j int) bool {
	return jr[i].Timestamp < jr[j].Timestamp
}
// nil safe semver equivalency
func versionsEqual(version1, version2 *semver.Version) bool {
return (version1 == nil && version1 == version2) || (version1 != nil && version2 != nil && version1.Equal(version2))
} | pkg/metrics/objects.go | 0.808219 | 0.415373 | objects.go | starcoder |
package plot
import (
"io"
"math/rand"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotutil"
"gonum.org/v1/plot/vg"
)
// Verses renders a verses line plot of the given labeled series and returns
// an io.WriterTo that produces a JPG image.
// title, xaxis and yaxis label the plot; h and w are the image height and
// width in centimeters. Errors are returned instead of panicking.
func Verses(title, xaxis, yaxis string, h, w int, data ...LabeledData) (io.WriterTo, error) {
	rand.Seed(int64(20)) // fixed seed — presumably for reproducible styling; TODO confirm it is still needed
	p, err := plot.New()
	if err != nil {
		return nil, err
	}
	p.X.Min = 0
	p.Y.Min = 0
	p.Title.Text = title
	p.X.Label.Text = xaxis
	p.Y.Label.Text = yaxis
	for i := range data {
		if err := plotutil.AddLines(p, data[i].Label, data[i].Data); err != nil {
			return nil, err
		}
	}
	// Plot.WriterTo takes (width, height, format). The original passed h
	// first, swapping the dimensions and disagreeing with Verses2.
	return p.WriterTo(vg.Length(w)*vg.Centimeter, vg.Length(h)*vg.Centimeter, "jpg")
}
// Verses2 renders a verses line plot of the given labeled series and returns
// an io.WriterTo that produces a JPG image. It is the same as Verses except
// it takes a slice of LabeledData instead of variadic arguments.
// title, xaxis and yaxis label the plot; h and w are the image height and
// width in centimeters. Errors are returned instead of panicking.
func Verses2(title, xaxis, yaxis string, h, w int, data []LabeledData) (io.WriterTo, error) {
	rand.Seed(int64(20)) // fixed seed — presumably for reproducible styling; TODO confirm it is still needed
	p, err := plot.New()
	if err != nil {
		return nil, err
	}
	p.X.Min = 0
	p.Y.Min = 0
	p.Title.Text = title
	p.X.Label.Text = xaxis
	p.Y.Label.Text = yaxis
	// BUG FIX: the original assigned the AddLines error and then returned
	// without ever checking it, silently dropping plotting failures.
	if err := plotutil.AddLines(p, makeaddlinereadable(data)...); err != nil {
		return nil, err
	}
	return p.WriterTo(vg.Length(w)*vg.Centimeter, vg.Length(h)*vg.Centimeter, "jpg")
}
// makeaddlinereadable flattens the labeled series into the alternating
// (label, data, label, data, ...) argument list that plotutil.AddLines
// expects.
func makeaddlinereadable(data []LabeledData) []interface{} {
	args := make([]interface{}, 0, 2*len(data))
	for _, d := range data {
		args = append(args, d.Label, d.Data)
	}
	return args
}
/*
//Verses3 does a verses line plot with the data passed.
//title,xaxis,yaxis are the labels for the plot image
//h,w are the size of the image
//returns a WriteTo. This will allow up to 12 LabelData to have individual colors
func Verses3(title, xaxis, yaxis string, h, w int, data []LabeledData) (io.WriterTo, error) {
rand.Seed(int64(20))
p, err := plot.New()
if err != nil {
panic(err)
}
p.X.Min = 0
p.Y.Min = 0
p.Title.Text = title
p.X.Label.Text = xaxis
p.Y.Label.Text = yaxis
switch len(data) {
case 1:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data)
if err != nil {
panic(err)
}
case 2:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data)
if err != nil {
panic(err)
}
case 3:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
)
if err != nil {
panic(err)
}
case 4:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
)
if err != nil {
panic(err)
}
case 5:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
data[4].Label, data[4].Data,
)
if err != nil {
panic(err)
}
case 6:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
data[4].Label, data[4].Data,
data[5].Label, data[5].Data,
)
if err != nil {
panic(err)
}
case 7:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
data[4].Label, data[4].Data,
data[5].Label, data[5].Data,
data[6].Label, data[6].Data,
)
if err != nil {
panic(err)
}
case 8:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
data[4].Label, data[4].Data,
data[5].Label, data[5].Data,
data[6].Label, data[6].Data,
data[7].Label, data[7].Data,
)
if err != nil {
panic(err)
}
case 9:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
data[4].Label, data[4].Data,
data[5].Label, data[5].Data,
data[6].Label, data[6].Data,
data[7].Label, data[7].Data,
data[8].Label, data[8].Data,
)
if err != nil {
panic(err)
}
case 10:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
data[4].Label, data[4].Data,
data[5].Label, data[5].Data,
data[6].Label, data[6].Data,
data[7].Label, data[7].Data,
data[8].Label, data[8].Data,
data[9].Label, data[9].Data,
)
if err != nil {
panic(err)
}
case 11:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
data[4].Label, data[4].Data,
data[5].Label, data[5].Data,
data[6].Label, data[6].Data,
data[7].Label, data[7].Data,
data[8].Label, data[8].Data,
data[9].Label, data[9].Data,
data[10].Label, data[10].Data,
)
if err != nil {
panic(err)
}
case 12:
err = plotutil.AddLines(p,
data[0].Label, data[0].Data,
data[1].Label, data[1].Data,
data[2].Label, data[2].Data,
data[3].Label, data[3].Data,
data[4].Label, data[4].Data,
data[5].Label, data[5].Data,
data[6].Label, data[6].Data,
data[7].Label, data[7].Data,
data[8].Label, data[8].Data,
data[9].Label, data[9].Data,
data[10].Label, data[10].Data,
data[11].Label, data[11].Data,
)
if err != nil {
panic(err)
}
default:
panic("Length of LabeledData should be 1<=LabeledData<=12 ")
}
return p.WriterTo(vg.Length(w)*vg.Centimeter, vg.Length(h)*vg.Centimeter, "jpg")
}
*/ | ui/plot/plot_vsline.go | 0.684053 | 0.633453 | plot_vsline.go | starcoder |
package main
import (
"image"
"image/color"
"math"
"gocv.io/x/gocv"
)
var (
	// Points of the polygon encasing the region of interest, listed in
	// drawing order for gocv.FillPoly.
	// NOTE(review): coordinates assume a frame of roughly 800x600 pixels
	// (x up to 790, y up to 525) - confirm against the capture resolution
	// used upstream.
	vertices = []image.Point{
		image.Point{X: 10, Y: 525},
		image.Point{X: 10, Y: 325},
		image.Point{X: 200, Y: 275},
		image.Point{X: 600, Y: 275},
		image.Point{X: 790, Y: 325},
		image.Point{X: 790, Y: 525},
	}
)
// Masks the captured screen by the region we are interested in defined by the
// points above. Pixels outside the polygon are zeroed.
// The returned Mat is newly allocated and must be Closed by the caller.
func regionOfInterest(img gocv.Mat, vertices []image.Point) gocv.Mat {
	maskedImg := gocv.NewMat()
	mask := gocv.NewMatWithSizeFromScalar(
		gocv.NewScalar(0, 0, 0, 255),
		img.Rows(), img.Cols(), img.Type(),
	)
	// Mats wrap C-allocated memory; release the scratch mask before
	// returning (previously it leaked on every call).
	defer mask.Close()
	gocv.FillPoly(&mask, [][]image.Point{vertices}, color.RGBA{R: 255, G: 255, B: 255, A: 255})
	gocv.BitwiseAnd(mask, img, &maskedImg)
	return maskedImg
}
// Processes a given image through the edge-detection pipeline
// (grayscale -> blur -> Canny -> dilate -> ROI mask), finds the lane lines
// in the result, draws them onto the original image and returns it.
func process(img gocv.Mat) gocv.Mat {
	processedImg := gocv.NewMat()
	gocv.CvtColor(img, &processedImg, gocv.ColorRGBToGray)
	gocv.GaussianBlur(processedImg, &processedImg, image.Point{5, 5}, 0, 0, 0)
	gocv.Canny(processedImg, &processedImg, 100.0, 300.0)
	// The structuring element is itself a Mat backed by C memory; close it
	// once the dilation is done (previously it leaked per frame).
	kernel := gocv.GetStructuringElement(0, image.Point{3, 3})
	gocv.Dilate(processedImg, &processedImg, kernel)
	kernel.Close()
	// regionOfInterest allocates a fresh Mat; close the intermediates so the
	// per-frame C memory is released (previously both Mats leaked).
	masked := regionOfInterest(processedImg, vertices)
	processedImg.Close()
	lane := findLane(masked)
	masked.Close()
	drawLane(img, lane)
	return img
}
// Uses hough lines and averages the lines with same signed slopes in order
// to best guess the two lines of the current lane.
// Returns lane[0] = {slope, intercept} of the left line and lane[1] of the
// right line (positive slope = right lane in image coordinates, since y
// grows downward).
func findLane(img gocv.Mat) [][]float64 {
	lines := gocv.NewMat()
	// Mats wrap C-allocated memory and must be closed explicitly
	// (previously this Mat was never released).
	defer lines.Close()
	gocv.HoughLinesPWithParams(img, &lines, 1.0, math.Pi/180.0, 100, 175.0, 5.0)
	right, left := 0, 0
	lane := [][]float64{{0, 0}, {0, 0}}
	if !lines.Empty() {
		for i := 0; i < lines.Rows(); i++ {
			// NOTE(review): indexing (0, i) while iterating Rows() looks
			// transposed - gocv examples access HoughLinesP output with
			// GetVeciAt(i, 0). Confirm against the gocv version in use.
			v := lines.GetVeciAt(0, i)
			// Slope/intercept of the segment; a perfectly vertical segment
			// yields an infinite slope and NaN intercept, which drawLane
			// filters out.
			slope := (float64(v[1]) - float64(v[3])) / (float64(v[0]) - float64(v[2]))
			intercept := float64(v[1]) - slope*float64(v[0])
			if slope > 0 {
				right++
				lane[1][0] += slope
				lane[1][1] += intercept
			} else {
				left++
				lane[0][0] += slope
				lane[0][1] += intercept
			}
		}
		// Average the accumulated parameters. A zero count produces NaN,
		// which drawLane treats as "no line detected".
		lane[0][0] = lane[0][0] / float64(left)
		lane[0][1] = lane[0][1] / float64(left)
		lane[1][0] = lane[1][0] / float64(right)
		lane[1][1] = lane[1][1] / float64(right)
	}
	return lane
}
// Draws the lane lines on the given image
func drawLane(img gocv.Mat, lane [][]float64) {
if !math.IsNaN(lane[0][0]) && !math.IsNaN(lane[0][1]) && lane[0][0] != 0 && lane[0][1] != 0 {
gocv.Line(
&img,
image.Point{int(0), int(lane[0][1])},
image.Point{int((275 - lane[0][1]) / lane[0][0]), int(275)},
color.RGBA{R: 0, G: 255, B: 0, A: 255},
5,
)
}
if !math.IsNaN(lane[1][0]) && !math.IsNaN(lane[1][1]) && lane[1][0] != 0 && lane[1][1] != 0 {
gocv.Line(
&img,
image.Point{int((275 - lane[1][1]) / lane[1][0]), int(275)},
image.Point{int(800), int(lane[1][0]*800.0 + lane[1][1])},
color.RGBA{R: 0, G: 255, B: 0, A: 255},
5,
)
}
} | image.go | 0.723602 | 0.524334 | image.go | starcoder |
package types
import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// Period describes a recurring interval: either one of the named keywords
// below, or an arbitrary Go duration (Period == EveryDuration).
type Period struct {
	Period   string        // one of the Every* constants below
	Duration time.Duration `json:",omitempty"` // only meaningful when Period == EveryDuration
}

// Compile-time check that *Period satisfies pflag.Value.
var _ pflag.Value = (*Period)(nil)

// Canonical period keywords.
const (
	EveryWeek     = "everyWeek"
	EveryDay      = "everyDay"
	EveryTwoWeeks = "everyTwoWeeks"
	EveryMonth    = "everyMonth"
	EveryDuration = "everyDuration"
)

// String returns the period keyword, satisfying pflag.Value.
// NOTE(review): for EveryDuration this returns only "everyDuration" and
// drops the duration value - confirm round-tripping isn't required here.
func (p *Period) String() string {
	return p.Period
}

// Type returns the pflag value type name.
func (p *Period) Type() string {
	return "period"
}
// Set parses in as a period keyword or a Go duration, satisfying pflag.Value.
// Matching is case-insensitive; unrecognized keywords fall through to
// time.ParseDuration.
func (p *Period) Set(in string) error {
	switch strings.ToLower(in) {
	// The input is lowercased above, so the mixed-case canonical constants
	// ("everyDay", ...) could never match directly; compare against their
	// lowercased forms so String() output round-trips through Set.
	case strings.ToLower(EveryDay), "d", "day", "daily":
		p.Period = EveryDay
	case strings.ToLower(EveryWeek), "w", "week", "weekly":
		p.Period = EveryWeek
	case strings.ToLower(EveryTwoWeeks), "2weeks", "biweekly", "bi-weekly":
		p.Period = EveryTwoWeeks
	case strings.ToLower(EveryMonth), "m", "month", "monthly":
		p.Period = EveryMonth
	default:
		Duration, err := time.ParseDuration(in)
		if err != nil {
			// Keep this message in sync with the cases above; it previously
			// advertised "workday", which no case accepts.
			return errors.New(`period must be "daily", "weekly", "biweekly", "monthly", or a valid go duration`)
		}
		p.Period = EveryDuration
		p.Duration = Duration
	}
	return nil
}
// ForNumber returns the start of the num'th period after beginning.
// NOTE(review): for EveryDuration the num argument is ignored and beginning
// is merely rounded to a Duration boundary - repeated calls do not advance,
// which makes ForTime below loop forever for duration periods. Confirm
// whether that is intended.
func (p *Period) ForNumber(beginning Time, num int) Time {
	days, months := 0, 0
	switch p.Period {
	case EveryDuration:
		// Subtracting half the duration before rounding floors to the
		// containing Duration boundary rather than rounding to nearest.
		reduced := beginning.Add(-p.Duration / 2)
		return NewTime(reduced.Round(p.Duration))
	case EveryDay:
		days = 1 * num
	case EveryWeek:
		days = 7 * num
	case EveryTwoWeeks:
		days = 14 * num
	case EveryMonth:
		months = 1 * num
	}
	return NewTime(beginning.AddDate(0, months, days))
}

// ForTime returns the zero-based index of the period (relative to beginning)
// that contains forTime, together with that period's start. It returns
// (-1, zero Time) when forTime precedes beginning.
// NOTE(review): this loops without bound; a Period whose ForNumber does not
// advance (zero value, or EveryDuration - see above) never terminates.
func (p *Period) ForTime(beginning, forTime Time) (int, Time) {
	if forTime.Before(beginning.Time) {
		return -1, Time{}
	}
	last := beginning
	for n := 0; ; n++ {
		// beginning is the start of period n; once it passes forTime, the
		// previous period (n-1, starting at last) is the containing one.
		if forTime.Before(beginning.Time) {
			return n - 1, last
		}
		last = beginning
		beginning = p.ForNumber(beginning, 1)
	}
}

// AverageDuration returns the mean length of one period. A month is taken as
// 730.08 hours (365.04/12 days). An unrecognized Period yields 0 via the map
// zero value. Note the lookup map is rebuilt on every call.
func (p *Period) AverageDuration() time.Duration {
	return map[string]time.Duration{
		EveryDuration: p.Duration,
		EveryDay:      24 * time.Hour,
		EveryWeek:     7 * 24 * time.Hour,
		EveryTwoWeeks: 2 * 7 * 24 * time.Hour,
		EveryMonth:    time.Duration(730.08 * float64(time.Hour)),
	}[p.Period]
}
package rf
import (
"bytes"
"fmt"
"io/ioutil"
"math"
"github.com/wcharczuk/go-chart"
)
// Basic RF calculations

// FrequencyToWavelength calculates a wavelength from a frequency.
// C is a package-level constant - presumably the speed of light; units
// follow whatever C is declared in.
func FrequencyToWavelength(freq Frequency) Wavelength {
	return Wavelength(C / freq)
}

// WavelengthToFrequency calculates a frequency from a wavelength
func WavelengthToFrequency(wavelength Wavelength) Frequency {
	return Frequency(C / wavelength)
}

// Power Decibel helpers
// See https://en.wikipedia.org/wiki/Decibel#Power_quantities

// DecibelMilliVoltToMilliWatt converts dBm to mW
// Note that this is power decibels (10log10).
// NOTE(review): "MilliVolt" in the name is a misnomer - dBm is
// decibel-milliwatts - but renaming would break callers.
func DecibelMilliVoltToMilliWatt(dbm float64) float64 {
	return math.Pow(10, dbm/10)
}

// MilliWattToDecibelMilliVolt converts mW to dBm
// Note that this is power decibels (10log10).
func MilliWattToDecibelMilliVolt(mw float64) float64 {
	return 10 * math.Log10(mw)
}
// Distance and Radius calculations

// CalculateDistance calculates the distance between two latitude and longitudes
// Using the haversine (flat earth) formula
// See: http://www.movable-type.co.uk/scripts/latlong.html
// lat/lng are in degrees; radius sets the units of the result.
func CalculateDistance(lat1, lng1, lat2, lng2, radius float64) Distance {
	// Convert degrees to radians (π is a package-level constant).
	φ1, λ1 := lat1/180*π, lng1/180*π
	φ2, λ2 := lat2/180*π, lng2/180*π
	Δφ, Δλ := math.Abs(φ2-φ1), math.Abs(λ2-λ1)
	// Haversine: a is the square of half the chord length between the points.
	a := math.Pow(math.Sin(Δφ/2), 2) + math.Cos(φ1)*math.Cos(φ2)*math.Pow(math.Sin(Δλ/2), 2)
	// c is the angular distance in radians.
	c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
	d := radius * c
	return Distance(d)
}

// CalculateDistanceLOS calculates the approximate Line of Sight distance between two lat/lon/alt points
// This achieves this by wrapping the haversine formula with a flat-earth approximation for height
// difference. This will be very inaccurate with larger distances.
// TODO: surely there is a better (ie. not written by me) algorithm for this
func CalculateDistanceLOS(lat1, lng1, alt1, lat2, lng2, alt2 float64) Distance {
	// Calculate average and delta heights (wrt. earth radius)
	// R is a package constant - presumably the earth radius; its units must
	// match alt1/alt2.
	h := R + (alt1+alt2)/2
	Δh := math.Abs(alt2 - alt1)
	// Compute distance at average of altitudes
	d := CalculateDistance(lat1, lng1, lat2, lng2, h)
	// Apply transformation for altitude difference
	// (Pythagoras on surface distance vs. altitude delta).
	los := math.Sqrt(math.Pow(float64(d), 2) + math.Pow(Δh, 2))
	return Distance(los)
}

// FieldDBToAbs Converts field attenuation (20log10) to absolute values
func (a *Attenuation) FieldDBToAbs() float64 {
	return math.Pow(10, float64(*a)/20)
}

// FieldAbsToDB Converts an absolute field attenuation (20log10) to decibels
func FieldAbsToDB(abs float64) Attenuation {
	return Attenuation(20 * math.Log10(abs))
}
// Smooth halves the resolution of data by averaging adjacent pairs of
// samples. The result has len(data)/2 elements; for odd-length input the
// trailing unpaired sample is dropped.
func Smooth(data []float64) []float64 {
	smoothed := make([]float64, len(data)/2)
	for i := range smoothed {
		// Both data[i*2] and data[i*2+1] are always in range: i < len(data)/2
		// implies i*2+1 <= len(data)-1. The previous guard compared against
		// len(smoothed), which incorrectly skipped the averaging for the
		// second half of the output.
		smoothed[i] = (data[i*2] + data[i*2+1]) / 2
	}
	return smoothed
}
// SmoothN applies Smooth n times, halving the sample count on each pass.
func SmoothN(n int, data []float64) []float64 {
	out := data
	for pass := 0; pass < n; pass++ {
		out = Smooth(out)
	}
	return out
}
// Convert terrain between two points of set heights into distances from the path between those points.
// p1/p2 are the endpoint heights, d the ground distance, terrain the sampled
// heights between them. Returns the per-sample spacing (Δd), height step
// (Δh), the path angle θ and the clearance of each sample from the path.
func TerrainToPath(p1, p2 float64, d Distance, terrain []float64) (Δd, Δh, θ float64, diffs []float64) {
	height := (p2 - p1)
	// NOTE(review): this looks like it should be math.Asin (or math.Atan2 as
	// used by TerrainToPathXY below); math.Sin of height/d is not the path
	// elevation angle - confirm before relying on this function.
	θ = math.Sin(height / float64(d))
	dist := math.Cos(θ) * float64(d)
	Δh = height / float64(len(terrain)-1)
	Δd = dist / float64(len(terrain)-1)
	diffs = make([]float64, len(terrain))
	// NOTE(review): debug output in library code - consider removing these
	// Printf calls (fmt would then become an unused import in this file).
	fmt.Printf("height: %.4f dist: %.4f θ: %.4f Δh: %.4f Δd: %.4f\n", height, dist, θ, Δh, Δd)
	for i, v := range terrain {
		// h is the path height at sample i; d is the terrain's vertical
		// offset from it; nh projects that offset perpendicular to the path.
		h := p1 + float64(i)*Δh
		d := v - h
		nh := math.Cos(θ) * d
		fmt.Printf("Slice %d dist: %.4f height: %.4f terrain: %.4f diff: %.4f normalised: %.4f\n", i, float64(i)*Δd, h, v, d, nh)
		diffs[i] = nh
	}
	return Δd, Δh, θ, diffs
}

// TerrainToPathXY Converts terrain between two points of set heights into distances from the path between those points.
// The returned x/y are the terrain samples expressed in a coordinate frame
// whose x axis runs along the straight line from p1 to p2; d2 is the length
// of that line.
func TerrainToPathXY(p1, p2 float64, d Distance, terrain []float64) (x, y []float64, d2 float64) {
	height := (p2 - p1)
	// θ is the elevation angle of the path; d2 its slant length.
	θ := math.Atan2(height, float64(d))
	d2 = math.Sqrt(math.Pow(float64(d), 2) + math.Pow(height, 2))
	Δh := height / float64(len(terrain)-1)
	Δd := float64(d) / float64(len(terrain)-1)
	x = make([]float64, len(terrain))
	y = make([]float64, len(terrain))
	for i, terrainHeight := range terrain {
		// Vertical clearance of the path above the terrain at this sample,
		// then rotate it into the path-aligned frame.
		offsetHeight := float64(i) * Δh
		offsetDist := Δd * float64(i)
		verticalClearance := offsetHeight + p1 - terrainHeight
		transformedX := math.Sin(θ) * verticalClearance
		transformedY := math.Cos(θ) * verticalClearance
		shiftX := offsetDist / math.Cos(θ)
		x[i], y[i] = shiftX-transformedX, -transformedY
	}
	return x, y, d2
}

//UnNormalisePoint reverts a normalised (straight line between p1 and p2) point to a real world point.
// It is the inverse of the transform applied by TerrainToPathXY: rotate the
// path-aligned (x, y) back by the path angle and re-add the p1 height offset.
func UnNormalisePoint(p1, p2 float64, d Distance, x, y float64) (float64, float64) {
	height := (p2 - p1)
	θ := math.Atan2(height, float64(d))
	x1 := math.Cos(θ) * x
	y1 := math.Sin(θ) * x
	x2 := math.Sin(θ) * y
	y2 := math.Cos(θ) * y
	x3 := x1 - x2
	y3 := y1 + y2 + p1
	return x3, y3
}
// GraphBullingtonFigure12 Graphs the terrain impingement calculated using the Bullington Figure 12 method,
// writing the rendered PNG to filename. When normalised is true the plot is
// drawn in the path-aligned frame produced by TerrainToPathXY; otherwise in
// real-world coordinates.
func GraphBullingtonFigure12(filename string, normalised bool, p1, p2 float64, d Distance, terrain []float64) error {
	// Normalise the terrain to the path frame, find the equivalent
	// knife-edge via the Bullington construction, then map the apex back to
	// real-world coordinates for the non-normalised plot.
	x, y, l := TerrainToPathXY(p1, p2, d, terrain)
	θ1, θ2 := findBullingtonFigure12Angles(x, y, Distance(l))
	dist, height := solveBullingtonFigureTwelveDist(θ1, θ2, Distance(l))
	impingementX, impingementY := UnNormalisePoint(p1, p2, d, float64(dist), height)
	// Evenly spaced x positions for the raw terrain samples.
	terrainX := make([]float64, len(terrain))
	for i := range terrain {
		terrainX[i] = float64(d) / float64(len(terrain)) * float64(i)
	}
	graph := chart.Chart{
		Width:  1280,
		Height: 960,
		DPI:    180,
		// NOTE(review): the axis names look swapped - X is labelled "Height"
		// and Y "Distance" while the series plot distance on X.
		XAxis: chart.XAxis{
			Name:      "Height",
			NameStyle: chart.StyleShow(),
			Style:     chart.StyleShow(),
		}, YAxis: chart.YAxis{
			Name:      "Distance",
			NameStyle: chart.StyleShow(),
			Style:     chart.StyleShow(),
		},
	}
	if !normalised {
		graph.Series = []chart.Series{
			chart.ContinuousSeries{
				XValues: []float64{0, float64(d)},
				YValues: []float64{p1, p2},
				Name:    "Line of Sight",
				Style:   chart.StyleShow(),
			}, chart.ContinuousSeries{
				XValues: terrainX,
				YValues: terrain,
				Name:    "Terrain",
				Style:   chart.StyleShow(),
			}, chart.ContinuousSeries{
				XValues: []float64{0, impingementX, float64(d)},
				YValues: []float64{p1, impingementY, p2},
				Name:    "Equivalent Knife Edge",
			},
		}
	} else {
		graph.Series = []chart.Series{
			chart.ContinuousSeries{
				XValues: []float64{0, l},
				YValues: []float64{0, 0},
				Name:    "Line of Sight",
			}, chart.ContinuousSeries{
				XValues: x,
				YValues: y,
				Name:    "Normalised Terrain",
			}, chart.ContinuousSeries{
				XValues: []float64{0, float64(dist), l},
				YValues: []float64{0, height, 0},
				Name:    "Equivalent Knife Edge",
			},
		}
	}
	// Render to memory first so a failed render never truncates the file.
	// NOTE(review): ioutil.WriteFile is deprecated in favour of os.WriteFile
	// on modern Go; left as-is to avoid touching this file's imports.
	buffer := bytes.NewBuffer([]byte{})
	err := graph.Render(chart.PNG, buffer)
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(filename, buffer.Bytes(), 0766)
	if err != nil {
		return err
	}
	return nil
}
package main
import (
"encoding/binary"
"fmt"
"io"
)
// Endianess represents a byte order
type Endianess string
// All supported byte orders
const (
EndianessLittle Endianess = "little" // Little endian (x86)
EndianessBig Endianess = "big" // Big endian (PPC)
)
const MaxUint16 = ^uint16(0)
// SaveModel takes a parsed model and writes it in binary format using a provided byte order
func SaveModel(mesh Mesh, out io.Writer, boType Endianess) error {
var endianess binary.ByteOrder
switch boType {
case EndianessLittle:
endianess = binary.LittleEndian
case EndianessBig:
endianess = binary.BigEndian
default:
return fmt.Errorf("Unknown endianess: %s (supported: big, little)", boType)
}
//TODO Support multiple objects?
object := mesh.Objects[0]
// Chech that the object can fit in a BMB file
if len(mesh.Vertices) >= int(MaxUint16) {
return fmt.Errorf("Model has too many vertex positions (has %d, max is %d)", len(mesh.Vertices), MaxUint16)
}
if len(mesh.VertexNormals) >= int(MaxUint16) {
return fmt.Errorf("Model has too many vertex normals (has %d, max is %d)", len(mesh.VertexNormals), MaxUint16)
}
if len(mesh.TextureCoords) >= int(MaxUint16) {
return fmt.Errorf("Model has too many texture coordinates (has %d, max is %d)", len(mesh.TextureCoords), MaxUint16)
}
if len(object.Faces) >= int(MaxUint16) {
return fmt.Errorf("Model has too many faces (has %d, max is %d)", len(object.Faces), MaxUint16)
}
// Write header
binary.Write(out, endianess, uint16(len(mesh.Vertices)))
binary.Write(out, endianess, uint16(len(mesh.VertexNormals)))
binary.Write(out, endianess, uint16(len(mesh.TextureCoords)))
binary.Write(out, endianess, uint16(len(object.Faces)))
// Write vertices
for _, vertex := range mesh.Vertices {
binary.Write(out, endianess, vertex.X)
binary.Write(out, endianess, vertex.Y)
binary.Write(out, endianess, vertex.Z)
}
// Write normals
for _, normals := range mesh.VertexNormals {
binary.Write(out, endianess, normals.X)
binary.Write(out, endianess, normals.Y)
binary.Write(out, endianess, normals.Z)
}
// Write texture Coordinates
for _, uv := range mesh.TextureCoords {
binary.Write(out, endianess, uv.U)
binary.Write(out, endianess, 1.0-uv.V)
}
// Write faces
for _, face := range object.Faces {
for _, vcombo := range face {
binary.Write(out, endianess, vcombo.Vertex)
binary.Write(out, endianess, vcombo.TexCoord)
binary.Write(out, endianess, vcombo.Normal)
}
}
return nil
} | tools/objconv/binfmt.go | 0.553023 | 0.435121 | binfmt.go | starcoder |
package description
import (
"github.com/uncharted-distil/distil-compute/pipeline"
)
// InferenceStepData provides data for a pipeline description placeholder step,
// which marks the point at which a TA2 should begin pipeline inference.
type InferenceStepData struct {
	inputRefs map[string]DataRef // original argument references keyed by name
	Inputs    []string           // rendered input reference strings
	Outputs   []string           // output method names; defaults to "produce"
}
// NewInferenceStepData creates a InferenceStepData instance with default values.
// NOTE(review): map iteration order is random, so the ordering of Inputs is
// nondeterministic across calls - confirm downstream consumers don't depend
// on a stable order.
func NewInferenceStepData(arguments map[string]DataRef) *InferenceStepData {
	// The previous version never advanced its write index, so every rendered
	// reference after the first overwrote element 0 and the remaining slots
	// stayed empty strings.
	values := make([]string, 0, len(arguments))
	for _, arg := range arguments {
		values = append(values, arg.RefString())
	}
	return &InferenceStepData{
		Inputs:    values,
		Outputs:   []string{"produce"},
		inputRefs: arguments,
	}
}
// GetPrimitive returns nil since there is no primitive associated with a placeholder
// step.
func (s *InferenceStepData) GetPrimitive() *pipeline.Primitive {
	return nil
}

// GetArguments adapts the internal placeholder step argument type to the primitive
// step argument type. Note the internal map is returned directly, so callers
// share (and could mutate) it.
func (s *InferenceStepData) GetArguments() map[string]DataRef {
	return s.inputRefs
}

// GetHyperparameters returns an empty map since inference steps don't
// take hyper parameters.
func (s *InferenceStepData) GetHyperparameters() map[string]interface{} {
	return map[string]interface{}{}
}

// GetOutputMethods returns a list of methods that will be called to generate
// primitive output. These feed into downstream primitives.
func (s *InferenceStepData) GetOutputMethods() []string {
	return s.Outputs
}
// BuildDescriptionStep creates protobuf structures from a pipeline step
// definition.
func (s *InferenceStepData) BuildDescriptionStep() (*pipeline.PipelineDescriptionStep, error) {
// generate arguments entries
inputs := []*pipeline.StepInput{}
for _, v := range s.Inputs {
input := &pipeline.StepInput{
Data: v,
}
inputs = append(inputs, input)
}
// list of methods that will generate output - order matters because the steps are
// numbered
outputs := []*pipeline.StepOutput{}
for _, v := range s.Outputs {
output := &pipeline.StepOutput{
Id: v,
}
outputs = append(outputs, output)
}
// create the pipeline description structure
step := &pipeline.PipelineDescriptionStep{
Step: &pipeline.PipelineDescriptionStep_Placeholder{
Placeholder: &pipeline.PlaceholderPipelineDescriptionStep{
Inputs: inputs,
Outputs: outputs,
},
},
}
return step, nil
} | primitive/compute/description/inference_step_data.go | 0.799677 | 0.535766 | inference_step_data.go | starcoder |
// Utilizes a BSD-3-Clause license. Refer to the included LICENSE file for details.
// Package dlx implements Dancing Links (Algorithm X).
// The algorithm is described in the "Dancing Links" paper by <NAME>
// published in "Millennial Perspectives in Computer Science. P159. Volume 187"
// (2000).
package dlx
// Matrix represents a sparse matrix.
// The zero value of a Matrix is an empty matrix ready to use.
type Matrix struct {
	h         Element    // sentinel root/header element of the circular lists
	o         []*Element // stack of rows in the current partial solution
	solutions [][]string // accumulated solutions, one row-name list per solution
}

// Init initializes the matrix, empty and ready to use.
// All four links of the sentinel point back at itself, forming the empty
// circular doubly-linked structure.
func (m *Matrix) Init() *Matrix {
	m.h.up = &m.h
	m.h.down = &m.h
	m.h.left = &m.h
	m.h.right = &m.h
	m.h.column = &m.h
	m.o = nil
	m.solutions = nil
	return m
}

// New returns a pointer to a newly created and initialzed matrix.
func New() *Matrix { return new(Matrix).Init() }

// Head returns the first Head element from the matrix, or nil if empty.
func (m *Matrix) Head() *Element {
	if m.h.right == &m.h {
		return nil
	}
	return m.h.right
}

// Lazy initialization: a zero-value Matrix is initialized on first use.
func (m *Matrix) lazyInit() {
	if m.h.right == nil {
		m.Init()
	}
}
// Helper function to insert a Head element into the matrix and returns a
// pointer to the element. e is spliced into the header row after at; its
// vertical links form an empty column.
func (m *Matrix) insertHead(e, at *Element) *Element {
	// Positional pointers
	n := at.right
	at.right = e
	e.left = at
	e.right = n
	n.left = e
	e.up = e
	e.down = e
	// Structural pointers: a header is its own column.
	e.matrix = m
	e.column = e
	return e
}

// Helper function to insert a given value into the header of the matrix at the
// given head element.
func (m *Matrix) insertHeadValue(v interface{}, at *Element) *Element {
	return m.insertHead(&Element{Value: v}, at)
}

// PushHead pushes a Head element onto the matrix with the given name and
// returns a pointer to the element. It is appended after the last existing
// header (m.h.left).
func (m *Matrix) PushHead(name string) *Element {
	m.lazyInit()
	head := Head{name, 0}
	return m.insertHeadValue(head, m.h.left)
}

// Inserts an element at the given row and column and returns a pointer to the
// element. atR may be nil for the first element of a new row. atC must be
// the bottom-most element of the column (as PushItem guarantees) so that
// atC.down - and therefore e.column - is the column header.
func (m *Matrix) insertItem(e, atR *Element, atC *Element) *Element {
	if atR == nil {
		e.left = e
		e.right = e
	} else {
		n := atR.right
		atR.right = e
		e.left = atR
		e.right = n
		n.left = e
	}
	ch := atC.down
	atC.down = e
	e.up = atC
	e.down = ch
	ch.up = e
	// Structural pointers
	e.matrix = m
	e.column = ch
	// Update Column Header
	// Utilizes workaround for Go issue 3117 (cannot assign to a field of a
	// struct stored in an interface; the Head is rebuilt instead).
	ch.Value = Head{ch.Value.(Head).name, ch.Value.(Head).size + 1}
	return e
}

// PushItem pushes the given row onto the matrix under the given column head
// element and returns a pointer to the row element. colHead.up is the
// bottom-most element of the column, so the new item is appended last.
func (m *Matrix) PushItem(row, colHead *Element) *Element {
	return m.insertItem(&Element{Value: true}, row, colHead.up)
}
// Finds any solutions within the matrix at the given level. This is the
// recursive core of Knuth's Algorithm X with dancing links: pick a column,
// cover it, try each of its rows in turn, recurse, then backtrack by
// uncovering in reverse order. k is the recursion depth (index into m.o);
// maxSolutions <= 0 means unbounded.
func (m *Matrix) search(k int, maxSolutions int) {
	if maxSolutions > 0 && len(m.solutions) >= maxSolutions {
		return
	}
	// No columns left: every constraint is exactly covered, record m.o as a
	// solution (each row rendered as its space-separated column names).
	if m.Head() == nil {
		solStr := make([]string, len(m.o))
		for i := range m.o {
			// NOTE(review): j is incremented but never read - dead counter.
			j := 0
			rowStr := m.o[i].column.Value.(Head).name
			for e := m.o[i].Right(); e != m.o[i]; e = e.Right() {
				j++
				rowStr += (" " + e.column.Value.(Head).name)
			}
			solStr[i] = rowStr
		}
		m.solutions = append(m.solutions, solStr)
		return
	}
	c := m.getColumn()
	m.cover(c)
	for r := c.Down(); r != c; r = r.Down() {
		// Tentatively add row r to the partial solution and cover every
		// other column it intersects.
		m.o = append(m.o, r)
		for j := r.Right(); j != r; j = j.Right() {
			m.cover(j.column)
		}
		m.search(k+1, maxSolutions)
		// Backtrack: pop row r and uncover its columns in reverse order.
		r = m.o[k]
		m.o[k] = nil
		m.o = m.o[0 : len(m.o)-1]
		c = r.column
		for j := r.Left(); j != r; j = j.Left() {
			m.uncover(j.column)
		}
	}
	m.uncover(c)
	return
}

// Solve invokes a search for solutions from the root (level 0) and returns
// a slice of all found solutions as a slice of strings denoting valid
// constraint options that exactly covers the problem space.
func (m *Matrix) Solve(maxSolutions int) [][]string {
	m.search(0, maxSolutions)
	return m.solutions
}
// Returns a pointer to the head element of the column with the smallest size
// (Knuth's minimum-branching heuristic), or nil when the matrix has no
// columns.
func (m *Matrix) getColumn() *Element {
	var c *Element
	// Start from the maximum uint64 so any real column size wins; the
	// previous code spelled this as a raw 20-digit literal.
	s := ^uint64(0)
	for ce := m.Head(); ce != nil; ce = ce.Right() {
		ces := ce.Value.(Head).size
		if ces < s {
			c = ce
			s = ces
		}
	}
	return c
}
// The cover operation of algorithm X: unlink column c from the header row,
// then unlink (vertically) every row that has an element in c, decrementing
// the affected column sizes. Links inside removed elements are preserved so
// uncover can splice them back.
func (m *Matrix) cover(c *Element) {
	c.right.left = c.left
	c.left.right = c.right
	for i := c.Down(); i != c; i = i.Down() {
		for j := i.Right(); j != i; j = j.Right() {
			j.down.up = j.up
			j.up.down = j.down
			j.column.Value = Head{j.column.Value.(Head).name, j.column.Value.(Head).size - 1}
		}
	}
}

// The uncover operation of algorithm X: the exact inverse of cover, restoring
// rows bottom-up / right-to-left and finally relinking c into the header row.
func (m *Matrix) uncover(c *Element) {
	for i := c.Up(); i != c; i = i.Up() {
		for j := i.Left(); j != i; j = j.Left() {
			j.column.Value = Head{j.column.Value.(Head).name, j.column.Value.(Head).size + 1}
			j.down.up = j
			j.up.down = j
		}
	}
	c.right.left = c
	c.left.right = c
}
// Element is an element of a matrix. Contains a Value interface{}.
type Element struct {
	// Pointers in the matrix of elements.
	// Column points to the column head.
	up, down, left, right, column *Element
	// The matrix to which the element belongs.
	matrix *Matrix
	Value interface{}
}

// Up returns the above matrix element or nil.
// Each accessor hides the circular sentinel: reaching the root yields nil.
func (e *Element) Up() *Element {
	if p := e.up; e.matrix != nil && p != &e.matrix.h {
		return p
	}
	return nil
}

// Down returns the below matrix element or nil.
func (e *Element) Down() *Element {
	if p := e.down; e.matrix != nil && p != &e.matrix.h {
		return p
	}
	return nil
}

// Left returns the left matrix element or nil.
func (e *Element) Left() *Element {
	if p := e.left; e.matrix != nil && p != &e.matrix.h {
		return p
	}
	return nil
}

// Right returns the right matrix element or nil.
func (e *Element) Right() *Element {
	if p := e.right; e.matrix != nil && p != &e.matrix.h {
		return p
	}
	return nil
}

// Head represents a header element for the matrix: a named column plus the
// count of ones it currently contains.
type Head struct {
	name string
	size uint64
}

// Name returns the column name.
func (h Head) Name() string {
	return h.name
}

// Size returns the column size.
func (h Head) Size() uint64 {
	return h.size
}
package collection
// Set is the classic `set` data structure, backed by the package's Map type
// with empty-struct values (zero bytes per entry).
type Set[T comparable] Map[T, struct{}]

// Add will add the element to the set (a no-op if already present).
func (s Set[T]) Add(element T) {
	s[element] = struct{}{}
}

// Clear will delete all the set elements. The underlying map is retained, so
// other references to the same set observe the removal.
func (s Set[T]) Clear() {
	for k := range s {
		delete(s, k)
	}
}

// Delete will remove the element from the set
func (s Set[T]) Delete(elem T) {
	delete(s, elem)
}
// Difference returns a new set containing the elements of the receiver that
// are not present in other.
func (s Set[T]) Difference(other Set[T]) Set[T] {
	diff := Set[T]{}
	for elem := range s {
		if other.Has(elem) {
			continue
		}
		diff.Add(elem)
	}
	return diff
}
// Has checks if the element is in the set.
// Receiver renamed from m to s for consistency with every other Set method.
func (s Set[T]) Has(element T) bool {
	_, ok := s[element]
	return ok
}
// Intersection will return a new set with all the elements that are part of both the sets
func (s Set[T]) Intersection(other Set[T]) Set[T] {
	result := Set[T]{}
	// Iterate the smaller set so the work is proportional to min(|s|, |other|).
	if s.Len() > other.Len() {
		s, other = other, s
	}
	for elem := range s {
		if other.Has(elem) {
			result.Add(elem)
		}
	}
	return result
}

// IsEmpty checks if the set is empty
func (s Set[T]) IsEmpty() bool {
	return len(s) == 0
}

// Len is an alias to `len`
func (s Set[T]) Len() int {
	return len(s)
}

// ToVec will collect the elements of the set to a `Vec`.
// Element order is unspecified (map iteration order).
func (s Set[T]) ToVec() Vec[T] {
	v := make(Vec[T], 0, s.Len())
	for e := range s {
		v = append(v, e)
	}
	return v
}

// Union returns a new set with all the the elements of both sets
func (s Set[T]) Union(other Set[T]) Set[T] {
	result := Set[T]{}
	for e := range s {
		result.Add(e)
	}
	for e := range other {
		result.Add(e)
	}
	return result
}

// NewSet returns a new set from the list of values
func NewSet[T comparable](values ...T) Set[T] {
	set := Set[T]{}
	for _, v := range values {
		set.Add(v)
	}
	return set
}

// NewSetFromIter will collect all the values of the iterator to a set.
// The iterator is drained until its second return value is false.
func NewSetFromIter[T comparable](it Iterator[T]) Set[T] {
	set := Set[T]{}
	for v, ok := it(); ok; v, ok = it() {
		set.Add(v)
	}
	return set
}
package main
/*
1. think of pre-processing steps: sort, arrange the data, index the data, prefix sums!
2. split into small functions which you will implement later
3. solution scanning and offer alternatives (always talk about complexity in space and time)
1. pattern matching (find similar problems)
2. simplify and generalize (start with a simpler problem)
3. iterate through programming paradigms (greedy, divide and conquer, dynamic programming)
4. iterate through all data structures (lists, arrays, stacks, queues, heap, hash, tree, trie, bloom filter, union_find)
5. try free primitive and see if you make progress (sorting, bfs, dfs, strongly connected components, shortest path)
4. BUD optimisation:
1. bottleneck
2. unnecessary work
3. duplicate work
5. identify pain points: array indices, loop termination conditions.
*/
import "fmt"
import "math"
// Solution finds a dominator of A: a value occurring in more than half of
// the positions. It returns an index at which the dominator occurs (the last
// one), or -1 when no dominator exists. O(n) time, O(n) worst-case space for
// the pairing stack.
func Solution(A []int) int {
	// Phase 1: cancel out pairs of differing values on a stack. If a
	// dominator exists it must survive as the value left on top.
	st := empty
	for _, v := range A {
		st = st.push(v)
		partial, top, second := st.doublePeek()
		if !partial && top != second {
			_, _, st = st.pop()
			_, _, st = st.pop()
		}
	}
	noCandidate, candidate := st.peek()
	if noCandidate {
		return -1
	}
	// Phase 2: verify the surviving candidate really occurs in a strict
	// majority of positions, remembering its last index.
	occurrences, lastIndex := 0, -1
	for i, v := range A {
		if v == candidate {
			occurrences++
			lastIndex = i
		}
	}
	if occurrences >= int(math.Floor(float64(len(A))/2))+1 {
		return lastIndex
	}
	return -1
}

// node is an immutable singly-linked stack cell.
type node struct {
	val  int
	next *node
}

// empty is the shared sentinel marking the bottom of every stack.
var empty = &node{0, nil}

// debug renders the stack top-to-bottom, terminated by "|".
func (n *node) debug() string {
	if n == empty {
		return "|"
	}
	return fmt.Sprintf("%d->%s", n.val, n.next.debug())
}

// peek reports whether the stack is empty and, if not, its top value.
func (n *node) peek() (isEmpty bool, val int) {
	if n != empty {
		return false, n.val
	}
	return true, 0
}

// pop returns the top value and the remaining stack; popping the empty
// stack yields (true, 0, empty).
func (n *node) pop() (isEmpty bool, val int, head *node) {
	if n != empty {
		return false, n.val, n.next
	}
	return true, 0, empty
}

// push returns a new stack with val on top; the receiver is unmodified.
func (n *node) push(val int) *node {
	return &node{val, n}
}

// doublePeek returns the top two values. isPartial is true when fewer than
// two elements are available (h2, or both, are then zero).
func (n *node) doublePeek() (isPartial bool, h1, h2 int) {
	topEmpty, h1 := n.peek()
	if topEmpty {
		return true, 0, 0
	}
	secondEmpty, h2 := n.next.peek()
	if secondEmpty {
		return true, h1, 0
	}
	return false, h1, h2
}

func main() {}
package query
import (
"fmt"
"reflect"
"sort"
"strings"
"time"
"github.com/fishedee/tools/decimal"
"github.com/fishedee/tools/kind"
)
// SelectReflect 反射实现
// SelectReflect maps every element of data through selectFuctor and returns
// the results in order. The function is fully generic, so the previous
// reflection-based implementation was unnecessary overhead; calling the
// functor directly is behaviorally identical and far cheaper.
func SelectReflect[T, R any](data []T, selectFuctor func(a T) R) []R {
	result := make([]R, len(data))
	for i, v := range data {
		result[i] = selectFuctor(v)
	}
	return result
}
// WhereReflect 反射
// WhereReflect returns the elements of data for which whereFuctor reports
// true, preserving order. As with SelectReflect, the generic signature makes
// the former reflection round-trip pointless; a direct call is equivalent.
// The result is always non-nil, matching the reflect.MakeSlice behavior of
// the original implementation.
func WhereReflect[T any](data []T, whereFuctor func(T) bool) []T {
	result := make([]T, 0)
	for _, v := range data {
		if whereFuctor(v) {
			result = append(result, v)
		}
	}
	return result
}
// SortReflect is the reflection-based implementation of Sort. It returns a
// stably sorted copy of data according to sortType, a comma-separated list of
// "field [asc|desc]" clauses (e.g. "Age desc, Name"). The caller's slice is
// not modified.
func SortReflect[T any](data []T, sortType string) []T {
	// Copy the input so the caller's slice is left untouched.
	dataValue := reflect.ValueOf(data)
	dataType := dataValue.Type()
	dataElemType := dataType.Elem()
	dataValueLen := dataValue.Len()
	dataResult := reflect.MakeSlice(dataType, dataValueLen, dataValueLen)
	reflect.Copy(dataResult, dataValue)
	// Sort the copy: one comparator per clause, combined lexicographically.
	targetCompares := getQueryExtractAndCompares(dataElemType, sortType)
	targetCompare := combineQueryCompare(targetCompares)
	result := dataResult.Interface()
	swapper := reflect.Swapper(result)
	SortInternal(dataValueLen, func(i int, j int) int {
		left := dataResult.Index(i)
		right := dataResult.Index(j)
		return targetCompare(left, right)
	}, swapper)
	return result.([]T)
}
// getQueryExtractAndCompares builds one comparator per clause in sortTypeStr,
// in clause order. Descending clauses are realised by swapping the argument
// order of the ascending comparator.
func getQueryExtractAndCompares(dataType reflect.Type, sortTypeStr string) []queryCompare {
	sortName, sortType := analyseSort(sortTypeStr)
	targetCompare := []queryCompare{}
	for index, singleSortName := range sortName {
		singleSortType := sortType[index]
		singleCompare := getQueryExtractAndCompare(dataType, singleSortName)
		if !singleSortType {
			// Descending: wrap the comparator with its arguments reversed.
			singleTempCompare := singleCompare
			singleCompare = func(left reflect.Value, right reflect.Value) int {
				return singleTempCompare(right, left)
			}
		}
		targetCompare = append(targetCompare, singleCompare)
	}
	return targetCompare
}

// getQueryExtractAndCompare builds a comparator for one field name: it
// extracts the named field from both operands, then compares the field values.
func getQueryExtractAndCompare(dataType reflect.Type, name string) queryCompare {
	fieldType, extract := GetQueryExtract(dataType, name)
	compare := getQueryCompare(fieldType)
	return func(left reflect.Value, right reflect.Value) int {
		return compare(extract(left), extract(right))
	}
}
// getQueryCompare returns a three-way comparator (-1/0/1) for values of
// fieldType. Supported kinds: bool (false < true), signed ints, unsigned
// ints, floats, strings (with decimal.Decimal strings compared numerically
// via Cmp), and time.Time. Any other type panics.
func getQueryCompare(fieldType reflect.Type) queryCompare {
	typeKind := kind.GetTypeKind(fieldType)
	if typeKind == kind.TypeKind.BOOL {
		return func(left reflect.Value, right reflect.Value) int {
			leftBool := left.Bool()
			rightBool := right.Bool()
			if leftBool == rightBool {
				return 0
			} else if !leftBool {
				return -1
			} else {
				return 1
			}
		}
	} else if typeKind == kind.TypeKind.INT {
		return func(left reflect.Value, right reflect.Value) int {
			leftInt := left.Int()
			rightInt := right.Int()
			if leftInt < rightInt {
				return -1
			} else if leftInt > rightInt {
				return 1
			} else {
				return 0
			}
		}
	} else if typeKind == kind.TypeKind.UINT {
		return func(left reflect.Value, right reflect.Value) int {
			leftUint := left.Uint()
			rightUint := right.Uint()
			if leftUint < rightUint {
				return -1
			} else if leftUint > rightUint {
				return 1
			} else {
				return 0
			}
		}
	} else if typeKind == kind.TypeKind.FLOAT {
		return func(left reflect.Value, right reflect.Value) int {
			leftFloat := left.Float()
			rightFloat := right.Float()
			if leftFloat < rightFloat {
				return -1
			} else if leftFloat > rightFloat {
				return 1
			} else {
				return 0
			}
		}
	} else if typeKind == kind.TypeKind.STRING {
		// decimal.Decimal has a string representation but must be compared
		// numerically, not lexicographically.
		if fieldType == reflect.TypeOf(decimal.Decimal("")) {
			return func(left reflect.Value, right reflect.Value) int {
				leftDecimal := left.Interface().(decimal.Decimal)
				rightDecimal := right.Interface().(decimal.Decimal)
				return leftDecimal.Cmp(rightDecimal)
			}
		}
		return func(left reflect.Value, right reflect.Value) int {
			leftString := left.String()
			rightString := right.String()
			if leftString < rightString {
				return -1
			} else if leftString > rightString {
				return 1
			} else {
				return 0
			}
		}
	} else if typeKind == kind.TypeKind.STRUCT && fieldType == reflect.TypeOf(time.Time{}) {
		return func(left reflect.Value, right reflect.Value) int {
			leftTime := left.Interface().(time.Time)
			rightTime := right.Interface().(time.Time)
			if leftTime.Before(rightTime) {
				return -1
			} else if leftTime.After(rightTime) {
				return 1
			} else {
				return 0
			}
		}
	} else {
		panic(fieldType.Name() + " can not compare")
	}
}
// QueryExtract pulls a sort/join/group key out of an element value.
type QueryExtract func(reflect.Value) reflect.Value

// GetQueryExtract resolves field name on dataType and returns the field's
// type together with an extractor for it. The special name "." means "the
// element itself". Panics if the field does not exist on dataType.
func GetQueryExtract(dataType reflect.Type, name string) (reflect.Type, QueryExtract) {
	if name == "." {
		return dataType, func(v reflect.Value) reflect.Value {
			return v
		}
	}
	field, ok := kind.GetFieldByName(dataType, name)
	if !ok {
		panic(dataType.Name() + " has not name " + name)
	}
	fieldIndex := field.Index
	fieldType := field.Type
	return fieldType, func(v reflect.Value) reflect.Value {
		return v.FieldByIndex(fieldIndex)
	}
}
// analyseSort parses a sort specification such as "Name desc, Age" into two
// parallel slices: the field names (result1) and, for each, whether the order
// is ascending (result2). A missing direction defaults to ascending and the
// comparison with "asc" is case-insensitive.
//
// Robustness fix: blank clauses (an empty sortType, "a,,b", or a trailing
// comma) are now skipped; previously indexing an empty token slice panicked.
func analyseSort(sortType string) (result1 []string, result2 []bool) {
	for _, clause := range strings.Split(sortType, ",") {
		// Fields splits on whitespace and drops empty tokens.
		fields := strings.Fields(clause)
		if len(fields) == 0 {
			// Nothing to sort by in this clause; skip it.
			continue
		}
		ascending := true
		if len(fields) >= 2 {
			ascending = strings.EqualFold(fields[1], "asc")
		}
		result1 = append(result1, fields[0])
		result2 = append(result2, ascending)
	}
	return result1, result2
}
type queryCompare func(reflect.Value, reflect.Value) int
func combineQueryCompare(targetCompare []queryCompare) queryCompare {
return func(left reflect.Value, right reflect.Value) int {
for _, singleCompare := range targetCompare {
compareResult := singleCompare(left, right)
if compareResult < 0 {
return -1
} else if compareResult > 0 {
return 1
}
}
return 0
}
}
// SortInternal stably sorts a sequence of length elements that is described
// only through a three-way comparison function and a swap function.
func SortInternal(length int, lessHandler func(i, j int) int, swapHandler func(i, j int)) {
	sort.Stable(&sortInterface{
		lenHandler:  func() int { return length },
		lessHandler: func(i, j int) bool { return lessHandler(i, j) < 0 },
		swapHandler: swapHandler,
	})
}

// sortInterface adapts three closures to the sort.Interface contract.
type sortInterface struct {
	lenHandler  func() int
	lessHandler func(i int, j int) bool
	swapHandler func(i int, j int)
}

// Len reports the number of elements in the sequence.
func (si *sortInterface) Len() int { return si.lenHandler() }

// Less reports whether element i must sort before element j.
func (si *sortInterface) Less(i int, j int) bool { return si.lessHandler(i, j) }

// Swap exchanges elements i and j.
func (si *sortInterface) Swap(i int, j int) { si.swapHandler(i, j) }
// JoinReflect is the reflection-based implementation of Join. It joins
// leftData and rightData on the fields named in joinType ("leftField =
// rightField"), producing one output element per matching (left, right) pair
// via joinFuctor. joinPlace selects the flavour: "left", "right" or "outer"
// additionally emit unmatched rows paired with a zero-valued counterpart.
func JoinReflect[L, R, LR any](leftData []L, rightData []R, joinPlace, joinType string, joinFuctor func(L, R) LR) []LR {
	// Parse configuration.
	leftJoinType, rightJoinType := analyseJoin(joinType)
	leftDataValue := reflect.ValueOf(leftData)
	leftDataType := leftDataValue.Type()
	leftDataElemType := leftDataType.Elem()
	leftDataValueLen := leftDataValue.Len()
	leftDataJoinType, leftDataJoinExtract := GetQueryExtract(leftDataElemType, leftJoinType)
	rightDataValue := reflect.ValueOf(rightData)
	rightDataType := rightDataValue.Type()
	rightDataElemType := rightDataType.Elem()
	rightDataValueLen := rightDataValue.Len()
	_, rightDataJoinExtract := GetQueryExtract(rightDataElemType, rightJoinType)
	joinFuctorValue := reflect.ValueOf(joinFuctor)
	joinFuctorType := joinFuctorValue.Type()
	resultValue := reflect.MakeSlice(reflect.SliceOf(joinFuctorType.Out(0)), 0, 0)
	// Execute the join.
	emptyLeftValue := reflect.New(leftDataElemType).Elem()
	emptyRightValue := reflect.New(rightDataElemType).Elem()
	joinPlace = strings.Trim(strings.ToLower(joinPlace), " ")
	// Index the right side into per-key chains: mapDataFirst maps a key to
	// the first right index carrying it, nextData links an index forward to
	// the next index with the same key (-1 terminates a chain), and
	// mapDataNext tracks each chain's current tail while building.
	nextData := make([]int, rightDataValueLen)
	mapDataNext := reflect.MakeMapWithSize(reflect.MapOf(leftDataJoinType, reflect.TypeOf(1)), rightDataValueLen)
	mapDataFirst := reflect.MakeMapWithSize(reflect.MapOf(leftDataJoinType, reflect.TypeOf(1)), rightDataValueLen)
	tempValueInt := reflect.New(reflect.TypeOf(1)).Elem()
	for i := 0; i != rightDataValueLen; i++ {
		tempValueInt.SetInt(int64(i))
		fieldValue := rightDataJoinExtract(rightDataValue.Index(i))
		lastNextIndex := mapDataNext.MapIndex(fieldValue)
		if lastNextIndex.IsValid() {
			nextData[int(lastNextIndex.Int())] = i
		} else {
			mapDataFirst.SetMapIndex(fieldValue, tempValueInt)
		}
		nextData[i] = -1
		mapDataNext.SetMapIndex(fieldValue, tempValueInt)
	}
	rightHaveJoin := make([]bool, rightDataValueLen)
	for i := 0; i != leftDataValueLen; i++ {
		leftValue := leftDataValue.Index(i)
		fieldValue := leftDataJoinExtract(leftDataValue.Index(i))
		rightIndex := mapDataFirst.MapIndex(fieldValue)
		if rightIndex.IsValid() {
			// Matching right rows found: emit one output per chain entry.
			j := int(rightIndex.Int())
			for nextData[j] != -1 {
				singleResult := joinFuctorValue.Call([]reflect.Value{leftValue, rightDataValue.Index(j)})[0]
				resultValue = reflect.Append(resultValue, singleResult)
				rightHaveJoin[j] = true
				j = nextData[j]
			}
			singleResult := joinFuctorValue.Call([]reflect.Value{leftValue, rightDataValue.Index(j)})[0]
			resultValue = reflect.Append(resultValue, singleResult)
			rightHaveJoin[j] = true
		} else {
			// No matching right row: left/outer joins emit the left row
			// paired with a zero-valued right element.
			if joinPlace == "left" || joinPlace == "outer" {
				singleResult := joinFuctorValue.Call([]reflect.Value{leftValue, emptyRightValue})[0]
				resultValue = reflect.Append(resultValue, singleResult)
			}
		}
	}
	// right/outer joins additionally emit right rows that never matched.
	if joinPlace == "right" || joinPlace == "outer" {
		for j := 0; j != rightDataValueLen; j++ {
			if rightHaveJoin[j] {
				continue
			}
			singleResult := joinFuctorValue.Call([]reflect.Value{emptyLeftValue, rightDataValue.Index(j)})[0]
			resultValue = reflect.Append(resultValue, singleResult)
		}
	}
	return resultValue.Interface().([]LR)
}
// analyseJoin splits a join specification of the form "leftField = rightField"
// into its two field names, trimming surrounding spaces. The specification
// must contain at least one '='.
func analyseJoin(joinType string) (string, string) {
	fields := strings.Split(joinType, "=")
	return strings.Trim(fields[0], " "), strings.Trim(fields[1], " ")
}
// GroupReflect is the reflection-based implementation of Group: it partitions
// data into groups sharing the same groupType field value and applies
// groupFunctor to each group, in first-occurrence order. When E is itself a
// slice type the per-group results are concatenated and returned through a
// pointer (R = *E); otherwise one E is collected per group (R = []E).
func GroupReflect[T, E any, R *E | []E](data []T, groupType string, groupFunctor func([]T) E) R {
	groupFuctorValue := reflect.ValueOf(groupFunctor)
	groupFuctorType := groupFuctorValue.Type()
	// Parse the input data.
	dataValueLen := reflect.ValueOf(data).Len()
	// Prepare the result container, sized by the input length.
	var resultValue reflect.Value
	resultType := groupFuctorType.Out(0)
	if resultType.Kind() == reflect.Slice {
		resultValue = reflect.MakeSlice(resultType, 0, dataValueLen)
	} else {
		resultValue = reflect.MakeSlice(reflect.SliceOf(resultType), 0, dataValueLen)
	}
	// Run the grouping walk, folding each group's result into resultValue.
	GroupWalkReflect(data, groupType, func(data reflect.Value) {
		singleResult := groupFuctorValue.Call([]reflect.Value{data})[0]
		if singleResult.Kind() == reflect.Slice {
			resultValue = reflect.AppendSlice(resultValue, singleResult)
		} else {
			resultValue = reflect.Append(resultValue, singleResult)
		}
	})
	if resultType.Kind() == reflect.Slice {
		// Slice-returning functor: hand back *E so the result matches R.
		res := reflect.New(resultType).Elem()
		res.Set(resultValue)
		return res.Addr().Interface().(R)
	}
	return resultValue.Interface().(R)
}
// GroupWalkHandler receives one group at a time as a reflect.Value slice.
type GroupWalkHandler func(data reflect.Value)

// GroupWalkReflect partitions data by the groupType field and invokes
// groupWalkHandler once per group, in order of each group's first occurrence.
// Each group is handed over as a non-overlapping sub-slice of one shared
// buffer allocated per call.
func GroupWalkReflect(data interface{}, groupType string, groupWalkHandler GroupWalkHandler) {
	// Parse the input data.
	dataValue := reflect.ValueOf(data)
	dataType := dataValue.Type()
	dataElemType := dataType.Elem()
	dataValueLen := dataValue.Len()
	// Build per-key chains: findMap tracks the latest index seen for each
	// key, nextData links an index forward to the next index with the same
	// key (-1 terminates a chain).
	groupType = strings.Trim(groupType, " ")
	dataFieldType, dataFieldExtract := GetQueryExtract(dataElemType, groupType)
	findMap := reflect.MakeMapWithSize(reflect.MapOf(dataFieldType, reflect.TypeOf(1)), dataValueLen)
	bufferData := reflect.MakeSlice(dataType, dataValueLen, dataValueLen)
	tempValueInt := reflect.New(reflect.TypeOf(1)).Elem()
	nextData := make([]int, dataValueLen)
	for i := 0; i != dataValueLen; i++ {
		fieldValue := dataFieldExtract(dataValue.Index(i))
		lastIndex := findMap.MapIndex(fieldValue)
		if lastIndex.IsValid() {
			nextData[int(lastIndex.Int())] = i
		}
		nextData[i] = -1
		tempValueInt.SetInt(int64(i))
		findMap.SetMapIndex(fieldValue, tempValueInt)
	}
	// Walk the chains: copy each chain contiguously into bufferData, mark
	// visited entries with 0 (safe as a marker because a real link always
	// points at a strictly larger, hence non-zero, index), and hand the
	// resulting sub-slice to the handler.
	k := 0
	for i := 0; i != dataValueLen; i++ {
		j := i
		if nextData[j] == 0 {
			continue
		}
		kbegin := k
		for nextData[j] != -1 {
			nextJ := nextData[j]
			bufferData.Index(k).Set(dataValue.Index(j))
			nextData[j] = 0
			j = nextJ
			k++
		}
		bufferData.Index(k).Set(dataValue.Index(j))
		k++
		nextData[j] = 0
		groupWalkHandler(bufferData.Slice(kbegin, k))
	}
}
// ColumnReflect is the reflection-based implementation of Column: it projects
// the field named by column out of every element of data, preserving order.
func ColumnReflect[T, R any](data []T, column string) []R {
	input := reflect.ValueOf(data)
	n := input.Len()
	fieldType, extract := GetQueryExtract(input.Type().Elem(), strings.Trim(column, " "))
	out := reflect.MakeSlice(reflect.SliceOf(fieldType), n, n)
	for i := 0; i < n; i++ {
		out.Index(i).Set(extract(input.Index(i)))
	}
	return out.Interface().([]R)
}
// ColumnMapReflect is the reflection-based implementation of ColumnMap: it
// indexes data by the field named in column. A "[]" prefix on the column name
// selects multi-value mode (each key maps to all elements carrying it);
// otherwise each key maps to a single element.
func ColumnMapReflect[T any, K comparable, R map[K]T | map[K][]T](data []T, column string) R {
	column = strings.Trim(column, " ")
	if len(column) >= 2 && column[0:2] == "[]" {
		column = column[2:]
		return columnMapReflectSlice(data, column).(R)
	} else {
		return columnMapReflectSingle(data, column).(R)
	}
}

// columnMapReflectSlice builds a map from each distinct column value to the
// slice of all elements carrying that value.
func columnMapReflectSlice(data interface{}, column string) interface{} {
	dataValue := reflect.ValueOf(data)
	dataValueType := dataValue.Type()
	dataType := dataValue.Type().Elem()
	dataLen := dataValue.Len()
	column = strings.Trim(column, " ")
	dataFieldType, dataFieldExtract := GetQueryExtract(dataType, column)
	resultValue := reflect.MakeMapWithSize(reflect.MapOf(dataFieldType, dataValueType), dataLen)
	GroupWalkReflect(data, column, func(group reflect.Value) {
		// Every element of a group shares the key; read it from the first.
		singleResultValue := dataFieldExtract(group.Index(0))
		resultValue.SetMapIndex(singleResultValue, group)
	})
	return resultValue.Interface()
}

// columnMapReflectSingle builds a map from each distinct column value to one
// element. Iterating backwards makes the FIRST occurrence of each key win,
// because earlier elements overwrite later ones.
func columnMapReflectSingle(data interface{}, column string) interface{} {
	dataValue := reflect.ValueOf(data)
	dataType := dataValue.Type().Elem()
	dataLen := dataValue.Len()
	column = strings.Trim(column, " ")
	dataFieldType, dataFieldExtract := GetQueryExtract(dataType, column)
	resultValue := reflect.MakeMapWithSize(reflect.MapOf(dataFieldType, dataType), dataLen)
	for i := dataLen - 1; i >= 0; i-- {
		singleDataValue := dataValue.Index(i)
		singleResultValue := dataFieldExtract(singleDataValue)
		resultValue.SetMapIndex(singleResultValue, singleDataValue)
	}
	return resultValue.Interface()
}
// CombineReflect is the reflection-based implementation of Combine: it zips
// leftData and rightData element-wise through combineFuctor and returns the
// resulting slice. Panics if the two inputs differ in length.
func CombineReflect[L, R, LR any](leftData []L, rightData []R, combineFuctor func(L, R) LR) []LR {
	leftValue := reflect.ValueOf(leftData)
	rightValue := reflect.ValueOf(rightData)
	if leftValue.Len() != rightValue.Len() {
		// Fixed typo in the panic message (was "len dos not equal").
		panic(fmt.Sprintf("len does not equal %v != %v", leftValue.Len(), rightValue.Len()))
	}
	dataLen := leftValue.Len()
	combineFuctorValue := reflect.ValueOf(combineFuctor)
	resultType := combineFuctorValue.Type().Out(0)
	result := reflect.MakeSlice(reflect.SliceOf(resultType), dataLen, dataLen)
	// Reuse one argument slice across all calls to avoid reallocating it.
	args := make([]reflect.Value, 2)
	for i := 0; i != dataLen; i++ {
		args[0] = leftValue.Index(i)
		args[1] = rightValue.Index(i)
		result.Index(i).Set(combineFuctorValue.Call(args)[0])
	}
	return result.Interface().([]LR)
}
package abi
import (
"encoding/hex"
"math/big"
)
const (
	// HashLength is the expected length of the hash in bytes.
	HashLength = 32
	// AddressLength is the expected length of the address in bytes.
	AddressLength = 20
)

const (
	// number of bits in a big.Word (32 or 64, detected at compile time)
	wordBits = 32 << (uint64(^big.Word(0)) >> 63)
	// number of bytes in a big.Word
	wordBytes = wordBits / 8
)

var (
	tt255   = BigPow(2, 255) // 2^255
	tt256   = BigPow(2, 256) // 2^256
	tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1)) // 2^256 - 1: mask for 256-bit wrap-around
	tt63    = BigPow(2, 63) // 2^63
	// MaxBig256 is the largest unsigned 256-bit integer (2^256 - 1).
	MaxBig256 = new(big.Int).Set(tt256m1)
	// MaxBig63 is the largest signed 63-bit-exponent value (2^63 - 1).
	MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1))
)
// BigPow returns a ** b as a big integer.
func BigPow(a, b int64) *big.Int {
	return new(big.Int).Exp(big.NewInt(a), big.NewInt(b), nil)
}
// Hash is a fixed 32-byte value.
type Hash [HashLength]byte

// Address is a fixed 20-byte account address.
type Address [AddressLength]byte

// Bytes gets the byte representation of the underlying hash.
func (h Hash) Bytes() []byte { return h[:] }

// Hex converts a hash to a hex string.
func (h Hash) Hex() string { return Encode(h[:]) }

// Encode encodes b as a hex string with 0x prefix.
func Encode(b []byte) string {
	return "0x" + hex.EncodeToString(b)
}

// BytesToHash converts b to a Hash. When b is longer than HashLength only the
// rightmost bytes are kept; shorter input is left-padded with zeros.
func BytesToHash(b []byte) Hash {
	var h Hash
	h.SetBytes(b)
	return h
}

// SetBytes stores b right-aligned in h, keeping only the rightmost
// HashLength bytes when b is longer.
func (h *Hash) SetBytes(b []byte) {
	if n := len(b); n > HashLength {
		b = b[n-HashLength:]
	}
	copy(h[HashLength-len(b):], b)
}

// BytesToAddress converts b to an Address using the same truncation and
// padding rules as BytesToHash.
func BytesToAddress(b []byte) Address {
	var a Address
	a.SetBytes(b)
	return a
}

// SetBytes stores b right-aligned in a, keeping only the rightmost
// AddressLength bytes when b is longer.
func (a *Address) SetBytes(b []byte) {
	if n := len(b); n > AddressLength {
		b = b[n-AddressLength:]
	}
	copy(a[AddressLength-len(b):], b)
}
// NewU256 encodes x as a 256 bit two's complement number by masking it down
// to 256 bits. This operation is destructive: x itself is modified in place
// and returned.
func NewU256(x *big.Int) *big.Int {
	return x.And(x, tt256m1)
}

// Frequently used big.Int constants, predeclared once to avoid repeated
// allocation. Callers must not mutate them.
var (
	Big1   = big.NewInt(1)
	Big2   = big.NewInt(2)
	Big3   = big.NewInt(3)
	Big0   = big.NewInt(0)
	Big32  = big.NewInt(32)
	Big256 = big.NewInt(256)
	Big257 = big.NewInt(257)
)
// RightPadBytes zero-pads slice on the right up to length l. When the input
// is already at least l bytes long it is returned unchanged (not copied).
func RightPadBytes(slice []byte, l int) []byte {
	if len(slice) >= l {
		return slice
	}
	out := make([]byte, l)
	copy(out, slice)
	return out
}

// LeftPadBytes zero-pads slice on the left up to length l. When the input is
// already at least l bytes long it is returned unchanged (not copied).
func LeftPadBytes(slice []byte, l int) []byte {
	if len(slice) >= l {
		return slice
	}
	out := make([]byte, l)
	copy(out[l-len(slice):], slice)
	return out
}
// PaddedBigBytes encodes a big integer as a big-endian byte slice. The length
// of the slice is at least n bytes; the sign of bigint is ignored (only its
// absolute value is encoded).
func PaddedBigBytes(bigint *big.Int, n int) []byte {
	if bigint.BitLen()/8 >= n {
		return bigint.Bytes()
	}
	ret := make([]byte, n)
	ReadBits(bigint, ret)
	return ret
}

// ReadBits writes the absolute value of bigint as big-endian bytes into buf,
// filling from the least-significant (rightmost) end. If buf is too small,
// the most-significant bytes are dropped.
func ReadBits(bigint *big.Int, buf []byte) {
	i := len(buf)
	for _, d := range bigint.Bits() {
		// Emit each machine word byte-by-byte (least significant byte first),
		// walking backwards through buf so the overall result is big-endian.
		for j := 0; j < wordBytes && i > 0; j++ {
			i--
			buf[i] = byte(d)
			d >>= 8
		}
	}
} | hyperledger/burrow/abi/common.go | 0.693265 | 0.528838 | common.go | starcoder |
package logic
// Operand is the template for operands: anything that can be evaluated to a
// boolean against a caller-supplied context.
type Operand interface {
	Evaluate(ctx interface{}) (bool, error)
}

// AndOperator is the template for boolean AND operators.
type AndOperator struct {
	Operands []Operand
}

// Evaluate performs the evaluation of the boolean AND operator. It returns
// true only when every operand evaluates to true, short-circuiting on the
// first false result or error. An empty operand list evaluates to true.
func (o AndOperator) Evaluate(ctx interface{}) (bool, error) {
	for _, operand := range o.Operands {
		t, err := operand.Evaluate(ctx)
		if err != nil {
			return false, err
		}
		if !t {
			return false, nil
		}
	}
	return true, nil
}

// And generates a new AndOperator with the given set of operands.
func And(operands ...Operand) AndOperator {
	return AndOperator{
		operands,
	}
}

// All generates a new AndOperator with the given set of operands; it is an
// alias for And.
//
// Bug fix: this previously called itself (All) instead of And, recursing
// infinitely and overflowing the stack on any call.
func All(operands ...Operand) AndOperator {
	return And(operands...)
}
// OrOperator is the template for boolean OR operators.
type OrOperator struct {
	Operands []Operand
}

// Evaluate performs the evaluation of the boolean OR operator: it returns
// true as soon as any operand evaluates to true and false when none do,
// short-circuiting on the first true result or error. An empty operand list
// evaluates to false.
func (o OrOperator) Evaluate(ctx interface{}) (bool, error) {
	for _, operand := range o.Operands {
		ok, err := operand.Evaluate(ctx)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}
	return false, nil
}

// Or generates a new OrOperator with the given set of operands.
func Or(operands ...Operand) OrOperator {
	return OrOperator{Operands: operands}
}

// Any generates a new OrOperator with the given set of operands; it is an
// alias for Or.
func Any(operands ...Operand) OrOperator {
	return Or(operands...)
}

// NotOperator is the template for boolean NOT operators.
type NotOperator struct {
	Operand Operand
}

// Evaluate performs the evaluation of the boolean NOT operator, negating the
// wrapped operand's result. The error, if any, is passed through unchanged.
func (o NotOperator) Evaluate(ctx interface{}) (bool, error) {
	result, err := o.Operand.Evaluate(ctx)
	return !result, err
}

// Not generates a new NotOperator with the given operand.
func Not(operand Operand) NotOperator {
	return NotOperator{Operand: operand}
}
// BoolOperand is the Operand version of the boolean native values.
type BoolOperand struct {
	Value bool
}

// Evaluate returns the wrapped boolean value; it never fails.
func (o BoolOperand) Evaluate(ctx interface{}) (bool, error) {
	return o.Value, nil
}

var (
	// True is the wrapper of the native boolean true value.
	True = BoolOperand{Value: true}
	// False is the wrapper of the native boolean false value.
	False = BoolOperand{Value: false}
)
package core
import (
"sync"
"github.com/OneOfOne/cmap/hashers"
)
// shardCount must be a power of 2.
// Higher shardCount will improve concurrency but will consume more memory.
const shardCount = 1 << 8 // 256 shards

// shardMask is the mask we apply to hash functions below.
const shardMask = shardCount - 1

// targetMap is a concurrent safe sharded map to scale on multiple cores.
// It's a fully specialised version of cmap.CMap for our most commonly used types.
type targetMap struct {
	shards []*targetLMap
}

// newTargetMap creates a new targetMap with empty shards.
func newTargetMap() *targetMap {
	cm := &targetMap{
		shards: make([]*targetLMap, shardCount),
	}
	for i := range cm.shards {
		// shardCount doubles as the initial per-shard capacity hint here.
		cm.shards[i] = newTargetLMapSize(shardCount)
	}
	return cm
}

// Set is the equivalent of `map[key] = val`.
// It returns true if the item was inserted, false if it already existed (in which case it won't be inserted)
func (cm *targetMap) Set(key BuildLabel, val *BuildTarget) bool {
	h := hashBuildLabel(key)
	return cm.shards[h&shardMask].Set(key, val)
}

// GetOK is the equivalent of `val, ok := map[key]`.
func (cm *targetMap) GetOK(key BuildLabel) (val *BuildTarget, ok bool) {
	h := hashBuildLabel(key)
	return cm.shards[h&shardMask].GetOK(key)
}

// Values returns a slice of all the current values in the map.
// This is a view that an observer could potentially have had at some point around the calling of this function,
// but no particular consistency guarantees are made: shards are snapshotted
// one at a time, so concurrent writers may be partially reflected.
func (cm *targetMap) Values() BuildTargets {
	ret := BuildTargets{}
	for _, lm := range cm.shards {
		ret = append(ret, lm.Values()...)
	}
	return ret
}

// hashBuildLabel folds together FNV-32 hashes of the label's components;
// the low bits of the result select the shard.
func hashBuildLabel(key BuildLabel) uint32 {
	return hashers.Fnv32(key.Subrepo) ^ hashers.Fnv32(key.PackageName) ^ hashers.Fnv32(key.Name)
}
// targetLMap is a simple sync.RWMutex locked map.
// Used by targetMap internally for sharding.
type targetLMap struct {
m map[BuildLabel]*BuildTarget
l sync.RWMutex
}
// newTargetLMapSize is the equivalent of `m := make(map[BuildLabel]*BuildTarget, cap)`
func newTargetLMapSize(cap int) *targetLMap {
return &targetLMap{
m: make(map[BuildLabel]*BuildTarget, cap),
}
}
// Set is the equivalent of `map[key] = val`.
// It returns true if the item was inserted, false if it already existed (in which case it won't be inserted)
func (lm *targetLMap) Set(key BuildLabel, v *BuildTarget) bool {
lm.l.Lock()
defer lm.l.Unlock()
if _, present := lm.m[key]; present {
return false
}
lm.m[key] = v
return true
}
// GetOK is the equivalent of `val, ok := map[key]`.
func (lm *targetLMap) GetOK(key BuildLabel) (*BuildTarget, bool) {
lm.l.RLock()
defer lm.l.RUnlock()
v, ok := lm.m[key]
return v, ok
}
// Values returns a copy of all the values currently in the map.
func (lm *targetLMap) Values() []*BuildTarget {
lm.l.RLock()
defer lm.l.RUnlock()
ret := make([]*BuildTarget, 0, len(lm.m))
for _, v := range lm.m {
ret = append(ret, v)
}
return ret
} | src/core/cmap_targets.go | 0.735452 | 0.401394 | cmap_targets.go | starcoder |
package trilinear
import (
"errors"
"image"
"image/color"
"math"
"github.com/wayneashleyberry/lut/pkg/colorcube"
"github.com/wayneashleyberry/lut/pkg/parallel"
)
// bpc is the maximum value of an 8-bit colour channel (255), used to
// normalise between [0, 255] ints and [0, 1] floats.
const bpc = 0xff

// Interpolate will apply color transformations to the provided image using
// trilinear interpolation (taking the intensity multiplier into account).
// intensity must lie in [0, 1]: 0 leaves the source colours unchanged, 1
// applies the LUT fully, and intermediate values blend linearly per channel.
// Alpha is preserved.
func Interpolate(src image.Image, cube colorcube.Cube, intensity float64) (image.Image, error) {
	if intensity < 0 || intensity > 1 {
		return src, errors.New("intensity must be between 0 and 1")
	}
	bounds := src.Bounds()
	out := image.NewNRGBA(image.Rectangle{
		image.Point{0, 0},
		image.Point{bounds.Max.X, bounds.Max.Y},
	})
	// k rescales an 8-bit channel value onto the cube's lattice index range.
	k := (float64(cube.Size) - 1) / bpc
	space := &image.NRGBA{}
	model := space.ColorModel()
	// Domain extents of the cube on each axis, used to rescale LUT output.
	dKR := cube.DomainMax[0] - cube.DomainMin[0]
	dKG := cube.DomainMax[1] - cube.DomainMin[1]
	dKB := cube.DomainMax[2] - cube.DomainMin[2]
	width, height := bounds.Dx(), bounds.Dy()
	// Rows are processed in parallel; each worker owns a disjoint band.
	parallel.Line(height, func(start, end int) {
		for y := start; y < end; y++ {
			for x := 0; x < width; x++ {
				px := src.At(x, y)
				c := model.Convert(px).(color.NRGBA)
				rgb := getFromRGBTrilinear(
					int(c.R),
					int(c.G),
					int(c.B),
					cube.Size,
					k,
					cube,
				)
				o := color.NRGBA{}
				// Blend source and LUT-mapped channel values by intensity.
				o.R = uint8(float64(c.R)*(1-intensity) + float64(toIntCh(rgb[0]*dKR))*intensity)
				o.G = uint8(float64(c.G)*(1-intensity) + float64(toIntCh(rgb[1]*dKG))*intensity)
				o.B = uint8(float64(c.B)*(1-intensity) + float64(toIntCh(rgb[2]*dKB))*intensity)
				o.A = c.A
				out.Set(x, y, o)
			}
		}
	})
	return out, nil
}
// trilerp trilinearly interpolates the eight corner values cXYZ at the point
// (x, y, z) inside the axis-aligned box spanned by (x0, y0, z0) and
// (x1, y1, z1): three rounds of linear interpolation, along x, then y, then z.
func trilerp(x, y, z, c000, c001, c010, c011, c100, c101, c110, c111, x0, x1, y0, y1, z0, z1 float64) float64 {
	lerp := func(a, b, t float64) float64 { return a*(1.0-t) + b*t }
	tx := (x - x0) / (x1 - x0)
	ty := (y - y0) / (y1 - y0)
	tz := (z - z0) / (z1 - z0)
	// Reduce along x: 8 corners -> 4 edge values.
	c00 := lerp(c000, c100, tx)
	c01 := lerp(c001, c101, tx)
	c10 := lerp(c010, c110, tx)
	c11 := lerp(c011, c111, tx)
	// Reduce along y: 4 edges -> 2 face values.
	c0 := lerp(c00, c10, ty)
	c1 := lerp(c01, c11, ty)
	// Reduce along z: 2 faces -> the interpolated value.
	return lerp(c0, c1, tz)
}
// clampToChannelSize clamps x to the valid 8-bit channel range [0, bpc].
func clampToChannelSize(x int) int {
	if x < 0 {
		return 0
	}
	if x > bpc {
		return bpc
	}
	return x
}

// toIntCh converts a normalised channel value (nominally in [0, 1]) to its
// clamped 8-bit integer representation.
func toIntCh(x float64) int {
	return clampToChannelSize(int(math.Floor(x * float64(bpc))))
}
// getFromRGBTrilinear samples the colour cube at the fractional lattice
// position corresponding to the 8-bit r/g/b input, trilinearly interpolating
// between neighbouring lattice points. k is the 8-bit-to-lattice scale
// factor ((size-1)/255). Returns the interpolated r, g, b as a 3-element
// slice of normalised floats.
//
// NOTE(review): the lower neighbour is floor(i-1) and the upper floor(i+1),
// i.e. the sampled cell spans two lattice steps rather than the usual
// floor(i)/floor(i)+1 pair — presumably intentional smoothing, but confirm
// against a reference LUT rendering.
func getFromRGBTrilinear(r, g, b, size int, k float64, cube colorcube.Cube) []float64 {
	iR := float64(r) * k
	// Upper neighbour on the red axis, clamped to the lattice bounds.
	var fR1 int
	if iR >= float64(size)-1 {
		fR1 = clampToChannelSize(size - 1)
	} else {
		fR1 = clampToChannelSize(int(math.Floor(iR + 1)))
	}
	// Lower neighbour on the red axis (stays 0 when iR <= 0).
	var fR0 int
	if iR > 0 {
		fR0 = clampToChannelSize(int(math.Floor(iR - 1)))
	}
	iG := float64(g) * k
	// Upper / lower neighbours on the green axis.
	var fG1 int
	if iG >= float64(size)-1 {
		fG1 = clampToChannelSize(size - 1)
	} else {
		fG1 = clampToChannelSize(int(math.Floor(iG + 1)))
	}
	var fG0 int
	if iG > 0 {
		fG0 = clampToChannelSize(int(math.Floor(iG - 1)))
	}
	iB := float64(b) * k
	// Upper / lower neighbours on the blue axis.
	var fB1 int
	if iB >= float64(size)-1 {
		fB1 = clampToChannelSize(size - 1)
	} else {
		fB1 = clampToChannelSize(int(math.Floor(iB + 1)))
	}
	var fB0 int
	if iB > 0 {
		fB0 = clampToChannelSize(int(math.Floor(iB - 1)))
	}
	// Fetch the eight surrounding lattice colours.
	c000 := cube.Get(fR0, fG0, fB0)
	c010 := cube.Get(fR0, fG1, fB0)
	c001 := cube.Get(fR0, fG0, fB1)
	c011 := cube.Get(fR0, fG1, fB1)
	c101 := cube.Get(fR1, fG0, fB1)
	c100 := cube.Get(fR1, fG0, fB0)
	c110 := cube.Get(fR1, fG1, fB0)
	c111 := cube.Get(fR1, fG1, fB1)
	// Interpolate each output channel independently.
	rx := trilerp(
		iR, iG, iB, c000[0], c001[0], c010[0], c011[0],
		c100[0], c101[0], c110[0], c111[0],
		float64(fR0), float64(fR1), float64(fG0), float64(fG1), float64(fB0), float64(fB1),
	)
	gx := trilerp(
		iR, iG, iB, c000[1], c001[1], c010[1], c011[1],
		c100[1], c101[1], c110[1], c111[1],
		float64(fR0), float64(fR1), float64(fG0), float64(fG1), float64(fB0), float64(fB1),
	)
	bx := trilerp(
		iR, iG, iB, c000[2], c001[2], c010[2], c011[2],
		c100[2], c101[2], c110[2], c111[2],
		float64(fR0), float64(fR1), float64(fG0), float64(fG1), float64(fB0), float64(fB1),
	)
	return []float64{rx, gx, bx}
} | pkg/trilinear/trilinear.go | 0.690142 | 0.50238 | trilinear.go | starcoder |
package wolfenstein
import (
"github.com/llgcode/draw2d/draw2dimg"
"github.com/llgcode/draw2d/draw2dkit"
"image/color"
"math"
)
// GameState holds the level layout and the player for a Wolfenstein-style
// raycasting demo.
type GameState struct {
	level     []int // row-major mapSize x mapSize grid; non-zero cells are walls
	mapSize   int   // level width/height in cells
	blockSize int   // size of one cell in world units
	player    Player
}

// Player is the controllable viewpoint.
type Player struct {
	position Point // world-space position plus the current view angle
	delta    Point // per-step movement vector derived from the view angle
}

// Point bundles a 2D position with an angle in radians.
type Point struct {
	x     float64
	y     float64
	angle float64
}
// NewGameState builds the demo state: a hard-coded 8x8 walled level with the
// player centred in it, facing angle 0. The returned error is always nil.
//
// NOTE(review): width and height are currently unused; the level and block
// sizes are hard-coded below.
func NewGameState(width, height int) (*GameState, error) {
	var gs GameState
	// silly level
	gs.level = []int{
		1, 1, 1, 1, 1, 1, 1, 1,
		1, 0, 1, 0, 0, 0, 0, 1,
		1, 0, 1, 0, 0, 0, 0, 1,
		1, 0, 1, 0, 0, 0, 0, 1,
		1, 0, 0, 0, 0, 0, 0, 1,
		1, 0, 0, 0, 0, 1, 0, 1,
		1, 0, 0, 0, 0, 0, 0, 1,
		1, 1, 1, 1, 1, 1, 1, 1,
	}
	gs.mapSize = 8
	gs.blockSize = 64
	gs.player = Player{
		position: Point{
			float64(gs.mapSize * gs.blockSize / 2),
			float64(gs.mapSize * gs.blockSize / 2),
			0.0,
		},
		delta: Point{0, 0, 0.0},
	}
	// Derive the initial movement vector from the starting angle.
	gs.updateDelta()
	return &gs, nil
}
// GetMapSize returns the level's width/height in cells.
func (gs *GameState) GetMapSize() int {
	return gs.mapSize
}

// GetLevel returns the raw level grid (row-major; non-zero means wall).
func (gs *GameState) GetLevel() []int {
	return gs.level
}

// GetPlayer returns a copy of the current player state.
func (gs *GameState) GetPlayer() Player {
	return gs.player
}

// GetPlayerPosition returns the player's world position together with the
// current per-step movement vector.
func (gs *GameState) GetPlayerPosition() (x, y, deltaX, deltaY float64) {
	return gs.player.position.x, gs.player.position.y, gs.player.delta.x, gs.player.delta.y
}

// GetBlockSize returns the size of one cell in world units.
func (gs *GameState) GetBlockSize() int {
	return gs.blockSize
}

// GetPlayerAngle returns the player's view angle in radians.
func (gs *GameState) GetPlayerAngle() float64 {
	return gs.player.position.angle
}
// MoveUp advances the player one step along the current view direction.
func (gs *GameState) MoveUp() {
	gs.player.position.x += gs.player.delta.x
	gs.player.position.y += gs.player.delta.y
}

// MoveDown steps the player backwards along the view direction.
func (gs *GameState) MoveDown() {
	gs.player.position.x -= gs.player.delta.x
	gs.player.position.y -= gs.player.delta.y
}

// MoveLeft rotates the view by -0.1 rad, wrapping the angle into [0, 2π),
// and refreshes the movement vector.
func (gs *GameState) MoveLeft() {
	gs.player.position.angle -= 0.1
	if gs.player.position.angle < 0 {
		gs.player.position.angle += 2 * math.Pi
	}
	gs.updateDelta()
}

// MoveRight rotates the view by +0.1 rad, wrapping the angle into [0, 2π),
// and refreshes the movement vector.
func (gs *GameState) MoveRight() {
	gs.player.position.angle += 0.1
	if gs.player.position.angle > 2*math.Pi {
		gs.player.position.angle -= 2 * math.Pi
	}
	gs.updateDelta()
}

// updateDelta recomputes the 5-unit-per-step movement vector from the view
// angle. Must be called whenever the angle changes.
func (gs *GameState) updateDelta() {
	gs.player.delta.x = math.Cos(gs.player.position.angle) * 5
	gs.player.delta.y = math.Sin(gs.player.position.angle) * 5
}
// RenderRay casts a single ray from the player towards the nearest vertical
// grid boundary and draws a small blue circle at the computed point.
//
// NOTE(review): only one boundary is considered (the loop runs a single
// iteration and there is no wall test), and the left-facing branch reuses the
// distance to the RIGHT cell edge with a negated tangent — this looks like
// work-in-progress raycasting math; verify against a DDA reference before
// relying on the drawn position.
func (gs *GameState) RenderRay(gc *draw2dimg.GraphicContext) {
	// player position as origin
	posX := gs.player.position.x
	posY := gs.player.position.y
	posAngle := gs.player.position.angle
	blockSize := gs.GetBlockSize()
	var rayX, rayY float64
	for x := 0; x < 1; x++ {
		//which cell of the map we're in (world coordinate of its left edge)
		mapX := int(math.Trunc(posX / float64(blockSize)))*blockSize
		// facing right (angle in the right half-plane)
		if posAngle < math.Pi/2 || posAngle > 3*math.Pi/2 {
			rayX = posX + (float64(blockSize) - (posX - float64(mapX)))
			rayY = posY + (float64(blockSize)-(posX-float64(mapX)))*math.Tan(posAngle)
		}
		// facing left (angle in the left half-plane)
		if posAngle > math.Pi/2 && posAngle < 3*math.Pi/2 {
			rayX = posX - (float64(blockSize) - (posX - float64(mapX)))
			rayY = posY + (float64(blockSize)-(posX-float64(mapX)))*-math.Tan(posAngle)
		}
		// Mark the computed intersection with a small blue dot.
		gc.SetFillColor(color.RGBA{0x00, 0x00, 0xff, 0xff})
		gc.SetStrokeColor(color.RGBA{0x00, 0x00, 0xff, 0xff})
		draw2dkit.Circle(gc, rayX, rayY, 2)
		gc.FillStroke()
	}
} | src/wolfenstein/gameState.go | 0.614278 | 0.407481 | gameState.go | starcoder |
package advent
import (
. "github.com/davidparks11/advent2021/internal/advent/day5"
"github.com/davidparks11/advent2021/internal/coordinate"
)
type hydrothermalVenture struct {
dailyProblem
}
func NewHydrothermalVenture() Problem {
return &hydrothermalVenture{
dailyProblem{
day: 5,
},
}
}
func (h *hydrothermalVenture) Solve() interface{} {
input := h.GetInputLines()
var results []int
results = append(results, h.countCrosses(input))
results = append(results, h.countAllCross(input))
return results
}
/*
You come across a field of hydrothermal vents on the ocean floor! These vents constantly produce large, opaque clouds, so it would be best to avoid them if possible.
They tend to form in lines; the submarine helpfully produces a list of nearby lines of vents (your puzzle input) for you to review. For example:
0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
Each Line of vents is given as a Line segment in the format x1,y1 -> x2,y2 where x1,y1 are the coordinates of one end the Line segment and x2,y2 are the coordinates of the other end. These Line segments include the points at both ends. In other words:
An entry like 1,1 -> 1,3 covers points 1,1, 1,2, and 1,3.
An entry like 9,7 -> 7,7 covers points 9,7, 8,7, and 7,7.
For now, only consider horizontal and vertical lines: lines where either x1 = x2 or y1 = y2.
So, the horizontal and vertical lines from the above list would produce the following diagram:
.......1..
..1....1..
..1....1..
.......1..
.112111211
..........
..........
..........
..........
222111....
In this diagram, the top left corner is 0,0 and the bottom right corner is 9,9. Each position is shown as the number of lines which cover that Point or . if no Line covers that Point. The top-left pair of 1s, for example, comes from 2,2 -> 2,1; the very bottom row is formed by the overlapping lines 0,9 -> 5,9 and 0,9 -> 2,9.
To avoid the most dangerous areas, you need to determine the number of points where at least two lines overlap. In the above example, this is anywhere in the diagram with a 2 or larger - a total of 5 points.
Consider only horizontal and vertical lines. At how many points do at least two lines overlap?
*/
// countCrosses counts grid points covered by at least two vent lines,
// considering only horizontal and vertical lines (diagonals are excluded by
// passing false to ParseInput).
func (h *hydrothermalVenture) countCrosses(input []string) int {
	lines := ParseInput(input, false)
	// Tally how many lines cover each grid point.
	pointCrosses := make(map[coordinate.Point]int)
	for _, l := range lines {
		points := l.Points()
		for _, p := range points {
			pointCrosses[*p]++
		}
	}
	// Points covered more than once are overlaps.
	crossCount := 0
	for _, c := range pointCrosses {
		if c > 1 {
			crossCount++
		}
	}
	return crossCount
}
/*
Unfortunately, considering only horizontal and vertical lines doesn't give you the full picture; you need to also consider diagonal lines.
Because of the limits of the hydrothermal vent mapping system, the lines in your list will only ever be horizontal, vertical, or a diagonal Line at exactly 45 degrees. In other words:
An entry like 1,1 -> 3,3 covers points 1,1, 2,2, and 3,3.
An entry like 9,7 -> 7,9 covers points 9,7, 8,8, and 7,9.
Considering all lines from the above example would now produce the following diagram:
1.1....11.
.111...2..
..2.1.111.
...1.2.2..
.112313211
...1.2....
..1...1...
.1.....1..
1.......1.
222111....
You still need to determine the number of points where at least two lines overlap. In the above example, this is still anywhere in the diagram with a 2 or larger - now a total of 12 points.
Consider all of the lines. At how many points do at least two lines overlap?
*/
func (h *hydrothermalVenture) countAllCross(input []string) int {
lines := ParseInput(input, true)
pointCrosses := make(map[coordinate.Point]int)
for _, l := range lines {
points := l.Points()
for _, p := range points {
pointCrosses[*p]++
}
}
crossCount := 0
for _, c := range pointCrosses {
if c > 1 {
crossCount++
}
}
return crossCount
} | internal/advent/day5.go | 0.795142 | 0.492249 | day5.go | starcoder |
package gomath
import (
"sort"
)
const (
	// kXNotIncreasing is the panic message used when the input x values
	// are not strictly increasing.
	kXNotIncreasing = "X values must be strictly increasing"
	// kZeroValueSpline is the panic message used when a method is called
	// on a zero value Spline.
	kZeroValueSpline = "Operation not allowed on zero value spline"
)
// Point represents a single (x, y) point
type Point struct {
	X float64 // abscissa
	Y float64 // ordinate
}
// Spline represents a cubic spline. The zero value is unusable; construct
// with NewSpline or NewSplineWithSlopes.
type Spline struct {
	points []Point // knots, x strictly increasing
	// polys[i] is the cubic for the interval beginning at points[i].X,
	// expressed in the local coordinate x - points[i].X.
	polys []polyType
}
// NewSpline returns a new cubic spline going through each point in points.
// The second derivative of the spline at the first and last point is 0.
// The x values in points must be strictly increasing.
func NewSpline(points []Point) *Spline {
	// Validate and defensively copy the knots before building the pieces.
	pts := checkAndCopySplinePoints(points)
	return &Spline{points: pts, polys: splineNormal(pts)}
}
// NewSplineWithSlopes returns a new cubic spline going through each point in
// points. The x values in points must be strictly increasing. beginSlope and
// endSlope specify the slope of the spline at the first point and last point
// respectively.
func NewSplineWithSlopes(
	points []Point, beginSlope, endSlope float64) *Spline {
	// Validate and defensively copy the knots before building the pieces.
	pts := checkAndCopySplinePoints(points)
	polys := splineSlopes(pts, beginSlope, endSlope)
	return &Spline{points: pts, polys: polys}
}
// Eval evaluates this cubic spline at x. Eval panics if x doesn't fall
// between what MinX and MaxX return. Eval also panics if called on the zero
// value Spline.
func (s *Spline) Eval(x float64) float64 {
	if s.points == nil {
		panic(kZeroValueSpline)
	}
	if x < s.MinX() || x > s.MaxX() {
		panic("x value out of range for spline")
	}
	// Locate the last knot whose x does not exceed the query point; the
	// polynomial for that interval is evaluated in its local coordinate.
	i := sort.Search(len(s.points), func(j int) bool {
		return x < s.points[j].X
	}) - 1
	return s.polys[i].eval(x - s.points[i].X)
}
// MinX returns the minimum value of x for this cubic spline. MinX panics if
// called on the zero value Spline.
func (s *Spline) MinX() float64 {
	if s.points == nil {
		panic(kZeroValueSpline)
	}
	// The knots are strictly increasing, so the first is the minimum.
	first := s.points[0]
	return first.X
}
// MaxX returns the maximum value of x for this cubic spline. MaxX panics if
// called on the zero value Spline.
func (s *Spline) MaxX() float64 {
	if s.points == nil {
		panic(kZeroValueSpline)
	}
	// The knots are strictly increasing, so the last is the maximum.
	last := s.points[len(s.points)-1]
	return last.X
}
// checkAndCopySplinePoints panics unless there are at least two points with
// strictly increasing x values, then returns a defensive copy of them.
func checkAndCopySplinePoints(points []Point) []Point {
	if len(points) < 2 {
		panic("points must have length of at least 2")
	}
	prev := points[0].X
	for _, p := range points[1:] {
		if p.X <= prev {
			panic(kXNotIncreasing)
		}
		prev = p.X
	}
	result := make([]Point, len(points))
	copy(result, points)
	return result
}
// computeSpline builds one cubic per interval plus a trailing cubic at the
// last knot. The first piece starts with the given linear (xcoef) and
// quadratic (x2coef) coefficients; each piece's cubic coefficient is fitted
// so it passes through the next knot, and the following piece is obtained by
// Taylor-shifting to that knot. Each cubic is expressed in the local
// coordinate x - points[i].X.
func computeSpline(points []Point, xcoef, x2coef float64) []polyType {
	// Pre-size the result: len(points)-1 interval polynomials plus the
	// trailing polynomial appended after the loop.
	result := make([]polyType, 0, len(points))
	cubic := polyType{points[0].Y, xcoef, x2coef, 0.0}
	for i := 0; i < len(points)-1; i++ {
		xdiff := points[i+1].X - points[i].X
		cubic.fit(xdiff, points[i+1].Y)
		result = append(result, cubic)
		cubic = cubic.shift(xdiff)
	}
	result = append(result, cubic)
	return result
}
// splineNormal builds the natural spline: the second derivative at the last
// knot is driven to zero by solving for the starting slope, exploiting that
// the end curvature is linear in the starting-slope parameter.
func splineNormal(points []Point) []polyType {
	last := len(points) - 1
	end2nd0 := computeSpline(points, 0.0, 0.0)[last][2]
	end2nd1 := computeSpline(points, 1.0, 0.0)[last][2]
	start1st := -end2nd0 / (end2nd1 - end2nd0)
	return computeSpline(points, start1st, 0.0)
}
// splineSlopes builds the clamped spline: the end slope is driven to
// endSlope by solving for the starting curvature, exploiting that the end
// slope is linear in the starting-curvature parameter.
func splineSlopes(points []Point, beginSlope, endSlope float64) []polyType {
	last := len(points) - 1
	end1st0 := computeSpline(points, beginSlope, 0.0)[last][1]
	end1st1 := computeSpline(points, beginSlope, 1.0)[last][1]
	start2nd := (endSlope - end1st0) / (end1st1 - end1st0)
	return computeSpline(points, beginSlope, start2nd)
}
// polyType holds cubic polynomial coefficients in ascending order:
// p[0] + p[1]*x + p[2]*x^2 + p[3]*x^3.
type polyType [4]float64
// eval evaluates the polynomial at x via Horner's rule.
func (p *polyType) eval(x float64) float64 {
	// Same operation order as the descending-index loop form.
	return ((p[3]*x+p[2])*x+p[1])*x + p[0]
}
// fit adjusts the cubic coefficient p[3] (only) so that p(x) == y.
func (p *polyType) fit(x, y float64) {
	// Zero the cubic term first so eval yields just the quadratic part;
	// the residual then determines the cubic coefficient exactly.
	p[3] = 0.0
	actual := p.eval(x)
	p[3] = (y - actual) / (x * x * x)
}
// shift returns this polynomial re-expressed about an origin x to the right
// (a Taylor shift: constant, slope, and half-curvature at x), dropping the
// cubic term so the result is ready for a fresh fit on the next interval.
func (p *polyType) shift(x float64) polyType {
	// p'(x)
	p1 := polyType{p[1], 2.0 * p[2], 3.0 * p[3], 0.0}
	// p''(x) / 2.0
	p2 := polyType{p[2], 3.0 * p[3], 0.0, 0.0}
	return polyType{p.eval(x), p1.eval(x), p2.eval(x), 0.0}
}
package bat
import (
"fmt"
"reflect"
)
// SliceStrIdx returns first index of `x` in `slice` and -1 if `x` is not present.
func SliceStrIdx(slice []string, x string) int {
	for i := 0; i < len(slice); i++ {
		if slice[i] == x {
			return i
		}
	}
	return -1
}
// SliceIntIdx returns first index of `x` in `slice` and -1 if `x` is not present.
func SliceIntIdx(slice []int, x int) int {
	for i := 0; i < len(slice); i++ {
		if slice[i] == x {
			return i
		}
	}
	return -1
}
// SliceStrSliceIdx returns first index of `x` in `slices` and -1 if `x` is not present.
// Two slices match when they have equal length and equal elements in the
// same order. The goto-based flow (with a label immediately before the
// loop's closing brace) is replaced by an idiomatic labeled continue.
func SliceStrSliceIdx(slices [][]string, x []string) int {
outer:
	for k, s := range slices {
		if len(s) != len(x) {
			continue
		}
		for i := range s {
			if s[i] != x[i] {
				continue outer
			}
		}
		return k
	}
	return -1
}
// SliceIdx returns first index of `x` in `slice` and -1 if `x` is not present.
func SliceIdx(slice []interface{}, x interface{}) int {
	for i := 0; i < len(slice); i++ {
		if slice[i] == x {
			return i
		}
	}
	return -1
}
// SliceStrConcat concats all given string slices into a new slice.
// A nil slice is returned when the inputs contain no elements, matching the
// original append-based behavior.
func SliceStrConcat(slices ...[]string) []string {
	// Pre-size the result to avoid repeated append growth.
	total := 0
	for _, s := range slices {
		total += len(s)
	}
	if total == 0 {
		return nil
	}
	r := make([]string, 0, total)
	for _, s := range slices {
		r = append(r, s...)
	}
	return r
}
// SliceConcat concats all given slices into a new slice.
// A nil slice is returned when the inputs contain no elements, matching the
// original append-based behavior.
func SliceConcat(slices ...[]interface{}) []interface{} {
	// Pre-size the result to avoid repeated append growth.
	total := 0
	for _, s := range slices {
		total += len(s)
	}
	if total == 0 {
		return nil
	}
	r := make([]interface{}, 0, total)
	for _, s := range slices {
		r = append(r, s...)
	}
	return r
}
// StrsEq checks if two string slices has the same content
func StrsEq(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
// StrsOnlyWhitespace checks if input consists of only whitespace
func StrsOnlyWhitespace(ss []string) bool {
	// Any single non-whitespace string disqualifies the whole slice.
	for i := range ss {
		if !StrIsWhitespace(ss[i]) {
			return false
		}
	}
	return true
}
// StrsSame behaves like StrsEq but order doesn't matter. Duplicates are
// significant: each element must occur the same number of times in both
// slices. (The previous set-based implementation wrongly reported
// {"a","b"} and {"a","a"} as the same.)
func StrsSame(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	// Count occurrences in a, then consume them with b.
	counts := make(map[string]int, len(a))
	for _, v := range a {
		counts[v]++
	}
	for _, v := range b {
		if counts[v] == 0 {
			return false
		}
		counts[v]--
	}
	return true
}
// ToInterfaceSlice converts given slice into empty interface slice.
// Mostly for batch DB operations. Non-slice input (including nil) yields an
// error instead of a panic.
func ToInterfaceSlice(i interface{}) ([]interface{}, error) {
	v := reflect.ValueOf(i)
	if v.Kind() != reflect.Slice {
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return nil, fmt.Errorf("cannot convert %T to []interface{}", i)
	}
	result := make([]interface{}, v.Len())
	for j := 0; j < v.Len(); j++ {
		result[j] = v.Index(j).Interface()
	}
	return result, nil
}
// SliceStrUniqueAppend appends each of `strs` to `src`, skipping any string
// that already exists in `src`, and returns the resulting slice.
// (The previous comment stated the opposite of the actual behavior.)
// Lookup is linear per appended string, i.e. O(len(src) * len(strs)).
func SliceStrUniqueAppend(src []string, strs ...string) []string {
	for _, s := range strs {
		if SliceStrIdx(src, s) < 0 {
			src = append(src, s)
		}
	}
	return src
}
// SliceStrAsNil returns initialized empty slice as nil
func SliceStrAsNil(ls []string) []string {
	// Only a non-nil, zero-length slice is normalized to nil.
	if ls != nil && len(ls) == 0 {
		return nil
	}
	return ls
}
package device
// RamStartLocation is the first chip-8 address of program RAM; addresses
// below it are reserved for the interpreter.
const RamStartLocation = 0x200
// Given a chip-8 address, calculate the index
// of the corresponding value in the ram array.
func calculateRAMOffset(address uint16) uint16 {
	return address - RamStartLocation
}
// Check if an address is in the reserved (interpreter) range,
// i.e. below RamStartLocation.
func isAddressInReservedRange(address uint16) bool {
	return address < RamStartLocation
}
type chip8Memory struct {
	// Reserved for the Interpreter (addresses 0x000-0x1FF); LoadReserved
	// places the built-in font sprites at its start.
	reserved [512]byte
	// Program space (addresses 0x200-0xFFF), indexed by address minus
	// RamStartLocation.
	ram [3584]byte
}
// Read a single cell from memory.
func (m *chip8Memory) ReadMemory(address uint16) byte {
	// Dispatch on which backing array the address falls into.
	if isAddressInReservedRange(address) {
		return m.reserved[address]
	}
	return m.ram[calculateRAMOffset(address)]
}
// Write a single cell of memory.
func (m *chip8Memory) WriteMemory(address uint16, value byte) bool {
	// Writes into the reserved interpreter area are rejected.
	if isAddressInReservedRange(address) {
		return false
	}
	m.ram[calculateRAMOffset(address)] = value
	return true
}
// BlockWriteToMemory writes data into the block of memory between the start
// (inclusive) and stop (exclusive) addresses. It returns false when the
// write would begin in the reserved interpreter range. stop is assumed to be
// >= start and within the address space.
// (The previous comment incorrectly described this method as a read.)
func (m *chip8Memory) BlockWriteToMemory(start uint16, stop uint16, data []byte) bool {
	if isAddressInReservedRange(start) {
		// Reserved memory cannot be manipulated.
		return false
	}
	// Calculate the destination slice of program RAM.
	dst := m.ram[calculateRAMOffset(start):calculateRAMOffset(stop)]
	// And copy the data.
	copy(dst, data)
	return true
}
// BlockReadFromMemory reads the block of memory between the start
// (inclusive) and stop (exclusive) addresses, transparently crossing the
// boundary between the reserved interpreter area and program RAM.
//
// Bug fix: previously, a read that straddled the boundary left reservedStop
// at 0 (stop is not in the reserved range), so the uint16 subtraction
// reservedStop-reservedStart underflowed and the slice expression panicked.
func (m *chip8Memory) BlockReadFromMemory(start uint16, stop uint16) []byte {
	// Preallocated buffer to hold the copied values.
	buffer := make([]byte, stop-start)
	// Split the [start, stop) window into its reserved and RAM portions.
	var reservedStart, reservedStop, ramStart, ramStop uint16
	if isAddressInReservedRange(start) {
		reservedStart = start
		if isAddressInReservedRange(stop) {
			// Entirely inside the reserved area.
			reservedStop = stop
		} else {
			// Straddles the boundary: the reserved part ends at the
			// boundary and the RAM part begins at offset 0.
			reservedStop = RamStartLocation
			ramStop = calculateRAMOffset(stop)
		}
	} else {
		// Entirely inside program RAM.
		ramStart = calculateRAMOffset(start)
		ramStop = calculateRAMOffset(stop)
	}
	// Number of bytes coming from the reserved area; the remainder comes
	// from program RAM.
	separator := reservedStop - reservedStart
	copy(buffer[:separator], m.reserved[reservedStart:reservedStop])
	copy(buffer[separator:], m.ram[ramStart:ramStop])
	return buffer
}
// Load a program to the chip8 memory given the size and the data
// of the program. isETI660 is used to determine the loading start
// location.
func (m *chip8Memory) loadProgram(program []byte, programSize uint16, isETI660 bool) {
	// Standard programs load at the RAM start (0x200); ETI-660 programs
	// load 0x400 higher, at 0x600.
	start := uint16(RamStartLocation)
	if isETI660 {
		start += 0x400
	}
	// Write the program into memory at the chosen start address.
	m.BlockWriteToMemory(start, start+programSize, program)
}
// LoadProgram loads a traditional Chip-8 program into memory at the
// standard start address (0x200), given its data and program size.
func (m *chip8Memory) LoadProgram(program []byte, programSize uint16) {
	m.loadProgram(program, programSize, false)
}
// LoadETIProgram loads an ETI-660 Chip-8 program into memory at the
// ETI-660 start address (0x600), given its data and program size.
func (m *chip8Memory) LoadETIProgram(program []byte, programSize uint16) {
	m.loadProgram(program, programSize, true)
}
// LoadReserved populates the reserved section of memory with the built-in
// hexadecimal font sprites: 16 characters (0-F) of 5 bytes each, 80 bytes
// total, starting at address 0x000.
func (m *chip8Memory) LoadReserved() {
	characterSprites := []uint8{
		0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
		0x20, 0x60, 0x20, 0x20, 0x70, // 1
		0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
		0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
		0x90, 0x90, 0xF0, 0x10, 0x10, // 4
		0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
		0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
		0xF0, 0x10, 0x20, 0x40, 0x40, // 7
		0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
		0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
		0xF0, 0x90, 0xF0, 0x90, 0x90, // A
		0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
		0xF0, 0x80, 0x80, 0x80, 0xF0, // C
		0xE0, 0x90, 0x90, 0x90, 0xE0, // D
		0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
		0xF0, 0x80, 0xF0, 0x80, 0x80, // F
	}
	// Size the destination to the font table itself (80 bytes); the
	// previous hard-coded bound of 81 was off by one.
	copy(m.reserved[:len(characterSprites)], characterSprites)
}
func newMemory() *chip8Memory {
memory := new(chip8Memory)
memory.LoadReserved()
return memory
} | pkg/emulator/device/memory.go | 0.759761 | 0.580203 | memory.go | starcoder |
package gofa
// Coord
// Galactic Coordinates
/*
Icrs2g Transformation from ICRS to Galactic Coordinates.
Given:
dr float64 ICRS right ascension (radians)
dd float64 ICRS declination (radians)
Returned:
dl float64 galactic longitude (radians)
db float64 galactic latitude (radians)
Notes:
1) The IAU 1958 system of Galactic coordinates was defined with
respect to the now obsolete reference system FK4 B1950.0. When
interpreting the system in a modern context, several factors have
to be taken into account:
. The inclusion in FK4 positions of the E-terms of aberration.
. The distortion of the FK4 proper motion system by differential
Galactic rotation.
. The use of the B1950.0 equinox rather than the now-standard
J2000.0.
. The frame bias between ICRS and the J2000.0 mean place system.
The Hipparcos Catalogue (Perryman & ESA 1997) provides a rotation
matrix that transforms directly between ICRS and Galactic
coordinates with the above factors taken into account. The
matrix is derived from three angles, namely the ICRS coordinates
of the Galactic pole and the longitude of the ascending node of
the galactic equator on the ICRS equator. They are given in
degrees to five decimal places and for canonical purposes are
regarded as exact. In the Hipparcos Catalogue the matrix
elements are given to 10 decimal places (about 20 microarcsec).
In the present SOFA function the matrix elements have been
recomputed from the canonical three angles and are given to 30
decimal places.
2) The inverse transformation is performed by the function G2icrs.
Called:
Anp normalize angle into range 0 to 2pi
Anpm normalize angle into range +/- pi
S2c spherical coordinates to unit vector
Rxp product of r-matrix and p-vector
C2s p-vector to spherical
Reference:
<NAME>. & ESA, 1997, ESA SP-1200, The Hipparcos and Tycho
catalogues. Astrometric and photometric star catalogues
derived from the ESA Hipparcos Space Astrometry Mission. ESA
Publications Division, Noordwijk, Netherlands.
*/
func Icrs2g(dr, dd float64, dl, db *float64) {
	/*
	   ICRS-to-galactic rotation matrix, derived from the canonical
	   Hipparcos angles (degrees):
	   P = 192.85948 right ascension of the Galactic north pole in ICRS
	   Q = 27.12825 declination of the Galactic north pole in ICRS
	   R = 32.93192 Galactic longitude of the ascending node of
	   the Galactic equator on the ICRS equator
	   obtained by computing R_3(-R) R_1(pi/2-Q) R_3(pi/2+P) to the full
	   precision shown:
	*/
	r := [3][3]float64{
		{-0.054875560416215368492398900454, -0.873437090234885048760383168409, -0.483835015548713226831774175116},
		{+0.494109427875583673525222371358, -0.444829629960011178146614061616, +0.746982244497218890527388004556},
		{-0.867666149019004701181616534570, -0.198076373431201528180486091412, +0.455983776175066922272100478348},
	}
	var icrs, gal [3]float64
	/* Spherical to Cartesian. */
	S2c(dr, dd, &icrs)
	/* Rotate the ICRS direction into the Galactic frame. */
	Rxp(r, icrs, &gal)
	/* Back to spherical. */
	C2s(gal, dl, db)
	/* Express in conventional ranges. */
	*dl = Anp(*dl)
	*db = Anpm(*db)
}
/*
G2icrs Transformation from Galactic Coordinates to ICRS.
Given:
dl float64 galactic longitude (radians)
db float64 galactic latitude (radians)
Returned:
dr float64 ICRS right ascension (radians)
dd float64 ICRS declination (radians)
Notes:
1) The IAU 1958 system of Galactic coordinates was defined with
respect to the now obsolete reference system FK4 B1950.0. When
interpreting the system in a modern context, several factors have
to be taken into account:
. The inclusion in FK4 positions of the E-terms of aberration.
. The distortion of the FK4 proper motion system by differential
Galactic rotation.
. The use of the B1950.0 equinox rather than the now-standard
J2000.0.
. The frame bias between ICRS and the J2000.0 mean place system.
The Hipparcos Catalogue (Perryman & ESA 1997) provides a rotation
matrix that transforms directly between ICRS and Galactic
coordinates with the above factors taken into account. The
matrix is derived from three angles, namely the ICRS coordinates
of the Galactic pole and the longitude of the ascending node of
the galactic equator on the ICRS equator. They are given in
degrees to five decimal places and for canonical purposes are
regarded as exact. In the Hipparcos Catalogue the matrix
elements are given to 10 decimal places (about 20 microarcsec).
In the present SOFA function the matrix elements have been
recomputed from the canonical three angles and are given to 30
decimal places.
2) The inverse transformation is performed by the function Icrs2g.
Called:
Anp normalize angle into range 0 to 2pi
Anpm normalize angle into range +/- pi
S2c spherical coordinates to unit vector
Trxp product of transpose of r-matrix and p-vector
C2s p-vector to spherical
Reference:
<NAME>. & ESA, 1997, ESA SP-1200, The Hipparcos and Tycho
catalogues. Astrometric and photometric star catalogues
derived from the ESA Hipparcos Space Astrometry Mission. ESA
Publications Division, Noordwijk, Netherlands.
*/
func G2icrs(dl, db float64, dr, dd *float64) {
	/*
	   ICRS-to-galactic rotation matrix, derived from the canonical
	   Hipparcos angles (degrees):
	   P = 192.85948 right ascension of the Galactic north pole in ICRS
	   Q = 27.12825 declination of the Galactic north pole in ICRS
	   R = 32.93192 Galactic longitude of the ascending node of
	   the Galactic equator on the ICRS equator
	   obtained by computing R_3(-R) R_1(pi/2-Q) R_3(pi/2+P) to the full
	   precision shown:
	*/
	r := [3][3]float64{
		{-0.054875560416215368492398900454, -0.873437090234885048760383168409, -0.483835015548713226831774175116},
		{+0.494109427875583673525222371358, -0.444829629960011178146614061616, +0.746982244497218890527388004556},
		{-0.867666149019004701181616534570, -0.198076373431201528180486091412, +0.455983776175066922272100478348},
	}
	var gal, icrs [3]float64
	/* Spherical to Cartesian. */
	S2c(dl, db, &gal)
	/* Galactic to ICRS: apply the transpose of the ICRS->Galactic matrix. */
	Trxp(r, gal, &icrs)
	/* Back to spherical. */
	C2s(icrs, dr, dd)
	/* Express in conventional ranges. */
	*dr = Anp(*dr)
	*dd = Anpm(*dd)
}
/*
Ae2hd Horizon to equatorial coordinates, transform azimuth and altitude to hour angle and declination.
Given:
az float64 azimuth
el float64 altitude (informally, elevation)
phi float64 site latitude
Returned:
ha float64 hour angle (local)
dec float64 declination
Notes:
1) All the arguments are angles in radians.
2) The sign convention for azimuth is north zero, east +pi/2.
3) HA is returned in the range +/-pi. Declination is returned in
the range +/-pi/2.
4) The latitude phi is pi/2 minus the angle between the Earth's
rotation axis and the adopted zenith. In many applications it
will be sufficient to use the published geodetic latitude of the
site. In very precise (sub-arcsecond) applications, phi can be
corrected for polar motion.
5) The azimuth az must be with respect to the rotational north pole,
as opposed to the ITRS pole, and an azimuth with respect to north
on a map of the Earth's surface will need to be adjusted for
polar motion if sub-arcsecond accuracy is required.
6) Should the user wish to work with respect to the astronomical
zenith rather than the geodetic zenith, phi will need to be
adjusted for deflection of the vertical (often tens of
arcseconds), and the zero point of ha will also be affected.
7) The transformation is the same as Ve = Ry(phi-pi/2)*Rz(pi)*Vh,
where Ve and Vh are lefthanded unit vectors in the (ha,dec) and
(az,el) systems respectively and Rz and Ry are rotations about
first the z-axis and then the y-axis. (n.b. Rz(pi) simply
reverses the signs of the x and y components.) For efficiency,
the algorithm is written out rather than calling other utility
functions. For applications that require even greater
efficiency, additional savings are possible if constant terms
such as functions of latitude are computed once and for all.
8) Again for efficiency, no range checking of arguments is carried
out.
*/
func Ae2hd(az, el, phi float64, ha, dec *float64) {
	/* Trig of the inputs. */
	sa, ca := sin(az), cos(az)
	se, ce := sin(el), cos(el)
	sp, cp := sin(phi), cos(phi)
	/* Cartesian components of the HA,Dec unit vector. */
	x := -ca*ce*sp + se*cp
	y := -sa * ce
	z := ca*ce*cp + se*sp
	/* Convert to spherical; HA is indeterminate (0) on the axis. */
	r := sqrt(x*x + y*y)
	*ha = 0.0
	if r != 0.0 {
		*ha = atan2(y, x)
	}
	*dec = atan2(z, r)
}
/*
Hd2ae Equatorial to horizon coordinates: transform hour angle and declination to azimuth and altitude.
Given:
ha float64 hour angle (local)
dec float64 declination
phi float64 site latitude
Returned:
az float64 azimuth
el float64 altitude (informally, elevation)
Notes:
1) All the arguments are angles in radians.
2) Azimuth is returned in the range 0-2pi; north is zero, and east
is +pi/2. Altitude is returned in the range +/- pi/2.
3) The latitude phi is pi/2 minus the angle between the Earth's
rotation axis and the adopted zenith. In many applications it
will be sufficient to use the published geodetic latitude of the
site. In very precise (sub-arcsecond) applications, phi can be
corrected for polar motion.
4) The returned azimuth az is with respect to the rotational north
pole, as opposed to the ITRS pole, and for sub-arcsecond
accuracy will need to be adjusted for polar motion if it is to
be with respect to north on a map of the Earth's surface.
5) Should the user wish to work with respect to the astronomical
zenith rather than the geodetic zenith, phi will need to be
adjusted for deflection of the vertical (often tens of
arcseconds), and the zero point of the hour angle ha will also
be affected.
6) The transformation is the same as Vh = Rz(pi)*Ry(pi/2-phi)*Ve,
where Vh and Ve are lefthanded unit vectors in the (az,el) and
(ha,dec) systems respectively and Ry and Rz are rotations about
first the y-axis and then the z-axis. (n.b. Rz(pi) simply
reverses the signs of the x and y components.) For efficiency,
the algorithm is written out rather than calling other utility
functions. For applications that require even greater
efficiency, additional savings are possible if constant terms
such as functions of latitude are computed once and for all.
7) Again for efficiency, no range checking of arguments is carried
out.
*/
func Hd2ae(ha, dec, phi float64, az, el *float64) {
	/* Trig of the inputs. */
	sh, ch := sin(ha), cos(ha)
	sd, cd := sin(dec), cos(dec)
	sp, cp := sin(phi), cos(phi)
	/* Cartesian components of the Az,Alt unit vector. */
	x := -ch*cd*sp + sd*cp
	y := -sh * cd
	z := ch*cd*cp + sd*sp
	/* Convert to spherical, mapping azimuth into [0, 2pi). */
	r := sqrt(x*x + y*y)
	a := 0.0
	if r != 0.0 {
		a = atan2(y, x)
	}
	if a < 0.0 {
		a += D2PI
	}
	*az = a
	*el = atan2(z, r)
}
/*
Hd2pa Parallactic angle for a given hour angle and declination.
Given:
ha float64 hour angle
dec float64 declination
phi float64 site latitude
Returned (function value):
float64 parallactic angle
Notes:
1) All the arguments are angles in radians.
2) The parallactic angle at a point in the sky is the position
angle of the vertical, i.e. the angle between the directions to
the north celestial pole and to the zenith respectively.
3) The result is returned in the range -pi to +pi.
4) At the pole itself a zero result is returned.
5) The latitude phi is pi/2 minus the angle between the Earth's
rotation axis and the adopted zenith. In many applications it
will be sufficient to use the published geodetic latitude of the
site. In very precise (sub-arcsecond) applications, phi can be
corrected for polar motion.
6) Should the user wish to work with respect to the astronomical
zenith rather than the geodetic zenith, phi will need to be
adjusted for deflection of the vertical (often tens of
arcseconds), and the zero point of the hour angle ha will also
be affected.
Reference:
<NAME>., "Spherical Astronomy", Cambridge University Press,
6th edition (Green, 1977), p49.
*/
func Hd2pa(ha, dec, phi float64) float64 {
	cp := cos(phi)
	sqsz := cp * sin(ha)
	cqsz := sin(phi)*cos(dec) - cp*sin(dec)*cos(ha)
	/* At the pole both components vanish; return zero by convention. */
	if sqsz == 0.0 && cqsz == 0.0 {
		return 0.0
	}
	return atan2(sqsz, cqsz)
}
/*
Ecm06 ICRS equatorial to ecliptic rotation matrix, IAU 2006.
Given:
date1,date2 float64 TT as a 2-part Julian date (Note 1)
Returned:
rm [3][3]float64 ICRS to ecliptic rotation matrix
Notes:
1) The TT date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TT)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
2) The matrix is in the sense
E_ep = rm x P_ICRS,
where P_ICRS is a vector with respect to ICRS right ascension
and declination axes and E_ep is the same vector with respect to
the (inertial) ecliptic and equinox of date.
3) P_ICRS is a free vector, merely a direction, typically of unit
magnitude, and not bound to any particular spatial origin, such
as the Earth, Sun or SSB. No assumptions are made about whether
it represents starlight and embodies astrometric effects such as
parallax or aberration. The transformation is approximately that
between mean J2000.0 right ascension and declination and ecliptic
longitude and latitude, with only frame bias (always less than
25 mas) to disturb this classical picture.
Called:
Obl06 mean obliquity, IAU 2006
Pmat06 PB matrix, IAU 2006
Ir initialize r-matrix to identity
Rx rotate around X-axis
Rxr product of two r-matrices
*/
func Ecm06(date1, date2 float64, rm *[3][3]float64) {
	var bp, e [3][3]float64
	/* Mean obliquity of the ecliptic, IAU 2006. */
	ob := Obl06(date1, date2)
	/* Precession-bias matrix, IAU 2006. */
	Pmat06(date1, date2, &bp)
	/* Rotation from equatorial of date to ecliptic: R_1(ob). */
	Ir(&e)
	Rx(ob, &e)
	/* Compose: ICRS -> equatorial of date -> ecliptic. */
	Rxr(e, bp, rm)
}
/*
Eqec06 Transformation from ICRS equatorial coordinates to ecliptic coordinates
(mean equinox and ecliptic of date) using IAU 2006 precession model.
Given:
date1,date2 float64 TT as a 2-part Julian date (Note 1)
dr,dd float64 ICRS right ascension and declination (radians)
Returned:
dl,db float64 ecliptic longitude and latitude (radians)
Notes:
1) The TT date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TT)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
2) No assumptions are made about whether the coordinates represent
starlight and embody astrometric effects such as parallax or
aberration.
3) The transformation is approximately that from mean J2000.0 right
ascension and declination to ecliptic longitude and latitude
(mean equinox and ecliptic of date), with only frame bias (always
less than 25 mas) to disturb this classical picture.
Called:
S2c spherical coordinates to unit vector
Ecm06 J2000.0 to ecliptic rotation matrix, IAU 2006
Rxp product of r-matrix and p-vector
C2s unit vector to spherical coordinates
Anp normalize angle into range 0 to 2pi
Anpm normalize angle into range +/- pi
*/
func Eqec06(date1, date2 float64, dr, dd float64, dl, db *float64) {
	var rm [3][3]float64
	var v1, v2 [3]float64
	var lon, lat float64
	/* Unit vector from the ICRS spherical coordinates. */
	S2c(dr, dd, &v1)
	/* Rotate into the ecliptic frame of date. */
	Ecm06(date1, date2, &rm)
	Rxp(rm, v1, &v2)
	/* Back to spherical. */
	C2s(v2, &lon, &lat)
	/* Express in conventional ranges. */
	*dl = Anp(lon)
	*db = Anpm(lat)
}
/*
Eceq06 Transformation from ecliptic coordinates (mean equinox and ecliptic
of date) to ICRS RA,Dec, using the IAU 2006 precession model.
Given:
date1,date2 float64 TT as a 2-part Julian date (Note 1)
dl,db float64 ecliptic longitude and latitude (radians)
Returned:
dr,dd float64 ICRS right ascension and declination (radians)
Notes:
1) The TT date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TT)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
2) No assumptions are made about whether the coordinates represent
starlight and embody astrometric effects such as parallax or
aberration.
3) The transformation is approximately that from ecliptic longitude
and latitude (mean equinox and ecliptic of date) to mean J2000.0
right ascension and declination, with only frame bias (always
less than 25 mas) to disturb this classical picture.
Called:
S2c spherical coordinates to unit vector
Ecm06 J2000.0 to ecliptic rotation matrix, IAU 2006
Trxp product of transpose of r-matrix and p-vector
C2s unit vector to spherical coordinates
Anp normalize angle into range 0 to 2pi
Anpm normalize angle into range +/- pi
*/
func Eceq06(date1, date2 float64, dl, db float64, dr, dd *float64) {
	var rm [3][3]float64
	var v1, v2 [3]float64
	var ra, dec float64
	/* Unit vector from the ecliptic spherical coordinates. */
	S2c(dl, db, &v1)
	/* The matrix maps ICRS -> ecliptic, so apply its transpose. */
	Ecm06(date1, date2, &rm)
	Trxp(rm, v1, &v2)
	/* Back to spherical. */
	C2s(v2, &ra, &dec)
	/* Express in conventional ranges. */
	*dr = Anp(ra)
	*dd = Anpm(dec)
}
/*
Ltecm ICRS equatorial to ecliptic rotation matrix, long-term.
Given:
epj float64 Julian epoch (TT)
Returned:
rm [3][3]float64 ICRS to ecliptic rotation matrix
Notes:
1) The matrix is in the sense
E_ep = rm x P_ICRS,
where P_ICRS is a vector with respect to ICRS right ascension
and declination axes and E_ep is the same vector with respect to
the (inertial) ecliptic and equinox of epoch epj.
2) P_ICRS is a free vector, merely a direction, typically of unit
magnitude, and not bound to any particular spatial origin, such
as the Earth, Sun or SSB. No assumptions are made about whether
it represents starlight and embodies astrometric effects such as
parallax or aberration. The transformation is approximately that
between mean J2000.0 right ascension and declination and ecliptic
longitude and latitude, with only frame bias (always less than
25 mas) to disturb this classical picture.
3) The Vondrak et al. (2011, 2012) 400 millennia precession model
agrees with the IAU 2006 precession at J2000.0 and stays within
100 microarcseconds during the 20th and 21st centuries. It is
accurate to a few arcseconds throughout the historical period,
worsening to a few tenths of a degree at the end of the
+/- 200,000 year time span.
Called:
Ltpequ equator pole, long term
Ltpecl ecliptic pole, long term
Pxp vector product
Pn normalize vector
References:
<NAME>., <NAME>. and <NAME>., 2011, New precession
expressions, valid for long time intervals, Astron.Astrophys. 534,
A22
<NAME>., <NAME>. and <NAME>., 2012, New precession
expressions, valid for long time intervals (Corrigendum),
Astron.Astrophys. 541, C1
*/
func Ltecm(epj float64, rm *[3][3]float64) {
	/* Frame bias (IERS Conventions 2010, Eqs. 5.21 and 5.33) */
	dx := -0.016617 * DAS2R
	de := -0.0068192 * DAS2R
	dr := -0.0146 * DAS2R
	var p, z, w, x, y [3]float64
	var s float64
	/* Long-term precession: equator pole and ecliptic pole (the latter is
	   the bottom row of the equatorial-to-ecliptic matrix). */
	Ltpequ(epj, &p)
	Ltpecl(epj, &z)
	/* Equinox (top row of matrix): unit vector along p x z. */
	Pxp(p, z, &w)
	Pn(w, &s, &x)
	/* Middle row completes the right-handed triad. */
	Pxp(z, x, &y)
	/* Apply the frame bias to each row of the rotation matrix. */
	rows := [3][3]float64{x, y, z}
	for i, v := range rows {
		rm[i][0] = v[0] - v[1]*dr + v[2]*dx
		rm[i][1] = v[0]*dr + v[1] + v[2]*de
		rm[i][2] = -v[0]*dx - v[1]*de + v[2]
	}
}
/*
Lteqec Transformation from ICRS equatorial coordinates to ecliptic coordinates
(mean equinox and ecliptic of date) using a long-term precession model.
Given:
epj float64 Julian epoch (TT)
dr,dd float64 ICRS right ascension and declination (radians)
Returned:
dl,db float64 ecliptic longitude and latitude (radians)
Notes:
1) No assumptions are made about whether the coordinates represent
starlight and embody astrometric effects such as parallax or
aberration.
2) The transformation is approximately that from mean J2000.0 right
ascension and declination to ecliptic longitude and latitude
(mean equinox and ecliptic of date), with only frame bias (always
less than 25 mas) to disturb this classical picture.
3) The Vondrak et al. (2011, 2012) 400 millennia precession model
agrees with the IAU 2006 precession at J2000.0 and stays within
100 microarcseconds during the 20th and 21st centuries. It is
accurate to a few arcseconds throughout the historical period,
worsening to a few tenths of a degree at the end of the
+/- 200,000 year time span.
Called:
S2c spherical coordinates to unit vector
Ltecm J2000.0 to ecliptic rotation matrix, long term
Rxp product of r-matrix and p-vector
C2s unit vector to spherical coordinates
Anp normalize angle into range 0 to 2pi
Anpm normalize angle into range +/- pi
References:
Vondrak, J., Capitaine, N. and Wallace, P.T., 2011, New precession
expressions, valid for long time intervals, Astron.Astrophys. 534,
A22
Vondrak, J., Capitaine, N. and Wallace, P.T., 2012, New precession
expressions, valid for long time intervals (Corrigendum),
Astron.Astrophys. 541, C1
*/
// Lteqec converts ICRS right ascension/declination (dr, dd) into ecliptic
// longitude/latitude (dl, db) for the mean equinox and ecliptic of Julian
// epoch epj (TT), using the Vondrak et al. long-term precession model.
// Results are normalized into the conventional ranges.
func Lteqec(epj float64, dr, dd float64, dl, db *float64) {
	var (
		rm       [3][3]float64
		equ, ecl [3]float64
		lon, lat float64
	)

	// Unit vector from the equatorial spherical coordinates.
	S2c(dr, dd, &equ)

	// ICRS equatorial to ecliptic rotation matrix for this epoch.
	Ltecm(epj, &rm)

	// Rotate into the ecliptic frame.
	Rxp(rm, equ, &ecl)

	// Back to spherical coordinates.
	C2s(ecl, &lon, &lat)

	// Longitude into 0..2pi, latitude into -pi..+pi.
	*dl = Anp(lon)
	*db = Anpm(lat)
}
/*
Lteceq Transformation from ecliptic coordinates (mean equinox and ecliptic
of date) to ICRS RA,Dec, using a long-term precession model.
Given:
epj float64 Julian epoch (TT)
dl,db float64 ecliptic longitude and latitude (radians)
Returned:
dr,dd float64 ICRS right ascension and declination (radians)
Notes:
1) No assumptions are made about whether the coordinates represent
starlight and embody astrometric effects such as parallax or
aberration.
2) The transformation is approximately that from ecliptic longitude
and latitude (mean equinox and ecliptic of date) to mean J2000.0
right ascension and declination, with only frame bias (always
less than 25 mas) to disturb this classical picture.
3) The Vondrak et al. (2011, 2012) 400 millennia precession model
agrees with the IAU 2006 precession at J2000.0 and stays within
100 microarcseconds during the 20th and 21st centuries. It is
accurate to a few arcseconds throughout the historical period,
worsening to a few tenths of a degree at the end of the
+/- 200,000 year time span.
Called:
S2c spherical coordinates to unit vector
Ltecm J2000.0 to ecliptic rotation matrix, long term
Trxp product of transpose of r-matrix and p-vector
C2s unit vector to spherical coordinates
Anp normalize angle into range 0 to 2pi
Anpm normalize angle into range +/- pi
References:
Vondrak, J., Capitaine, N. and Wallace, P.T., 2011, New precession
expressions, valid for long time intervals, Astron.Astrophys. 534,
A22
Vondrak, J., Capitaine, N. and Wallace, P.T., 2012, New precession
expressions, valid for long time intervals (Corrigendum),
Astron.Astrophys. 541, C1
*/
// Lteceq converts ecliptic longitude/latitude (dl, db) of the mean equinox
// and ecliptic of Julian epoch epj (TT) into ICRS right ascension and
// declination (dr, dd), using the Vondrak et al. long-term precession model.
// Results are normalized into the conventional ranges.
func Lteceq(epj float64, dl, db float64, dr, dd *float64) {
	var (
		rm       [3][3]float64
		ecl, equ [3]float64
		ra, dec  float64
	)

	// Unit vector from the ecliptic spherical coordinates.
	S2c(dl, db, &ecl)

	// ICRS equatorial to ecliptic rotation matrix for this epoch.
	Ltecm(epj, &rm)

	// The transpose takes us from ecliptic back to ICRS.
	Trxp(rm, ecl, &equ)

	// Back to spherical coordinates.
	C2s(equ, &ra, &dec)

	// RA into 0..2pi, declination into -pi..+pi.
	*dr = Anp(ra)
	*dd = Anpm(dec)
}
/*
Eform a,f for a nominated Earth reference ellipsoid
Given:
n int ellipsoid identifier (Note 1)
Returned:
a float64 equatorial radius (meters, Note 2)
f float64 flattening (Note 2)
Returned (function value):
int status: 0 = OK
-1 = illegal identifier (Note 3)
Notes:
1) The identifier n is a number that specifies the choice of
reference ellipsoid. The following are supported:
n ellipsoid
1 WGS84
2 GRS80
3 WGS72
The n value has no significance outside the SOFA software. For
convenience, symbols WGS84 etc. are defined in sofam.h.
2) The ellipsoid parameters are returned in the form of equatorial
radius in meters (a) and flattening (f). The latter is a number
around 0.00335, i.e. around 1/298.
3) For the case where an unsupported n value is supplied, zero a and
f are returned, as well as error status.
References:
Department of Defense World Geodetic System 1984, National
Imagery and Mapping Agency Technical Report 8350.2, Third
Edition, p3-2.
Moritz, H., Bull. Geodesique 66-2, 187 (1992).
The Department of Defense World Geodetic System 1972, World
Geodetic System Committee, May 1974.
Explanatory Supplement to the Astronomical Almanac,
P. Kenneth Seidelmann (ed), University Science Books (1992),
p220.
*/
// Eform looks up the equatorial radius a (meters) and flattening f of the
// reference ellipsoid identified by n (WGS84, GRS80 or WGS72).
// It returns 0 on success, or -1 (with *a and *f zeroed) for an unknown
// identifier.
func Eform(n int, a, f *float64) int {
	// Look up a and f for the specified reference ellipsoid.
	// Go switch cases do not fall through, so the C-style break
	// statements in the original were redundant (staticcheck S1023).
	switch n {
	case WGS84:
		*a = 6378137.0
		*f = 1.0 / 298.257223563
	case GRS80:
		*a = 6378137.0
		*f = 1.0 / 298.257222101
	case WGS72:
		*a = 6378135.0
		*f = 1.0 / 298.26
	default:
		// Invalid identifier: zero the outputs and report the error.
		*a = 0.0
		*f = 0.0
		return -1
	}
	// OK status.
	return 0
}
/*
Gc2gd Transform geocentric coordinates to geodetic using the specified
reference ellipsoid.
Given:
n int ellipsoid identifier (Note 1)
xyz [3]float64 geocentric vector (Note 2)
Returned:
elong float64 longitude (radians, east +ve, Note 3)
phi float64 latitude (geodetic, radians, Note 3)
height float64 height above ellipsoid (geodetic, Notes 2,3)
Returned (function value):
int status: 0 = OK
-1 = illegal identifier (Note 3)
-2 = internal error (Note 3)
Notes:
1) The identifier n is a number that specifies the choice of
reference ellipsoid. The following are supported:
n ellipsoid
1 WGS84
2 GRS80
3 WGS72
The n value has no significance outside the SOFA software. For
convenience, symbols WGS84 etc. are defined in sofam.h.
2) The geocentric vector (xyz, given) and height (height, returned)
are in meters.
3) An error status -1 means that the identifier n is illegal. An
error status -2 is theoretically impossible. In all error cases,
all three results are set to -1e9.
4) The inverse transformation is performed in the function iauGd2gc.
Called:
Eform Earth reference ellipsoids
Gc2gde geocentric to geodetic transformation, general
*/
// Gc2gd transforms the geocentric vector xyz (meters) into geodetic
// longitude, latitude and height on the reference ellipsoid identified
// by n. Status: 0 = OK, -1 = illegal identifier, -2 = internal error.
// On any error, all three results are set to -1e9.
func Gc2gd(n int, xyz [3]float64, elong, phi, height *float64) int {
	var a, f float64

	// Reference ellipsoid parameters for identifier n.
	status := Eform(n, &a, &f)
	if status == 0 {
		// Geocentric x,y,z -> longitude, geodetic latitude, height.
		status = Gc2gde(a, f, xyz, elong, phi, height)
		if status < 0 {
			status = -2
		}
	}

	// On any failure, force all results to the sentinel value.
	if status < 0 {
		*elong = -1e9
		*phi = -1e9
		*height = -1e9
	}
	return status
}
/*
Gc2gde Transform geocentric coordinates to geodetic for a reference
ellipsoid of specified form.
Given:
a float64 equatorial radius (Notes 2,4)
f float64 flattening (Note 3)
xyz [3]float64 geocentric vector (Note 4)
Returned:
elong float64 longitude (radians, east +ve)
phi float64 latitude (geodetic, radians)
height float64 height above ellipsoid (geodetic, Note 4)
Returned (function value):
int status: 0 = OK
-1 = illegal f
-2 = illegal a
Notes:
1) This function is based on the GCONV2H Fortran subroutine by
Toshio Fukushima (see reference).
2) The equatorial radius, a, can be in any units, but meters is
the conventional choice.
3) The flattening, f, is (for the Earth) a value around 0.00335,
i.e. around 1/298.
4) The equatorial radius, a, and the geocentric vector, xyz,
must be given in the same units, and determine the units of
the returned height, height.
5) If an error occurs (status < 0), elong, phi and height are
unchanged.
6) The inverse transformation is performed in the function
iauGd2gce.
7) The transformation for a standard ellipsoid (such as WGS84) can
more conveniently be performed by calling iauGc2gd, which uses a
numerical code to identify the required A and F values.
Reference:
Fukushima, T., "Transformation from Cartesian to geodetic
coordinates accelerated by Halley's method", J.Geodesy (2006)
79: 689-693
*/
// Gc2gde transforms the geocentric vector xyz into geodetic longitude,
// latitude and height for an ellipsoid of equatorial radius a and
// flattening f, using Fukushima's Halley-accelerated method (see the
// reference in the block comment above).
func Gc2gde(a, f float64, xyz [3]float64, elong, phi, height *float64) int {
var aeps2, e2, e4t, ec2, ec, b, x, y, z, p2, absz, p, s0, pn, zc,
c0, c02, c03, s02, s03, a02, a0, a03, d0, f0, b0, s1,
cc, s12, cc2 float64
/* ------------- */
/* Preliminaries */
/* ------------- */
/* Validate ellipsoid parameters. */
if f < 0.0 || f >= 1.0 {
return -1
}
if a <= 0.0 {
return -2
}
/* Functions of ellipsoid parameters (with further validation of f). */
/* aeps2: threshold below which the point is treated as on the polar axis. */
aeps2 = a * a * 1e-32
/* e2 = (2-f)f is the first eccentricity squared. */
e2 = (2.0 - f) * f
e4t = e2 * e2 * 1.5
ec2 = 1.0 - e2
if ec2 <= 0.0 {
return -1
}
ec = sqrt(ec2)
/* b = a(1-f): the polar (semi-minor) radius. */
b = a * ec
/* Cartesian components. */
x = xyz[0]
y = xyz[1]
z = xyz[2]
/* Distance from polar axis squared. */
p2 = x*x + y*y
/* Longitude. */
// *elong = p2 > 0.0 ? atan2(y, x) : 0.0;
if p2 > 0.0 {
*elong = atan2(y, x)
} else {
*elong = 0.0
}
/* Unsigned z-coordinate. */
absz = fabs(z)
/* Proceed unless polar case. */
if p2 > aeps2 {
/* Distance from polar axis. */
p = sqrt(p2)
/* Normalization (work in units of the equatorial radius). */
s0 = absz / a
pn = p / a
zc = ec * s0
/* Prepare Newton correction factors. */
c0 = ec * pn
c02 = c0 * c0
c03 = c02 * c0
s02 = s0 * s0
s03 = s02 * s0
a02 = c02 + s02
a0 = sqrt(a02)
a03 = a02 * a0
d0 = zc*a03 + e2*s03
f0 = pn*a03 - e2*c03
/* Prepare Halley correction factor. */
b0 = e4t * s02 * c02 * pn * (a0 - ec)
s1 = d0*f0 - b0*s0
cc = ec * (f0*f0 - b0*c0)
/* Evaluate latitude and height. */
*phi = atan(s1 / cc)
s12 = s1 * s1
cc2 = cc * cc
*height = (p*cc + absz*s1 - a*sqrt(ec2*s12+cc2)) / sqrt(s12+cc2)
} else {
/* Exception: pole. */
*phi = DPI / 2.0
*height = absz - b
}
/* Restore sign of latitude. */
if z < 0 {
*phi = -*phi
}
/* OK status. */
return 0
}
/*
Gd2gc Transform geodetic coordinates to geocentric using the specified
reference ellipsoid.
Given:
n int ellipsoid identifier (Note 1)
elong float64 longitude (radians, east +ve)
phi float64 latitude (geodetic, radians, Note 3)
height float64 height above ellipsoid (geodetic, Notes 2,3)
Returned:
xyz [3]float64 geocentric vector (Note 2)
Returned (function value):
int status: 0 = OK
-1 = illegal identifier (Note 3)
-2 = illegal case (Note 3)
Notes:
1) The identifier n is a number that specifies the choice of
reference ellipsoid. The following are supported:
n ellipsoid
1 WGS84
2 GRS80
3 WGS72
The n value has no significance outside the SOFA software. For
convenience, symbols WGS84 etc. are defined in sofam.h.
2) The height (height, given) and the geocentric vector (xyz,
returned) are in meters.
3) No validation is performed on the arguments elong, phi and
height. An error status -1 means that the identifier n is
illegal. An error status -2 protects against cases that would
lead to arithmetic exceptions. In all error cases, xyz is set
to zeros.
4) The inverse transformation is performed in the function iauGc2gd.
Called:
Eform Earth reference ellipsoids
Gd2gce geodetic to geocentric transformation, general
Zp zero p-vector
*/
// Gd2gc transforms geodetic coordinates (elong, phi, height) on the
// reference ellipsoid identified by n into the geocentric vector xyz.
// Status: 0 = OK, -1 = illegal identifier, -2 = illegal case.
// On any error, xyz is zeroed.
func Gd2gc(n int, elong, phi, height float64, xyz *[3]float64) int {
	var a, f float64

	// Reference ellipsoid parameters for identifier n.
	status := Eform(n, &a, &f)
	if status == 0 {
		// Longitude, geodetic latitude, height -> geocentric x,y,z.
		status = Gd2gce(a, f, elong, phi, height, xyz)
		if status != 0 {
			status = -2
		}
	}

	// On any failure, zero the result vector.
	if status != 0 {
		Zp(xyz)
	}
	return status
}
/*
Gd2gce Transform geodetic coordinates to geocentric for a reference
ellipsoid of specified form.
Given:
a float64 equatorial radius (Notes 1,4)
f float64 flattening (Notes 2,4)
elong float64 longitude (radians, east +ve)
phi float64 latitude (geodetic, radians, Note 4)
height float64 height above ellipsoid (geodetic, Notes 3,4)
Returned:
xyz [3]float64 geocentric vector (Note 3)
Returned (function value):
int status: 0 = OK
-1 = illegal case (Note 4)
Notes:
1) The equatorial radius, a, can be in any units, but meters is
the conventional choice.
2) The flattening, f, is (for the Earth) a value around 0.00335,
i.e. around 1/298.
3) The equatorial radius, a, and the height, height, must be
given in the same units, and determine the units of the
returned geocentric vector, xyz.
4) No validation is performed on individual arguments. The error
status -1 protects against (unrealistic) cases that would lead
to arithmetic exceptions. If an error occurs, xyz is unchanged.
5) The inverse transformation is performed in the function
iauGc2gde.
6) The transformation for a standard ellipsoid (such as WGS84) can
more conveniently be performed by calling iauGd2gc, which uses a
numerical code to identify the required a and f values.
References:
Green, R.M., Spherical Astronomy, Cambridge University Press,
(1985) Section 4.5, p96.
Explanatory Supplement to the Astronomical Almanac,
P. Kenneth Seidelmann (ed), University Science Books (1992),
Section 4.22, p202.
*/
// Gd2gce transforms geodetic coordinates (elong, phi, height) into the
// geocentric vector xyz for an ellipsoid of equatorial radius a and
// flattening f. Returns 0 on success, -1 for unrealistic a, f.
func Gd2gce(a, f float64, elong, phi, height float64, xyz *[3]float64) int {
var sp, cp, w, d, ac, as, r float64
/* Functions of geodetic latitude. */
sp = sin(phi)
cp = cos(phi)
/* w = (1-f)^2, the squared polar/equatorial axis ratio. */
w = 1.0 - f
w = w * w
/* d = 1 - e^2 sin^2(phi); non-positive only for unrealistic a, f. */
d = cp*cp + w*sp*sp
if d <= 0.0 {
return -1
}
/* ac = a/sqrt(d): prime-vertical radius of curvature; as = its polar scaling. */
ac = a / sqrt(d)
as = w * ac
/* Geocentric vector. */
r = (ac + height) * cp
xyz[0] = r * cos(elong)
xyz[1] = r * sin(elong)
xyz[2] = (as + height) * sp
/* Success. */
return 0
} | coord.go | 0.810891 | 0.65087 | coord.go | starcoder
package element
// BigNum is a text/template fragment generating non-Montgomery big-number
// helpers (neg, mulWRegular, linearCombNonModular) for a field element.
// Per its own header it is only used for the Pornin extended-GCD inverse
// algorithm, and it emits code only when the template context has
// NoCarry == true. The string body is emitted verbatim at code-generation
// time and must not be edited casually.
const BigNum = `
{{/* Only used for the Pornin Extended GCD Inverse Algorithm*/}}
{{if eq .NoCarry true}}
func (z *{{.ElementName}}) neg(x *{{.ElementName}}, xHi uint64) uint64 {
var b uint64
z[0], b = bits.Sub64(0, x[0], 0)
{{- range $i := .NbWordsIndexesNoZero}}
z[{{$i}}], b = bits.Sub64(0, x[{{$i}}], b)
{{- end}}
xHi, _ = bits.Sub64(0, xHi, b)
return xHi
}
// regular multiplication by one word regular (non montgomery)
// Fewer additions than the branch-free for positive y. Could be faster on some architectures
func (z *{{.ElementName}}) mulWRegular(x *{{.ElementName}}, y int64) uint64 {
// w := abs(y)
m := y >> 63
w := uint64((y^m)-m)
var c uint64
c, z[0] = bits.Mul64(x[0], w)
{{- range $i := .NbWordsIndexesNoZero }}
c, z[{{$i}}] = madd1(x[{{$i}}], w, c)
{{- end}}
if y < 0 {
c = z.neg(z, c)
}
return c
}
/*
Removed: seems slower
// mulWRegular branch-free regular multiplication by one word (non montgomery)
func (z *{{.ElementName}}) mulWRegularBf(x *{{.ElementName}}, y int64) uint64 {
w := uint64(y)
allNeg := uint64(y >> 63) // -1 if y < 0, 0 o.w
// s[0], s[1] so results are not stored immediately in z.
// x[i] will be needed in the i+1 th iteration. We don't want to overwrite it in case x = z
var s [2]uint64
var h [2]uint64
h[0], s[0] = bits.Mul64(x[0], w)
c := uint64(0)
b := uint64(0)
{{- range $i := .NbWordsIndexesNoZero}}
{
const curI = {{$i}} % 2
const prevI = 1 - curI
const iMinusOne = {{$i}} - 1
h[curI], s[curI] = bits.Mul64(x[{{$i}}], w)
s[curI], c = bits.Add64(s[curI], h[prevI], c)
s[curI], b = bits.Sub64(s[curI], allNeg & x[iMinusOne], b)
z[iMinusOne] = s[prevI]
}
{{- end}}
{
const curI = {{.NbWords}} % 2
const prevI = 1 - curI
const iMinusOne = {{.NbWordsLastIndex}}
s[curI], _ = bits.Sub64(h[prevI], allNeg & x[iMinusOne], b)
z[iMinusOne] = s[prevI]
return s[curI] + c
}
}*/
// Requires NoCarry
func (z *{{.ElementName}}) linearCombNonModular(x *{{.ElementName}}, xC int64, y *{{.ElementName}}, yC int64) uint64 {
var yTimes {{.ElementName}}
yHi := yTimes.mulWRegular(y, yC)
xHi := z.mulWRegular(x, xC)
carry := uint64(0)
{{- range $i := .NbWordsIndexesFull}}
z[{{$i}}], carry = bits.Add64(z[{{$i}}], yTimes[{{$i}}], carry)
{{- end}}
yHi, _ = bits.Add64(xHi, yHi, carry)
return yHi
}
{{- end}}
` | field/internal/templates/element/bignum.go | 0.716417 | 0.505005 | bignum.go | starcoder
package hrplot
import (
"fmt"
"io/ioutil"
"math"
"sort"
"github.com/loov/plot"
"github.com/loov/plot/plotsvg"
)
// Benchmark declares interface for benchmarks it can plot.
type Benchmark interface {
Name() string // display name, used in plot captions (see label)
Unit() string // measurement unit shown in captions
Float64s() []float64 // raw measurement values to plot
}
// Option is for declaring options to plotting.
type Option interface{ apply(*plotOptions) }
// plotOptions collects the tunable plot settings; defaults come from
// defaultOptions and are overridden via Option values (see applyAll).
type plotOptions struct {
Width float64 // output width, passed to plotsvg.New
Height float64 // height of a single sub-plot, passed to plotsvg.New
LineClip float64 // percentile used to clip the line plot's Y axis
DensityClip float64 // percentile used to clip the density plot's X axis
PercentileClip float64 // percentile used to clip the percentiles plot's Y axis
}
// applyAll starts from defaultOptions and applies every supplied Option
// in order, returning the resulting configuration.
func applyAll(opts ...Option) plotOptions {
	options := defaultOptions
	for _, o := range opts {
		o.apply(&options)
	}
	return options
}
// defaultOptions is the configuration used when no Option overrides it;
// see applyAll.
var defaultOptions = plotOptions{
Width: 800,
Height: 300,
LineClip: 0.9999,
DensityClip: 0.99,
PercentileClip: 0.9995,
}
// optionFunc adapts a plain function to the Option interface.
type optionFunc func(*plotOptions)
func (fn optionFunc) apply(options *plotOptions) { fn(options) }
// ClipPercentile specifies how to clip the values.
// It sets the same clipping percentile for the line, density and
// percentiles plots.
func ClipPercentile(percentile float64) Option {
	return optionFunc(func(o *plotOptions) {
		o.LineClip = percentile
		o.DensityClip = percentile
		o.PercentileClip = percentile
	})
}
// label formats the plot caption as "<name> [<unit>] <kind>".
func label(kind string, b Benchmark) string {
	caption := b.Name() + " [" + b.Unit() + "] " + kind
	return caption
}
// All plots line, density and percentiles plot on a single image.
// It writes an SVG containing the three sub-plots stacked vertically,
// and is a no-op when the benchmark has no measurements.
func All(svgfile string, b Benchmark, opts ...Option) error {
	samples := b.Float64s()
	if len(samples) == 0 {
		return nil
	}
	options := applyAll(opts...)

	plt := plot.New()
	column := plot.NewVStack()
	column.Margin = plot.R(5, 5, 5, 5)
	plt.Add(column)

	// Line plot, clipped on the Y axis.
	lineGroup := plot.NewAxisGroup(lineOptimized(b, samples)...)
	lineGroup.Y.Max = percentile(samples, options.LineClip)
	column.Add(lineGroup)

	// Density plot, clipped on the X axis.
	densityGroup := plot.NewAxisGroup(density(b, samples)...)
	densityGroup.X.Max = percentile(samples, options.DensityClip)
	column.Add(densityGroup)

	// Percentiles plot on a percentile-transformed X axis.
	percGroup := plot.NewAxisGroup(percentiles(b, samples)...)
	percGroup.X = plot.NewPercentilesAxis()
	percGroup.X.Transform = plot.NewPercentileTransform(4)
	percGroup.Y.Min, percGroup.Y.Max = 0, percentile(samples, options.PercentileClip)
	column.Add(percGroup)

	canvas := plotsvg.New(options.Width, options.Height*3)
	plt.Draw(canvas)
	return ioutil.WriteFile(svgfile, canvas.Bytes(), 0755)
}
// Line draws a line graph in timing order.
// The Y axis is clipped at the configured LineClip percentile; the plot
// is written to svgfile. No-op when there are no measurements.
func Line(svgfile string, b Benchmark, opts ...Option) error {
	samples := b.Float64s()
	if len(samples) == 0 {
		return nil
	}
	options := applyAll(opts...)

	plt := plot.New()
	plt.Margin = plot.R(5, 0, 0, 5)
	plt.Y.Max = percentile(samples, options.LineClip)
	plt.AddGroup(lineOptimized(b, samples)...)

	canvas := plotsvg.New(options.Width, options.Height)
	plt.Draw(canvas)
	return ioutil.WriteFile(svgfile, canvas.Bytes(), 0755)
}
// Density draws a density plot out of benchmark measurements.
// The X axis is clipped at the configured DensityClip percentile; the
// plot is written to svgfile. No-op when there are no measurements.
func Density(svgfile string, b Benchmark, opts ...Option) error {
	samples := b.Float64s()
	if len(samples) == 0 {
		return nil
	}
	options := applyAll(opts...)

	plt := plot.New()
	plt.Margin = plot.R(5, 0, 0, 5)
	plt.X.Max = percentile(samples, options.DensityClip)
	plt.AddGroup(density(b, samples)...)

	canvas := plotsvg.New(options.Width, options.Height)
	plt.Draw(canvas)
	return ioutil.WriteFile(svgfile, canvas.Bytes(), 0755)
}
// Percentiles draws a percentiles plot out of benchmark measurements.
// The X axis uses a percentile transform; the Y axis is clipped at the
// configured PercentileClip percentile. No-op when there are no
// measurements.
func Percentiles(svgfile string, b Benchmark, opts ...Option) error {
	samples := b.Float64s()
	if len(samples) == 0 {
		return nil
	}
	options := applyAll(opts...)

	plt := plot.New()
	plt.Margin = plot.R(5, 0, 0, 5)
	plt.X = plot.NewPercentilesAxis()
	plt.X.Transform = plot.NewPercentileTransform(4)
	plt.Y.Min, plt.Y.Max = 0, percentile(samples, options.PercentileClip)
	plt.AddGroup(percentiles(b, samples)...)

	canvas := plotsvg.New(options.Width, options.Height)
	plt.Draw(canvas)
	return ioutil.WriteFile(svgfile, canvas.Bytes(), 0755)
}
// percentile returns the value at percentile p (0..1) of the
// measurements using a ceil-based nearest-rank index, clamped to the
// last element. The input slice is not modified; it panics on an empty
// slice (callers guard against that).
func percentile(measurements []float64, p float64) float64 {
	vals := make([]float64, len(measurements))
	copy(vals, measurements)
	sort.Float64s(vals)

	idx := int(math.Ceil(p * float64(len(vals))))
	if idx > len(vals)-1 {
		idx = len(vals) - 1
	}
	return vals[idx]
}
// line builds the plot elements for a plain line plot of the
// measurements: grid, gizmo, the line itself, tick labels and a caption.
func line(b Benchmark, measurements []float64) []plot.Element {
	curve := plot.NewLine(b.Unit(), plot.Points(nil, measurements))
	caption := plot.NewTextbox(label("line", b))
	return []plot.Element{
		plot.NewGrid(),
		plot.NewGizmo(),
		curve,
		plot.NewTickLabels(),
		caption,
	}
}

// lineOptimized builds the same elements as line but with a simplified
// (optimized) line, tolerance 2.
func lineOptimized(b Benchmark, measurements []float64) []plot.Element {
	curve := plot.NewOptimizedLine(b.Unit(), plot.Points(nil, measurements), 2)
	caption := plot.NewTextbox(label("line", b))
	return []plot.Element{
		plot.NewGrid(),
		plot.NewGizmo(),
		curve,
		plot.NewTickLabels(),
		caption,
	}
}
// density builds the plot elements for a density plot of the
// measurements: grid, gizmo, the density curve, tick labels and a caption.
func density(b Benchmark, measurements []float64) []plot.Element {
	curve := plot.NewDensity(b.Unit(), measurements)
	caption := plot.NewTextbox(label("density", b))
	return []plot.Element{
		plot.NewGrid(),
		plot.NewGizmo(),
		curve,
		plot.NewTickLabels(),
		caption,
	}
}
// percentiles builds the plot elements for a percentiles plot of the
// measurements: grid, gizmo, the percentiles curve, tick labels and a
// caption produced by label.
func percentiles(b Benchmark, measurements []float64) []plot.Element {
return []plot.Element{
plot.NewGrid(),
plot.NewGizmo(),
plot.NewPercentiles(b.Unit(), measurements),
plot.NewTickLabels(),
plot.NewTextbox(label("percentiles", b)),
}
} | hrplot/plot.go | 0.778228 | 0.424054 | plot.go | starcoder
package gosu
import (
"errors"
"net/url"
)
// BeatmapCall is used to build an API call to retrieve metadata on one beatmap.
type BeatmapCall struct {
// ID of the beatmap. Required: FetchBeatmap returns an error when empty.
BeatmapID string
// Specific game-mode.
// 0 = standard, 1 = taiko, 2 = ctb, 3 = mania
Mode string
// Whether converted beatmaps are included
// 0 = not included, 1 = included
Converted string
// The beatmap hash
Hash string
}
// Beatmap stores the data of a beatmap.
type Beatmap struct {
// The status of the beatmap's ranking.
// 4 = loved, 3 = qualified, 2 = approved, 1 = ranked, 0 = pending, -1 = WIP, -2 = graveyard
Approved int `json:"approved,string"`
// Date the beatmap was ranked, in UTC.
ApprovedDate string `json:"approved_date"`
// Date the beatmap was last updated, in UTC.
LastUpdate string `json:"last_update"`
// Artist of the song used in the beatmap.
Artist string `json:"artist"`
// ID of the beatmap.
BeatmapID string `json:"beatmap_id"`
// ID of the beatmap set the beatmap is contained in.
BeatmapSetID string `json:"beatmapset_id"`
// The BPM of the beatmap.
BPM int `json:"bpm,string"`
// The creator of the beatmap.
Creator string `json:"creator"`
// ID of the beatmap's creator.
CreatorID string `json:"creator_id"`
// The star rating of the beatmap.
DifficultyRating float64 `json:"difficultyrating,string"`
// Aim/speed components of the rating (per their JSON tags; semantics
// defined by the osu! API).
DifficultyAim float64 `json:"diff_aim,string"`
DifficultySpeed float64 `json:"diff_speed,string"`
// The circle size used in the beatmap.
CircleSize float64 `json:"diff_size,string"`
// The overall difficulty used in the beatmap.
OverallDifficulty float64 `json:"diff_overall,string"`
// The approach rate used in the beatmap.
ApproachRate float64 `json:"diff_approach,string"`
// The health drain used in the beatmap.
HealthDrain float64 `json:"diff_drain,string"`
// The number of seconds from the first note to the last note, not including breaks.
HitLength int `json:"hit_length,string"`
// The source of the song used in the beatmap.
Source string `json:"source"`
// ID of the genre of the song used in the beatmap.
// 0 = any, 1 = unspecified, 2 = video game, 3 = anime, 4 = rock, 5 = pop, 6 = other, 7 = novelty, 9 = hip hop, 10 = electronic (note that there's no 8)
GenreID int `json:"genre_id,string"`
// ID of the language used in the song used in the beatmap.
// 0 = any, 1 = other, 2 = english, 3 = japanese, 4 = chinese, 5 = instrumental, 6 = korean, 7 = french, 8 = german, 9 = swedish, 10 = spanish, 11 = italian
LanguageID int `json:"language_id,string"`
// The name of the song used in the beatmap.
Title string `json:"title"`
// The number of seconds from the first note to the last note, including breaks.
TotalLength int `json:"total_length,string"`
// The name of the beatmap's difficulty.
Version string `json:"version"`
// MD5 hash of the beatmap.
FileMD5 string `json:"file_md5"`
// The game mode the beatmap utilizes.
// 0 = standard, 1 = taiko, 2 = ctb, 3 = mania
Mode string `json:"mode"`
// The tags of the beatmap separated by spaces.
Tags string `json:"tags"`
// The number of times the beatmap has been favored.
FavouriteCount int `json:"favourite_count,string"`
// The number of times the beatmap has been played.
PlayCount int `json:"playcount,string"`
// The number of times the beatmap has been passed, completed.
PassCount int `json:"passcount,string"`
// The number of circles in the map.
CircleCount int `json:"count_normal,string"`
// The number of sliders in the map.
SliderCount int `json:"count_slider,string"`
// The number of spinners in the map.
SpinnerCount int `json:"count_spinner,string"`
// The maximum combo a user can reach playing the beatmap.
MaxCombo int `json:"max_combo,string"`
// If the download for this beatmap is unavailable (old map, etc.)
DownloadUnavailable int `json:"download_unavailable,string"`
// If the audio for this beatmap is unavailable (DMCA takedown, etc.)
AudioUnavailable int `json:"audio_unavailable,string"`
// API Call URL this beatmap was fetched with.
apiURL string
// Session this beatmap was fetched from; used by Update to re-fetch.
session *session
// Original call parameters, read by Update when re-fetching.
// NOTE(review): FetchBeatmap never populates this field, so Update
// re-fetches with zero-value parameters and fails — confirm and fix.
apiCall BeatmapCall
}
// FetchBeatmap returns metadata about one beatmap.
//
// The call must identify the beatmap via BeatmapID; Mode, Converted and
// Hash are optional filters. On success the returned Beatmap remembers
// the session, the API URL and the original call parameters so that
// Update can re-fetch it later.
func (s *session) FetchBeatmap(call BeatmapCall) (Beatmap, error) {
	var beatmaps []Beatmap

	v := url.Values{}
	v.Add(endpointAPIKey, s.key)

	// BeatmapID is the only supported identifying parameter.
	if call.BeatmapID == "" {
		return Beatmap{}, errors.New("no identifying param given (BeatmapID)")
	}
	v.Add(endpointParamBeatmapID, call.BeatmapID)

	// Optional filters.
	if call.Mode != "" {
		v.Add(endpointParamMode, call.Mode)
	}
	if call.Converted != "" {
		v.Add(endpointParamConverted, call.Converted)
	}
	if call.Hash != "" {
		v.Add(endpointParamHash, call.Hash)
	}

	if err := s.parseJSON(s.buildCall(endpointBeatmaps, v), &beatmaps); err != nil {
		return Beatmap{}, err
	}
	if len(beatmaps) == 0 {
		return Beatmap{}, errors.New("no beatmaps found")
	}

	b := beatmaps[0]
	b.apiURL = s.buildCall(endpointBeatmaps, v)
	b.session = s
	// BUG FIX: record the original call parameters. Update reads
	// b.apiCall, which the original never set, so Update always
	// re-fetched with zero-value params and failed.
	b.apiCall = call
	return b, nil
}
// Update re-fetches the beatmap using the original call parameters and
// replaces the receiver's contents with the fresh data.
// BUG FIX: the original assigned the (zero-valued) result to *b before
// checking the error, clobbering the receiver on failure; the receiver
// is now only overwritten on success.
func (b *Beatmap) Update() error {
	fresh, err := b.session.FetchBeatmap(b.apiCall)
	if err != nil {
		return err
	}
	*b = fresh
	return nil
} | gosu/beatmap.go | 0.558809 | 0.416856 | beatmap.go | starcoder
package ari
// Logging represents a communication path to an
// Asterisk server for working with logging resources
type Logging interface {
// Create creates a new log. The levels are a comma-separated list of
// logging levels on which this channel should operate. The name of the
// channel should be the key's ID.
Create(key *Key, levels string) (*LogHandle, error)
// Data retrieves the data for a logging channel
Data(key *Key) (*LogData, error)
// Get returns a handle for the logging channel identified by the key
Get(key *Key) *LogHandle
// List the logs
List(filter *Key) ([]*Key, error)
// Rotate rotates the log
Rotate(key *Key) error
// Delete deletes the log
Delete(key *Key) error
}
// LogData represents the log data
type LogData struct {
// Key is the cluster-unique identifier for this logging channel
Key *Key `json:"key"`
// Name is the name of the logging channel
Name string `json:"channel"`
// Levels is a comma-separated list of logging levels for this channel
Levels string `json:"levels"`
// Types indicates the type of logs for this channel
Types string `json:"types"`
// Status indicates whether this logging channel is enabled
Status string `json:"status"`
}
// NewLogHandle builds a new log handle given the `Key` and `Logging` client
func NewLogHandle(key *Key, l Logging) *LogHandle {
return &LogHandle{
key: key,
c: l,
}
}
// LogHandle provides an interface to manipulate a logging channel
type LogHandle struct {
key *Key // cluster-unique identifier of the logging channel
c Logging // client used to perform operations against the channel
}
// ID returns the ID (name) of the logging channel
func (l *LogHandle) ID() string {
return l.key.ID
}
// Key returns the Key of the logging channel
func (l *LogHandle) Key() *Key {
return l.key
}
// Data returns the data for the logging channel, delegating to the client
func (l *LogHandle) Data() (*LogData, error) {
return l.c.Data(l.key)
}
// Rotate causes the logging channel's logfiles to be rotated
func (l *LogHandle) Rotate() error {
return l.c.Rotate(l.key)
}
// Delete removes the logging channel from Asterisk
func (l *LogHandle) Delete() error {
return l.c.Delete(l.key)
} | logging.go | 0.826887 | 0.405096 | logging.go | starcoder
package main
import (
"unsafe"
"github.com/go-gl/gl/v3.3-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
const (
triVerts = 3 // The number of vertices in a triangle.
floatSize = 4 // The size of a float32 in bytes.
// NOTE(review): positionAttribute and colorAttribute are both 1;
// vertex attributes normally use distinct indices (position is often
// 0). Confirm against the shader's attribute layout.
positionAttribute = 1
colorAttribute = 1
positionElements = 3 // The number of floats describing a position(x, y, z).
colorElements = 3 // The number of floats describing a color(r, g, b).
/* Each vertex in the triangle has two attributes, position and color.
* Offsets enable the right geometry and colors rendered.
* vertex [ position[x, y, z], color[r, g, b] ]
* float index [ 0 1 2 3 4 5 ]
* float value [ x y z r g b ]
* offset: ^ ^
* position ____| |
* color __________|
*/
positionOffset = 0
colorOffset = positionElements * floatSize
)
// Window dimensions; read by mvp for the projection's aspect ratio.
var winWidth, winHeight int
// Triangle bundles the interleaved vertex data with the rotation angle
// applied when drawing.
type Triangle struct {
attributes int // attribute count per vertex; used by Stride
data []float32 // interleaved vertex data (position + color)
angle float32 // rotation angle used by mvp (radians, per mgl32)
}
func (t *Triangle) Draw() {
// Load the shader program into the rendering pipeline.
gl.UseProgram(program)
t.mvp()
// Bind to the data in the buffer
gl.BindVertexArray(vao)
// Render the data
gl.DrawArrays(gl.TRIANGLES, 0, int32(triVerts))
// Done with the buffer and program so unbind them
gl.BindVertexArray(0)
gl.UseProgram(0)
}
// Data returns a GL-compatible pointer to the vertex data array.
func (t *Triangle) Data() unsafe.Pointer {
	return gl.Ptr(t.data)
}

// Stride returns the byte count produced by triVerts * attributes *
// floatSize.
// NOTE(review): the triVerts factor makes this bytes per triangle, not
// bytes per vertex as the original comment claimed — confirm intent.
func (t *Triangle) Stride() int32 {
	perVertex := t.attributes * floatSize
	return int32(triVerts * perVertex)
}

// Size returns the size of the vertex data in bytes.
func (t *Triangle) Size() int {
	return floatSize * len(t.data)
}

// SetAngle stores the rotation angle later applied by mvp.
func (t *Triangle) SetAngle(a float32) {
	t.angle = a
}
// mvp computes and uploads the model, projection and view matrices to
// the shader uniforms modelIndex, projectionIndex and viewIndex.
func (t *Triangle) mvp() {
	// Model: rotation about the Y axis by the current angle.
	// (The original assigned mgl32.Ident4() and immediately overwrote
	// it; the dead store has been removed.)
	model := mgl32.HomogRotate3DY(t.angle)
	gl.UniformMatrix4fv(modelIndex, 1, false, &model[0])

	// Projection: 40 degree field of view (the original comment said 60,
	// but the code uses 40) and the window's aspect ratio.
	// BUG FIX: winWidth/winHeight was integer division (e.g. 800/600 ->
	// 1), distorting the aspect ratio; convert to float32 before dividing.
	aspect := float32(winWidth) / float32(winHeight)
	projection := mgl32.Perspective(mgl32.DegToRad(40.0), aspect, 0.1, -1.0)
	gl.UniformMatrix4fv(projectionIndex, 1, false, &projection[0])

	// View: eye at (0,1,2) looking at the origin, +Y up
	// (right-handed coordinate system).
	view := mgl32.LookAtV(
		mgl32.Vec3{0, 1, 2}, mgl32.Vec3{0, 0, 0}, mgl32.Vec3{0, 1, 0},
	)
	gl.UniformMatrix4fv(viewIndex, 1, false, &view[0])
} | gtk-examples/glarea/triangle.go | 0.824002 | 0.717358 | triangle.go | starcoder
package box2d
import (
"fmt"
"math"
)
/// Wheel joint definition. This requires defining a line of
/// motion using an axis and an anchor point. The definition uses local
/// anchor points and a local axis so that the initial configuration
/// can violate the constraint slightly. The joint translation is zero
/// when the local anchor points coincide in world space. Using local
/// anchors and a local axis helps when saving and loading a game.
type B2WheelJointDef struct {
/// Common joint definition fields (see B2JointDef).
B2JointDef
/// The local anchor point relative to bodyA's origin.
LocalAnchorA B2Vec2
/// The local anchor point relative to bodyB's origin.
LocalAnchorB B2Vec2
/// The local translation axis in bodyA.
LocalAxisA B2Vec2
/// Enable/disable the joint limit.
EnableLimit bool
/// The lower translation limit, usually in meters.
LowerTranslation float64
/// The upper translation limit, usually in meters.
UpperTranslation float64
/// Enable/disable the joint motor.
EnableMotor bool
/// The maximum motor torque, usually in N-m.
MaxMotorTorque float64
/// The desired motor speed in radians per second.
MotorSpeed float64
/// Suspension stiffness. Typically in units N/m.
Stiffness float64
/// Suspension damping. Typically in units of N*s/m.
Damping float64
}
// MakeB2WheelJointDef constructs a wheel joint definition with defaults:
// a unit X local axis, zero anchors and limits, and the limit and motor
// disabled.
func MakeB2WheelJointDef() B2WheelJointDef {
	// Fields not assigned below (anchors, limits, motor, spring) rely on
	// Go's zero values, which match the defaults the original set explicitly.
	def := B2WheelJointDef{
		B2JointDef: MakeB2JointDef(),
	}
	def.Type = B2JointType.E_wheelJoint
	def.LocalAxisA.Set(1.0, 0.0)

	return def
}
/// A wheel joint. This joint provides two degrees of freedom: translation
/// along an axis fixed in bodyA and rotation in the plane. In other words, it is a point to
/// line constraint with a rotational motor and a linear spring/damper. The spring/damper is
/// initialized upon creation. This joint is designed for vehicle suspensions.
type B2WheelJoint struct {
	*B2Joint

	// Joint configuration: local anchors and the local suspension axis
	// (M_localXAxisA) plus its perpendicular (M_localYAxisA).
	M_localAnchorA B2Vec2
	M_localAnchorB B2Vec2
	M_localXAxisA  B2Vec2
	M_localYAxisA  B2Vec2

	// Accumulated impulses: point-to-line, motor, spring, and the two
	// translation-limit impulses.
	M_impulse       float64
	M_motorImpulse  float64
	M_springImpulse float64

	M_lowerImpulse     float64
	M_upperImpulse     float64
	M_translation      float64
	M_lowerTranslation float64
	M_upperTranslation float64

	M_maxMotorTorque float64
	M_motorSpeed     float64

	M_enableLimit bool
	M_enableMotor bool

	M_stiffness float64
	M_damping   float64

	// Solver temp — cached by InitVelocityConstraints for the current step:
	// island indices, local centers, inverse masses/inertias, the world-space
	// constraint axes (ax/ay) with their moment arms (sAx/sBx/sAy/sBy), and
	// the effective masses and soft-constraint coefficients.
	M_indexA       int
	M_indexB       int
	M_localCenterA B2Vec2
	M_localCenterB B2Vec2
	M_invMassA     float64
	M_invMassB     float64
	M_invIA        float64
	M_invIB        float64

	M_ax  B2Vec2
	M_ay  B2Vec2
	M_sAx float64
	M_sBx float64
	M_sAy float64
	M_sBy float64

	M_mass       float64
	M_motorMass  float64
	M_axialMass  float64
	M_springMass float64

	M_bias  float64
	M_gamma float64
}
/// GetLocalAnchorA returns the local anchor point relative to bodyA's origin.
func (joint B2WheelJoint) GetLocalAnchorA() B2Vec2 {
	return joint.M_localAnchorA
}

/// GetLocalAnchorB returns the local anchor point relative to bodyB's origin.
func (joint B2WheelJoint) GetLocalAnchorB() B2Vec2 {
	return joint.M_localAnchorB
}

/// GetLocalAxisA returns the local joint (suspension) axis relative to bodyA.
func (joint B2WheelJoint) GetLocalAxisA() B2Vec2 {
	return joint.M_localXAxisA
}

// GetMotorSpeed returns the desired motor speed in radians per second.
func (joint B2WheelJoint) GetMotorSpeed() float64 {
	return joint.M_motorSpeed
}

// GetMaxMotorTorque returns the maximum motor torque, usually in N-m.
func (joint B2WheelJoint) GetMaxMotorTorque() float64 {
	return joint.M_maxMotorTorque
}
// Linear constraint (point-to-line)
// d = pB - pA = xB + rB - xA - rA
// C = dot(ay, d)
// Cdot = dot(d, cross(wA, ay)) + dot(ay, vB + cross(wB, rB) - vA - cross(wA, rA))
// = -dot(ay, vA) - dot(cross(d + rA, ay), wA) + dot(ay, vB) + dot(cross(rB, ay), vB)
// J = [-ay, -cross(d + rA, ay), ay, cross(rB, ay)]
// Spring linear constraint
// C = dot(ax, d)
// Cdot = = -dot(ax, vA) - dot(cross(d + rA, ax), wA) + dot(ax, vB) + dot(cross(rB, ax), vB)
// J = [-ax -cross(d+rA, ax) ax cross(rB, ax)]
// Motor rotational constraint
// Cdot = wB - wA
// J = [0 0 -1 0 0 1]
// Initialize fills the definition's bodies, local anchors and local axis
// from a world-space anchor point and a world-space axis.
func (def *B2WheelJointDef) Initialize(bA *B2Body, bB *B2Body, anchor B2Vec2, axis B2Vec2) {
	def.BodyA, def.BodyB = bA, bB
	def.LocalAnchorA = bA.GetLocalPoint(anchor)
	def.LocalAnchorB = bB.GetLocalPoint(anchor)
	def.LocalAxisA = bA.GetLocalVector(axis)
}
// MakeB2WheelJoint constructs a wheel joint from its definition. The
// perpendicular axis is derived from the suspension axis at construction.
func MakeB2WheelJoint(def *B2WheelJointDef) *B2WheelJoint {
	j := &B2WheelJoint{
		B2Joint: MakeB2Joint(def),
	}

	j.M_localAnchorA = def.LocalAnchorA
	j.M_localAnchorB = def.LocalAnchorB
	j.M_localXAxisA = def.LocalAxisA
	j.M_localYAxisA = B2Vec2CrossScalarVector(1.0, j.M_localXAxisA)

	j.M_lowerTranslation = def.LowerTranslation
	j.M_upperTranslation = def.UpperTranslation
	j.M_enableLimit = def.EnableLimit

	j.M_maxMotorTorque = def.MaxMotorTorque
	j.M_motorSpeed = def.MotorSpeed
	j.M_enableMotor = def.EnableMotor

	j.M_stiffness = def.Stiffness
	j.M_damping = def.Damping

	// All impulses, effective masses, bias/gamma and the ax/ay axis vectors
	// start at their Go zero values, matching the explicit zero assignments
	// in the original constructor.
	return j
}
// InitVelocityConstraints caches body indices, masses and world-space
// anchors, computes the constraint axes (ax along the suspension,
// ay perpendicular) with their effective masses, and prepares the soft
// spring (bias/gamma) and motor terms for this step. With warm starting,
// the previous step's impulses are scaled by dtRatio and reapplied;
// otherwise all accumulated impulses are cleared.
func (joint *B2WheelJoint) InitVelocityConstraints(data B2SolverData) {
	joint.M_indexA = joint.M_bodyA.M_islandIndex
	joint.M_indexB = joint.M_bodyB.M_islandIndex
	joint.M_localCenterA = joint.M_bodyA.M_sweep.LocalCenter
	joint.M_localCenterB = joint.M_bodyB.M_sweep.LocalCenter
	joint.M_invMassA = joint.M_bodyA.M_invMass
	joint.M_invMassB = joint.M_bodyB.M_invMass
	joint.M_invIA = joint.M_bodyA.M_invI
	joint.M_invIB = joint.M_bodyB.M_invI

	mA := joint.M_invMassA
	mB := joint.M_invMassB
	iA := joint.M_invIA
	iB := joint.M_invIB

	cA := data.Positions[joint.M_indexA].C
	aA := data.Positions[joint.M_indexA].A
	vA := data.Velocities[joint.M_indexA].V
	wA := data.Velocities[joint.M_indexA].W

	cB := data.Positions[joint.M_indexB].C
	aB := data.Positions[joint.M_indexB].A
	vB := data.Velocities[joint.M_indexB].V
	wB := data.Velocities[joint.M_indexB].W

	qA := MakeB2RotFromAngle(aA)
	qB := MakeB2RotFromAngle(aB)

	// Compute the effective masses from the world-space anchor offsets.
	rA := B2RotVec2Mul(qA, B2Vec2Sub(joint.M_localAnchorA, joint.M_localCenterA))
	rB := B2RotVec2Mul(qB, B2Vec2Sub(joint.M_localAnchorB, joint.M_localCenterB))
	// d is the world-space separation between the two anchor points.
	d := B2Vec2Sub(B2Vec2Sub(B2Vec2Add(cB, rB), cA), rA)

	// Point to line constraint (perpendicular axis ay).
	{
		joint.M_ay = B2RotVec2Mul(qA, joint.M_localYAxisA)
		joint.M_sAy = B2Vec2Cross(B2Vec2Add(d, rA), joint.M_ay)
		joint.M_sBy = B2Vec2Cross(rB, joint.M_ay)

		joint.M_mass = mA + mB + iA*joint.M_sAy*joint.M_sAy + iB*joint.M_sBy*joint.M_sBy

		if joint.M_mass > 0.0 {
			joint.M_mass = 1.0 / joint.M_mass
		}
	}

	// Spring constraint (suspension axis ax).
	joint.M_ax = B2RotVec2Mul(qA, joint.M_localXAxisA)
	joint.M_sAx = B2Vec2Cross(B2Vec2Add(d, rA), joint.M_ax)
	joint.M_sBx = B2Vec2Cross(rB, joint.M_ax)

	invMass := mA + mB + iA*joint.M_sAx*joint.M_sAx + iB*joint.M_sBx*joint.M_sBx
	if invMass > 0.0 {
		joint.M_axialMass = 1.0 / invMass
	} else {
		joint.M_axialMass = 0.0
	}

	joint.M_springMass = 0.0
	joint.M_bias = 0.0
	joint.M_gamma = 0.0

	if joint.M_stiffness > 0.0 && invMass > 0.0 {
		joint.M_springMass = 1.0 / invMass

		C := B2Vec2Dot(d, joint.M_ax)

		// Soft-constraint coefficients ("magic formulas"): gamma softens the
		// constraint, bias feeds the position error C back into velocities.
		h := data.Step.Dt
		joint.M_gamma = h * (joint.M_damping + h*joint.M_stiffness)
		if joint.M_gamma > 0.0 {
			joint.M_gamma = 1.0 / joint.M_gamma
		}

		joint.M_bias = C * h * joint.M_stiffness * joint.M_gamma

		// Recompute the spring mass including the softening term.
		joint.M_springMass = invMass + joint.M_gamma
		if joint.M_springMass > 0.0 {
			joint.M_springMass = 1.0 / joint.M_springMass
		}
	} else {
		joint.M_springImpulse = 0.0
	}

	if joint.M_enableLimit {
		joint.M_translation = B2Vec2Dot(joint.M_ax, d)
	} else {
		joint.M_lowerImpulse = 0.0
		joint.M_upperImpulse = 0.0
	}

	if joint.M_enableMotor {
		joint.M_motorMass = iA + iB
		if joint.M_motorMass > 0.0 {
			joint.M_motorMass = 1.0 / joint.M_motorMass
		}
	} else {
		joint.M_motorMass = 0.0
		joint.M_motorImpulse = 0.0
	}

	if data.Step.WarmStarting {
		// Account for a variable time step by scaling the stored impulses.
		joint.M_impulse *= data.Step.DtRatio
		joint.M_springImpulse *= data.Step.DtRatio
		joint.M_motorImpulse *= data.Step.DtRatio

		axialImpulse := joint.M_springImpulse + joint.M_lowerImpulse - joint.M_upperImpulse
		P := B2Vec2Add(B2Vec2MulScalar(joint.M_impulse, joint.M_ay), B2Vec2MulScalar(axialImpulse, joint.M_ax))
		LA := joint.M_impulse*joint.M_sAy + axialImpulse*joint.M_sAx + joint.M_motorImpulse
		LB := joint.M_impulse*joint.M_sBy + axialImpulse*joint.M_sBx + joint.M_motorImpulse

		vA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
		wA -= joint.M_invIA * LA

		vB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
		wB += joint.M_invIB * LB
	} else {
		joint.M_impulse = 0.0
		joint.M_springImpulse = 0.0
		joint.M_motorImpulse = 0.0
		joint.M_lowerImpulse = 0.0
		joint.M_upperImpulse = 0.0
	}

	// Write the modified velocities back to the solver.
	data.Velocities[joint.M_indexA].V = vA
	data.Velocities[joint.M_indexA].W = wA
	data.Velocities[joint.M_indexB].V = vB
	data.Velocities[joint.M_indexB].W = wB
}
// SolveVelocityConstraints applies one sequential-impulse iteration:
// the soft spring along ax, the rotational motor (clamped to the maximum
// motor torque), the translation limits when enabled, and finally the
// point-to-line constraint along ay.
func (joint *B2WheelJoint) SolveVelocityConstraints(data B2SolverData) {
	mA := joint.M_invMassA
	mB := joint.M_invMassB
	iA := joint.M_invIA
	iB := joint.M_invIB

	vA := data.Velocities[joint.M_indexA].V
	wA := data.Velocities[joint.M_indexA].W
	vB := data.Velocities[joint.M_indexB].V
	wB := data.Velocities[joint.M_indexB].W

	// Solve spring constraint (soft: includes bias and gamma terms).
	{
		Cdot := B2Vec2Dot(joint.M_ax, B2Vec2Sub(vB, vA)) + joint.M_sBx*wB - joint.M_sAx*wA
		impulse := -joint.M_springMass * (Cdot + joint.M_bias + joint.M_gamma*joint.M_springImpulse)
		joint.M_springImpulse += impulse

		P := B2Vec2MulScalar(impulse, joint.M_ax)
		LA := impulse * joint.M_sAx
		LB := impulse * joint.M_sBx

		vA.OperatorMinusInplace(B2Vec2MulScalar(mA, P))
		wA -= iA * LA

		vB.OperatorPlusInplace(B2Vec2MulScalar(mB, P))
		wB += iB * LB
	}

	// Solve rotational motor constraint; the accumulated impulse is clamped
	// so the applied torque never exceeds maxMotorTorque.
	{
		Cdot := wB - wA - joint.M_motorSpeed
		impulse := -joint.M_motorMass * Cdot

		oldImpulse := joint.M_motorImpulse
		maxImpulse := data.Step.Dt * joint.M_maxMotorTorque
		joint.M_motorImpulse = B2FloatClamp(joint.M_motorImpulse+impulse, -maxImpulse, maxImpulse)
		impulse = joint.M_motorImpulse - oldImpulse

		wA -= iA * impulse
		wB += iB * impulse
	}

	if joint.M_enableLimit {
		// Lower limit: the accumulated impulse stays non-negative.
		{
			C := joint.M_translation - joint.M_lowerTranslation
			Cdot := B2Vec2Dot(joint.M_ax, B2Vec2Sub(vB, vA)) + joint.M_sBx*wB - joint.M_sAx*wA
			impulse := -joint.M_axialMass * (Cdot + math.Max(C, 0.0)*data.Step.Inv_dt)
			oldImpulse := joint.M_lowerImpulse
			joint.M_lowerImpulse = math.Max(joint.M_lowerImpulse+impulse, 0.0)
			impulse = joint.M_lowerImpulse - oldImpulse

			P := B2Vec2MulScalar(impulse, joint.M_ax)
			LA := impulse * joint.M_sAx
			LB := impulse * joint.M_sBx

			vA.OperatorMinusInplace(B2Vec2MulScalar(mA, P))
			wA -= iA * LA
			vB.OperatorPlusInplace(B2Vec2MulScalar(mB, P))
			wB += iB * LB
		}

		// Upper limit
		// Note: signs are flipped to keep C positive when the constraint is satisfied.
		// This also keeps the impulse positive when the limit is active.
		{
			C := joint.M_upperTranslation - joint.M_translation
			Cdot := B2Vec2Dot(joint.M_ax, B2Vec2Sub(vA, vB)) + joint.M_sAx*wA - joint.M_sBx*wB
			impulse := -joint.M_axialMass * (Cdot + math.Max(C, 0.0)*data.Step.Inv_dt)
			oldImpulse := joint.M_upperImpulse
			joint.M_upperImpulse = math.Max(joint.M_upperImpulse+impulse, 0.0)
			impulse = joint.M_upperImpulse - oldImpulse

			P := B2Vec2MulScalar(impulse, joint.M_ax)
			LA := impulse * joint.M_sAx
			LB := impulse * joint.M_sBx

			vA.OperatorPlusInplace(B2Vec2MulScalar(mA, P))
			wA += iA * LA
			vB.OperatorMinusInplace(B2Vec2MulScalar(mB, P))
			wB -= iB * LB
		}
	}

	// Solve point to line constraint (perpendicular axis ay).
	{
		Cdot := B2Vec2Dot(joint.M_ay, B2Vec2Sub(vB, vA)) + joint.M_sBy*wB - joint.M_sAy*wA
		impulse := -joint.M_mass * Cdot
		joint.M_impulse += impulse

		P := B2Vec2MulScalar(impulse, joint.M_ay)
		LA := impulse * joint.M_sAy
		LB := impulse * joint.M_sBy

		vA.OperatorMinusInplace(B2Vec2MulScalar(mA, P))
		wA -= iA * LA

		vB.OperatorPlusInplace(B2Vec2MulScalar(mB, P))
		wB += iB * LB
	}

	// Write the modified velocities back to the solver.
	data.Velocities[joint.M_indexA].V = vA
	data.Velocities[joint.M_indexA].W = wA
	data.Velocities[joint.M_indexB].V = vB
	data.Velocities[joint.M_indexB].W = wB
}
// SolvePositionConstraints corrects positional drift: it pushes the
// translation back inside the limits (when enabled) and removes separation
// perpendicular to the joint axis. Returns true when the remaining linear
// error is within B2_linearSlop.
func (joint *B2WheelJoint) SolvePositionConstraints(data B2SolverData) bool {
	cA := data.Positions[joint.M_indexA].C
	aA := data.Positions[joint.M_indexA].A
	cB := data.Positions[joint.M_indexB].C
	aB := data.Positions[joint.M_indexB].A

	linearError := 0.0

	if joint.M_enableLimit {
		qA := MakeB2RotFromAngle(aA)
		qB := MakeB2RotFromAngle(aB)

		rA := B2RotVec2Mul(qA, B2Vec2Sub(joint.M_localAnchorA, joint.M_localCenterA))
		rB := B2RotVec2Mul(qB, B2Vec2Sub(joint.M_localAnchorB, joint.M_localCenterB))
		d := B2Vec2Sub(B2Vec2Add(B2Vec2Sub(cB, cA), rB), rA)

		// NOTE(review): the moment arms below use the cached joint.M_ax
		// rather than the freshly computed ax; this mirrors the upstream
		// Box2D implementation.
		ax := B2RotVec2Mul(qA, joint.M_localXAxisA)
		sAx := B2Vec2Cross(B2Vec2Add(d, rA), joint.M_ax)
		sBx := B2Vec2Cross(rB, joint.M_ax)

		// C is the limit violation: zero when the translation lies inside
		// [lower, upper]; the limits collapse to an equality when they are
		// within two slops of each other.
		C := 0.0
		translation := B2Vec2Dot(ax, d)
		if math.Abs(joint.M_upperTranslation-joint.M_lowerTranslation) < 2.0*B2_linearSlop {
			C = translation
		} else if translation <= joint.M_lowerTranslation {
			C = math.Min(translation-joint.M_lowerTranslation, 0.0)
		} else if translation >= joint.M_upperTranslation {
			C = math.Max(translation-joint.M_upperTranslation, 0.0)
		}

		if C != 0.0 {
			invMass := joint.M_invMassA + joint.M_invMassB + joint.M_invIA*sAx*sAx + joint.M_invIB*sBx*sBx
			impulse := 0.0
			if invMass != 0.0 {
				impulse = -C / invMass
			}

			P := B2Vec2MulScalar(impulse, ax)
			LA := impulse * sAx
			LB := impulse * sBx

			cA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
			aA -= joint.M_invIA * LA
			cB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
			aB += joint.M_invIB * LB

			linearError = math.Abs(C)
		}
	}

	// Solve perpendicular constraint: remove separation along ay.
	{
		qA := MakeB2RotFromAngle(aA)
		qB := MakeB2RotFromAngle(aB)

		rA := B2RotVec2Mul(qA, B2Vec2Sub(joint.M_localAnchorA, joint.M_localCenterA))
		rB := B2RotVec2Mul(qB, B2Vec2Sub(joint.M_localAnchorB, joint.M_localCenterB))
		d := B2Vec2Sub(B2Vec2Add(B2Vec2Sub(cB, cA), rB), rA)

		ay := B2RotVec2Mul(qA, joint.M_localYAxisA)

		sAy := B2Vec2Cross(B2Vec2Add(d, rA), ay)
		sBy := B2Vec2Cross(rB, ay)

		C := B2Vec2Dot(d, ay)

		// NOTE(review): the inverse mass uses the cached joint.M_sAy/M_sBy
		// rather than the local sAy/sBy; this also mirrors upstream Box2D.
		invMass := joint.M_invMassA + joint.M_invMassB + joint.M_invIA*joint.M_sAy*joint.M_sAy + joint.M_invIB*joint.M_sBy*joint.M_sBy

		impulse := 0.0
		if invMass != 0.0 {
			impulse = -C / invMass
		}

		P := B2Vec2MulScalar(impulse, ay)
		LA := impulse * sAy
		LB := impulse * sBy

		cA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
		aA -= joint.M_invIA * LA
		cB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
		aB += joint.M_invIB * LB

		linearError = math.Max(linearError, math.Abs(C))
	}

	// Write the corrected positions back to the solver.
	data.Positions[joint.M_indexA].C = cA
	data.Positions[joint.M_indexA].A = aA
	data.Positions[joint.M_indexB].C = cB
	data.Positions[joint.M_indexB].A = aB

	return linearError <= B2_linearSlop
}
// GetAnchorA returns bodyA's anchor point in world coordinates.
func (joint B2WheelJoint) GetAnchorA() B2Vec2 {
	return joint.M_bodyA.GetWorldPoint(joint.M_localAnchorA)
}

// GetAnchorB returns bodyB's anchor point in world coordinates.
func (joint B2WheelJoint) GetAnchorB() B2Vec2 {
	return joint.M_bodyB.GetWorldPoint(joint.M_localAnchorB)
}

// GetReactionForce returns the reaction force at the anchor, combining the
// perpendicular impulse (along ay) with the spring and limit impulses
// (along ax), scaled by the inverse time step.
func (joint B2WheelJoint) GetReactionForce(inv_dt float64) B2Vec2 {
	return B2Vec2MulScalar(inv_dt, B2Vec2Add(B2Vec2MulScalar(joint.M_impulse, joint.M_ay), B2Vec2MulScalar(joint.M_springImpulse+joint.M_lowerImpulse-joint.M_upperImpulse, joint.M_ax)))
}

// GetReactionTorque returns the motor impulse scaled by the inverse time step.
func (joint B2WheelJoint) GetReactionTorque(inv_dt float64) float64 {
	return inv_dt * joint.M_motorImpulse
}
// GetJointTranslation returns the current separation of the two anchor
// points projected onto the joint's world-space axis.
func (joint B2WheelJoint) GetJointTranslation() float64 {
	pA := joint.M_bodyA.GetWorldPoint(joint.M_localAnchorA)
	pB := joint.M_bodyB.GetWorldPoint(joint.M_localAnchorB)
	axis := joint.M_bodyA.GetWorldVector(joint.M_localXAxisA)

	return B2Vec2Dot(B2Vec2Sub(pB, pA), axis)
}
// GetJointLinearSpeed returns the rate of change of the anchor separation
// projected onto the joint axis, including the contribution of each body's
// angular velocity at its anchor.
func (joint B2WheelJoint) GetJointLinearSpeed() float64 {
	bA := joint.M_bodyA
	bB := joint.M_bodyB

	// World-space anchor offsets from each body's center of mass.
	rA := B2RotVec2Mul(bA.M_xf.Q, B2Vec2Sub(joint.M_localAnchorA, bA.M_sweep.LocalCenter))
	rB := B2RotVec2Mul(bB.M_xf.Q, B2Vec2Sub(joint.M_localAnchorB, bB.M_sweep.LocalCenter))
	p1 := B2Vec2Add(bA.M_sweep.C, rA)
	p2 := B2Vec2Add(bB.M_sweep.C, rB)
	d := B2Vec2Sub(p2, p1)
	axis := B2RotVec2Mul(bA.M_xf.Q, joint.M_localXAxisA)

	vA := bA.M_linearVelocity
	vB := bB.M_linearVelocity
	wA := bA.M_angularVelocity
	wB := bB.M_angularVelocity

	// d/dt [dot(d, axis)] expanded: the axis itself rotates with bodyA.
	speed := B2Vec2Dot(d, B2Vec2CrossScalarVector(wA, axis)) + B2Vec2Dot(axis, B2Vec2Sub(B2Vec2Sub(B2Vec2Add(vB, B2Vec2CrossScalarVector(wB, rB)), vA), B2Vec2CrossScalarVector(wA, rA)))
	return speed
}
// GetJointAngle returns the rotation of bodyB relative to bodyA.
func (joint B2WheelJoint) GetJointAngle() float64 {
	bA := joint.M_bodyA
	bB := joint.M_bodyB
	return bB.M_sweep.A - bA.M_sweep.A
}

// GetJointAngularSpeed returns the angular velocity of bodyB relative to bodyA.
func (joint B2WheelJoint) GetJointAngularSpeed() float64 {
	wA := joint.M_bodyA.M_angularVelocity
	wB := joint.M_bodyB.M_angularVelocity
	return wB - wA
}

/// IsLimitEnabled reports whether the joint translation limit is enabled.
func (joint B2WheelJoint) IsLimitEnabled() bool {
	return joint.M_enableLimit
}
/// EnableLimit enables or disables the joint translation limit, waking both
/// bodies and resetting the limit impulses when the flag changes.
///
/// Fixed: the original used a value receiver, so the flag change and the
/// impulse resets mutated a copy and were silently discarded (the sibling
/// EnableMotor correctly uses a pointer receiver).
func (joint *B2WheelJoint) EnableLimit(flag bool) {
	if flag != joint.M_enableLimit {
		joint.M_bodyA.SetAwake(true)
		joint.M_bodyB.SetAwake(true)
		joint.M_enableLimit = flag
		joint.M_lowerImpulse = 0.0
		joint.M_upperImpulse = 0.0
	}
}
/// GetLowerLimit returns the lower joint translation limit, usually in meters.
func (joint B2WheelJoint) GetLowerLimit() float64 {
	return joint.M_lowerTranslation
}

/// GetUpperLimit returns the upper joint translation limit, usually in meters.
func (joint B2WheelJoint) GetUpperLimit() float64 {
	return joint.M_upperTranslation
}
/// SetLimits sets the joint translation limits (usually in meters), waking
/// both bodies and resetting the limit impulses when either bound changes.
/// Asserts lower <= upper.
///
/// Fixed: the original used a value receiver, so the new limits and the
/// impulse resets mutated a copy and were silently discarded.
func (joint *B2WheelJoint) SetLimits(lower float64, upper float64) {
	B2Assert(lower <= upper)
	if lower != joint.M_lowerTranslation || upper != joint.M_upperTranslation {
		joint.M_bodyA.SetAwake(true)
		joint.M_bodyB.SetAwake(true)
		joint.M_lowerTranslation = lower
		joint.M_upperTranslation = upper
		joint.M_lowerImpulse = 0.0
		joint.M_upperImpulse = 0.0
	}
}
// IsMotorEnabled reports whether the joint motor is enabled.
func (joint B2WheelJoint) IsMotorEnabled() bool {
	return joint.M_enableMotor
}

// EnableMotor enables or disables the joint motor, waking both bodies when
// the flag changes.
func (joint *B2WheelJoint) EnableMotor(flag bool) {
	if flag != joint.M_enableMotor {
		joint.M_bodyA.SetAwake(true)
		joint.M_bodyB.SetAwake(true)
		joint.M_enableMotor = flag
	}
}

// SetMotorSpeed sets the desired motor speed, waking both bodies on change.
func (joint *B2WheelJoint) SetMotorSpeed(speed float64) {
	if speed != joint.M_motorSpeed {
		joint.M_bodyA.SetAwake(true)
		joint.M_bodyB.SetAwake(true)
		joint.M_motorSpeed = speed
	}
}

// SetMaxMotorTorque sets the maximum motor torque, waking both bodies on
// change.
func (joint *B2WheelJoint) SetMaxMotorTorque(torque float64) {
	if torque != joint.M_maxMotorTorque {
		joint.M_bodyA.SetAwake(true)
		joint.M_bodyB.SetAwake(true)
		joint.M_maxMotorTorque = torque
	}
}

// GetMotorTorque returns the motor impulse scaled by the inverse time step.
func (joint B2WheelJoint) GetMotorTorque(inv_dt float64) float64 {
	return inv_dt * joint.M_motorImpulse
}
/// SetStiffness sets the suspension spring stiffness (typically N/m).
func (joint *B2WheelJoint) SetStiffness(stiffness float64) {
	joint.M_stiffness = stiffness
}

/// GetStiffness returns the suspension spring stiffness (typically N/m).
func (joint B2WheelJoint) GetStiffness() float64 {
	return joint.M_stiffness
}

/// SetDamping sets the suspension damping (typically N*s/m).
func (joint *B2WheelJoint) SetDamping(damping float64) {
	joint.M_damping = damping
}

/// GetDamping returns the suspension damping (typically N*s/m).
func (joint B2WheelJoint) GetDamping() float64 {
	return joint.M_damping
}
func (joint *B2WheelJoint) Dump() {
indexA := joint.M_bodyA.M_islandIndex
indexB := joint.M_bodyB.M_islandIndex
fmt.Printf(" b2WheelJointDef jd;\n")
fmt.Printf(" jd.bodyA = bodies[%d];\n", indexA)
fmt.Printf(" jd.bodyB = bodies[%d];\n", indexB)
fmt.Printf(" jd.collideConnected = bool(%v);\n", joint.M_collideConnected)
fmt.Printf(" jd.localAnchorA.Set(%.15f, %.15f);\n", joint.M_localAnchorA.X, joint.M_localAnchorA.Y)
fmt.Printf(" jd.localAnchorB.Set(%.15f, %.15f);\n", joint.M_localAnchorB.X, joint.M_localAnchorB.Y)
fmt.Printf(" jd.localAxisA.Set(%.15f, %.15f);\n", joint.M_localXAxisA.X, joint.M_localXAxisA.Y)
fmt.Printf(" jd.enableMotor = bool(%v);\n", joint.M_enableMotor)
fmt.Printf(" jd.motorSpeed = %.15f;\n", joint.M_motorSpeed)
fmt.Printf(" jd.maxMotorTorque = %.15f;\n", joint.M_maxMotorTorque)
fmt.Printf(" jd.jd.stiffness = %.15f;\n", joint.M_stiffness)
fmt.Printf(" jd.damping = %.15f;\n", joint.M_damping)
fmt.Printf(" joints[%d] = m_world.CreateJoint(&jd);\n", joint.M_index)
} | DynamicsB2JointWheel.go | 0.895922 | 0.772187 | DynamicsB2JointWheel.go | starcoder |
package wadlib
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/sha1"
"encoding/binary"
"errors"
"fmt"
)
// WADFile pairs a content record from the TMD with that content's raw data.
// LoadData stores the data in encrypted form; DecryptData/EncryptData
// transform RawData in place.
type WADFile struct {
	ContentRecord
	RawData []byte
}
// LoadData splits the WAD data section into per-content WADFile entries,
// placed by each content record's index. The stored data remains encrypted.
func (w *WAD) LoadData(data []byte) error {
	// Each content within the data section is aligned to a 0x40/64-byte boundary.
	r := readable{
		data: data,
	}

	contents := w.TMD.Contents

	// All data contents will be the same amount as the number of contents per TMD.
	wads := make([]WADFile, len(contents))

	for _, content := range contents {
		// Nintendo's files index contents 0..n-1, but external tools may not.
		// Guard against out-of-range indices so wads[content.Index] below
		// cannot panic on malformed TMDs.
		if int(content.Index) >= len(wads) {
			return fmt.Errorf("content %08x has out-of-range index %d", content.ID, content.Index)
		}

		// It's okay to cast this from a uint64 as the WAD file format
		// cannot exceed the maximum uint32 value within the data section.
		// We read aligned to 16 bytes as the encrypted data is stored with padding.
		// Not all contents meet the expected 16-byte boundary.
		paddedSize := uint32(content.Size)
		leftover := paddedSize % 16
		if leftover != 0 {
			paddedSize += 16 - leftover
		}

		// Read the padded amount as aligned to 64 bytes.
		encryptedData := r.getRange(paddedSize)

		file := WADFile{
			ContentRecord: content,
			RawData:       encryptedData,
		}

		wads[file.Index] = file
	}

	w.Data = wads
	return nil
}
// GetData reassembles the WAD data section by concatenating every content's
// raw data, each padded to the internal 64-byte alignment.
func (w *WAD) GetData() []byte {
	var buf bytes.Buffer
	for _, content := range w.Data {
		buf.Write(pad(content.RawData))
	}
	return buf.Bytes()
}
// DecryptData decrypts the file's data in place with the given title key.
//
// AES-CBC is used with an IV consisting of the content's big-endian index
// followed by 14 null bytes. After decryption the padding is trimmed and the
// result is verified against the SHA-1 hash in the content record.
func (d *WADFile) DecryptData(titleKey [16]byte) error {
	content := d.ContentRecord

	// The title's decrypted key will be what we'll decrypt with.
	block, err := aes.NewCipher(titleKey[:])
	if err != nil {
		return err
	}

	// The IV is the two-byte big-endian content index padded with 14 null
	// bytes; writing the index into a zeroed 16-byte slice produces exactly
	// that.
	iv := make([]byte, 16)
	binary.BigEndian.PutUint16(iv, content.Index)

	blockMode := cipher.NewCBCDecrypter(block, iv)

	// The resulting decrypted contents is the same size as the input, including padding.
	decryptedData := make([]byte, len(d.RawData))
	blockMode.CryptBlocks(decryptedData, d.RawData)

	// Trim off the excess padding once decrypted.
	decryptedData = decryptedData[:content.Size]

	// Ensure that the decrypted data matches the SHA-1 hash given in the
	// contents list. (bytes.Equal replaces the original bytes.Compare != 0.)
	sha := sha1.Sum(decryptedData)
	if !bytes.Equal(sha[:], content.Hash[:]) {
		return errors.New(fmt.Sprintf("content %08x did not match the noted hash when decrypted", content.ID))
	}

	// We're all set!
	d.RawData = decryptedData
	return nil
}
func (d *WADFile) EncryptData(titleKey [16]byte) error {
content := d.ContentRecord
// The title's decrypted key will be what we'll encrypt with.
block, err := aes.NewCipher(titleKey[:])
if err != nil {
return err
}
// The IV we'll use will be the two bytes sourced from the content's index,
// padded with 14 null bytes.
var indexBytes [2]byte
binary.BigEndian.PutUint16(indexBytes[:], content.Index)
iv := make([]byte, 16)
iv[0] = indexBytes[0]
iv[1] = indexBytes[1]
blockMode := cipher.NewCBCEncrypter(block, iv)
// One must encrypt content to 16 bytes.
// We pad with null bytes.
paddedSize := uint32(content.Size)
leftover := paddedSize % 16
if leftover != 0 {
paddedSize += 16 - leftover
}
decryptedData := make([]byte, paddedSize)
copy(decryptedData, d.RawData)
// The resulting encrypted contents is the same size as our adjusted input, including padding.
encryptedData := make([]byte, len(decryptedData))
// ...and we're off!
blockMode.CryptBlocks(encryptedData, decryptedData)
// Update the content record to reflect the hash of our origin content.
sha := sha1.Sum(d.RawData)
d.Hash = sha
// We're all set!
d.RawData = encryptedData
return nil
} | file.go | 0.501465 | 0.4165 | file.go | starcoder |
package binary
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"math"
"strings"
)
// Direction markers recorded in AuditPath.Order: left means the sibling hash
// on the path is the left child (prepended before hashing in IsValid),
// right means it is the right child (appended).
const (
	left = iota
	right
)
// MerkleTree is a binary tree with hash values. Leaves hold the hash of a
// data block; interior nodes hold the hash of the concatenation of their
// children's hashes. Parent is nil at the root.
type MerkleTree struct {
	Parent *MerkleTree
	Left   *MerkleTree
	Right  *MerkleTree
	Hash   []byte
}

// AuditPath is the shortest list of additional nodes in the Merkle tree
// required to compute the root hash for that tree. Order[i] records on which
// side Path[i] sits relative to the running hash (see the left/right consts).
type AuditPath struct {
	Path  [][]byte
	Order []int
}
// hash returns the SHA-256 digest of data as a byte slice.
func hash(data []byte) []byte {
	h := sha256.New()
	h.Write(data)
	return h.Sum(nil)
}
// hashString renders a hash digest as a lowercase hexadecimal string.
func hashString(hash []byte) string {
	return fmt.Sprintf("%x", hash)
}
// New builds a new Merkle hash tree using the data. If the data is empty
// then the hash value of the root node is the hash of an empty string.
func New(data ...[]byte) *MerkleTree {
	n := len(data)
	if n == 0 {
		return &MerkleTree{Hash: hash([]byte{})}
	}

	var subTree func([][]byte) *MerkleTree
	subTree = func(data [][]byte) *MerkleTree {
		n := len(data)

		// leaf node
		if n == 1 {
			return &MerkleTree{Hash: hash(data[0])}
		}

		parent := &MerkleTree{}
		// k is the largest power of two strictly less than n, so the left
		// subtree is always a complete binary tree.
		k := int(math.Exp2(math.Ceil(math.Log2(float64(n)) - 1)))
		left := subTree(data[0:k])
		right := subTree(data[k:n])
		left.Parent = parent
		right.Parent = parent
		parent.Left = left
		parent.Right = right
		// An interior node hashes the concatenation of its children's hashes.
		parent.Hash = hash(append(left.Hash, right.Hash...))

		return parent
	}

	return subTree(data)
}
// findLeaf finds the leaf node with the same hash value, searching the left
// subtree before the right. If no leaf matches, nil is returned. A nil
// receiver is a valid empty subtree.
func (mt *MerkleTree) findLeaf(hash []byte) *MerkleTree {
	if mt == nil {
		return nil
	}

	// Only leaves (nodes with no children) are compared against the hash.
	if mt.Left == nil && mt.Right == nil && bytes.Equal(mt.Hash, hash) {
		return mt
	}

	leaf := mt.Left.findLeaf(hash)
	if leaf == nil {
		leaf = mt.Right.findLeaf(hash)
	}

	return leaf
}
// GetAuditPath returns a Merkle audit path for a leaf node. The audit path
// proofs the hash value of the data belongs to a leaf node. It walks from
// the leaf to the root, recording each node's sibling hash and the side the
// sibling is on. An error is returned when hash(data) is not a leaf.
func (mt *MerkleTree) GetAuditPath(data []byte) (*AuditPath, error) {
	node := mt.findLeaf(hash(data))
	if node == nil {
		return nil, errors.New("failed to find leaf node")
	}

	ap := &AuditPath{}
	for !bytes.Equal(node.Hash, mt.Hash) {
		// Record the sibling's hash and which side it occupies.
		if node.Parent.Left == node {
			ap.Path = append(ap.Path, node.Parent.Right.Hash)
			ap.Order = append(ap.Order, right)
		} else {
			ap.Path = append(ap.Path, node.Parent.Left.Hash)
			ap.Order = append(ap.Order, left)
		}
		node = node.Parent
	}

	return ap, nil
}
// IsValid reports whether the audit path proves that hash(data) is a leaf
// of the Merkle hash tree with the given root hash. A nil path is never
// valid.
func (ap *AuditPath) IsValid(data []byte, rootHash []byte) bool {
	if ap == nil {
		return false
	}

	current := hash(data)
	for i, sibling := range ap.Path {
		// Combine with the sibling on the recorded side, then re-hash.
		switch ap.Order[i] {
		case left:
			current = hash(append(sibling, current...))
		default:
			current = hash(append(current, sibling...))
		}
	}

	return bytes.Equal(rootHash, current)
}
// Pretty returns a format string slice for the Merkle hash tree as ASCII
// text, one string per output row. nodeWidth is the number of leading hex
// characters of each node's hash to display. The tree is painted onto a
// fixed-size character canvas which is then cropped to the drawn region.
func (mt *MerkleTree) Pretty(nodeWidth int) []string {
	if mt == nil || nodeWidth < 1 {
		return []string{}
	}

	leftBranchNum := 0 // number of leftmost branches
	for leftRoot := mt.Left; leftRoot != nil; leftRoot = leftRoot.Left {
		leftBranchNum++
	}

	/*
	 * *    **     ***      ****
	 * / \   / \    / \     /    \
	 * *   * /   \  /   \   /      \
	 * **   ** ***   ***  /        \
	 * ****          ****
	 *
	 * nodeWidth = 1             2             3              4
	 * offset    = 1             1             2              2
	 * branches  = {1,3,7,15...} {2,4,9,19...} {2,5,11,23...} {3,6,13,27...}
	 */
	offset := int(math.Ceil(float64(nodeWidth) / 2))
	branches := make([]int, leftBranchNum) // branch lengths at different heights
	length := nodeWidth/2 + 1
	for i := range branches {
		if i == 0 {
			branches[i] = length
			continue
		}
		branches[i] = length + i + offset
		length += branches[i]
	}

	/*
	 * treeHeight = 1  2  3  4
	 *
	 * nodeWidth = 1        --> canvasMaxHeight = 1  3  7  15
	 * branches = {1,3,7,15...}  canvasMaxWidth  = 1  5  13 29
	 *
	 * nodewidth = 2        --> canvasMaxHeight = 1  4  9  19
	 * branches = {2,4,9,19...}  canvasMaxWidth  = 2  7  17 37
	 *
	 * nodeWidth = 3        --> canvasMaxHeight = 1  4  10 22
	 * branches = {2,5,11,23...} canvasMaxWidth  = 3  9  21 45
	 *
	 * nodeWidth = 4        --> canvasMaxHeight = 1  5  12 26
	 * branches = {3,6,13,27...} canvasMaxWidth  = 4  11 25 53
	 */
	canvasMaxHeight := 1
	canvasMaxWidth := nodeWidth
	for i, b := range branches {
		if i == 0 {
			canvasMaxWidth = offset*2 - 1
		}
		canvasMaxHeight += b + 1
		canvasMaxWidth += (b + 1) * 2
	}
	// Fill the canvas with spaces before drawing.
	canvas := make([][]byte, canvasMaxHeight)
	for i := range canvas {
		canvas[i] = make([]byte, canvasMaxWidth)
		for j := range canvas[i] {
			canvas[i][j] = ' '
		}
	}

	// limit the scope of the canvas: track the bounding box of drawn cells.
	maxX := 0
	minY := canvasMaxWidth - 1
	maxY := 0

	// draw paints the node label at (x, y) and recursively draws '/' and
	// '\' branch strokes down to the children.
	var draw func(*MerkleTree, int, int)
	draw = func(mt *MerkleTree, x int, y int) {
		copy(canvas[x][y:y+nodeWidth], hashString(mt.Hash)[0:nodeWidth])
		if mt.Parent != nil {
			if mt == mt.Parent.Left {
				if x > maxX {
					maxX = x
				}
				if y < minY {
					minY = y
				}
			} else {
				if y+nodeWidth-1 > maxY {
					maxY = y + nodeWidth - 1
				}
			}
		}

		if mt.Left == nil {
			return
		}

		// Branch stroke length depends on how tall the child subtree is.
		rightBranchNum := 0
		for childRoot := mt.Right; childRoot.Left != nil; childRoot = childRoot.Left {
			rightBranchNum++
		}
		length := branches[rightBranchNum]

		/*
		 *     ****
		 *     /  \
		 *    /    \
		 *   /      \
		 *  /        \
		 * /          \
		 * /            \
		 * ****          ****
		 * /  \          /  \
		 * /    \        /    \
		 * /      \      /      \
		 * ****    **** ****    ****
		 */
		lx := x + 1
		ly := y + offset - 2
		if mt.Parent != nil && mt == mt.Parent.Right {
			ly += (nodeWidth + 1) % 2
		}
		for lx <= x+length {
			canvas[lx][ly] = '/'
			lx++
			ly--
		}
		draw(mt.Left, lx, ly-offset+1)

		rx := x + 1
		ry := y + offset
		if mt.Parent != nil && mt == mt.Parent.Right {
			ry += (nodeWidth + 1) % 2
		}
		for rx <= x+length {
			canvas[rx][ry] = '\\'
			rx++
			ry++
		}
		draw(mt.Right, rx, ry-int(math.Ceil(float64(nodeWidth-1)/2)))
	}
	draw(mt, 0, len(canvas[0])/2-offset+1)

	// Crop the canvas to the drawn bounding box and trim trailing spaces.
	canvas = canvas[:maxX+1]
	for i := range canvas {
		canvas[i] = canvas[i][minY : maxY+1]
	}

	canvasStrinng := make([]string, len(canvas))
	for i, c := range canvas {
		canvasStrinng[i] = strings.TrimRight(string(c), " ")
	}

	return canvasStrinng
}
// PrettyString returns a format string for the Merkle hash tree as ASCII
// text, joining the rows produced by Pretty with newlines. nodeWidth is the
// number of leading hex characters shown per node.
func (mt *MerkleTree) PrettyString(nodeWidth int) string {
	return strings.Join(mt.Pretty(nodeWidth), "\n")
}
package main
import (
"fmt"
"sort"
"github.com/rolfschmidt/advent-of-code-2021/helper"
)
// main prints the answers to both puzzle parts.
func main() {
	fmt.Println("Part 1", Part1())
	fmt.Println("Part 2", Part2())
}

// Part1 sums the risk levels of all low points in the height map.
func Part1() int {
	return Run(false)
}

// Part2 multiplies the sizes of the three largest basins.
func Part2() int {
	return Run(true)
}
// Point is a single cell of the height map: its height value and its (x, y)
// coordinates within the grid.
type Point struct {
	value int
	x     int
	y     int
}
// low reports whether p is a low point: strictly smaller than every
// orthogonally adjacent cell that exists in the matrix.
func (p Point) low(matrix [][]Point) bool {
	// Up, down, left, right offsets.
	deltas := [4][2]int{{0, -1}, {0, 1}, {-1, 0}, {1, 0}}
	for _, d := range deltas {
		nx, ny := p.x+d[0], p.y+d[1]
		if !PointExist(matrix, nx, ny) {
			continue
		}
		if matrix[ny][nx].value <= p.value {
			return false
		}
	}
	return true
}
// risk returns the risk level of a low point: its height plus one.
func (p Point) risk() int {
	return p.value + 1
}
// PointExist reports whether (x, y) is a valid coordinate within matrix.
// The y bounds are verified before matrix[y] is indexed.
func PointExist(matrix [][]Point, x int, y int) bool {
	return x >= 0 && y >= 0 && y <= len(matrix)-1 && x <= len(matrix[y])-1
}
// Checked reports whether coordinate (x, y) has already been visited.
// Callers (Basin) only ever store true in the map, so the zero-value map
// lookup is equivalent to the original comma-ok existence test and avoids
// the redundant second step.
func Checked(checked map[string]bool, x int, y int) bool {
	return checked[helper.Int2String(x)+","+helper.Int2String(y)]
}
// Basin flood-fills the basin containing (px, py) and returns its size along
// with the updated visited set. Out-of-range, already-visited, and
// height-9 cells terminate the fill with size 0.
func Basin(matrix [][]Point, px int, py int, checked map[string]bool) (int, map[string]bool) {
	if !PointExist(matrix, px, py) || Checked(checked, px, py) || matrix[py][px].value == 9 {
		checked[helper.Int2String(px)+","+helper.Int2String(py)] = true
		return 0, checked
	}

	result := 1
	checked[helper.Int2String(px)+","+helper.Int2String(py)] = true

	// Recurse into the four orthogonal neighbors (right, left, up, down —
	// the original's order) and accumulate their sizes. The original's
	// `basin = 0` resets before each call were dead stores.
	for _, d := range [4][2]int{{1, 0}, {-1, 0}, {0, 1}, {0, -1}} {
		var sub int
		sub, checked = Basin(matrix, px+d[0], py+d[1], checked)
		result += sub
	}

	return result, checked
}
// Run reads the height map from input.txt and computes the requested answer:
// the summed risk of all low points when Part2 is false, or the product of
// the three largest basin sizes when Part2 is true.
func Run(Part2 bool) int {
	matrix := [][]Point{}

	// Parse each line of digits into a row of Points carrying their
	// coordinates.
	for y, line := range helper.ReadFile("input.txt") {
		line := helper.Split(line, "")

		linePoints := []Point{}
		for x, v := range line {
			linePoints = append(linePoints, Point{ value: helper.String2Int(v), x: x, y: y })
		}

		matrix = append(matrix, linePoints)
	}

	result := 0

	if !Part2 {
		// Part one: sum the risk of every low point.
		for _, line := range matrix {
			for _, point := range line {
				if point.low(matrix) {
					result += point.risk()
				}
			}
		}
	} else {
		basins := []int{}

		// Part two: flood-fill a basin from every low point and record its
		// size.
		for _, line := range matrix {
			for _, point := range line {
				if point.low(matrix) {
					basin, _ := Basin(matrix, point.x, point.y, map[string]bool{})
					basins = append(basins, basin)
				}
			}
		}

		sort.Ints(basins)

		// The answer is the product of the three largest basin sizes.
		return basins[len(basins) - 1] * basins[len(basins) - 2] * basins[len(basins) - 3]
	}

	return result
}
package man
import (
. "github.com/gocircuit/circuit/gocircuit.org/render"
)
// RenderRunPage renders the "Running Circuit servers" manual page,
// substituting the two-host figure into the runBody template.
func RenderRunPage() string {
	figs := A{
		"FigTwoHosts": RenderFigurePngSvg("A circuit system of two hosts (i.e. two circuit servers).", "servers", "400px"),
	}
	return RenderHtml("Running Circuit servers", Render(runBody, figs))
}
// runBody is the HTML template body for the "Running Circuit servers" manual
// page; it is rendered by RenderRunPage with the figure map defined there.
const runBody = `
<h1>Running Circuit servers</h1>
<p>Circuit servers can be started asynchronously (and in any order)
using the command
<pre>
	circuit start -if eth0 -discover 192.168.3.11:7711
</pre>
<p>The same command is used for all instances. The <code>-if</code> option specifies the
desired network interface to bind to, while the <code>-discover</code> command
specifies a desired IP address of a UDP multicast channel to be used for automatic
server-server discover.
The <code>-discover</code> option can be omitted by setting the environment variable
<code>CIRCUIT_DISCOVER</code> to equal the desired multicast address.
<h2>Alternative advanced server startup</h2>
<p>To run the circuit server on the first machine, pick a public IP address and port for it to
listen on, and start it like so
<pre>
	circuit start -a 10.0.0.1:11022
</pre>
<p>The circuit server will print its own circuit URL on its standard output.
It should look like this:
<pre>
	circuit://10.0.0.1:11022/78517/Q56e7a2a0d47a7b5d
</pre>
<p>Copy it. We will need it to tell the next circuit server to “join” this one
in a network, i.e. circuit.
<p>Log onto another machine and similarly start a circuit server there, as well.
This time, use the <code>-j</code> option to tell the new server to join the first one:
<pre>
	circuit start -a 10.0.0.2:11088 -j circuit://10.0.0.1:11022/78517/Q56e7a2a0d47a7b5d
</pre>
<p>You now have two mutually-aware circuit servers, running on two different
hosts in your cluster.
{{.FigTwoHosts}}
<p>You can join any number of additional hosts to the circuit environment in a
similar fashion, even billions. The circuit uses a modern
<a href="http://en.wikipedia.org/wiki/Expander_graph">expander graph</a>-based algorithm for
presence awareness and ordered communication, which is genuinely distributed;
It uses communication and connectivity sparingly, hardly leaving a footprint
when idle.
` | gocircuit.org/man/run.go | 0.69035 | 0.523786 | run.go | starcoder
package tuple
import (
"math"
)
// Tuple is a 4-component (x, y, z, w) value used throughout the ray tracer:
// w == 1 marks a point, w == 0 marks a direction vector; colors reuse the
// first three slots for r/g/b.
type Tuple [4]float64

// New builds a Tuple from explicit x, y, z and w components.
func New(x, y, z, w float64) Tuple {
	return Tuple{x, y, z, w}
}

// Point builds a point in space (w = 1).
func Point(x, y, z float64) Tuple {
	return New(x, y, z, 1.0)
}

// Vector builds a direction vector (w = 0).
func Vector(x, y, z float64) Tuple {
	return New(x, y, z, 0.0)
}

// Color builds an r/g/b color. NOTE(review): colors piggyback on Tuple, so
// the unused w slot costs one extra float64 per color; a dedicated type could
// be introduced later without changing this constructor.
func Color(x, y, z float64) Tuple {
	return New(x, y, z, 0.0)
}
// Add returns the component-wise sum of t and x.
func (t Tuple) Add(x Tuple) Tuple {
	var sum Tuple
	for i := range t {
		sum[i] = t[i] + x[i]
	}
	return sum
}
// Sub returns the component-wise difference t - x.
func (t Tuple) Sub(x Tuple) Tuple {
	var diff Tuple
	for i := range t {
		diff[i] = t[i] - x[i]
	}
	return diff
}
// SubScalar subtracts x from the x/y/z components of t and forces w to 1,
// so the result is always a point.
func (t Tuple) SubScalar(x float64) Tuple {
	out := t
	out[0] -= x
	out[1] -= x
	out[2] -= x
	out[3] = 1.0
	return out
}
// Negate returns t with every component negated.
func (t Tuple) Negate() Tuple {
	var out Tuple
	for i, v := range t {
		out[i] = -v
	}
	return out
}
// Mul scales every component of t by the scalar x.
func (t Tuple) Mul(x float64) Tuple {
	var out Tuple
	for i, v := range t {
		out[i] = v * x
	}
	return out
}
// ColorMul returns the Hadamard (component-wise) product of two colors.
func (t Tuple) ColorMul(x Tuple) Tuple {
	var out Tuple
	for i := range t {
		out[i] = t[i] * x[i]
	}
	return out
}
// Div divides every component of t by the scalar x.
func (t Tuple) Div(x float64) Tuple {
	var out Tuple
	for i, v := range t {
		out[i] = v / x
	}
	return out
}
// TupleDiv divides a by b component-wise over x/y/z and forces w to 1,
// yielding a point.
func (a Tuple) TupleDiv(b Tuple) Tuple {
	out := Tuple{0, 0, 0, 1.0}
	for i := 0; i < 3; i++ {
		out[i] = a[i] / b[i]
	}
	return out
}
// Magnitude returns the Euclidean length of t (the w component is included
// in the sum; vectors have w = 0 so this is the usual 3-D length for them).
func (t Tuple) Magnitude() float64 {
	sum := 0.0
	for _, v := range t {
		sum += v * v
	}
	return math.Sqrt(sum)
}
// Normalize returns t scaled to unit length.
func (t Tuple) Normalize() Tuple {
	length := t.Magnitude()
	return t.Div(length)
}
// Dot returns the 4-component dot product of t and x.
func (t Tuple) Dot(x Tuple) float64 {
	sum := 0.0
	for i := range t {
		sum += t[i] * x[i]
	}
	return sum
}
// Cross returns the 3-D cross product of t and x as a vector (w = 0).
func (t Tuple) Cross(x Tuple) Tuple {
	cx := t[1]*x[2] - t[2]*x[1]
	cy := t[2]*x[0] - t[0]*x[2]
	cz := t[0]*x[1] - t[1]*x[0]
	return Tuple{cx, cy, cz, 0.0}
}
// Reflect mirrors t around the (unit) normal n: t - 2*(t·n)*n.
func (t Tuple) Reflect(n Tuple) Tuple {
	projection := n.Mul(2 * t.Dot(n))
	return t.Sub(projection)
}
// Min returns a point holding the component-wise minima of a and b.
func (a Tuple) Min(b Tuple) Tuple {
	x := math.Min(a[0], b[0])
	y := math.Min(a[1], b[1])
	z := math.Min(a[2], b[2])
	return Point(x, y, z)
}
// Max returns a point holding the component-wise maxima of a and b.
func (a Tuple) Max(b Tuple) Tuple {
	return Point(
		math.Max(a[0], b[0]),
		math.Max(a[1], b[1]),
		math.Max(a[2], b[2]),
	)
} | tuple/tuple.go | 0.623148 | 0.760451 | tuple.go | starcoder
package navigation
import (
"fmt"
"github.com/vmykhailyk/advent-of-code-2021/pkg/structures"
"math"
)
// Path is an ordered list of visited points; the first element is the start.
type Path []structures.Point

// End returns the last point appended to the path.
func (path Path) End() structures.Point {
	last := len(path) - 1
	return path[last]
}
// ContinueWith returns a new Path extending path by point. The three-index
// slice expression caps the capacity at the current length, forcing append to
// copy, so sibling paths branched from the same prefix never share a backing
// array.
func (path Path) ContinueWith(point structures.Point) Path {
	return append(path[0:len(path):len(path)], point)
}
// HasPoint reports whether point already occurs anywhere in the path.
func (path Path) HasPoint(point structures.Point) bool {
	for _, visited := range path {
		if visited == point {
			return true
		}
	}
	return false
}
// LengthAt computes the total risk of walking the path on plot.
// The starting point (index 0) does not contribute to the total.
func (path Path) LengthAt(plot structures.FlatValuePlot) int {
	total := 0
	for i := 1; i < len(path); i++ {
		total += plot.Get(path[i])
	}
	return total
}
// BuildRiskLevelPlot returns a plot holding, for every cell, the lowest total
// risk found for a path from start towards end. Cells never reached keep the
// math.MaxInt sentinel.
func BuildRiskLevelPlot(plot structures.FlatValuePlot, start structures.Point, end structures.Point) structures.FlatValuePlot {
	maxX, maxY := plot.Dimensions()
	// Seed every cell with "infinity"; the search lowers values as paths improve.
	distancePlot := structures.FlatValuesFromSpec(maxX, maxY, math.MaxInt)
	buildDistancePlotV2(plot, distancePlot, start, end)
	return distancePlot
}
// ScaleRiskPlot tiles plot size×size times: each tile adds its x+y tile
// offset to every risk value, wrapping values above 9 back into the 1..9
// range (AoC 2021 day 15 part 2 map expansion).
func ScaleRiskPlot(plot structures.FlatValuePlot, size int) structures.FlatValuePlot {
	maxX, maxY := plot.Dimensions()
	newPlot := structures.FlatValuesFromSpec(maxX*size, maxY*size, 0)
	plot.VisitAll(func(value int, point structures.Point) {
		for x := 0; x < size; x++ {
			for y := 0; y < size; y++ {
				// Risk wraps 9 -> 1 (never to 0), hence the loop rather than %10.
				newValue := value + x + y
				for newValue > 9 {
					newValue -= 9
				}
				newPoint := structures.Point{X: point.X + (maxX * x), Y: point.Y + (maxY * y)}
				//fmt.Printf("Point %v, %v\n", point, newPoint)
				newPlot.Set(newPoint, newValue)
			}
		}
	})
	return newPlot
}
// buildDistancePlot is the original recursive depth-first search: it extends
// path in all four directions, recording the cheapest risk seen per cell.
//
// NOTE(review): superseded by buildDistancePlotV2. It enumerates full paths
// recursively and recomputes each path's cost via LengthAt on every call,
// which does not scale to larger grids. Kept for reference; appears unused.
func buildDistancePlot(plot structures.FlatValuePlot, distancePlot structures.FlatValuePlot, path Path, end structures.Point) {
	currentPoint := path.End()
	currentRisk := path.LengthAt(plot)
	previousRisk := distancePlot.Get(currentPoint)
	// Only continue when this path improves on the best-known risk here.
	if previousRisk > currentRisk {
		distancePlot.Set(currentPoint, currentRisk)
		if currentPoint == end {
			return
		} else {
			plot.VisitAdjacent(currentPoint, func(value int, point structures.Point) {
				// Never revisit a point already on this path (no cycles).
				if !path.HasPoint(point) {
					buildDistancePlot(plot, distancePlot, path.ContinueWith(point), end)
				}
			})
		}
	}
}
// buildDistancePlotV2 performs an iterative relaxation: a FIFO queue of
// candidate paths is drained, and a path is only extended when it improves
// the best-known risk at its end point.
//
// NOTE(review): this is not Dijkstra — whole paths are stored, their cost is
// recomputed with LengthAt (O(len(path)) per visit), and the queue is plain
// FIFO rather than a priority queue, so cells can be re-expanded many times.
// Correct, but memory- and CPU-heavy on large plots.
func buildDistancePlotV2(plot structures.FlatValuePlot, distancePlot structures.FlatValuePlot, start, end structures.Point) {
	visitQueue := make([]Path, 0)
	visitQueue = append(visitQueue, Path{start})
	i := 0
	for len(visitQueue) > 0 {
		i++
		// Pop the next candidate path off the front of the queue.
		currentPath := visitQueue[0]
		visitQueue = visitQueue[1:]
		currentPoint := currentPath.End()
		currentRisk := currentPath.LengthAt(plot)
		previousRisk := distancePlot.Get(currentPoint)
		//fmt.Printf("Visiting point %v, %v, %v\n", currentPoint, currentRisk, currentPath)
		if previousRisk > currentRisk {
			//fmt.Printf("Better path at %v, %v, %v\n", currentPoint, currentRisk, currentPath)
			distancePlot.Set(currentPoint, currentRisk)
			if currentPoint != end {
				// Enqueue an extension of this path for every unvisited neighbour.
				plot.VisitAdjacent(currentPoint, func(value int, point structures.Point) {
					if !currentPath.HasPoint(point) {
						//fmt.Printf("Planing visit: %v, %v\n", point, currentPath.ContinueWith(point))
						visitQueue = append(visitQueue, currentPath.ContinueWith(point))
						//fmt.Printf("Next Queue: %v\n", visitQueue)
					}
				})
			} else {
				//fmt.Printf("Reached end iteration: %v, risk: %v, path: %v\n", i, currentRisk, currentPath)
			}
		}
	}
	fmt.Printf("Total Iterations %v\n", i)
}
// LowestRiskPathLevel reads the final risk value for end out of a distance
// plot previously produced by BuildRiskLevelPlot.
func LowestRiskPathLevel(plot structures.FlatValuePlot, end structures.Point) int {
	return plot.Get(end)
} | pkg/submarine/navigation/path_finder.go | 0.523664 | 0.583381 | path_finder.go | starcoder
package index
import "sort"
// LessFn is a comparison function used for ordering tracks: it reports
// whether track s should sort before track t.
type LessFn func(s, t Track) bool
// trackSlice adapts a []Track plus a LessFn to sort.Interface.
type trackSlice struct {
	fn     LessFn
	tracks []Track
}

// Len implements sort.Interface.
func (o *trackSlice) Len() int { return len(o.tracks) }

// Swap implements sort.Interface.
func (o *trackSlice) Swap(i, j int) { o.tracks[i], o.tracks[j] = o.tracks[j], o.tracks[i] }

// Less implements sort.Interface by delegating to the configured LessFn.
func (o *trackSlice) Less(i, j int) bool { return o.fn(o.tracks[i], o.tracks[j]) }
// Sort orders tracks in place according to the comparison f.
func Sort(tracks []Track, f LessFn) {
	sort.Sort(&trackSlice{fn: f, tracks: tracks})
}
// SortByString returns a LessFn ordering tracks by the string attribute
// named field (via GetString).
func SortByString(field string) LessFn {
	return func(a, b Track) bool {
		return a.GetString(field) < b.GetString(field)
	}
}
// SortByInt returns a LessFn ordering tracks by the integer attribute
// named field (via GetInt).
func SortByInt(field string) LessFn {
	return func(a, b Track) bool {
		return a.GetInt(field) < b.GetInt(field)
	}
}
// SortByTime returns a LessFn ordering tracks chronologically by the time
// attribute named field (via GetTime).
func SortByTime(field string) LessFn {
	return func(a, b Track) bool {
		return a.GetTime(field).Before(b.GetTime(field))
	}
}
// MultiSort creates a LessFn for tracks from the given LessFns, applied
// lexicographically: the first fn that distinguishes s from t decides, and
// ties fall through to the next.
//
// NOTE(review): fns must be non-empty — calling the result of MultiSort()
// with no arguments panics on the fns[len(fns)-1] access.
func MultiSort(fns ...LessFn) LessFn {
	return func(s, t Track) bool {
		// All but the last comparator: use it only if it orders s and t
		// one way or the other; otherwise they are equal under it.
		for _, fn := range fns[:len(fns)-1] {
			switch {
			case fn(s, t):
				return true
			case fn(t, s):
				return false
			}
		}
		// The final comparator breaks any remaining tie.
		return fns[len(fns)-1](s, t)
	}
}
// Swaper is the interface wrapping the Swap method.
// (The historical "Swaper" spelling is kept: renaming the exported type
// would break callers.)
type Swaper interface {
	// Swap the items at indices i and j.
	Swap(i, j int)
}
// ParallelSort combines a sort.Interface implementation with a Swaper, and performs the same
// swap operations to w as they are applied to s.
func ParallelSort(s sort.Interface, w Swaper) sort.Interface {
return ¶llelSort{
Interface: s,
sw: w,
}
}
// parallelSort is a type that allows a Swaper implementation to be reordered
// in parallel with an implementation of sort.Interface: Len/Less come from
// the embedded Interface, Swap is overridden below to hit both.
type parallelSort struct {
	sort.Interface
	sw Swaper
}
// Swap implements Swapper (and sort.Interface): the swap is forwarded to
// both the wrapped sort.Interface and the Swaper so they stay aligned.
func (p *parallelSort) Swap(i, j int) {
	p.sw.Swap(i, j)
	p.Interface.Swap(i, j)
}
// keySlice attaches the methods of Swaper to []Key.
type keySlice []Key

// Swap implements Swaper.
func (k keySlice) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
// nameKeyMap builds a map from each entry's display name to its Key within c.
func nameKeyMap(c Collection) map[string]Key {
	keys := c.Keys()
	m := make(map[string]Key, len(keys))
	for _, k := range keys {
		m[c.Get(k).Name()] = k
	}
	return m
}
// names collects the display name of every entry in c, in key order.
func names(c Collection) []string {
	keys := c.Keys()
	result := make([]string, 0, len(keys))
	for _, k := range keys {
		result = append(result, c.Get(k).Name())
	}
	return result
}
// SortKeysByGroupName sorts the keys of the given collection (in place) by
// group name. In particular, this assumes that c.Keys() returns the actual
// internal representation of the key listing.
func SortKeysByGroupName(c Collection) {
	sort.Sort(ParallelSort(sort.StringSlice(names(c)), keySlice(c.Keys())))
} | index/sort.go | 0.834306 | 0.405508 | sort.go | starcoder
package area
import (
"fmt"
"github.com/guillermo/terminal/char"
)
// Area represents a rectangular area of Chars.
// An empty Area is a valid one.
type Area struct {
	// Rows and Cols are the area's current extent (accessors use 1-based
	// coordinates).
	Rows, Cols int
	// content holds written cells, grown lazily by Set; cells never written
	// read back as nil.
	content [][]char.Charer
	// Fixed freezes the size: Set/Get reject coordinates beyond Rows/Cols.
	Fixed bool
}
// Size reports the area's current row and column counts.
func (a *Area) Size() (rows, cols int) {
	rows = a.Rows
	cols = a.Cols
	return rows, cols
}
// SetSize forces the area to rows×cols and marks it Fixed, so out-of-range
// access is rejected from now on.
func (a *Area) SetSize(rows, cols int) {
	a.Rows, a.Cols = rows, cols
	a.Fixed = true
}
// Each calls fn for every cell, row by row. Row and column arguments are
// 1-based. Cells that were never written are reported with a nil char.
func (a *Area) Each(fn func(Row, Col int, char char.Charer)) {
	for r := 1; r <= a.Rows; r++ {
		for c := 1; c <= a.Cols; c++ {
			var ch char.Charer
			if r <= len(a.content) && c <= len(a.content[r-1]) {
				ch = a.content[r-1][c-1]
			}
			fn(r, c, ch)
		}
	}
}
// Set will change the given Char in the row,col position.
// If the area is fixed, it will return an error if a char is being set outside the area
// A row or col smaller than 1 will also return an error
func (a *Area) Set(row, col int, c char.Charer) error {
	if row <= 0 || (a.Fixed && row > a.Rows) {
		return fmt.Errorf("Invalid Row %d", row)
	}
	if col <= 0 || (a.Fixed && col > a.Cols) {
		return fmt.Errorf("Invalid Col %d", col)
	}
	// Grow the row list until the target row exists. New rows are allocated
	// col cells wide up front (filled with nils).
	for len(a.content) < row {
		a.content = append(a.content, make([]char.Charer, col))
	}
	// Grow the target row cell by cell until the target column exists.
	for len(a.content[row-1]) < col {
		a.content[row-1] = append(a.content[row-1], nil)
	}
	a.content[row-1][col-1] = c
	// For a non-fixed area the recorded bounds stretch to cover the new cell.
	if a.Cols < col {
		a.Cols = col
	}
	if a.Rows < row {
		a.Rows = row
	}
	return nil
}
// Get will return the character at the given position.
// A row or col smaller than 1 will return nil and error.
// For a fixed area a row and col bigger than the actual size will also return an error
func (a *Area) Get(Row, Col int) (char.Charer, error) {
	if Row <= 0 || (a.Fixed && Row > a.Rows) {
		return nil, fmt.Errorf("Invalid Row %d", Row)
	}
	if Col <= 0 || (a.Fixed && Col > a.Cols) {
		return nil, fmt.Errorf("Invalid Col %d", Col)
	}
	// Positions that were never written are valid and read back as nil.
	if len(a.content) < Row {
		return nil, nil
	}
	row := a.content[Row-1]
	if len(row) < Col {
		return nil, nil
	}
	return row[Col-1], nil
} | area/area.go | 0.784154 | 0.446796 | area.go | starcoder
package world
import (
"fmt"
"github.com/g3n/engine/math32"
)
// Solid is a brush: a set of Sides plus editor metadata.
type Solid struct {
	Id     int
	Sides  []Side
	Editor *Editor
}

// Side is a single face of a Solid: its plane, material and UV mapping.
type Side struct {
	Id              int
	Plane           Plane
	Material        string
	UAxis           UVTransform
	VAxis           UVTransform
	Rotation        float32
	LightmapScale   float32
	SmoothingGroups bool
}

// UVTransform describes one texture axis: a 4-component transform plus scale.
type UVTransform struct {
	Transform math32.Vector4
	Scale     float32
}

// Editor carries editor-only presentation data for a solid.
// NOTE(review): field meanings inferred from names — confirm against the
// map/VMF format this package parses.
type Editor struct {
	Color             math32.Vector3
	visgroupShown     bool
	visGroupAutoShown bool
	logicalPos        math32.Vector2 // only exists on brush entities?
}

// Plane is a plane in 3-D space: the three defining points plus the derived
// unit normal and signed distance from the origin.
type Plane struct {
	Normal math32.Vector3
	Dist   float32
	Points [3]math32.Vector3
}
// NewSolid constructs a Solid from its id, faces and editor metadata.
func NewSolid(id int, sides []Side, editor *Editor) *Solid {
	s := Solid{Id: id, Sides: sides, Editor: editor}
	return &s
}
// NewSide constructs a fully-populated Side.
func NewSide(id int, plane Plane, material string, uAxis UVTransform, vAxis UVTransform, rotation float32, lightmapScale float32, smoothingGroups bool) *Side {
	side := Side{
		Id:              id,
		Plane:           plane,
		Material:        material,
		UAxis:           uAxis,
		VAxis:           vAxis,
		Rotation:        rotation,
		LightmapScale:   lightmapScale,
		SmoothingGroups: smoothingGroups,
	}
	return &side
}
// NewEditor constructs the editor metadata for a solid.
func NewEditor(color math32.Vector3, visgroupShown bool, visgroupAutoShown bool) *Editor {
	e := Editor{
		Color:             color,
		visgroupShown:     visgroupShown,
		visGroupAutoShown: visgroupAutoShown,
	}
	return &e
}
// NewPlane derives a Plane from three points on it: the normal is the
// normalized cross product of the two edge vectors (b-a, c-a), and Dist is
// the projection of point a onto that normal.
//
// NOTE(review): the Clone() calls suggest the math32 vector methods mutate
// their receiver — confirm against the g3n math32 documentation.
func NewPlane(a math32.Vector3, b math32.Vector3, c math32.Vector3) *Plane {
	x := b.Clone().Sub(&a)
	y := c.Clone().Sub(&a)
	normal := x.Clone().Cross(y).Normalize()
	p := Plane{
		Points: [3]math32.Vector3{a, b, c},
		Normal: *normal,
	}
	p.Dist = p.Points[0].Dot(normal)
	return &p
}
// NewPlaneFromString parses a plane serialized as
// "(x y z) (x y z) (x y z)".
//
// NOTE(review): the fmt.Sscanf error is ignored — malformed input silently
// yields zero-valued points instead of reporting an error.
func NewPlaneFromString(marshalled string) *Plane {
	var v1, v2, v3 = float32(0), float32(0), float32(0)
	var v4, v5, v6 = float32(0), float32(0), float32(0)
	var v7, v8, v9 = float32(0), float32(0), float32(0)
	fmt.Sscanf(marshalled, "(%f %f %f) (%f %f %f) (%f %f %f)", &v1, &v2, &v3, &v4, &v5, &v6, &v7, &v8, &v9)
	return NewPlane(
		math32.Vector3{v1, v2, v3},
		math32.Vector3{v4, v5, v6},
		math32.Vector3{v7, v8, v9})
}
// NewUVTransform constructs a UVTransform from its transform vector and scale.
func NewUVTransform(transform math32.Vector4, scale float32) *UVTransform {
	uv := UVTransform{Transform: transform, Scale: scale}
	return &uv
}
// NewUVTransformFromString parses a texture axis serialized as
// "[x y z w] scale".
//
// NOTE(review): as with NewPlaneFromString, the Sscanf error is ignored and
// malformed input silently yields zeros.
func NewUVTransformFromString(marshalled string) *UVTransform {
	var v1, v2, v3, v4 = float32(0), float32(0), float32(0), float32(0)
	var scale = float32(0)
	fmt.Sscanf(marshalled, "[%f %f %f %f] %f", &v1, &v2, &v3, &v4, &scale)
	return NewUVTransform(math32.Vector4{v1, v2, v3, v4}, scale)
} | core/world/geometry.go | 0.769167 | 0.441673 | geometry.go | starcoder
package filter
import (
"fmt"
"github.com/gocraft/dbr"
"github.com/sonm-io/marketplace/ds"
pb "github.com/sonm-io/marketplace/proto"
)
// Operator is used to indicate how to filter different values.
type Operator int

// String returns the SQL-style comparison symbol for the operator, or
// "unknown" for unrecognised values.
func (op Operator) String() string {
	switch op {
	case LessThan:
		return "<"
	case LessEq:
		return "<="
	case Equal:
		return "="
	case NotEqual:
		return "!="
	case GreaterEq:
		return ">="
	case GreaterThan:
		return ">"
	}
	return "unknown"
}
// Condition builds a dbr condition comparing column against value using this
// operator.
//
// NOTE(review): an unrecognised operator panics rather than returning an
// error; callers must use only the constants declared below.
func (op Operator) Condition(column string, value interface{}) dbr.Condition {
	switch op {
	case LessThan:
		return dbr.Lt(column, value)
	case LessEq:
		return dbr.Lte(column, value)
	case Equal:
		return dbr.Eq(column, value)
	case NotEqual:
		return dbr.Neq(column, value)
	case GreaterEq:
		return dbr.Gte(column, value)
	case GreaterThan:
		return dbr.Gt(column, value)
	default:
		panic("unsupported operator given: " + op.String())
	}
}
const (
	// LessThan shows that the field being filtered must be less than the provided value
	LessThan Operator = iota
	// LessEq shows that the field being filtered must be less than or equal to the provided value
	LessEq
	// Equal shows that the field being filtered must be equal to the provided value
	Equal
	// NotEqual shows that the field being filtered must be not equal to the provided value
	NotEqual
	// GreaterEq shows that the field being filtered must be greater than or equal to the provided value
	GreaterEq
	// GreaterThan shows that the field being filtered must be greater than the provided value
	GreaterThan
)
// MatchOrder builds a dbr condition matching the given order, dispatching on
// its type. Searching without an explicit type is not supported.
func MatchOrder(order ds.Order) (dbr.Condition, error) {
	if order.OrderType == pb.OrderType_ASK {
		return MatchAsk(order), nil
	}
	if order.OrderType == pb.OrderType_BID {
		return MatchBid(order), nil
	}
	return nil, fmt.Errorf("searching by any type is not supported")
}
// MatchBid builds the condition set matching BID orders against the given
// search order: slot resource fields act as upper bounds (LessEq), and
// zero-valued optional resources are skipped so they do not over-constrain
// the query. (ByuerID is the existing, misspelled, field name on ds.Order.)
func MatchBid(order ds.Order) dbr.Condition {
	cond := IsBidOrder()
	if order.ByuerID != "" {
		cond = dbr.And(cond, BuyerID(order.ByuerID))
	}
	if order.SupplierID != "" {
		cond = dbr.And(cond, SupplierID(order.SupplierID))
	}
	if order.Slot == nil {
		return cond
	}
	slot := order.Slot
	// GPU count and network type are always constrained (no zero guard).
	cond = dbr.And(cond,
		GPUCount(LessEq, slot.Resources.GpuCount),
		NetType(LessEq, slot.Resources.NetworkType),
	)
	// Numeric resources are only constrained when explicitly requested.
	if slot.Resources.CpuCores > 0 {
		cond = dbr.And(cond, CPUCores(LessEq, slot.Resources.CpuCores))
	}
	if slot.Resources.RamBytes > 0 {
		cond = dbr.And(cond, RamBytes(LessEq, slot.Resources.RamBytes))
	}
	if slot.Resources.Storage > 0 {
		cond = dbr.And(cond, Storage(LessEq, slot.Resources.Storage))
	}
	if slot.Resources.NetTrafficIn > 0 {
		cond = dbr.And(cond, NetTrafficIn(LessEq, slot.Resources.NetTrafficIn))
	}
	if slot.Resources.NetTrafficOut > 0 {
		cond = dbr.And(cond, NetTrafficOut(LessEq, slot.Resources.NetTrafficOut))
	}
	return cond
}
// MatchAsk builds the condition set matching ASK orders against the given
// search order: slot resource fields act as lower bounds (GreaterEq).
//
// NOTE(review): unlike MatchBid, zero-valued resources are NOT skipped here —
// a GreaterEq-zero condition is effectively a no-op for unsigned fields,
// which looks intentional, but confirm.
func MatchAsk(order ds.Order) dbr.Condition {
	cond := IsAskOrder()
	if order.ByuerID != "" {
		cond = dbr.And(cond, BuyerID(order.ByuerID))
	}
	if order.SupplierID != "" {
		cond = dbr.And(cond, SupplierID(order.SupplierID))
	}
	if order.Slot == nil {
		return cond
	}
	slot := order.Slot
	cond = dbr.And(cond,
		GPUCount(GreaterEq, slot.Resources.GpuCount),
		NetType(GreaterEq, slot.Resources.NetworkType),
		CPUCores(GreaterEq, slot.Resources.CpuCores),
		RamBytes(GreaterEq, slot.Resources.RamBytes),
		Storage(GreaterEq, slot.Resources.Storage),
		NetTrafficIn(GreaterEq, slot.Resources.NetTrafficIn),
		NetTrafficOut(GreaterEq, slot.Resources.NetTrafficOut),
	)
	return cond
}
// IsAskOrder matches orders of type ASK.
func IsAskOrder() dbr.Condition {
	return dbr.Eq("type", pb.OrderType_ASK)
}

// IsBidOrder matches orders of type BID.
func IsBidOrder() dbr.Condition {
	return dbr.Eq("type", pb.OrderType_BID)
}

// BuyerID matches orders with the given buyer ID.
func BuyerID(ID string) dbr.Condition {
	return dbr.Eq("buyer_id", ID)
}

// SupplierID matches orders with the given supplier ID.
func SupplierID(ID string) dbr.Condition {
	return dbr.Eq("supplier_id", ID)
}

// CPUCores compares the order's CPU core count against value with op.
func CPUCores(op Operator, value uint64) dbr.Condition {
	return op.Condition("resources_cpu_cores", value)
}

// GPUCount compares the order's GPU count class against value with op.
func GPUCount(op Operator, value pb.GPUCount) dbr.Condition {
	return op.Condition("resources_gpu_count", value)
}

// RamBytes compares the order's RAM size in bytes against value with op.
func RamBytes(op Operator, value uint64) dbr.Condition {
	return op.Condition("resources_ram_bytes", value)
}

// Storage compares the order's storage size against value with op.
func Storage(op Operator, value uint64) dbr.Condition {
	return op.Condition("resources_storage", value)
}

// NetType compares the order's network type against value with op.
func NetType(op Operator, value pb.NetworkType) dbr.Condition {
	return op.Condition("resources_net_type", value)
}

// NetTrafficIn compares the order's inbound traffic allowance against value with op.
func NetTrafficIn(op Operator, value uint64) dbr.Condition {
	return op.Condition("resources_net_inbound", value)
}

// NetTrafficOut compares the order's outbound traffic allowance against value with op.
func NetTrafficOut(op Operator, value uint64) dbr.Condition {
	return op.Condition("resources_net_outbound", value)
} | service/filter/filters.go | 0.764276 | 0.530054 | filters.go | starcoder
package filter
import (
"math"
"sort"
"github.com/square/metrics/api"
)
// filterList pairs series indices with their summary values so both can be
// sorted together; ascending selects the sort direction.
type filterList struct {
	index     []int
	value     []float64
	ascending bool
}

// Len implements sort.Interface.
func (list filterList) Len() int {
	return len(list.index)
}

// Less implements sort.Interface. NaN summaries always sort after real
// numbers, regardless of direction.
func (list filterList) Less(i, j int) bool {
	if math.IsNaN(list.value[i]) {
		return false // NaN must go second
	}
	if math.IsNaN(list.value[j]) {
		return true // NaN must go second
	}
	if list.ascending {
		return list.value[i] < list.value[j]
	}
	return list.value[j] < list.value[i]
}

// Swap implements sort.Interface, keeping index and value aligned.
func (list filterList) Swap(i, j int) {
	list.index[i], list.index[j] = list.index[j], list.index[i]
	list.value[i], list.value[j] = list.value[j], list.value[i]
}
// sortSeries orders series by summary(series.Values): ascending when lowest
// is true, descending otherwise (NaN summaries always last). It returns the
// reordered series together with their summary values as parallel slices.
func sortSeries(series []api.Timeseries, summary func([]float64) float64, lowest bool) ([]api.Timeseries, []float64) {
	array := filterList{
		index: make([]int, len(series)),
		value: make([]float64, len(series)),
		ascending: lowest,
	}
	for i := range array.index {
		array.index[i] = i
		array.value[i] = summary(series[i].Values)
	}
	sort.Sort(array)
	// After sorting, index[i] is the original position of rank i, and
	// value[i] is its (already reordered) summary.
	result := make([]api.Timeseries, len(series))
	weights := make([]float64, len(series))
	for i, index := range array.index {
		result[i] = series[index]
		weights[i] = array.value[i]
	}
	return result, weights
}
// sortSeriesRecent sorts list by a summary computed over at most the last
// `slots` values of each series. A non-positive slots is treated as 1.
func sortSeriesRecent(list api.SeriesList, summary func([]float64) float64, lowest bool, slots int) ([]api.Timeseries, []float64) {
	if slots < 1 {
		slots = 1
	}
	recent := func(values []float64) float64 {
		if len(values) > slots {
			values = values[len(values)-slots:]
		}
		return summary(values)
	}
	return sortSeries(list.Series, recent, lowest)
}
// ByRecent keeps at most `count` series from list, ranked by a summary of
// their most recent `slots` data points only.
func ByRecent(list api.SeriesList, count int, summary func([]float64) float64, lowest bool, slots int) api.SeriesList {
	sorted, _ := sortSeriesRecent(list, summary, lowest, slots)
	// Never slice past the number of available series.
	if count > len(sorted) {
		count = len(sorted)
	}
	return api.SeriesList{Series: sorted[:count]}
}
// ThresholdByRecent keeps only the series whose summary over their most
// recent `slots` data points is at least (below=false) or at most
// (below=true) the threshold.
func ThresholdByRecent(list api.SeriesList, threshold float64, summary func([]float64) float64, below bool, slots int) api.SeriesList {
	sorted, values := sortSeriesRecent(list, summary, below, slots)
	result := []api.Timeseries{}
	for i := range sorted {
		// Since the series are sorted, once one of them falls outside the threshold, we can stop.
		if (below && values[i] > threshold) || (!below && values[i] < threshold) {
			break
		}
		result = append(result, sorted[i])
	}
	return api.SeriesList{
		Series: result,
	}
} | function/builtin/filter/filter.go | 0.743168 | 0.419886 | filter.go | starcoder
package gen
import (
"math"
"reflect"
"github.com/leanovate/gopter"
)
// Int64Range generates int64 numbers within a given range (inclusive).
// An inverted range (max < min) yields a failing generator.
func Int64Range(min, max int64) gopter.Gen {
	if max < min {
		return Fail(reflect.TypeOf(int64(0)))
	}
	if max == math.MaxInt64 && min == math.MinInt64 { // Check for range overflow
		return func(genParams *gopter.GenParameters) *gopter.GenResult {
			return gopter.NewGenResult(genParams.NextInt64(), Int64Shrinker)
		}
	}
	// max-min+1 may overflow int64, but two's-complement wrap-around plus the
	// uint64 conversion still yields the range size correctly modulo 2^64.
	rangeSize := uint64(max - min + 1)
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		// Same modular trick: the uint64 sum wraps back into [min, max] when
		// reinterpreted as int64 below.
		var nextResult = uint64(min) + (genParams.NextUint64() % rangeSize)
		genResult := gopter.NewGenResult(int64(nextResult), Int64Shrinker)
		// The sieve re-checks the bounds so shrunk values stay in range.
		genResult.Sieve = func(v interface{}) bool {
			return v.(int64) >= min && v.(int64) <= max
		}
		return genResult
	}
}
// UInt64Range generates uint64 numbers within a given range (inclusive).
// An inverted range (max < min) yields a failing generator.
func UInt64Range(min, max uint64) gopter.Gen {
	if max < min {
		return Fail(reflect.TypeOf(uint64(0)))
	}
	d := max - min + 1
	if d == 0 { // Check overflow (i.e. max = MaxUint64, min = 0)
		return func(genParams *gopter.GenParameters) *gopter.GenResult {
			return gopter.NewGenResult(genParams.NextUint64(), UInt64Shrinker)
		}
	}
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		genResult := gopter.NewGenResult(min+genParams.NextUint64()%d, UInt64Shrinker)
		// The sieve keeps shrunk values inside [min, max].
		genResult.Sieve = func(v interface{}) bool {
			return v.(uint64) >= min && v.(uint64) <= max
		}
		return genResult
	}
}
// Int64 generates an arbitrary int64 number.
func Int64() gopter.Gen {
	return Int64Range(math.MinInt64, math.MaxInt64)
}

// UInt64 generates an arbitrary uint64 number.
func UInt64() gopter.Gen {
	return UInt64Range(0, math.MaxUint64)
}
// Int32Range generates int32 numbers within a given range (inclusive).
// Generation happens in the 64-bit domain and is narrowed via Map; the
// SuchThat sieve keeps shrunk values in range.
func Int32Range(min, max int32) gopter.Gen {
	return Int64Range(int64(min), int64(max)).
		Map(int64To32).
		WithShrinker(Int32Shrinker).
		SuchThat(func(v int32) bool {
			return v >= min && v <= max
		})
}

// UInt32Range generates uint32 numbers within a given range (inclusive).
func UInt32Range(min, max uint32) gopter.Gen {
	return UInt64Range(uint64(min), uint64(max)).
		Map(uint64To32).
		WithShrinker(UInt32Shrinker).
		SuchThat(func(v uint32) bool {
			return v >= min && v <= max
		})
}

// Int32 generates arbitrary int32 numbers.
func Int32() gopter.Gen {
	return Int32Range(math.MinInt32, math.MaxInt32)
}

// UInt32 generates arbitrary uint32 numbers.
func UInt32() gopter.Gen {
	return UInt32Range(0, math.MaxUint32)
}
// Int16Range generates int16 numbers within a given range (inclusive),
// narrowing 64-bit generation via Map as the 32-bit variants do.
func Int16Range(min, max int16) gopter.Gen {
	return Int64Range(int64(min), int64(max)).
		Map(int64To16).
		WithShrinker(Int16Shrinker).
		SuchThat(func(v int16) bool {
			return v >= min && v <= max
		})
}

// UInt16Range generates uint16 numbers within a given range (inclusive).
func UInt16Range(min, max uint16) gopter.Gen {
	return UInt64Range(uint64(min), uint64(max)).
		Map(uint64To16).
		WithShrinker(UInt16Shrinker).
		SuchThat(func(v uint16) bool {
			return v >= min && v <= max
		})
}

// Int16 generates arbitrary int16 numbers.
func Int16() gopter.Gen {
	return Int16Range(math.MinInt16, math.MaxInt16)
}

// UInt16 generates arbitrary uint16 numbers.
func UInt16() gopter.Gen {
	return UInt16Range(0, math.MaxUint16)
}
// Int8Range generates int8 numbers within a given range (inclusive),
// narrowing 64-bit generation via Map as the wider variants do.
func Int8Range(min, max int8) gopter.Gen {
	return Int64Range(int64(min), int64(max)).
		Map(int64To8).
		WithShrinker(Int8Shrinker).
		SuchThat(func(v int8) bool {
			return v >= min && v <= max
		})
}

// UInt8Range generates uint8 numbers within a given range (inclusive).
func UInt8Range(min, max uint8) gopter.Gen {
	return UInt64Range(uint64(min), uint64(max)).
		Map(uint64To8).
		WithShrinker(UInt8Shrinker).
		SuchThat(func(v uint8) bool {
			return v >= min && v <= max
		})
}

// Int8 generates arbitrary int8 numbers.
func Int8() gopter.Gen {
	return Int8Range(math.MinInt8, math.MaxInt8)
}

// UInt8 generates arbitrary uint8 numbers.
func UInt8() gopter.Gen {
	return UInt8Range(0, math.MaxUint8)
}
// IntRange generates int numbers within a given range (inclusive).
func IntRange(min, max int) gopter.Gen {
	return Int64Range(int64(min), int64(max)).
		Map(int64ToInt).
		WithShrinker(IntShrinker).
		SuchThat(func(v int) bool {
			return v >= min && v <= max
		})
}

// Int generates arbitrary int numbers.
// NOTE(review): the range is limited to int32 bounds (not the full int
// width) — presumably for portability between 32- and 64-bit platforms;
// confirm before widening.
func Int() gopter.Gen {
	return Int64Range(math.MinInt32, math.MaxInt32).
		Map(int64ToInt).
		WithShrinker(IntShrinker)
}

// UIntRange generates uint numbers within a given range (inclusive).
func UIntRange(min, max uint) gopter.Gen {
	return UInt64Range(uint64(min), uint64(max)).
		Map(uint64ToUint).
		WithShrinker(UIntShrinker).
		SuchThat(func(v uint) bool {
			return v >= min && v <= max
		})
}

// UInt generates arbitrary uint numbers (limited to uint32 bounds, matching
// the Int limitation above).
func UInt() gopter.Gen {
	return UInt64Range(0, math.MaxUint32).
		Map(uint64ToUint).
		WithShrinker(UIntShrinker)
}
// Size just extracts the MaxSize field of the GenParameters.
// This can be helpful to generate limited integer values in a more
// structured manner.
func Size() gopter.Gen {
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		return gopter.NewGenResult(genParams.MaxSize, IntShrinker)
	}
}
// The helpers below narrow generator outputs to the requested integer type;
// each is used as a Map step after generating in the wider 64-bit domain.

// int64To32 narrows an int64 to int32.
func int64To32(value int64) int32 {
	return int32(value)
}

// uint64To32 narrows a uint64 to uint32.
func uint64To32(value uint64) uint32 {
	return uint32(value)
}

// int64To16 narrows an int64 to int16.
func int64To16(value int64) int16 {
	return int16(value)
}

// uint64To16 narrows a uint64 to uint16.
func uint64To16(value uint64) uint16 {
	return uint16(value)
}

// int64To8 narrows an int64 to int8.
func int64To8(value int64) int8 {
	return int8(value)
}

// uint64To8 narrows a uint64 to uint8.
func uint64To8(value uint64) uint8 {
	return uint8(value)
}

// int64ToInt narrows an int64 to int.
func int64ToInt(value int64) int {
	return int(value)
}

// uint64ToUint narrows a uint64 to uint.
func uint64ToUint(value uint64) uint {
	return uint(value)
} | vendor/github.com/leanovate/gopter/gen/integers.go | 0.749087 | 0.443962 | integers.go | starcoder
package input
import (
"context"
"crypto/tls"
"errors"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/Jeffail/benthos/v3/internal/checkpoint"
"github.com/Jeffail/benthos/v3/internal/component/input"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/lib/input/reader"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/util/kafka/sasl"
btls "github.com/Jeffail/benthos/v3/lib/util/tls"
"github.com/Jeffail/gabs/v2"
"github.com/Shopify/sarama"
)
//------------------------------------------------------------------------------
// init registers the "kafka" input plugin: its constructor plus the full
// configuration field specification and documentation rendered by Benthos.
func init() {
	Constructors[TypeKafka] = TypeSpec{
		constructor: fromSimpleConstructor(NewKafka),
		Summary: `
Connects to Kafka brokers and consumes one or more topics.`,
		Description: `
Offsets are managed within Kafka under the specified consumer group, and partitions for each topic are automatically balanced across members of the consumer group.
The Kafka input allows parallel processing of messages from different topic partitions, but by default messages of the same topic partition are processed in lockstep in order to enforce ordered processing. This protection often means that batching messages at the output level can stall, in which case it can be tuned by increasing the field ` + "[`checkpoint_limit`](#checkpoint_limit)" + `, ideally to a value greater than the number of messages you expect to batch.
Alternatively, if you perform batching at the input level using the ` + "[`batching`](#batching)" + ` field it is done per-partition and therefore avoids stalling.
### Metadata
This input adds the following metadata fields to each message:
` + "``` text" + `
- kafka_key
- kafka_topic
- kafka_partition
- kafka_offset
- kafka_lag
- kafka_timestamp_unix
- All existing message headers (version 0.11+)
` + "```" + `
The field ` + "`kafka_lag`" + ` is the calculated difference between the high water mark offset of the partition at the time of ingestion and the current message offset.
You can access these metadata fields using [function interpolation](/docs/configuration/interpolation#metadata).`,
		// Configuration schema: field names, docs and defaults for the plugin.
		FieldSpecs: docs.FieldSpecs{
			docs.FieldString(
				"addresses", "A list of broker addresses to connect to. If an item of the list contains commas it will be expanded into multiple addresses.",
				[]string{"localhost:9092"}, []string{"localhost:9041,localhost:9042"}, []string{"localhost:9041", "localhost:9042"},
			).Array(),
			docs.FieldString(
				"topics",
				"A list of topics to consume from. Multiple comma separated topics can be listed in a single element. Partitions are automatically distributed across consumers of a topic. Alternatively, it's possible to specify explicit partitions to consume from with a colon after the topic name, e.g. `foo:0` would consume the partition 0 of the topic foo. This syntax supports ranges, e.g. `foo:0-10` would consume partitions 0 through to 10 inclusive.",
				[]string{"foo", "bar"},
				[]string{"foo,bar"},
				[]string{"foo:0", "bar:1", "bar:3"},
				[]string{"foo:0,bar:1,bar:3"},
				[]string{"foo:0-5"},
			).AtVersion("3.33.0").Array(),
			docs.FieldString("target_version", "The version of the Kafka protocol to use. This limits the capabilities used by the client and should ideally match the version of your brokers."),
			btls.FieldSpec(),
			sasl.FieldSpec(),
			docs.FieldCommon("consumer_group", "An identifier for the consumer group of the connection. This field can be explicitly made empty in order to disable stored offsets for the consumed topic partitions."),
			docs.FieldCommon("client_id", "An identifier for the client connection."),
			docs.FieldAdvanced("rack_id", "A rack identifier for this client."),
			docs.FieldAdvanced("start_from_oldest", "If an offset is not found for a topic partition, determines whether to consume from the oldest available offset, otherwise messages are consumed from the latest offset."),
			docs.FieldCommon(
				"checkpoint_limit", "EXPERIMENTAL: The maximum number of messages of the same topic and partition that can be processed at a given time. Increasing this limit enables parallel processing and batching at the output level to work on individual partitions. Any given offset will not be committed unless all messages under that offset are delivered in order to preserve at least once delivery guarantees.",
			).AtVersion("3.33.0"),
			docs.FieldAdvanced("commit_period", "The period of time between each commit of the current partition offsets. Offsets are always committed during shutdown."),
			docs.FieldAdvanced("max_processing_period", "A maximum estimate for the time taken to process a message, this is used for tuning consumer group synchronization."),
			input.ExtractTracingSpanMappingDocs,
			docs.FieldAdvanced("group", "Tuning parameters for consumer group synchronization.").WithChildren(
				docs.FieldAdvanced("session_timeout", "A period after which a consumer of the group is kicked after no heartbeats."),
				docs.FieldAdvanced("heartbeat_interval", "A period in which heartbeats should be sent out."),
				docs.FieldAdvanced("rebalance_timeout", "A period after which rebalancing is abandoned if unresolved."),
			),
			docs.FieldAdvanced("fetch_buffer_cap", "The maximum number of unprocessed messages to fetch at a given time."),
			func() docs.FieldSpec {
				b := batch.FieldSpec()
				b.IsAdvanced = true
				return b
			}(),
			// TODO: Remove V4
			docs.FieldDeprecated("max_batch_count"),
			docs.FieldDeprecated("topic").OmitWhen(func(field, parent interface{}) (string, bool) {
				return "field topic is deprecated and should be omitted when topics is used",
					len(gabs.Wrap(parent).S("topics").Children()) > 0
			}),
			docs.FieldDeprecated("partition").OmitWhen(func(field, parent interface{}) (string, bool) {
				return "field partition is deprecated and should be omitted when topics is used",
					len(gabs.Wrap(parent).S("topics").Children()) > 0
			}),
		},
		Categories: []Category{
			CategoryServices,
		},
	}
}
//------------------------------------------------------------------------------
// NewKafka creates a new Kafka input type.
//
// When the config is in the new (non-deprecated) form, or explicit topics are
// listed, an async reader is constructed and optionally wrapped for tracing
// span extraction. Otherwise it falls back to the deprecated synchronous
// reader, honouring the legacy max_batch_count field.
func NewKafka(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	if !conf.Kafka.IsDeprecated() || len(conf.Kafka.Topics) > 0 {
		var rdr reader.Async
		var err error
		if rdr, err = newKafkaReader(conf.Kafka, mgr, log, stats); err != nil {
			return nil, err
		}
		// Optionally extract tracing spans from consumed messages using the
		// configured mapping.
		if conf.Kafka.ExtractTracingMap != "" {
			if rdr, err = input.NewSpanReader(TypeKafka, conf.Kafka.ExtractTracingMap, rdr, mgr, log); err != nil {
				return nil, err
			}
		}
		return NewAsyncReader(TypeKafka, false, reader.NewAsyncPreserver(rdr), log, stats)
	}
	// TODO: V4 Remove this.
	// Legacy max_batch_count is mapped onto the batching policy count.
	if conf.Kafka.MaxBatchCount > 1 {
		log.Warnf("Field '%v.max_batch_count' is deprecated, use '%v.batching.count' instead.\n", conf.Type, conf.Type)
		conf.Kafka.Batching.Count = conf.Kafka.MaxBatchCount
	}
	log.Warnln("The kafka input has been revamped, falling back to the deprecated version. In order to use the new version use the field `topics`.")
	k, err := reader.NewKafka(conf.Kafka, mgr, log, stats)
	if err != nil {
		return nil, err
	}
	var kb reader.Type = k
	// Only wrap with a batcher when a batching policy is actually configured.
	if !conf.Kafka.Batching.IsNoop() {
		if kb, err = reader.NewSyncBatcher(conf.Kafka.Batching, k, mgr, log, stats); err != nil {
			return nil, err
		}
	}
	return NewReader(TypeKafka, reader.NewPreserver(kb), log, stats)
}
//------------------------------------------------------------------------------
// asyncMessage pairs a consumed message with the acknowledgement function
// that commits (or abandons) its offset once downstream delivery resolves.
type asyncMessage struct {
	msg types.Message
	ackFn reader.AsyncAckFn
}
// offsetMarker is the subset of a consumer group session used for committing
// offsets; abstracting it allows the session to be stubbed in tests.
type offsetMarker interface {
	MarkOffset(topic string, partition int32, offset int64, metadata string)
}
// kafkaReader consumes from Kafka either via explicit topic partitions or via
// a balanced consumer group, handing messages to the async reader machinery.
type kafkaReader struct {
	version sarama.KafkaVersion
	tlsConf *tls.Config
	addresses []string
	// topicPartitions holds explicitly requested partitions per topic; it is
	// mutually exclusive with balancedTopics (see errCannotMixBalanced).
	topicPartitions map[string][]int32
	balancedTopics []string
	commitPeriod time.Duration
	sessionTimeout time.Duration
	heartbeatInterval time.Duration
	rebalanceTimeout time.Duration
	maxProcPeriod time.Duration
	// Connection resources, all guarded by cMut.
	cMut sync.Mutex
	consumerCloseFn context.CancelFunc
	consumerDoneCtx context.Context
	msgChan chan asyncMessage
	// session is the active offset-committing session; nil when disconnected.
	session offsetMarker
	mRebalanced metrics.StatCounter
	conf reader.KafkaConfig
	stats metrics.Type
	log log.Modular
	mgr types.Manager
	closeOnce sync.Once
	closedChan chan struct{}
}
// errCannotMixBalanced is returned when a config lists both balanced topics
// (consumer group based) and explicit topic:partition entries.
var errCannotMixBalanced = errors.New("it is not currently possible to include balanced and explicit partition topics in the same kafka input")

// parsePartitions parses a partition expression into a list of partition IDs.
// The expression is either a single partition number ("3") or an inclusive
// range ("0-5"). An error is returned for malformed numbers, for more than
// one range separator, or for an inverted range (start greater than end),
// which previously yielded an empty partition list silently.
func parsePartitions(expr string) ([]int32, error) {
	rangeExpr := strings.Split(expr, "-")
	if len(rangeExpr) > 2 {
		return nil, fmt.Errorf("partition '%v' is invalid, only one range can be specified", expr)
	}
	if len(rangeExpr) == 1 {
		partition, err := strconv.ParseInt(expr, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("failed to parse partition number: %w", err)
		}
		return []int32{int32(partition)}, nil
	}
	start, err := strconv.ParseInt(rangeExpr[0], 10, 32)
	if err != nil {
		return nil, fmt.Errorf("failed to parse start of range: %w", err)
	}
	end, err := strconv.ParseInt(rangeExpr[1], 10, 32)
	if err != nil {
		return nil, fmt.Errorf("failed to parse end of range: %w", err)
	}
	if start > end {
		return nil, fmt.Errorf("partition range '%v' is invalid, start must not be greater than end", expr)
	}
	// The range is inclusive at both ends, hence cap end-start+1.
	parts := make([]int32, 0, end-start+1)
	for i := start; i <= end; i++ {
		parts = append(parts, int32(i))
	}
	return parts, nil
}
// newKafkaReader constructs a kafkaReader from config: it resolves broker
// addresses, parses the topics list (balanced topics or explicit
// topic:partition entries, never both), parses all duration fields and
// validates the target Kafka version.
func newKafkaReader(
	conf reader.KafkaConfig, mgr types.Manager, log log.Modular, stats metrics.Type,
) (*kafkaReader, error) {
	// A no-op batching policy still needs a count of one so batching acts as
	// a pass-through.
	if conf.Batching.IsNoop() {
		conf.Batching.Count = 1
	}
	k := kafkaReader{
		conf: conf,
		stats: stats,
		consumerCloseFn: nil,
		log: log,
		mgr: mgr,
		mRebalanced: stats.GetCounter("rebalanced"),
		closedChan: make(chan struct{}),
		topicPartitions: map[string][]int32{},
	}
	if conf.TLS.Enabled {
		var err error
		if k.tlsConf, err = conf.TLS.Get(); err != nil {
			return nil, err
		}
	}
	// Each address entry may itself be a comma-separated list; split and trim.
	for _, addr := range conf.Addresses {
		for _, splitAddr := range strings.Split(addr, ",") {
			if trimmed := strings.TrimSpace(splitAddr); len(trimmed) > 0 {
				k.addresses = append(k.addresses, trimmed)
			}
		}
	}
	if len(conf.Topics) == 0 {
		return nil, errors.New("must specify at least one topic in the topics field")
	}
	// Topic entries may also be comma-separated. A "topic:partitions" entry
	// selects explicit partitions; a bare topic requests balanced consumption.
	// Mixing the two styles is rejected.
	for _, t := range conf.Topics {
		for _, splitTopics := range strings.Split(t, ",") {
			if trimmed := strings.TrimSpace(splitTopics); len(trimmed) > 0 {
				if withParts := strings.Split(trimmed, ":"); len(withParts) > 1 {
					if len(k.balancedTopics) > 0 {
						return nil, errCannotMixBalanced
					}
					if len(withParts) > 2 {
						return nil, fmt.Errorf("topic '%v' is invalid, only one partition should be specified and the same topic can be listed multiple times, e.g. use `foo:0,foo:1` not `foo:0:1`", trimmed)
					}
					topic := strings.TrimSpace(withParts[0])
					parts, err := parsePartitions(withParts[1])
					if err != nil {
						return nil, err
					}
					k.topicPartitions[topic] = append(k.topicPartitions[topic], parts...)
				} else {
					if len(k.topicPartitions) > 0 {
						return nil, errCannotMixBalanced
					}
					k.balancedTopics = append(k.balancedTopics, trimmed)
				}
			}
		}
	}
	// Duration fields are optional; empty strings leave the zero value.
	if tout := conf.CommitPeriod; len(tout) > 0 {
		var err error
		if k.commitPeriod, err = time.ParseDuration(tout); err != nil {
			return nil, fmt.Errorf("failed to parse commit period string: %v", err)
		}
	}
	if tout := conf.Group.SessionTimeout; len(tout) > 0 {
		var err error
		if k.sessionTimeout, err = time.ParseDuration(tout); err != nil {
			return nil, fmt.Errorf("failed to parse session timeout string: %v", err)
		}
	}
	if tout := conf.Group.HeartbeatInterval; len(tout) > 0 {
		var err error
		if k.heartbeatInterval, err = time.ParseDuration(tout); err != nil {
			return nil, fmt.Errorf("failed to parse heartbeat interval string: %v", err)
		}
	}
	if tout := conf.Group.RebalanceTimeout; len(tout) > 0 {
		var err error
		if k.rebalanceTimeout, err = time.ParseDuration(tout); err != nil {
			return nil, fmt.Errorf("failed to parse rebalance timeout string: %v", err)
		}
	}
	if tout := conf.MaxProcessingPeriod; len(tout) > 0 {
		var err error
		if k.maxProcPeriod, err = time.ParseDuration(tout); err != nil {
			return nil, fmt.Errorf("failed to parse max processing period string: %v", err)
		}
	}
	// Balanced consumption requires a consumer group for offset storage.
	if conf.ConsumerGroup == "" && len(k.balancedTopics) > 0 {
		return nil, errors.New("a consumer group must be specified when consuming balanced topics")
	}
	var err error
	if k.version, err = sarama.ParseKafkaVersion(conf.TargetVersion); err != nil {
		return nil, err
	}
	return &k, nil
}
//------------------------------------------------------------------------------
// asyncCheckpointer returns a send function that tracks in-flight offsets for
// a single topic partition using a capped checkpointer, allowing up to
// checkpoint_limit messages to be in flight concurrently while only ever
// committing offsets whose predecessors have all been acknowledged.
func (k *kafkaReader) asyncCheckpointer(topic string, partition int32) func(context.Context, chan<- asyncMessage, types.Message, int64) bool {
	cp := checkpoint.NewCapped(int64(k.conf.CheckpointLimit))
	return func(ctx context.Context, c chan<- asyncMessage, msg types.Message, offset int64) bool {
		if msg == nil {
			return true
		}
		// Track blocks when the cap is reached; a timeout simply means retry,
		// anything else is logged.
		resolveFn, err := cp.Track(ctx, offset, int64(msg.Len()))
		if err != nil {
			if err != types.ErrTimeout {
				k.log.Errorf("Failed to checkpoint offset: %v\n", err)
			}
			return false
		}
		select {
		case c <- asyncMessage{
			msg: msg,
			ackFn: func(ctx context.Context, res types.Response) error {
				// resolveFn yields the offset safe to commit, or nil while
				// earlier messages are still pending.
				maxOffset := resolveFn()
				if maxOffset == nil {
					return nil
				}
				// The session may have been torn down (rebalance/shutdown)
				// between delivery and ack, hence the guarded nil check.
				k.cMut.Lock()
				if k.session != nil {
					k.log.Debugf("Marking offset for topic '%v' partition '%v'.\n", topic, partition)
					k.session.MarkOffset(topic, partition, maxOffset.(int64), "")
				} else {
					k.log.Debugf("Unable to mark offset for topic '%v' partition '%v'.\n", topic, partition)
				}
				k.cMut.Unlock()
				return nil
			},
		}:
		case <-ctx.Done():
			return false
		}
		return true
	}
}
// syncCheckpointer returns a send function that delivers one message at a
// time for a topic partition, blocking until the downstream ack arrives
// before allowing the next message. A delivery error shuts the consumer down.
func (k *kafkaReader) syncCheckpointer(topic string, partition int32) func(context.Context, chan<- asyncMessage, types.Message, int64) bool {
	// ackedChan is the handshake between the ack function and the sender.
	ackedChan := make(chan error)
	return func(ctx context.Context, c chan<- asyncMessage, msg types.Message, offset int64) bool {
		if msg == nil {
			return true
		}
		select {
		case c <- asyncMessage{
			msg: msg,
			ackFn: func(ctx context.Context, res types.Response) error {
				resErr := res.Error()
				if resErr == nil {
					// Successful delivery: mark the offset if a session is
					// still active (it may have gone away on rebalance).
					k.cMut.Lock()
					if k.session != nil {
						k.log.Debugf("Marking offset for topic '%v' partition '%v'.\n", topic, partition)
						k.session.MarkOffset(topic, partition, offset, "")
					} else {
						k.log.Debugf("Unable to mark offset for topic '%v' partition '%v'.\n", topic, partition)
					}
					k.cMut.Unlock()
				}
				// Hand the result back to the blocked sender below.
				select {
				case ackedChan <- resErr:
				case <-ctx.Done():
				}
				return nil
			},
		}:
			// Block until the message is acknowledged (or the context ends).
			select {
			case resErr := <-ackedChan:
				if resErr != nil {
					k.log.Errorf("Received error from message batch: %v, shutting down consumer.\n", resErr)
					return false
				}
			case <-ctx.Done():
				return false
			}
		case <-ctx.Done():
			return false
		}
		return true
	}
}
// dataToPart converts a consumed sarama message into a message part, copying
// record headers and attaching the standard kafka_* metadata fields. Lag is
// computed against the partition's highest known offset and clamped at zero.
func dataToPart(highestOffset int64, data *sarama.ConsumerMessage) types.Part {
	part := message.NewPart(data.Value)
	meta := part.Metadata()
	// Record headers become metadata entries verbatim.
	for _, hdr := range data.Headers {
		meta.Set(string(hdr.Key), string(hdr.Value))
	}
	lag := highestOffset - data.Offset - 1
	if lag < 0 {
		lag = 0
	}
	meta.Set("kafka_key", string(data.Key))
	meta.Set("kafka_partition", strconv.Itoa(int(data.Partition)))
	meta.Set("kafka_topic", data.Topic)
	meta.Set("kafka_offset", strconv.Itoa(int(data.Offset)))
	meta.Set("kafka_lag", strconv.FormatInt(lag, 10))
	meta.Set("kafka_timestamp_unix", strconv.FormatInt(data.Timestamp.Unix(), 10))
	return part
}
//------------------------------------------------------------------------------
// closeGroupAndConsumers signals any active topic consumers to stop, waits
// for them to finish, and then (exactly once) closes closedChan so that
// WaitForClose can return.
func (k *kafkaReader) closeGroupAndConsumers() {
	// Snapshot the close function and done context under lock so we don't
	// race against a concurrent (re)connect.
	k.cMut.Lock()
	consumerCloseFn := k.consumerCloseFn
	consumerDoneCtx := k.consumerDoneCtx
	k.cMut.Unlock()
	if consumerCloseFn != nil {
		k.log.Debugln("Waiting for topic consumers to close.")
		consumerCloseFn()
		<-consumerDoneCtx.Done()
		k.log.Debugln("Topic consumers are closed.")
	}
	k.closeOnce.Do(func() {
		close(k.closedChan)
	})
}
//------------------------------------------------------------------------------
// ConnectWithContext establishes a kafkaReader connection.
//
// It builds the sarama client config from the parsed reader settings and then
// dials either explicit topic partitions or a balanced consumer group.
// Calling it while already connected (msgChan non-nil) is a no-op.
func (k *kafkaReader) ConnectWithContext(ctx context.Context) error {
	k.cMut.Lock()
	defer k.cMut.Unlock()
	if k.msgChan != nil {
		return nil
	}
	config := sarama.NewConfig()
	config.ClientID = k.conf.ClientID
	config.RackID = k.conf.RackID
	config.Net.DialTimeout = time.Second
	config.Version = k.version
	config.Consumer.Return.Errors = true
	config.Consumer.MaxProcessingTime = k.maxProcPeriod
	config.Consumer.Offsets.AutoCommit.Enable = true
	config.Consumer.Offsets.AutoCommit.Interval = k.commitPeriod
	config.Consumer.Group.Session.Timeout = k.sessionTimeout
	config.Consumer.Group.Heartbeat.Interval = k.heartbeatInterval
	config.Consumer.Group.Rebalance.Timeout = k.rebalanceTimeout
	config.ChannelBufferSize = k.conf.FetchBufferCap
	// The socket read timeout must exceed both the session and rebalance
	// timeouts, otherwise group operations can be cut short by the transport.
	if config.Net.ReadTimeout <= k.sessionTimeout {
		config.Net.ReadTimeout = k.sessionTimeout * 2
	}
	if config.Net.ReadTimeout <= k.rebalanceTimeout {
		config.Net.ReadTimeout = k.rebalanceTimeout * 2
	}
	config.Net.TLS.Enable = k.conf.TLS.Enabled
	if k.conf.TLS.Enabled {
		config.Net.TLS.Config = k.tlsConf
	}
	if k.conf.StartFromOldest {
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
	}
	if err := k.conf.SASL.Apply(k.mgr, config); err != nil {
		return err
	}
	// Explicit partitions take precedence over balanced consumption.
	if len(k.topicPartitions) > 0 {
		return k.connectExplicitTopics(ctx, config)
	}
	return k.connectBalancedTopics(ctx, config)
}
// ReadWithContext attempts to read a message from a kafkaReader topic. It
// returns types.ErrNotConnected when no consumer is active (or the channel
// was closed) and types.ErrTimeout when ctx expires before a message arrives.
func (k *kafkaReader) ReadWithContext(ctx context.Context) (types.Message, reader.AsyncAckFn, error) {
	// Snapshot the channel under lock; it may be swapped by connect/close.
	k.cMut.Lock()
	msgChan := k.msgChan
	k.cMut.Unlock()
	if msgChan == nil {
		return nil, nil, types.ErrNotConnected
	}
	select {
	case m, open := <-msgChan:
		if !open {
			return nil, nil, types.ErrNotConnected
		}
		return m.msg, m.ackFn, nil
	case <-ctx.Done():
	}
	return nil, nil, types.ErrTimeout
}
// CloseAsync shuts down the kafkaReader input and stops processing requests.
// The teardown happens in a background goroutine; use WaitForClose to block.
func (k *kafkaReader) CloseAsync() {
	go k.closeGroupAndConsumers()
}
// WaitForClose blocks until the kafkaReader input has closed down, or returns
// types.ErrTimeout if the timeout elapses first.
func (k *kafkaReader) WaitForClose(timeout time.Duration) error {
	select {
	case <-k.closedChan:
	case <-time.After(timeout):
		return types.ErrTimeout
	}
	return nil
}
//------------------------------------------------------------------------------
package iso20022
// Account between an investor(s) and a fund manager or a fund. The account can contain holdings in any investment fund or investment fund class managed (or distributed) by the fund manager, within the same fund family.
// InvestmentAccount28 is an account between an investor(s) and a fund manager
// or a fund. The account can contain holdings in any investment fund or
// investment fund class managed (or distributed) by the fund manager, within
// the same fund family. All fields are optional ISO 20022 message components.
type InvestmentAccount28 struct {
	// Name of the account. It provides an additional means of identification, and is designated by the account servicer in agreement with the account owner.
	Name *Max35Text `xml:"Nm,omitempty"`
	// Supplementary registration information applying to a specific block of units for dealing and reporting purposes. The supplementary registration information may be used when all the units are registered, for example, to a funds supermarket, but holdings for each investor have to be reconciled individually.
	Designation *Max35Text `xml:"Dsgnt,omitempty"`
	// Purpose of the account/source fund type. This is typically linked to an investment product, eg, wrapper, PEP, ISA.
	Type *FundCashAccount3Code `xml:"Tp,omitempty"`
	// Purpose of the account/source fund type, as a non-enumerated extension code.
	ExtendedType *Extended350Code `xml:"XtndedTp,omitempty"`
	// Ownership status of the account, eg, joint owners.
	OwnershipType *AccountOwnershipType3Code `xml:"OwnrshTp,omitempty"`
	// Ownership status of the account, as a non-enumerated extension code.
	ExtendedOwnershipType *Extended350Code `xml:"XtndedOwnrshTp,omitempty"`
	// Tax advantage specific to the account.
	TaxExemptionReason *TaxExemptReason1Code `xml:"TaxXmptnRsn,omitempty"`
	// Tax advantage specific to the account, as a non-enumerated extension code.
	ExtendedTaxExemptionReason *Extended350Code `xml:"XtndedTaxXmptnRsn,omitempty"`
	// Regularity at which a statement is issued.
	StatementFrequency *EventFrequency1Code `xml:"StmtFrqcy,omitempty"`
	// Regularity at which a statement is issued, as a non-enumerated extension code.
	ExtendedStatementFrequency *Extended350Code `xml:"XtndedStmtFrqcy,omitempty"`
	// Currency chosen for reporting purposes by the account owner in agreement with the account servicer.
	ReferenceCurrency *ActiveCurrencyCode `xml:"RefCcy,omitempty"`
	// Language for all communication concerning the account.
	Language *LanguageCode `xml:"Lang,omitempty"`
	// Dividend option chosen by the account owner based on the options offered in the prospectus.
	IncomePreference *IncomePreference1Code `xml:"IncmPref,omitempty"`
	// Method by which the tax (withholding tax) is to be processed, ie, either withheld at source, or tax information reported to tax authorities, or tax information reported due to the provision of a tax certificate.
	TaxWithholdingMethod *TaxWithholdingMethod1Code `xml:"TaxWhldgMtd,omitempty"`
	// Reference of a letter of intent program, in which sales commissions are reduced based on the aggregate of a customer's actual purchase and anticipated purchases, over a specific period of time, and as agreed by the customer. A letter of intent program is mainly used in the US market.
	LetterIntentReference *Max35Text `xml:"LttrInttRef,omitempty"`
	// Reference of an accumulation rights program, in which sales commissions are based on a customer's present purchases of shares and the aggregate quantity previously purchased by the customer. An accumulation rights program is mainly used in the US market.
	AccumulationRightReference *Max35Text `xml:"AcmltnRghtRef,omitempty"`
	// Number of account owners or related parties required to authorise transactions on the account.
	RequiredSignatoriesNumber *Number `xml:"ReqrdSgntriesNb,omitempty"`
	// Name of the investment fund family.
	FundFamilyName *Max350Text `xml:"FndFmlyNm,omitempty"`
	// Detailed information about the investment fund associated to the account.
	ModifiedFundDetails []*ModificationScope12 `xml:"ModfdFndDtls,omitempty"`
	// Parameters to be applied on deal amount for orders when the amount is a fractional number.
	RoundingDetails *RoundingParameters1 `xml:"RndgDtls,omitempty"`
	// Party that manages the account on behalf of the account owner, that is manages the registration and booking of entries on the account, calculates balances on the account and provides information about the account.
	AccountServicer *PartyIdentification2Choice `xml:"AcctSvcr,omitempty"`
}
// SetName sets the Name field from a raw string.
func (i *InvestmentAccount28) SetName(value string) {
	i.Name = (*Max35Text)(&value)
}
// SetDesignation sets the Designation field from a raw string.
func (i *InvestmentAccount28) SetDesignation(value string) {
	i.Designation = (*Max35Text)(&value)
}
// SetType sets the Type field from a raw string.
func (i *InvestmentAccount28) SetType(value string) {
	i.Type = (*FundCashAccount3Code)(&value)
}
// SetExtendedType sets the ExtendedType field from a raw string.
func (i *InvestmentAccount28) SetExtendedType(value string) {
	i.ExtendedType = (*Extended350Code)(&value)
}
// SetOwnershipType sets the OwnershipType field from a raw string.
func (i *InvestmentAccount28) SetOwnershipType(value string) {
	i.OwnershipType = (*AccountOwnershipType3Code)(&value)
}
// SetExtendedOwnershipType sets the ExtendedOwnershipType field from a raw string.
func (i *InvestmentAccount28) SetExtendedOwnershipType(value string) {
	i.ExtendedOwnershipType = (*Extended350Code)(&value)
}
// SetTaxExemptionReason sets the TaxExemptionReason field from a raw string.
func (i *InvestmentAccount28) SetTaxExemptionReason(value string) {
	i.TaxExemptionReason = (*TaxExemptReason1Code)(&value)
}
// SetExtendedTaxExemptionReason sets the ExtendedTaxExemptionReason field from a raw string.
func (i *InvestmentAccount28) SetExtendedTaxExemptionReason(value string) {
	i.ExtendedTaxExemptionReason = (*Extended350Code)(&value)
}
// SetStatementFrequency sets the StatementFrequency field from a raw string.
func (i *InvestmentAccount28) SetStatementFrequency(value string) {
	i.StatementFrequency = (*EventFrequency1Code)(&value)
}
// SetExtendedStatementFrequency sets the ExtendedStatementFrequency field from a raw string.
func (i *InvestmentAccount28) SetExtendedStatementFrequency(value string) {
	i.ExtendedStatementFrequency = (*Extended350Code)(&value)
}
// SetReferenceCurrency sets the ReferenceCurrency field from a raw string.
func (i *InvestmentAccount28) SetReferenceCurrency(value string) {
	i.ReferenceCurrency = (*ActiveCurrencyCode)(&value)
}
// SetLanguage sets the Language field from a raw string.
func (i *InvestmentAccount28) SetLanguage(value string) {
	i.Language = (*LanguageCode)(&value)
}
// SetIncomePreference sets the IncomePreference field from a raw string.
func (i *InvestmentAccount28) SetIncomePreference(value string) {
	i.IncomePreference = (*IncomePreference1Code)(&value)
}
// SetTaxWithholdingMethod sets the TaxWithholdingMethod field from a raw string.
func (i *InvestmentAccount28) SetTaxWithholdingMethod(value string) {
	i.TaxWithholdingMethod = (*TaxWithholdingMethod1Code)(&value)
}
// SetLetterIntentReference sets the LetterIntentReference field from a raw string.
func (i *InvestmentAccount28) SetLetterIntentReference(value string) {
	i.LetterIntentReference = (*Max35Text)(&value)
}
// SetAccumulationRightReference sets the AccumulationRightReference field from a raw string.
func (i *InvestmentAccount28) SetAccumulationRightReference(value string) {
	i.AccumulationRightReference = (*Max35Text)(&value)
}
// SetRequiredSignatoriesNumber sets the RequiredSignatoriesNumber field from a raw string.
func (i *InvestmentAccount28) SetRequiredSignatoriesNumber(value string) {
	i.RequiredSignatoriesNumber = (*Number)(&value)
}
// SetFundFamilyName sets the FundFamilyName field from a raw string.
func (i *InvestmentAccount28) SetFundFamilyName(value string) {
	i.FundFamilyName = (*Max350Text)(&value)
}
// AddModifiedFundDetails appends a new, empty entry to ModifiedFundDetails
// and returns it for the caller to populate.
func (i *InvestmentAccount28) AddModifiedFundDetails() *ModificationScope12 {
	newValue := new(ModificationScope12)
	i.ModifiedFundDetails = append(i.ModifiedFundDetails, newValue)
	return newValue
}
// AddRoundingDetails allocates RoundingDetails and returns it for the caller
// to populate.
func (i *InvestmentAccount28) AddRoundingDetails() *RoundingParameters1 {
	i.RoundingDetails = new(RoundingParameters1)
	return i.RoundingDetails
}
// AddAccountServicer allocates AccountServicer and returns it for the caller
// to populate.
func (i *InvestmentAccount28) AddAccountServicer() *PartyIdentification2Choice {
	i.AccountServicer = new(PartyIdentification2Choice)
	return i.AccountServicer
}
package models
// ParseDataPRE builds a DataPRE record from a slice of raw field tokens, in
// the fixed column order of the SEC financial statement PRE data set
// (adsh, report, line, stmt, inpth, tag, version, prole, plabel, negating).
// NOTE(review): assumes len(tokens) >= 10 — confirm against the caller's
// tokenizer; short input would panic with an index out of range.
func ParseDataPRE(tokens []string) DataPRE {
	pre := DataPRE{}
	pre.Adsh = tokens[0]
	pre.Report = parseInt(tokens[1])
	pre.Line = parseInt(tokens[2])
	pre.Stmt = tokens[3]
	pre.Inpth = tokens[4]
	pre.Tag = tokens[5]
	pre.Version = tokens[6]
	pre.Prole = tokens[7]
	pre.Plabel = tokens[8]
	// The negating flag is encoded as the literal string "1".
	pre.Negating = tokens[9] == "1"
	return pre
}
// DataPRE is one row of the SEC financial statement presentation ("PRE") data
// set, describing where a tagged value appears within a filing's statements.
type DataPRE struct {
	/**
	Accession Number. The 20-character string
	formed from the 18-digit number assigned by
	the Commission to each EDGAR submission.
	*/
	Adsh string `gorm:"index:idx_pres_adsh"`
	/**
	Represents the report grouping. The numeric
	value refers to the "R file" as computed by
	the renderer and posted on the EDGAR website.
	Note that in some situations the numbers skip.
	*/
	Report int //`gorm:"index:idx_pre"`
	/**
	Represents the tag's presentation line order
	for a given report. Together with the statement
	and report field, presentation location,
	order and grouping can be derived.
	*/
	Line int //`gorm:"index:idx_pre"`
	/**
	The financial statement location to which the value of the "report" field pertains.
	(
	CP = Cover Page, BS = Balance Sheet, IS = Income Statement,
	CF = Cash Flow, EQ = Equity,
	CI = Comprehensive Income, UN = Unclassifiable Statement).
	*/
	Stmt string
	/**
	1 indicates that the value was presented "parenthetically"
	instead of in fields within the financial statements.
	For example: Receivables (net of allowance for bad debts of $200 in 2012) $700.
	*/
	Inpth string
	/**
	The tag chosen by the filer for this line item.
	*/
	Tag string
	/**
	The taxonomy identifier if the tag is a standard tag, otherwise adsh.
	*/
	Version string
	/**
	The XBRL link "role" of the preferred label,
	using only the portion of the role URI after the last "/".
	*/
	Prole string
	/**
	The text presented on the line item, also known as a "preferred" label.
	*/
	Plabel string
	/**
	Flag to indicate whether the prole is treated as negating by the renderer.
	*/
	Negating bool
}
package gofun
// BoolOrElse returns x if x is bool, otherwise y.
func BoolOrElse(x interface{}, y bool) bool {
	if z, ok := x.(bool); ok {
		return z
	}
	return y
}
// ByteOrElse returns x if x is byte, otherwise y.
func ByteOrElse(x interface{}, y byte) byte {
	if z, ok := x.(byte); ok {
		return z
	}
	return y
}
// Complex64OrElse returns x if x is complex64, otherwise y.
func Complex64OrElse(x interface{}, y complex64) complex64 {
	if z, ok := x.(complex64); ok {
		return z
	}
	return y
}
// Complex128OrElse returns x if x is complex128, otherwise y.
func Complex128OrElse(x interface{}, y complex128) complex128 {
	if z, ok := x.(complex128); ok {
		return z
	}
	return y
}
// ErrorOrElse returns x if x is error, otherwise y.
func ErrorOrElse(x interface{}, y error) error {
	if z, ok := x.(error); ok {
		return z
	}
	return y
}
// Float32OrElse returns x if x is float32, otherwise y.
func Float32OrElse(x interface{}, y float32) float32 {
	if z, ok := x.(float32); ok {
		return z
	}
	return y
}
// Float64OrElse returns x if x is float64, otherwise y.
func Float64OrElse(x interface{}, y float64) float64 {
	if z, ok := x.(float64); ok {
		return z
	}
	return y
}
// IntOrElse returns x if x is int, otherwise y.
func IntOrElse(x interface{}, y int) int {
	if z, ok := x.(int); ok {
		return z
	}
	return y
}
// Int8OrElse returns x if x is int8, otherwise y.
func Int8OrElse(x interface{}, y int8) int8 {
	if z, ok := x.(int8); ok {
		return z
	}
	return y
}
// Int16OrElse returns x if x is int16, otherwise y.
func Int16OrElse(x interface{}, y int16) int16 {
	if z, ok := x.(int16); ok {
		return z
	}
	return y
}
// Int32OrElse returns x if x is int32, otherwise y.
func Int32OrElse(x interface{}, y int32) int32 {
	if z, ok := x.(int32); ok {
		return z
	}
	return y
}
// Int64OrElse returns x if x is int64, otherwise y.
func Int64OrElse(x interface{}, y int64) int64 {
	if z, ok := x.(int64); ok {
		return z
	}
	return y
}
// RuneOrElse returns x if x is rune, otherwise y.
func RuneOrElse(x interface{}, y rune) rune {
	if z, ok := x.(rune); ok {
		return z
	}
	return y
}
// StringOrElse returns x if x is string, otherwise y.
func StringOrElse(x interface{}, y string) string {
	if z, ok := x.(string); ok {
		return z
	}
	return y
}
// UintOrElse returns x if x is uint, otherwise y.
func UintOrElse(x interface{}, y uint) uint {
	if z, ok := x.(uint); ok {
		return z
	}
	return y
}
// Uint8OrElse returns x if x is uint8, otherwise y.
func Uint8OrElse(x interface{}, y uint8) uint8 {
	if z, ok := x.(uint8); ok {
		return z
	}
	return y
}
// Uint16OrElse returns x if x is uint16, otherwise y.
func Uint16OrElse(x interface{}, y uint16) uint16 {
	if z, ok := x.(uint16); ok {
		return z
	}
	return y
}
// Uint32OrElse returns x if x is uint32, otherwise y.
func Uint32OrElse(x interface{}, y uint32) uint32 {
	if z, ok := x.(uint32); ok {
		return z
	}
	return y
}
// Uint64OrElse returns x if x is uint64, otherwise y.
func Uint64OrElse(x interface{}, y uint64) uint64 {
	if z, ok := x.(uint64); ok {
		return z
	}
	return y
}
// UintptrOrElse returns x if x is uintptr, otherwise y.
func UintptrOrElse(x interface{}, y uintptr) uintptr {
	if z, ok := x.(uintptr); ok {
		return z
	}
	return y
}
// InterfaceSliceOrElse returns x if x is InterfaceSlice, otherwise y.
func InterfaceSliceOrElse(x interface{}, y InterfaceSlice) InterfaceSlice {
z, isOk := x.(InterfaceSlice)
if isOk {
return z
} else {
return y
}
}
// InterfacePairMapOrElse returns x if x is InterfacePairMap, otherwise y.
func InterfacePairMapOrElse(x interface{}, y InterfacePairMap) InterfacePairMap {
z, isOk := x.(InterfacePairMap)
if isOk {
return z
} else {
return y
}
}
// InterfacePairFunctionOrElse returns x if x is InterfacePairFunction, otherwise y.
func InterfacePairFunctionOrElse(x interface{}, y InterfacePairFunction) InterfacePairFunction {
z, isOk := x.(InterfacePairFunction)
if isOk {
return z
} else {
return y
}
} | utils.go | 0.695131 | 0.608914 | utils.go | starcoder |
package main
import (
"fmt"
)
// MaxTrips is the number of stops a driver visits in one work day: travelling
// between stops takes one (1) minute, and a day is eight (8) hours of sixty
// (60) minutes, so 8 * 60 = 480 trips are made per day.
const MaxTrips int = 480

// A BusDriver has a daily route composed of MaxTrips stops and a collection
// of gossip which can be shared with other BusDrivers.
type BusDriver struct {
	DailyRoute []int
	Gossips    []*BusDriver
}

// NewBusDriver builds a BusDriver whose daily route repeats the given
// circular route r for all MaxTrips trips. The driver starts out knowing
// exactly one piece of gossip: their own.
func NewBusDriver(r ...int) *BusDriver {
	d := &BusDriver{}
	// The driver's own gossip, represented by the driver itself.
	d.Gossips = []*BusDriver{d}
	// Schedule the circular route once per trip for the whole day.
	for trip := 0; trip < MaxTrips; trip++ {
		d.DailyRoute = append(d.DailyRoute, r...)
	}
	return d
}
// ExchangeGossip shares unknown gossip between two BusDrivers; afterwards
// both know the union of what either knew before.
func ExchangeGossip(bd1, bd2 *BusDriver) {
	// knows reports whether driver d has already heard gossip g.
	knows := func(d, g *BusDriver) bool {
		for _, have := range d.Gossips {
			if have == g {
				return true
			}
		}
		return false
	}
	// First pass bd1 -> bd2, then bd2 -> bd1 (which includes anything bd2
	// just learned, already filtered out by the knows check).
	for _, g := range bd1.Gossips {
		if !knows(bd2, g) {
			bd2.Gossips = append(bd2.Gossips, g)
		}
	}
	for _, g := range bd2.Gossips {
		if !knows(bd1, g) {
			bd1.Gossips = append(bd1.Gossips, g)
		}
	}
}
// AllGossipExchanged reports whether every BusDriver has heard every piece of
// gossip. Each driver contributes exactly one gossip (represented by the
// driver itself), so complete exchange means every driver's gossip list has
// one entry per driver.
//
// Bug fix: the previous implementation only compared adjacent drivers' gossip
// counts, so equal-but-incomplete states (e.g. two disjoint pairs that each
// swapped once, leaving everyone with two gossips) were wrongly reported as
// fully exchanged.
func AllGossipExchanged(bds ...*BusDriver) bool {
	for _, bd := range bds {
		if len(bd.Gossips) != len(bds) {
			return false
		}
	}
	return true
}
// BusDriverGossipExchange calculates the number of stops each BusDriver must
// make before they all have shared each other's gossip. Returns -1 if all of
// the BusDrivers have not shared and heard all of the gossip there is to
// share and hear by the end of their routes.
func BusDriverGossipExchange(r ...[]int) int {
	var drvs []*BusDriver
	for br := range r {
		drvs = append(drvs, NewBusDriver(r[br]...))
	}
	// Simulate each trip: any pair of drivers standing at the same stop at
	// the same time exchanges gossip. Note the pair iteration order matters
	// for transitive sharing within a single stop (A->B then B->C).
	for t := 0; t < MaxTrips; t++ {
		for src := range drvs {
			for dst := range drvs {
				switch {
				// Dont't exchange gossip with the same BusDriver.
				case drvs[src] == drvs[dst]:
					continue
				// Two different BusDrivers at the same bus stop.
				case drvs[src].DailyRoute[t] == drvs[dst].DailyRoute[t]:
					ExchangeGossip(drvs[src], drvs[dst])
				}
			}
		}
		// Trips are 1-indexed in the result, hence t+1.
		if AllGossipExchanged(drvs...) {
			return t + 1
		}
	}
	return -1
}
// main runs the gossip-exchange simulation over a set of fixed test routes
// and prints the number of stops required for each (or -1 when the gossip is
// never fully exchanged within one work day).
func main() {
	tests := [][][]int{
		[][]int {
			[]int{3, 1, 2, 3},
			[]int{3, 2, 3, 1},
			[]int{4, 2, 3, 4, 5},
		},
		// Two drivers whose routes never intersect: expected -1.
		[][]int{
			[]int{2, 1, 2},
			[]int{5, 2, 8},
		},
		[][]int{
			[]int{7, 11, 2, 2, 4, 8, 2, 2},
			[]int{3, 0, 11, 8},
			[]int{5, 11, 8, 10, 3, 11},
			[]int{5, 9, 2, 5, 0, 3},
			[]int{7, 4, 8, 2, 8, 1, 0, 5},
			[]int{3, 6, 8, 9},
			[]int{4, 2, 11, 3, 3},
		},
		[][]int {
			[]int{12, 23, 15, 2, 8, 20, 21, 3, 23, 3, 27, 20, 0},
			[]int{21, 14, 8, 20, 10, 0, 23, 3, 24, 23, 0, 19, 14, 12, 10, 9, 12, 12, 11, 6, 27, 5},
			[]int{8, 18, 27, 10, 11, 22, 29, 23, 14},
			[]int{13, 7, 14, 1, 9, 14, 16, 12, 0, 10, 13, 19, 16, 17},
			[]int{24, 25, 21, 4, 6, 19, 1, 3, 26, 11, 22, 28, 14, 14, 27, 7, 20, 8, 7, 4, 1, 8, 10, 18, 21},
			[]int{13, 20, 26, 22, 6, 5, 6, 23, 26, 2, 21, 16, 26, 24},
			[]int{6, 7, 17, 2, 22, 23, 21},
			[]int{23, 14, 22, 28, 10, 23, 7, 21, 3, 20, 24, 23, 8, 8, 21, 13, 15, 6, 9, 17, 27, 17, 13, 14},
			[]int{23, 13, 1, 15, 5, 16, 7, 26, 22, 29, 17, 3, 14, 16, 16, 18, 6, 10, 3, 14, 10, 17, 27, 25},
			[]int{25, 28, 5, 21, 8, 10, 27, 21, 23, 28, 7, 20, 6, 6, 9, 29, 27, 26, 24, 3, 12, 10, 21, 10, 12, 17},
			[]int{26, 22, 26, 13, 10, 19, 3, 15, 2, 3, 25, 29, 25, 19, 19, 24, 1, 26, 22, 10, 17, 19, 28, 11, 22, 2, 13},
			[]int{8, 4, 25, 15, 20, 9, 11, 3, 19},
			[]int{24, 29, 4, 17, 2, 0, 8, 19, 11, 28, 13, 4, 16, 5, 15, 25, 16, 5, 6, 1, 0, 19, 7, 4, 6},
			[]int{16, 25, 15, 17, 20, 27, 1, 11, 1, 18, 14, 23, 27, 25, 26, 17, 1},
		},
	}
	for t := range tests {
		fmt.Println(BusDriverGossipExchange(tests[t]...))
	}
}
package operator
import (
"github.com/matrixorigin/matrixone/pkg/container/nulls"
"github.com/matrixorigin/matrixone/pkg/container/vector"
"github.com/matrixorigin/matrixone/pkg/vm/process"
)
// ColOrCol computes the element-wise logical OR of two boolean column
// vectors of equal length, combining their null bitmaps.
func ColOrCol(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	lvs, rvs := lv.Col.([]bool), rv.Col.([]bool)
	n := len(lvs)
	vec, err := proc.AllocVector(lv.Typ, int64(n)*1)
	if err != nil {
		return nil, err
	}
	col := make([]bool, len(lvs))
	for i := 0; i < len(lvs); i++ {
		col[i] = lvs[i] || rvs[i]
	}
	nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
	vector.SetCol(vec, col)
	return vec, nil
}
// ColOrConst ORs a boolean column vector with a scalar (constant) boolean.
func ColOrConst(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	lvs, rvs := lv.Col.([]bool), rv.Col.([]bool)
	n := len(lvs)
	vec, err := proc.AllocVector(lv.Typ, int64(n)*1)
	if err != nil {
		return nil, err
	}
	rb := rvs[0]
	col := make([]bool, len(lvs))
	for i := 0; i < len(lvs); i++ {
		col[i] = lvs[i] || rb
	}
	nulls.Or(lv.Nsp, rv.Nsp, vec.Nsp)
	vector.SetCol(vec, col)
	return vec, nil
}
// ColOrNull ORs a boolean column vector with a scalar NULL: every result row
// is marked null.
// NOTE(review): this implements "x OR NULL = NULL" for all rows, not the
// SQL three-valued "true OR NULL = true" — confirm intended semantics.
func ColOrNull(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	lvs := lv.Col.([]bool)
	n := len(lvs)
	vec, err := proc.AllocVector(lv.Typ, int64(n)*1)
	if err != nil {
		return nil, err
	}
	col := make([]bool, len(lvs))
	for i := 0; i < len(lvs); i++ {
		nulls.Add(vec.Nsp, uint64(i))
	}
	vector.SetCol(vec, col)
	return vec, nil
}
// ConstOrCol ORs a scalar boolean with a column vector (commutes to
// ColOrConst).
func ConstOrCol(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return ColOrConst(rv, lv, proc)
}
// ConstOrConst ORs two scalar booleans, producing a scalar result.
func ConstOrConst(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	lvs, rvs := lv.Col.([]bool), rv.Col.([]bool)
	vec := proc.AllocScalarVector(lv.Typ)
	vector.SetCol(vec, []bool{lvs[0] || rvs[0]})
	return vec, nil
}
// ConstOrNull ORs a scalar boolean with a scalar NULL, yielding scalar NULL.
func ConstOrNull(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return proc.AllocScalarNullVector(lv.Typ), nil
}
// NullOrCol ORs a scalar NULL with a column vector (commutes to ColOrNull).
func NullOrCol(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return ColOrNull(rv, lv, proc)
}
// NullOrConst ORs a scalar NULL with a scalar boolean (commutes to
// ConstOrNull).
func NullOrConst(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return ConstOrNull(rv, lv, proc)
}
// NullOrNull ORs two scalar NULLs, yielding scalar NULL.
func NullOrNull(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error) {
	return proc.AllocScalarNullVector(lv.Typ), nil
}
type OrFunc = func(lv, rv *vector.Vector, proc *process.Process) (*vector.Vector, error)
var OrFuncMap = map[int]OrFunc{}
var OrFuncVec = []OrFunc{
ColOrCol, ColOrConst, ColOrNull,
ConstOrCol, ConstOrConst, ConstOrNull,
NullOrCol, NullOrConst, NullOrNull,
}
// InitOrFuncMap fills OrFuncMap from OrFuncVec, keyed by the strategy's
// position in the vector.
func InitOrFuncMap() {
	for idx, fn := range OrFuncVec {
		OrFuncMap[idx] = fn
	}
}
func Or(vectors []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
lv := vectors[0]
rv := vectors[1]
lt, rt := GetTypeID(lv), GetTypeID(rv)
vec, err := OrFuncMap[lt*3+rt](lv, rv, proc)
if err != nil {
return nil, err
}
return vec, nil
} | pkg/sql/plan2/function/operator/or.go | 0.548915 | 0.450178 | or.go | starcoder |
package matsa
import (
"math"
)
// Abs returns a new List_f64 whose components are the absolute values of the
// receiver's components. An empty receiver yields a nil list.
func (vec List_f64) Abs() List_f64 {
	if vec.Length() == 0 {
		return nil
	}
	// Pre-size the output to avoid repeated growth during append.
	out := make(List_f64, 0, vec.Length())
	for _, val := range vec {
		out = append(out, math.Abs(val))
	}
	return out
}
// Add returns the component-wise sum of vec and vec2. If either operand is
// empty or their lengths differ, a nil list is returned.
func (vec List_f64) Add(vec2 List_f64) List_f64 {
	l1, l2 := vec.Length(), vec2.Length()
	if l1 == 0 || l2 == 0 || l1 != l2 {
		return nil
	}
	// Pre-size the output to avoid repeated growth during append.
	out := make(List_f64, 0, l1)
	for i, val := range vec {
		out = append(out, val+vec2[i])
	}
	return out
}
// Sub returns the component-wise difference of vec and vec2 (vec - vec2).
// If either operand is empty or their lengths differ, a nil list is returned.
func (vec List_f64) Sub(vec2 List_f64) List_f64 {
	l1, l2 := vec.Length(), vec2.Length()
	if l1 == 0 || l2 == 0 || l1 != l2 {
		return nil
	}
	// Pre-size the output to avoid repeated growth during append.
	out := make(List_f64, 0, l1)
	for i, val := range vec {
		out = append(out, val-vec2[i])
	}
	return out
}
// Mul returns the receiver scaled by m (component-wise scalar multiplication).
// An empty receiver yields a nil list.
func (vec List_f64) Mul(m float64) List_f64 {
	if vec.Length() == 0 {
		return nil
	}
	// Pre-size the output to avoid repeated growth during append.
	out := make(List_f64, 0, vec.Length())
	for _, val := range vec {
		out = append(out, val*m)
	}
	return out
}
// Dot returns the standard dot product of vec and vec2. If either operand is
// empty or their lengths differ, the result is 0.
func (vec List_f64) Dot(vec2 List_f64) float64 {
	l1, l2 := vec.Length(), vec2.Length()
	if l1 == 0 || l2 == 0 || l1 != l2 {
		return 0
	}
	var total float64
	for i, val := range vec {
		total += val * vec2[i]
	}
	return total
}
// Distance returns the Euclidean distance between vec and vec2, computed as
// the norm of their component-wise difference.
func (vec List_f64) Distance(vec2 List_f64) float64 {
	vec3 := vec.Sub(vec2)
	return vec3.Norm()
}
// Norm returns the List_f64's norm.
func (vec List_f64) Norm() float64 {
if vec.Length() == 0 {
return 0
}
dot := vec.Dot(vec)
return math.Sqrt(dot)
} | linear.go | 0.878568 | 0.536981 | linear.go | starcoder |
package gostuff
import (
"fmt"
"math"
"golang.org/x/net/websocket"
)
// grabRating computes a player's updated Glicko rating given both players'
// ratings/deviations and the game result (1 win, 0.5 draw, 0 loss from the
// player's perspective). It returns the new rating rounded to an integer and
// the new deviation rounded to 4 decimal places.
// NOTE(review): the opponent's volatility uses DefaultVol while the player's
// is hard-coded to 0.06 — confirm this asymmetry is intentional.
func grabRating(pRating float64, pDeviation float64, oRating float64, oDeviation float64, results float64) (float64, float64) {
	player := &Rating{pRating, pDeviation, 0.06}
	opponents := &Rating{oRating, oDeviation, DefaultVol}
	newRating, _ := CalculateRating(player, opponents, results)
	return Round(newRating.Rating), RoundPlus(newRating.Deviation, 4)
}
// Round rounds f to the nearest integer using floor(f + 0.5).
// NOTE(review): for negative half-values this rounds toward +Inf
// (e.g. -1.5 -> -1), unlike math.Round; ratings here are positive so it
// does not matter in practice — confirm before reusing elsewhere.
func Round(f float64) float64 {
	return math.Floor(f + .5)
}
// RoundPlus rounds f to the given number of decimal places by scaling,
// rounding, and scaling back.
func RoundPlus(f float64, places int) float64 {
	shift := math.Pow(10, float64(places))
	return Round(f*shift) / shift
}
// fmt.Println(Round(123.4999))
// fmt.Println(RoundPlus(123.558, 2))
// ComputeRating recomputes both players' ratings after game gameID finishes.
// name is one of the two players; the opponent is looked up via
// PrivateChat[name] (assumed to map a player to their current opponent —
// TODO confirm). result is from white's perspective (1 white win, 0.5 draw,
// 0 black win). The new ratings and deviations are persisted per game type,
// rating history is appended, and both connected players are notified over
// their websockets.
func ComputeRating(name string, gameID int, gameType string, result float64) {
	var bullet, blitz, standard, correspondence, bulletRD, blitzRD, standardRD, correspondenceRD float64
	var oBullet, oBlitz, oStandard, oCorrespondence, oBulletRD, oBlitzRD, oStandardRD, oCorrespondenceRD float64
	//update player's rating and notify them of rating change, also determine player color to assign correct rating
	// The non-"o" variables always hold WHITE's ratings and the "o" variables
	// BLACK's, regardless of which player `name` refers to.
	if All.Games[gameID].WhitePlayer == name {
		_, bullet, blitz, standard, correspondence, bulletRD, blitzRD, standardRD,
			correspondenceRD = GetRatingAndRD(name)
		_, oBullet, oBlitz, oStandard, oCorrespondence, oBulletRD, oBlitzRD,
			oStandardRD, oCorrespondenceRD = GetRatingAndRD(PrivateChat[name])
	} else {
		_, bullet, blitz, standard, correspondence, bulletRD, blitzRD, standardRD,
			correspondenceRD = GetRatingAndRD(PrivateChat[name])
		_, oBullet, oBlitz, oStandard, oCorrespondence, oBulletRD, oBlitzRD,
			oStandardRD, oCorrespondenceRD = GetRatingAndRD(name)
	}
	var whiteRating float64
	var blackRating float64
	var whiteRD float64
	var blackRD float64
	const (
		bulletString         = "bullet"
		blitzString          = "blitz"
		standardString       = "standard"
		correspondenceString = "correspondence"
	)
	// Each branch below is identical apart from which time-control rating is
	// updated: compute white's new rating against black (result) and black's
	// against white (1 - result), then persist ratings and history for both.
	if gameType == bulletString {
		whiteRating, whiteRD = grabRating(bullet, bulletRD, oBullet, oBulletRD, result)
		blackRating, blackRD = grabRating(oBullet, oBulletRD, bullet, bulletRD, 1.0-result)
		//updates database with players new rating and RD
		if All.Games[gameID].WhitePlayer == name {
			updateRating(bulletString, name, whiteRating, whiteRD, PrivateChat[name], blackRating, blackRD)
			updateRatingHistory(name, bulletString, whiteRating)
			updateRatingHistory(PrivateChat[name], bulletString, blackRating)
		} else {
			updateRating(bulletString, PrivateChat[name], whiteRating, whiteRD, name, blackRating, blackRD)
			updateRatingHistory(PrivateChat[name], bulletString, whiteRating)
			updateRatingHistory(name, bulletString, blackRating)
		}
	} else if gameType == blitzString {
		whiteRating, whiteRD = grabRating(blitz, blitzRD, oBlitz, oBlitzRD, result)
		blackRating, blackRD = grabRating(oBlitz, oBlitzRD, blitz, blitzRD, 1.0-result)
		//updates both players rating
		if All.Games[gameID].WhitePlayer == name {
			updateRating(blitzString, name, whiteRating, whiteRD, PrivateChat[name], blackRating, blackRD)
			updateRatingHistory(name, blitzString, whiteRating)
			updateRatingHistory(PrivateChat[name], blitzString, blackRating)
		} else {
			updateRating(blitzString, PrivateChat[name], whiteRating, whiteRD, name, blackRating, blackRD)
			updateRatingHistory(PrivateChat[name], blitzString, whiteRating)
			updateRatingHistory(name, blitzString, blackRating)
		}
	} else if gameType == standardString {
		whiteRating, whiteRD = grabRating(standard, standardRD, oStandard, oStandardRD, result)
		blackRating, blackRD = grabRating(oStandard, oStandardRD, standard, standardRD, 1.0-result)
		//updates database with players new rating and RD
		if All.Games[gameID].WhitePlayer == name {
			updateRating(standardString, name, whiteRating, whiteRD, PrivateChat[name], blackRating, blackRD)
			updateRatingHistory(name, standardString, whiteRating)
			updateRatingHistory(PrivateChat[name], standardString, blackRating)
		} else {
			updateRating(standardString, PrivateChat[name], whiteRating, whiteRD, name, blackRating, blackRD)
			updateRatingHistory(PrivateChat[name], standardString, whiteRating)
			updateRatingHistory(name, standardString, blackRating)
		}
	} else if gameType == correspondenceString {
		whiteRating, whiteRD = grabRating(correspondence, correspondenceRD, oCorrespondence, oCorrespondenceRD, result)
		blackRating, blackRD = grabRating(oCorrespondence, oCorrespondenceRD, correspondence, correspondenceRD, 1.0-result)
		//updates database with players new rating and RD
		if All.Games[gameID].WhitePlayer == name {
			updateRating(correspondenceString, name, whiteRating, whiteRD, PrivateChat[name], blackRating, blackRD)
			updateRatingHistory(name, correspondenceString, whiteRating)
			updateRatingHistory(PrivateChat[name], correspondenceString, blackRating)
		} else {
			updateRating(correspondenceString, PrivateChat[name], whiteRating, whiteRD, name, blackRating, blackRD)
			updateRatingHistory(PrivateChat[name], correspondenceString, whiteRating)
			updateRatingHistory(name, correspondenceString, blackRating)
		}
	} else {
		fmt.Println("Not a valid game type rate.go 1")
	}
	// Notify both players of the new ratings if they are still connected.
	var r Nrating
	r.Type = "rating"
	r.WhiteRating = whiteRating
	r.BlackRating = blackRating
	if _, ok := Active.Clients[name]; ok { // send data if other guy is still connected
		websocket.JSON.Send(Active.Clients[name], &r)
	}
	if _, ok := Active.Clients[PrivateChat[name]]; ok { // send data if other guy is still connected
		websocket.JSON.Send(Active.Clients[PrivateChat[name]], &r)
	}
}
package query
// RawResultHeap is a heap storing a list of values.
// The ordering of such items are determined by `lessThanFn`.
// The smallest item will be at the top of the heap.
type RawResultHeap struct {
	dv         []RawResult                  // backing array, heap-ordered
	lessThanFn func(v1, v2 RawResult) bool  // strict ordering; true when v1 sorts before v2
}
// NewRawResultHeap creates a new values heap with the given initial capacity
// and ordering function.
func NewRawResultHeap(
	initCapacity int,
	lessThanFn func(v1, v2 RawResult) bool,
) *RawResultHeap {
	return &RawResultHeap{
		dv:         make([]RawResult, 0, initCapacity),
		lessThanFn: lessThanFn,
	}
}
// RawData returns the underlying backing array in no particular order.
func (h RawResultHeap) RawData() []RawResult { return h.dv }
// Min returns the "smallest" heap element according to the `lessThan` function.
// It panics on an empty heap.
func (h RawResultHeap) Min() RawResult { return h.dv[0] }
// Len returns the number of items in the heap.
func (h RawResultHeap) Len() int { return len(h.dv) }
// Cap returns the heap capacity before a reallocation is needed.
func (h RawResultHeap) Cap() int { return cap(h.dv) }
// Less returns true if item `i` is less than item `j`.
func (h RawResultHeap) Less(i, j int) bool {
	return h.lessThanFn(h.dv[i], h.dv[j])
}
// Swap swaps item `i` with item `j`.
func (h RawResultHeap) Swap(i, j int) { h.dv[i], h.dv[j] = h.dv[j], h.dv[i] }
// Reset resets the internal backing array, keeping its capacity.
func (h *RawResultHeap) Reset() { h.dv = h.dv[:0] }
// Push pushes a value onto the heap, restoring the heap invariant by
// sifting the new element up from the last position.
func (h *RawResultHeap) Push(value RawResult) {
	h.dv = append(h.dv, value)
	h.shiftUp(h.Len() - 1)
}
// Pop pops the smallest value from the heap: it swaps the root with the last
// element, re-heapifies the shortened prefix, then shrinks the slice. Note
// the popped element remains in the backing array just past the new length,
// which SortInPlace relies on.
func (h *RawResultHeap) Pop() RawResult {
	var (
		n   = h.Len()
		val = h.dv[0]
	)
	h.dv[0], h.dv[n-1] = h.dv[n-1], h.dv[0]
	h.heapify(0, n-1)
	h.dv = h.dv[0 : n-1]
	return val
}
// SortInPlace sorts the heap in place and returns the sorted data, with the smallest element
// at the end of the returned array. This is done by repeated swapping the smallest element with
// the last element of the current heap and shrinking the heap size.
// NB: The heap becomes invalid after this is called.
func (h *RawResultHeap) SortInPlace() []RawResult {
	numElems := len(h.dv)
	// Each Pop leaves the popped (smallest remaining) element just past the
	// shrinking heap, so after draining, the backing array holds the elements
	// in descending order.
	for len(h.dv) > 0 {
		h.Pop()
	}
	// Re-expand the slice over the now-sorted backing array.
	res := h.dv[:numElems]
	h.dv = nil
	h.lessThanFn = nil
	return res
}
// shiftUp sifts the element at index i toward the root until its parent is
// no longer greater, restoring the heap invariant after an insertion.
func (h RawResultHeap) shiftUp(i int) {
	for {
		parent := (i - 1) / 2
		if parent == i || !h.Less(i, parent) {
			break
		}
		h.dv[parent], h.dv[i] = h.dv[i], h.dv[parent]
		i = parent
	}
}
// heapify sifts the element at index i down within the first n elements,
// repeatedly swapping it with its smaller child until the heap invariant
// holds for the subtree rooted at i.
func (h RawResultHeap) heapify(i, n int) {
	for {
		left := i*2 + 1
		right := left + 1
		smallest := i
		if left < n && h.Less(left, smallest) {
			smallest = left
		}
		if right < n && h.Less(right, smallest) {
			smallest = right
		}
		if smallest == i {
			return
		}
		h.dv[i], h.dv[smallest] = h.dv[smallest], h.dv[i]
		i = smallest
	}
}
// TopNRawResults keeps track of the top n values in a value sequence for the
// order defined by the `lessThanFn`. In particular if `lessThanFn` defines
// an increasing order (returning true if `v1` < `v2`), the collection stores
// the top N largest values, and vice versa.
type TopNRawResults struct {
	n          int                          // maximum number of values retained
	lessThanFn func(v1, v2 RawResult) bool  // ordering used to decide which values stay
	h          *RawResultHeap               // min-heap of the retained values
}
// NewTopNRawResults creates a new top n value collection retaining at most n
// values under the given ordering.
func NewTopNRawResults(
	n int,
	lessThanFn func(v1, v2 RawResult) bool,
) *TopNRawResults {
	return &TopNRawResults{
		n:          n,
		lessThanFn: lessThanFn,
		h:          NewRawResultHeap(n, lessThanFn),
	}
}
// RawResultAddOptions provide the options for adding a value.
type RawResultAddOptions struct {
	CopyOnAdd bool                              // if true, values are copied before being stored
	CopyFn    func(v RawResult) RawResult       // makes a fresh copy of a value
	CopyToFn  func(src RawResult, target *RawResult) // copies src into an existing value, reusing its storage
}
// Len returns the number of items in the collection.
func (v TopNRawResults) Len() int { return v.h.Len() }
// Cap returns the collection capacity.
func (v TopNRawResults) Cap() int { return v.h.Cap() }
// RawData returns the underlying array backing the heap in no particular order.
func (v TopNRawResults) RawData() []RawResult { return v.h.RawData() }
// Top returns the "smallest" value according to the `lessThan` function.
func (v TopNRawResults) Top() RawResult { return v.h.Min() }
// Reset resets the internal array backing the heap.
func (v *TopNRawResults) Reset() { v.h.Reset() }
// Add adds a value to the collection. While fewer than n values are held the
// value is simply inserted; afterwards it replaces the current minimum only
// if it ranks above it. With CopyOnAdd set, inserted values are copied, and
// the evicted heap entry's storage is reused for the incoming value.
func (v *TopNRawResults) Add(val RawResult, opts RawResultAddOptions) {
	if v.h.Len() < v.n {
		if opts.CopyOnAdd {
			val = opts.CopyFn(val)
		}
		v.h.Push(val)
		return
	}
	// Heap is full: keep the new value only if it beats the current minimum.
	if min := v.h.Min(); !v.lessThanFn(min, val) {
		return
	}
	popped := v.h.Pop()
	if !opts.CopyOnAdd {
		v.h.Push(val)
		return
	}
	// Reuse popped item from the heap.
	opts.CopyToFn(val, &popped)
	v.h.Push(popped)
}
// SortInPlace sorts the backing heap in place and returns the sorted data
// (smallest element last, per RawResultHeap.SortInPlace).
// NB: The value collection becomes invalid after this is called.
func (v *TopNRawResults) SortInPlace() []RawResult {
	res := v.h.SortInPlace()
	v.h = nil
	v.lessThanFn = nil
	return res
}
// Package be provides holiday definitions for Belgium.
package be
import (
"time"
"github.com/rickar/cal/v2"
"github.com/rickar/cal/v2/aa"
)
var (
	// Nieuwjaar represents New Year's Day on 1-Jan
	Nieuwjaar = aa.NewYear.Clone(&cal.Holiday{Name: "Nieuwjaarsdag", Type: cal.ObservancePublic})
	// Paasmaandag represents Easter Monday on the day after Easter
	Paasmaandag = aa.EasterMonday.Clone(&cal.Holiday{Name: "Paasmaandag", Type: cal.ObservancePublic})
	// DagVanDeArbeid represents Labour Day (aa.WorkersDay; in Belgium observed
	// on 1-May — the previous "first Monday in May" note appeared incorrect)
	DagVanDeArbeid = aa.WorkersDay.Clone(&cal.Holiday{Name: "Dag van de Arbeid", Type: cal.ObservancePublic})
	// OnzeLieveHeerHemelvaart represents Ascension Day on the 39th day after Easter
	OnzeLieveHeerHemelvaart = aa.AscensionDay.Clone(&cal.Holiday{Name: "Onze Lieve Heer Hemelvaart", Type: cal.ObservancePublic})
	// Pinkstermaandag represents Pentecost Monday on the day after Pentecost (50 days after Easter)
	Pinkstermaandag = aa.PentecostMonday.Clone(&cal.Holiday{Name: "Pinkstermaandag", Type: cal.ObservancePublic})
	// NationaleFeestdag represents Belgian National Day on 21-Jul
	NationaleFeestdag = &cal.Holiday{
		Name:  "Nationale Feestdag",
		Type:  cal.ObservancePublic,
		Month: time.July,
		Day:   21,
		Func:  cal.CalcDayOfMonth,
	}
	// OnzeLieveVrouwHemelvaart represents Assumption of Mary on 15-Aug
	OnzeLieveVrouwHemelvaart = aa.AssumptionOfMary.Clone(&cal.Holiday{Name: "Onze Lieve Vrouw Hemelvaart", Type: cal.ObservancePublic})
	// Allerheiligen represents All Saints' Day on 1-Nov
	Allerheiligen = aa.AllSaintsDay.Clone(&cal.Holiday{Name: "Allerheiligen", Type: cal.ObservancePublic})
	// Wapenstilstand represents Armistice Day on 11-Nov
	Wapenstilstand = aa.ArmisticeDay.Clone(&cal.Holiday{Name: "Wapenstilstand", Type: cal.ObservancePublic})
	// Kerstmis represents Christmas Day on 25-Dec
	Kerstmis = aa.ChristmasDay.Clone(&cal.Holiday{Name: "Kerstmis", Type: cal.ObservancePublic})
	// Holidays provides a list of the standard national holidays
	Holidays = []*cal.Holiday{
		Nieuwjaar,
		Paasmaandag,
		DagVanDeArbeid,
		OnzeLieveHeerHemelvaart,
		Pinkstermaandag,
		NationaleFeestdag,
		OnzeLieveVrouwHemelvaart,
		Allerheiligen,
		Wapenstilstand,
		Kerstmis,
	}
)
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"math"
)
// day03cmd is the cobra subcommand that runs the Advent of Code day 3 solution.
var day03cmd = &cobra.Command{
	Use: "day03",
	Run: run03,
}
// init registers the day03 subcommand on the root command.
func init() {
	RootCmd.AddCommand(day03cmd)
}
// compute03 solves part 1 for the given square index, printing and returning
// the Manhattan distance.
func compute03(input int) int {
	output := doCompute03(input)
	fmt.Println(input, "=>", output)
	return output
}
// doCompute03 returns the Manhattan distance from spiral-memory square
// `input` back to square 1 (Advent of Code 2017, day 3 part 1), computed
// arithmetically from the square's ring ("level") and its offset along the
// ring's side rather than by walking the spiral.
func doCompute03(input int) int {
	if input == 1 {
		return 0
	}
	// Compute the current level
	level := int(math.Ceil(0.5*math.Sqrt(float64(input)) + 0.5))
	// Level is the absolute of the first manhattance distance coordinate
	// How long is one side of the level
	sideLength := 2*level - 1
	// The start index of the level
	start := SqrInt(2*(level-1) - 1)
	// Compute the offset in a circle
	circleOffset := input - start - 1
	// compute the offset on a specific side
	sideOffset := circleOffset % (sideLength - 1)
	// we're almost at the second parameter for the manhattan distance
	// all we need to do is adjust the offset for that it starts off-center
	offset := sideOffset - (sideLength/2 - 1)
	// Compute the manhattan distance
	return (level - 1) + AbsInt(offset)
}
// test03 runs compute03 on input and prints a failure message if the result
// does not match the expected output.
func test03(input int, output int) {
	val := compute03(input)
	if val != output {
		fmt.Println("Test failed, value should be", output, "but is", val)
	}
}
// makeIndex03b packs an (x, y) grid coordinate into a single map key.
// The packing x + (y << 16) is injective as long as |x| < 32768, which
// holds for the small spiral sizes this puzzle produces.
func makeIndex03b(x int, y int) int {
	return x + (y << 16)
}
// compute03b computes the part-2 spiral-sum value at step `target`,
// printing and returning it.
func compute03b(target int) int {
	output := doCompute03b(target)
	fmt.Println(target, "=>", output)
	return output
}
// nextMapStep03b fills in the value of spiral cell i (0-based, cell 0 is the
// center) as the sum of all already-populated neighbours (including
// diagonals, and the cell itself if present), stores it in m keyed by the
// packed coordinate, and returns the sum. The (x, y) coordinate is derived
// arithmetically from the cell's ring, side index and side offset.
func nextMapStep03b(m map[int]int, i int) int {
	// Compute coordinates
	level := int(math.Ceil(0.5*math.Sqrt(float64(i+1)) + 0.5))
	sideLength := 2*level - 1
	start := SqrInt(2*(level-1) - 1)
	circleOffset := i - start
	sideOffset := circleOffset % (sideLength - 1)
	offset := sideOffset - (sideLength/2 - 1)
	sideIndex := circleOffset / (sideLength - 1)
	x := 0
	y := 0
	// Map (side, offset) to grid coordinates: sides 0..3 are right, top,
	// left, bottom of the ring respectively.
	switch sideIndex {
	case 0:
		x = level - 1
		y = offset
	case 1:
		x = -offset
		y = level - 1
	case 2:
		x = -(level - 1)
		y = -offset
	case 3:
		x = offset
		y = -(level - 1)
	}
	// Compute location
	index := makeIndex03b(x, y)
	// Fetch 8 patch around index
	sum := 0
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			idx := makeIndex03b(x+dx, y+dy)
			sum += m[idx]
		}
	}
	m[index] = sum
	return sum
}
// doCompute03b builds the spiral sums up to and including step `target` and
// returns the last computed sum (cell 0 seeds the grid with value 1).
func doCompute03b(target int) int {
	m := make(map[int]int)
	m[0] = 1
	sum := 1
	for i := 1; i <= target; i++ {
		sum = nextMapStep03b(m, i)
	}
	// Return the last sum
	return sum
}
// find03b finds the first spiral sum larger than target, printing and
// returning it (the actual part-2 answer).
func find03b(target int) int {
	output := doFind03b(target)
	fmt.Println(target, "found", output)
	return output
}
// findTest03b runs doFind03b on target and prints a failure message if the
// result does not match the expected ref value.
func findTest03b(target int, ref int) int {
	output := doFind03b(target)
	fmt.Println(target, ref, output)
	if output != ref {
		fmt.Println("Test failed, value should be", ref, "but is", output)
	}
	return output
}
// doFind03b walks the spiral-sum sequence until it produces a sum strictly
// greater than `value`, and returns that first larger sum. The grid is
// seeded with 1 at the center cell.
func doFind03b(value int) int {
	m := make(map[int]int)
	m[0] = 1
	// The loop always terminates via return: the sums grow without bound.
	// (The previous version had an unreachable `return 0` after the loop,
	// which `go vet` flags.)
	for i := 1; ; i++ {
		if sum := nextMapStep03b(m, i); sum > value {
			return sum
		}
	}
}
// test03b runs compute03b on index and prints a failure message if the
// result does not match the expected output.
func test03b(index int, output int) {
	val := compute03b(index)
	if val != output {
		fmt.Println("Test failed, value for index", index, "should be", output, "but is", val)
	}
}
// run03 executes the day 3 solution: it first runs the puzzle's example
// cases as sanity checks for both parts, then computes the answers for the
// puzzle input 325489.
func run03(cmd *cobra.Command, args []string) {
	// Part 1 example/sanity checks.
	test03(1, 0)
	test03(2, 1)
	test03(3, 2)
	test03(4, 1)
	test03(5, 2)
	test03(6, 1)
	test03(7, 2)
	test03(8, 1)
	test03(9, 2)
	test03(12, 3)
	test03(17, 4)
	test03(21, 4)
	test03(22, 3)
	test03(23, 2)
	test03(1024, 31)
	// Part 1 answer.
	compute03(325489)
	// Part 2 sequence sanity checks.
	test03b(0, 1)
	test03b(1, 1)
	test03b(2, 2)
	test03b(3, 4)
	test03b(4, 5)
	test03b(5, 10)
	test03b(6, 11)
	test03b(7, 23)
	test03b(8, 25)
	test03b(9, 26)
	test03b(10, 54)
	test03b(11, 57)
	test03b(12, 59)
	findTest03b(24, 25)
	findTest03b(56, 57)
	findTest03b(57, 59)
	findTest03b(58, 59)
	// Part 2 answer.
	find03b(325489)
}
package ast
import (
"bytes"
"io"
"github.com/jensneuse/graphql-go-tools/internal/pkg/unsafebytes"
"github.com/jensneuse/graphql-go-tools/pkg/lexer/literal"
"github.com/jensneuse/graphql-go-tools/pkg/lexer/position"
)
// TypeKind discriminates the kinds of GraphQL type references.
type TypeKind int
const (
	// TypeKindUnknown is the zero-ish sentinel for an unrecognized kind.
	// The iota base of 14 keeps these values distinct from other kind enums
	// in the package — presumably; confirm against the other kind constants.
	TypeKindUnknown TypeKind = 14 + iota
	TypeKindNamed
	TypeKindList
	TypeKindNonNull
)
// Type is a GraphQL type reference node; List and NonNull wrappers chain to
// their inner type via OfType.
type Type struct {
	TypeKind TypeKind           // one of Named,List,NonNull
	Name     ByteSliceReference // e.g. String (only on NamedType)
	Open     position.Position  // [ (only on ListType)
	Close    position.Position  // ] (only on ListType)
	Bang     position.Position  // ! (only on NonNullType)
	OfType   int                // index of the wrapped type (List/NonNull)
}
// TypeNameBytes returns the name of the type at ref as a byte slice into the
// document input.
func (d *Document) TypeNameBytes(ref int) ByteSlice {
	return d.Input.ByteSlice(d.Types[ref].Name)
}
// TypeNameString returns the name of the type at ref as a string, using an
// unsafe zero-copy conversion; the result aliases the document input.
func (d *Document) TypeNameString(ref int) string {
	return unsafebytes.BytesToString(d.Input.ByteSlice(d.Types[ref].Name))
}
// PrintType writes the GraphQL syntax of the type at ref to w, recursing
// through NonNull (inner type followed by '!') and List ('[' inner ']')
// wrappers down to the named type. Unknown kinds write nothing.
func (d *Document) PrintType(ref int, w io.Writer) error {
	switch d.Types[ref].TypeKind {
	case TypeKindNonNull:
		err := d.PrintType(d.Types[ref].OfType, w)
		if err != nil {
			return err
		}
		_, err = w.Write(literal.BANG)
		return err
	case TypeKindNamed:
		_, err := w.Write(d.Input.ByteSlice(d.Types[ref].Name))
		return err
	case TypeKindList:
		_, err := w.Write(literal.LBRACK)
		if err != nil {
			return err
		}
		err = d.PrintType(d.Types[ref].OfType, w)
		if err != nil {
			return err
		}
		_, err = w.Write(literal.RBRACK)
		return err
	}
	return nil
}
// PrintTypeBytes renders the type at ref into buf (allocating a small buffer
// when buf is nil) and returns the resulting bytes.
func (d *Document) PrintTypeBytes(ref int, buf []byte) ([]byte, error) {
	if buf == nil {
		buf = make([]byte, 0, 24)
	}
	b := bytes.NewBuffer(buf)
	err := d.PrintType(ref, b)
	return b.Bytes(), err
}
// AddType appends t to the document's type storage and returns its ref.
func (d *Document) AddType(t Type) (ref int) {
	d.Types = append(d.Types, t)
	return len(d.Types) - 1
}
// AddNamedType appends name to the document input and stores a new named
// type referencing it, returning the type's ref.
func (d *Document) AddNamedType(name []byte) (ref int) {
	nameRef := d.Input.AppendInputBytes(name)
	d.Types = append(d.Types, Type{
		TypeKind: TypeKindNamed,
		Name:     nameRef,
	})
	return len(d.Types) - 1
}
// AddNonNullNamedType stores a named type for name wrapped in a NonNull and
// returns the NonNull type's ref.
func (d *Document) AddNonNullNamedType(name []byte) (ref int) {
	namedRef := d.AddNamedType(name)
	d.Types = append(d.Types, Type{
		TypeKind: TypeKindNonNull,
		OfType:   namedRef,
	})
	return len(d.Types) - 1
}
// TypesAreEqualDeep reports whether two type refs denote structurally equal
// types: it unwraps matching List/NonNull layers and compares the innermost
// named types by name. A ref of -1 at any level makes the types unequal.
func (d *Document) TypesAreEqualDeep(left int, right int) bool {
	for {
		if left == -1 || right == -1 {
			return false
		}
		if d.Types[left].TypeKind != d.Types[right].TypeKind {
			return false
		}
		if d.Types[left].TypeKind == TypeKindNamed {
			leftName := d.TypeNameBytes(left)
			rightName := d.TypeNameBytes(right)
			return bytes.Equal(leftName, rightName)
		}
		left = d.Types[left].OfType
		right = d.Types[right].OfType
	}
}
// TypeIsScalar reports whether the type at ref resolves to a scalar type
// definition in the given schema definition document. NonNull wrappers are
// unwrapped; a List type returns false.
func (d *Document) TypeIsScalar(ref int, definition *Document) bool {
	switch d.Types[ref].TypeKind {
	case TypeKindNamed:
		typeName := d.TypeNameBytes(ref)
		node, _ := definition.Index.FirstNodeByNameBytes(typeName)
		return node.Kind == NodeKindScalarTypeDefinition
	case TypeKindNonNull:
		return d.TypeIsScalar(d.Types[ref].OfType, definition)
	}
	return false
}
// TypeIsEnum reports whether the type at ref resolves to an enum type
// definition in the given schema definition document. NonNull wrappers are
// unwrapped; a List type returns false. Mirrors TypeIsScalar.
func (d *Document) TypeIsEnum(ref int, definition *Document) bool {
	switch d.Types[ref].TypeKind {
	case TypeKindNamed:
		typeName := d.TypeNameBytes(ref)
		node, _ := definition.Index.FirstNodeByNameBytes(typeName)
		return node.Kind == NodeKindEnumTypeDefinition
	case TypeKindNonNull:
		return d.TypeIsEnum(d.Types[ref].OfType, definition)
	}
	return false
}
// TypeIsNonNull reports whether the outermost wrapper of the type at ref is
// NonNull.
func (d *Document) TypeIsNonNull(ref int) bool {
	return d.Types[ref].TypeKind == TypeKindNonNull
}
// TypeIsList reports whether the type at ref is a List, looking through any
// NonNull wrapper (e.g. [Int]! counts as a list).
func (d *Document) TypeIsList(ref int) bool {
	switch d.Types[ref].TypeKind {
	case TypeKindList:
		return true
	case TypeKindNonNull:
		return d.TypeIsList(d.Types[ref].OfType)
	default:
		return false
	}
}
// TypesAreCompatibleDeep reports whether two type refs are compatible for
// fragment spreading: structurally equal wrappers whose innermost named
// types are either identical, or related via object-implements-interface or
// object-is-union-member (in either direction). Looks nodes up in d's own
// index, so both types must be defined in this document.
func (d *Document) TypesAreCompatibleDeep(left int, right int) bool {
	for {
		if left == -1 || right == -1 {
			return false
		}
		if d.Types[left].TypeKind != d.Types[right].TypeKind {
			return false
		}
		if d.Types[left].TypeKind == TypeKindNamed {
			leftName := d.TypeNameBytes(left)
			rightName := d.TypeNameBytes(right)
			if bytes.Equal(leftName, rightName) {
				return true
			}
			leftNode, _ := d.Index.FirstNodeByNameBytes(leftName)
			rightNode, _ := d.Index.FirstNodeByNameBytes(rightName)
			// Same kind but different names can never be compatible.
			if leftNode.Kind == rightNode.Kind {
				return false
			}
			if leftNode.Kind == NodeKindInterfaceTypeDefinition && rightNode.Kind == NodeKindObjectTypeDefinition {
				return d.NodeImplementsInterface(rightNode, leftNode)
			}
			if leftNode.Kind == NodeKindObjectTypeDefinition && rightNode.Kind == NodeKindInterfaceTypeDefinition {
				return d.NodeImplementsInterface(leftNode, rightNode)
			}
			if leftNode.Kind == NodeKindUnionTypeDefinition && rightNode.Kind == NodeKindObjectTypeDefinition {
				return d.NodeIsUnionMember(rightNode, leftNode)
			}
			if leftNode.Kind == NodeKindObjectTypeDefinition && rightNode.Kind == NodeKindUnionTypeDefinition {
				return d.NodeIsUnionMember(leftNode, rightNode)
			}
			return false
		}
		left = d.Types[left].OfType
		right = d.Types[right].OfType
	}
}
// ResolveTypeNameBytes returns the name of the innermost named type under
// ref, unwrapping any List/NonNull layers.
func (d *Document) ResolveTypeNameBytes(ref int) ByteSlice {
	resolvedTypeRef := d.ResolveUnderlyingType(ref)
	graphqlType := d.Types[resolvedTypeRef]
	return d.Input.ByteSlice(graphqlType.Name)
}
// ResolveTypeNameString is ResolveTypeNameBytes as a zero-copy string; the
// result aliases the document input.
func (d *Document) ResolveTypeNameString(ref int) string {
	return unsafebytes.BytesToString(d.ResolveTypeNameBytes(ref))
}
// ResolveUnderlyingType follows OfType links through List/NonNull wrappers
// and returns the ref of the innermost named type.
func (d *Document) ResolveUnderlyingType(ref int) (typeRef int) {
	typeRef = ref
	graphqlType := d.Types[ref]
	for graphqlType.TypeKind != TypeKindNamed {
		typeRef = graphqlType.OfType
		graphqlType = d.Types[typeRef]
	}
	return
}
package specs
import (
"fmt"
"github.com/jexia/semaphore/pkg/specs/labels"
"github.com/jexia/semaphore/pkg/specs/metadata"
"github.com/jexia/semaphore/pkg/specs/types"
)
// Schemas represents a map string collection of properties, keyed by schema name.
type Schemas map[string]*Property
// Get attempts to return the given key from the objects collection; nil is
// returned when the key is absent.
func (objects Schemas) Get(key string) *Property {
	return objects[key]
}
// Append appends the given objects to the objects collection, overwriting
// entries that share a key.
func (objects Schemas) Append(arg Schemas) {
	for key, val := range arg {
		objects[key] = val
	}
}
// Expression provides positional information about where an expression was
// defined.
type Expression interface {
	Position() string
}
// Property represents a value property. Its type information lives in the
// embedded Template.
type Property struct {
	*metadata.Meta
	Name        string       `json:"name,omitempty" yaml:"name,omitempty"`               // Name represents the name of the given property
	Path        string       `json:"path,omitempty" yaml:"path,omitempty"`               // Path represents the full path to the given property
	Description string       `json:"description,omitempty" yaml:"description,omitempty"` // Description holds the description of the given property used to describe its use
	Position    int32        `json:"position,omitempty" yaml:"position,omitempty"`       // Position of the given property (in array/object)
	Options     Options      `json:"options,omitempty" yaml:"options,omitempty"`         // Options holds variable options used inside single modules or components
	Expr        Expression   `json:"expression,omitempty"`                               // Expr represents the position on where the given property is defined
	Raw         string       `json:"raw,omitempty"`                                      // Raw holds the raw template string used to define the given property
	Label       labels.Label `json:"label,omitempty" yaml:"label,omitempty"`             // Label label describes the usage of a given property ex: optional
	Template    `json:"template" yaml:"template"`
}
// DefaultValue returns the default value for the given property. Only scalar
// templates carry a default; message, repeated, enum and unset templates
// have no default and yield nil.
func (property *Property) DefaultValue() interface{} {
	if scalar := property.Template.Scalar; scalar != nil {
		return scalar.Default
	}
	return nil
}
// Empty checks if the property has any defined type; it is true while the
// template's type is still types.Unknown.
func (property *Property) Empty() bool {
	return property.Type() == types.Unknown
}
// Clone makes a deep clone of the given property, including its template.
// A nil receiver clones to an empty property.
func (property *Property) Clone() *Property {
	if property == nil {
		return &Property{}
	}
	result := property.ShallowClone()
	result.Template = property.Template.Clone()
	return result
}
// ShallowClone clones the given property but ignores the defined template and/or
// nested properties. This method is often used in cases where comparisons between
// the flow and schema are made and any defined properties are seen as defined values.
// A nil receiver clones to an empty property.
func (property *Property) ShallowClone() *Property {
	if property == nil {
		return &Property{}
	}
	return &Property{
		Meta:        property.Meta,
		Position:    property.Position,
		Description: property.Description,
		Name:        property.Name,
		Path:        property.Path,
		Expr:        property.Expr,
		Raw:         property.Raw,
		Options:     property.Options,
		Label:       property.Label,
		Template:    property.Template.ShallowClone(),
	}
}
// Compare checks the given property against the provided (expected) schema
// property: type, label and nesting must match, and the templates must be
// comparable recursively. A descriptive error is returned on the first
// mismatch; nil means the properties are compatible.
func (property *Property) Compare(expected *Property) error {
	if expected == nil {
		return fmt.Errorf("unable to check types for '%s' no schema given", property.Path)
	}
	if property.Type() != expected.Type() {
		return fmt.Errorf("cannot use type (%s) for '%s', expected (%s)", property.Type(), property.Path, expected.Type())
	}
	if property.Label != expected.Label {
		return fmt.Errorf("cannot use label (%s) for '%s', expected (%s)", property.Label, property.Path, expected.Label)
	}
	if !property.Empty() && expected.Empty() {
		return fmt.Errorf("property '%s' has a nested object but schema does not '%s'", property.Path, expected.Name)
	}
	if !expected.Empty() && property.Empty() {
		return fmt.Errorf("schema '%s' has a nested object but property does not '%s'", expected.Name, property.Path)
	}
	if err := property.Template.Compare(expected.Template); err != nil {
		return fmt.Errorf("nested schema mismatch under property '%s': %w", property.Path, err)
	}
	return nil
}
// Define ensures that all missing nested properties are defined by copying
// the position and template definition from the expected property.
func (property *Property) Define(expected *Property) {
	property.Position = expected.Position
	property.Template.Define(expected.Template)
}
// ParameterMap is the initial map of parameter names (keys) and their (templated) values (values)
type ParameterMap struct {
	*metadata.Meta
	DependsOn Dependencies         `json:"depends_on,omitempty"` // resources this map depends on
	Schema    string               `json:"schema,omitempty"`     // name of the schema backing the map
	Params    map[string]*Property `json:"params,omitempty"`     // named parameters
	Options   Options              `json:"options,omitempty"`    // module/component specific options
	Header    Header               `json:"header,omitempty"`     // header properties
	Property  *Property            `json:"property,omitempty"`   // root property of the map
	Stack     map[string]*Property `json:"stack,omitempty"`      // intermediate/computed properties
}
// Clone clones the given parameter map. Params, Header, Stack entries and the
// root Property are deep-cloned; Options values are copied shallowly. Note
// that DependsOn is not copied onto the clone. A nil receiver yields nil.
func (parameters *ParameterMap) Clone() *ParameterMap {
	if parameters == nil {
		return nil
	}
	result := &ParameterMap{
		Meta:     parameters.Meta,
		Schema:   parameters.Schema,
		Params:   make(map[string]*Property, len(parameters.Params)),
		Options:  make(Options, len(parameters.Options)),
		Header:   make(Header, len(parameters.Header)),
		Stack:    make(map[string]*Property, len(parameters.Stack)),
		Property: parameters.Property.Clone(),
	}
	for key, property := range parameters.Params {
		result.Params[key] = property.Clone()
	}
	for key, value := range parameters.Options {
		result.Options[key] = value
	}
	for key, value := range parameters.Header {
		result.Header[key] = value.Clone()
	}
	for key, property := range parameters.Stack {
		result.Stack[key] = property.Clone()
	}
	return result
}
package horizon
import (
"github.com/LdDl/viterbi"
"github.com/golang/geo/s2"
)
// ObservationResult Representation of gps measurement matched to G(v,e)
/*
	Observation - gps measurement itself
	MatchedEdge - edge in G(v,e) corresponding to current gps measurement
*/
type ObservationResult struct {
	Observation *GPSMeasurement // the raw GPS measurement
	MatchedEdge Edge            // graph edge the measurement was matched to
}
// MatcherResult Representation of map matching algorithm's output
/*
	Observations - set of ObservationResult
	Probability - probability got from Viterbi's algotithm
	Path - final path as s2.Polyline
*/
type MatcherResult struct {
	Observations []*ObservationResult // matched observation per input measurement
	Probability  float64              // probability of the chosen Viterbi path
	Path         s2.Polyline          // concatenated geometry of the matched route
}
// prepareResult Return MatcherResult for corresponding ViterbiPath, set of gps measurements and calculated routes' lengths
// It pairs each measurement with the edge of its Viterbi state and stitches
// the full polyline together: the first state's edge geometry, then, for
// each state transition onto a different edge, the geometry of every edge on
// the precomputed route (chRoutes) between the two states. The last edge of
// that route becomes the matched edge for the current measurement.
// Assumes len(vpath.Path) == len(gpsMeasurements) — TODO confirm at call site.
func (matcher *MapMatcher) prepareResult(vpath viterbi.ViterbiPath, gpsMeasurements GPSMeasurements, chRoutes map[int]map[int][]int64) MatcherResult {
	result := MatcherResult{
		Observations: make([]*ObservationResult, len(gpsMeasurements)),
		Probability:  vpath.Probability,
	}
	// Viterbi returns opaque states; narrow them back to *RoadPosition.
	rpPath := make(RoadPositions, len(vpath.Path))
	for i := range vpath.Path {
		rpPath[i] = vpath.Path[i].(*RoadPosition)
	}
	result.Observations[0] = &ObservationResult{
		gpsMeasurements[0],
		*rpPath[0].GraphEdge,
	}
	result.Path = append(result.Path, *rpPath[0].GraphEdge.Polyline...)
	for i := 1; i < len(rpPath); i++ {
		previousState := rpPath[i-1]
		currentState := rpPath[i]
		// Consecutive measurements on the same edge add no new geometry.
		if previousState.GraphEdge.ID == currentState.GraphEdge.ID {
			result.Observations[i] = &ObservationResult{
				gpsMeasurements[i],
				*previousState.GraphEdge,
			}
			continue
		}
		// Walk the precomputed route between the two states, appending each
		// traversed edge's geometry.
		path := chRoutes[previousState.RoadPositionID][currentState.RoadPositionID]
		for e := 1; e < len(path); e++ {
			sourceVertex := path[e-1]
			targetVertex := path[e]
			edge := matcher.engine.edges[sourceVertex][targetVertex]
			result.Path = append(result.Path, *edge.Polyline...)
			if e == len(path)-1 {
				result.Observations[i] = &ObservationResult{
					gpsMeasurements[i],
					*edge,
				}
			}
		}
	}
	return result
}
package elarr
import "reflect"
// LIndex is the generic entry point; it aliases LIndexInter so callers can
// search a []interface{} with any element type.
var LIndex = LIndexInter
// LIndexInter returns the index of the last element of v that is deeply
// equal (reflect.DeepEqual) to item, or -1 when no element matches.
func LIndexInter(v []interface{}, item interface{}) int {
	last := -1
	for i, el := range v {
		if reflect.DeepEqual(el, item) {
			last = i
		}
	}
	return last
}
// LIndexStr returns the index of the last occurrence of item in v, or -1
// when item is not present.
func LIndexStr(v []string, item string) int {
	last := -1
	for i, s := range v {
		if s == item {
			last = i
		}
	}
	return last
}
// LIndexInt returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexInt(v []int, item int) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexInt8 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexInt8(v []int8, item int8) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexInt16 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexInt16(v []int16, item int16) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexInt32 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexInt32(v []int32, item int32) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexInt64 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexInt64(v []int64, item int64) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexUint returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexUint(v []uint, item uint) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexUint8 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexUint8(v []uint8, item uint8) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexUint16 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexUint16(v []uint16, item uint16) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexUint32 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexUint32(v []uint32, item uint32) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexUint64 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexUint64(v []uint64, item uint64) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexRune returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexRune(v []rune, item rune) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexByte returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexByte(v []byte, item byte) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexFloat32 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexFloat32(v []float32, item float32) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexFloat64 returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexFloat64(v []float64, item float64) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
// LIndexBool returns the index of the last occurrence of item in v,
// or -1 when v does not contain item.
func LIndexBool(v []bool, item bool) int {
	for i := len(v); i > 0; i-- {
		if v[i-1] == item {
			return i - 1
		}
	}
	return -1
}
package matroska
import "honnef.co/go/xcapture/internal/matroska/ebml"
// Constructors for top-level Segment, SeekHead and segment Info elements.
// The hex literal in each constructor is presumably the element's EBML ID
// (matching the Matroska specification) — ebml.Element's field names are not
// visible in this file, so confirm against the ebml package.
func Segment(c ...ebml.Object) ebml.Element { return ebml.Element{0x18538067, c} }
func SeekHead(c ...ebml.Object) ebml.Element { return ebml.Element{0x114D9B74, c} }
func Seek(c ...ebml.Object) ebml.Element { return ebml.Element{0x4DBB, c} }
func SeekID(c ...ebml.Object) ebml.Element { return ebml.Element{0x53AB, c} }
func SeekPosition(c ...ebml.Object) ebml.Element { return ebml.Element{0x53AC, c} }
func Info(c ...ebml.Object) ebml.Element { return ebml.Element{0x1549A966, c} }
func SegmentUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x73A4, c} }
func SegmentFilename(c ...ebml.Object) ebml.Element { return ebml.Element{0x7384, c} }
func PrevUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x3CB923, c} }
func PrevFilename(c ...ebml.Object) ebml.Element { return ebml.Element{0x3C83AB, c} }
func NextUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x3EB923, c} }
func NextFilename(c ...ebml.Object) ebml.Element { return ebml.Element{0x3E83BB, c} }
func SegmentFamily(c ...ebml.Object) ebml.Element { return ebml.Element{0x4444, c} }
func ChapterTranslate(c ...ebml.Object) ebml.Element { return ebml.Element{0x6924, c} }
func ChapterTranslateEditionUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x69FC, c} }
func ChapterTranslateCodec(c ...ebml.Object) ebml.Element { return ebml.Element{0x69BF, c} }
func ChapterTranslateID(c ...ebml.Object) ebml.Element { return ebml.Element{0x69A5, c} }
func TimecodeScale(c ...ebml.Object) ebml.Element { return ebml.Element{0x2AD7B1, c} }
func Duration(c ...ebml.Object) ebml.Element { return ebml.Element{0x4489, c} }
func DateUTC(c ...ebml.Object) ebml.Element { return ebml.Element{0x4461, c} }
func Title(c ...ebml.Object) ebml.Element { return ebml.Element{0x7BA9, c} }
func MuxingApp(c ...ebml.Object) ebml.Element { return ebml.Element{0x4D80, c} }
func WritingApp(c ...ebml.Object) ebml.Element { return ebml.Element{0x5741, c} }
// Constructors for Cluster and block-level elements (frames and their metadata).
func Cluster(c ...ebml.Object) ebml.Element { return ebml.Element{0x1F43B675, c} }
func Timecode(c ...ebml.Object) ebml.Element { return ebml.Element{0xE7, c} }
func SilentTracks(c ...ebml.Object) ebml.Element { return ebml.Element{0x5854, c} }
func SilentTrackNumber(c ...ebml.Object) ebml.Element { return ebml.Element{0x58D7, c} }
func Position(c ...ebml.Object) ebml.Element { return ebml.Element{0xA7, c} }
func PrevSize(c ...ebml.Object) ebml.Element { return ebml.Element{0xAB, c} }
func SimpleBlock(c ...ebml.Object) ebml.Element { return ebml.Element{0xA3, c} }
func BlockGroup(c ...ebml.Object) ebml.Element { return ebml.Element{0xA0, c} }
func Block(c ...ebml.Object) ebml.Element { return ebml.Element{0xA1, c} }
func BlockVirtual(c ...ebml.Object) ebml.Element { return ebml.Element{0xA2, c} }
func BlockAdditions(c ...ebml.Object) ebml.Element { return ebml.Element{0x75A1, c} }
func BlockMore(c ...ebml.Object) ebml.Element { return ebml.Element{0xA6, c} }
func BlockAddID(c ...ebml.Object) ebml.Element { return ebml.Element{0xEE, c} }
func BlockAdditional(c ...ebml.Object) ebml.Element { return ebml.Element{0xA5, c} }
func BlockDuration(c ...ebml.Object) ebml.Element { return ebml.Element{0x9B, c} }
func ReferencePriority(c ...ebml.Object) ebml.Element { return ebml.Element{0xFA, c} }
func ReferenceBlock(c ...ebml.Object) ebml.Element { return ebml.Element{0xFB, c} }
func ReferenceVirtual(c ...ebml.Object) ebml.Element { return ebml.Element{0xFD, c} }
func CodecState(c ...ebml.Object) ebml.Element { return ebml.Element{0xA4, c} }
func DiscardPadding(c ...ebml.Object) ebml.Element { return ebml.Element{0x75A2, c} }
func Slices(c ...ebml.Object) ebml.Element { return ebml.Element{0x8E, c} }
func TimeSlice(c ...ebml.Object) ebml.Element { return ebml.Element{0xE8, c} }
func LaceNumber(c ...ebml.Object) ebml.Element { return ebml.Element{0xCC, c} }
func FrameNumber(c ...ebml.Object) ebml.Element { return ebml.Element{0xCD, c} }
func BlockAdditionID(c ...ebml.Object) ebml.Element { return ebml.Element{0xCB, c} }
func Delay(c ...ebml.Object) ebml.Element { return ebml.Element{0xCE, c} }
func SliceDuration(c ...ebml.Object) ebml.Element { return ebml.Element{0xCF, c} }
func ReferenceFrame(c ...ebml.Object) ebml.Element { return ebml.Element{0xC8, c} }
func ReferenceOffset(c ...ebml.Object) ebml.Element { return ebml.Element{0xC9, c} }
func ReferenceTimeCode(c ...ebml.Object) ebml.Element { return ebml.Element{0xCA, c} }
func EncryptedBlock(c ...ebml.Object) ebml.Element { return ebml.Element{0xAF, c} }
// Constructors for Tracks and per-track (TrackEntry) description elements.
func Tracks(c ...ebml.Object) ebml.Element { return ebml.Element{0x1654AE6B, c} }
func TrackEntry(c ...ebml.Object) ebml.Element { return ebml.Element{0xAE, c} }
func TrackNumber(c ...ebml.Object) ebml.Element { return ebml.Element{0xD7, c} }
func TrackUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x73C5, c} }
func TrackType(c ...ebml.Object) ebml.Element { return ebml.Element{0x83, c} }
func FlagEnabled(c ...ebml.Object) ebml.Element { return ebml.Element{0xB9, c} }
func FlagDefault(c ...ebml.Object) ebml.Element { return ebml.Element{0x88, c} }
func FlagForced(c ...ebml.Object) ebml.Element { return ebml.Element{0x55AA, c} }
func FlagLacing(c ...ebml.Object) ebml.Element { return ebml.Element{0x9C, c} }
func MinCache(c ...ebml.Object) ebml.Element { return ebml.Element{0x6DE7, c} }
func MaxCache(c ...ebml.Object) ebml.Element { return ebml.Element{0x6DF8, c} }
func DefaultDuration(c ...ebml.Object) ebml.Element { return ebml.Element{0x23E383, c} }
func DefaultDecodedFieldDuration(c ...ebml.Object) ebml.Element { return ebml.Element{0x234E7A, c} }
func TrackTimecodeScale(c ...ebml.Object) ebml.Element { return ebml.Element{0x23314F, c} }
func TrackOffset(c ...ebml.Object) ebml.Element { return ebml.Element{0x537F, c} }
func MaxBlockAdditionID(c ...ebml.Object) ebml.Element { return ebml.Element{0x55EE, c} }
func Name(c ...ebml.Object) ebml.Element { return ebml.Element{0x536E, c} }
func Language(c ...ebml.Object) ebml.Element { return ebml.Element{0x22B59C, c} }
func CodecID(c ...ebml.Object) ebml.Element { return ebml.Element{0x86, c} }
func CodecPrivate(c ...ebml.Object) ebml.Element { return ebml.Element{0x63A2, c} }
func CodecName(c ...ebml.Object) ebml.Element { return ebml.Element{0x258688, c} }
func AttachmentLink(c ...ebml.Object) ebml.Element { return ebml.Element{0x7446, c} }
func CodecSettings(c ...ebml.Object) ebml.Element { return ebml.Element{0x3A9697, c} }
func CodecInfoURL(c ...ebml.Object) ebml.Element { return ebml.Element{0x3B4040, c} }
func CodecDownloadURL(c ...ebml.Object) ebml.Element { return ebml.Element{0x26B240, c} }
func CodecDecodeAll(c ...ebml.Object) ebml.Element { return ebml.Element{0xAA, c} }
func TrackOverlay(c ...ebml.Object) ebml.Element { return ebml.Element{0x6FAB, c} }
func CodecDelay(c ...ebml.Object) ebml.Element { return ebml.Element{0x56AA, c} }
func SeekPreRoll(c ...ebml.Object) ebml.Element { return ebml.Element{0x56BB, c} }
func TrackTranslate(c ...ebml.Object) ebml.Element { return ebml.Element{0x6624, c} }
func TrackTranslateEditionUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x66FC, c} }
func TrackTranslateCodec(c ...ebml.Object) ebml.Element { return ebml.Element{0x66BF, c} }
func TrackTranslateTrackID(c ...ebml.Object) ebml.Element { return ebml.Element{0x66A5, c} }
// Constructors for Video track settings elements.
func Video(c ...ebml.Object) ebml.Element { return ebml.Element{0xE0, c} }
func FlagInterlaced(c ...ebml.Object) ebml.Element { return ebml.Element{0x9A, c} }
func FieldOrder(c ...ebml.Object) ebml.Element { return ebml.Element{0x9D, c} }
func StereoMode(c ...ebml.Object) ebml.Element { return ebml.Element{0x53B8, c} }
func AlphaMode(c ...ebml.Object) ebml.Element { return ebml.Element{0x53C0, c} }
func OldStereoMode(c ...ebml.Object) ebml.Element { return ebml.Element{0x53B9, c} }
func PixelWidth(c ...ebml.Object) ebml.Element { return ebml.Element{0xB0, c} }
func PixelHeight(c ...ebml.Object) ebml.Element { return ebml.Element{0xBA, c} }
func PixelCropBottom(c ...ebml.Object) ebml.Element { return ebml.Element{0x54AA, c} }
func PixelCropTop(c ...ebml.Object) ebml.Element { return ebml.Element{0x54BB, c} }
func PixelCropLeft(c ...ebml.Object) ebml.Element { return ebml.Element{0x54CC, c} }
func PixelCropRight(c ...ebml.Object) ebml.Element { return ebml.Element{0x54DD, c} }
func DisplayWidth(c ...ebml.Object) ebml.Element { return ebml.Element{0x54B0, c} }
func DisplayHeight(c ...ebml.Object) ebml.Element { return ebml.Element{0x54BA, c} }
func DisplayUnit(c ...ebml.Object) ebml.Element { return ebml.Element{0x54B2, c} }
func AspectRatioType(c ...ebml.Object) ebml.Element { return ebml.Element{0x54B3, c} }
func ColourSpace(c ...ebml.Object) ebml.Element { return ebml.Element{0x2EB524, c} }
func GammaValue(c ...ebml.Object) ebml.Element { return ebml.Element{0x2FB523, c} }
func FrameRate(c ...ebml.Object) ebml.Element { return ebml.Element{0x2383E3, c} }
// Constructors for Colour metadata (including HDR mastering metadata) elements.
func Colour(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B0, c} }
func MatrixCoefficients(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B1, c} }
func BitsPerChannel(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B2, c} }
func ChromaSubsamplingHorz(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B3, c} }
func ChromaSubsamplingVert(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B4, c} }
func CbSubsamplingHorz(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B5, c} }
func CbSubsamplingVert(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B6, c} }
func ChromaSitingHorz(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B7, c} }
func ChromaSitingVert(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B8, c} }
func Range(c ...ebml.Object) ebml.Element { return ebml.Element{0x55B9, c} }
func TransferCharacteristics(c ...ebml.Object) ebml.Element { return ebml.Element{0x55BA, c} }
func Primaries(c ...ebml.Object) ebml.Element { return ebml.Element{0x55BB, c} }
func MaxCLL(c ...ebml.Object) ebml.Element { return ebml.Element{0x55BC, c} }
func MaxFALL(c ...ebml.Object) ebml.Element { return ebml.Element{0x55BD, c} }
func MasteringMetadata(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D0, c} }
func PrimaryRChromaticityX(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D1, c} }
func PrimaryRChromaticityY(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D2, c} }
func PrimaryGChromaticityX(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D3, c} }
func PrimaryGChromaticityY(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D4, c} }
func PrimaryBChromaticityX(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D5, c} }
func PrimaryBChromaticityY(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D6, c} }
func WhitePointChromaticityX(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D7, c} }
func WhitePointChromaticityY(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D8, c} }
func LuminanceMax(c ...ebml.Object) ebml.Element { return ebml.Element{0x55D9, c} }
func LuminanceMin(c ...ebml.Object) ebml.Element { return ebml.Element{0x55DA, c} }
// Constructors for Audio track settings elements.
func Audio(c ...ebml.Object) ebml.Element { return ebml.Element{0xE1, c} }
func SamplingFrequency(c ...ebml.Object) ebml.Element { return ebml.Element{0xB5, c} }
func OutputSamplingFrequency(c ...ebml.Object) ebml.Element { return ebml.Element{0x78B5, c} }
func Channels(c ...ebml.Object) ebml.Element { return ebml.Element{0x9F, c} }
func ChannelPositions(c ...ebml.Object) ebml.Element { return ebml.Element{0x7D7B, c} }
func BitDepth(c ...ebml.Object) ebml.Element { return ebml.Element{0x6264, c} }
// Constructors for TrackOperation, trick-track and ContentEncodings
// (compression/encryption) elements.
func TrackOperation(c ...ebml.Object) ebml.Element { return ebml.Element{0xE2, c} }
func TrackCombinePlanes(c ...ebml.Object) ebml.Element { return ebml.Element{0xE3, c} }
func TrackPlane(c ...ebml.Object) ebml.Element { return ebml.Element{0xE4, c} }
func TrackPlaneUID(c ...ebml.Object) ebml.Element { return ebml.Element{0xE5, c} }
func TrackPlaneType(c ...ebml.Object) ebml.Element { return ebml.Element{0xE6, c} }
func TrackJoinBlocks(c ...ebml.Object) ebml.Element { return ebml.Element{0xE9, c} }
func TrackJoinUID(c ...ebml.Object) ebml.Element { return ebml.Element{0xED, c} }
func TrickTrackUID(c ...ebml.Object) ebml.Element { return ebml.Element{0xC0, c} }
func TrickTrackSegmentUID(c ...ebml.Object) ebml.Element { return ebml.Element{0xC1, c} }
func TrickTrackFlag(c ...ebml.Object) ebml.Element { return ebml.Element{0xC6, c} }
func TrickMasterTrackUID(c ...ebml.Object) ebml.Element { return ebml.Element{0xC7, c} }
func TrickMasterTrackSegmentUID(c ...ebml.Object) ebml.Element { return ebml.Element{0xC4, c} }
func ContentEncodings(c ...ebml.Object) ebml.Element { return ebml.Element{0x6D80, c} }
func ContentEncoding(c ...ebml.Object) ebml.Element { return ebml.Element{0x6240, c} }
func ContentEncodingOrder(c ...ebml.Object) ebml.Element { return ebml.Element{0x5031, c} }
func ContentEncodingScope(c ...ebml.Object) ebml.Element { return ebml.Element{0x5032, c} }
func ContentEncodingType(c ...ebml.Object) ebml.Element { return ebml.Element{0x5033, c} }
func ContentCompression(c ...ebml.Object) ebml.Element { return ebml.Element{0x5034, c} }
func ContentCompAlgo(c ...ebml.Object) ebml.Element { return ebml.Element{0x4254, c} }
func ContentCompSettings(c ...ebml.Object) ebml.Element { return ebml.Element{0x4255, c} }
func ContentEncryption(c ...ebml.Object) ebml.Element { return ebml.Element{0x5035, c} }
func ContentEncAlgo(c ...ebml.Object) ebml.Element { return ebml.Element{0x47E1, c} }
func ContentEncKeyID(c ...ebml.Object) ebml.Element { return ebml.Element{0x47E2, c} }
func ContentSignature(c ...ebml.Object) ebml.Element { return ebml.Element{0x47E3, c} }
func ContentSigKeyID(c ...ebml.Object) ebml.Element { return ebml.Element{0x47E4, c} }
func ContentSigAlgo(c ...ebml.Object) ebml.Element { return ebml.Element{0x47E5, c} }
func ContentSigHashAlgo(c ...ebml.Object) ebml.Element { return ebml.Element{0x47E6, c} }
// Constructors for Cues (seek index) and Attachments elements.
func Cues(c ...ebml.Object) ebml.Element { return ebml.Element{0x1C53BB6B, c} }
func CuePoint(c ...ebml.Object) ebml.Element { return ebml.Element{0xBB, c} }
func CueTime(c ...ebml.Object) ebml.Element { return ebml.Element{0xB3, c} }
func CueTrackPositions(c ...ebml.Object) ebml.Element { return ebml.Element{0xB7, c} }
func CueTrack(c ...ebml.Object) ebml.Element { return ebml.Element{0xF7, c} }
func CueClusterPosition(c ...ebml.Object) ebml.Element { return ebml.Element{0xF1, c} }
func CueRelativePosition(c ...ebml.Object) ebml.Element { return ebml.Element{0xF0, c} }
func CueDuration(c ...ebml.Object) ebml.Element { return ebml.Element{0xB2, c} }
func CueBlockNumber(c ...ebml.Object) ebml.Element { return ebml.Element{0x5378, c} }
func CueCodecState(c ...ebml.Object) ebml.Element { return ebml.Element{0xEA, c} }
func CueReference(c ...ebml.Object) ebml.Element { return ebml.Element{0xDB, c} }
func CueRefTime(c ...ebml.Object) ebml.Element { return ebml.Element{0x96, c} }
func CueRefCluster(c ...ebml.Object) ebml.Element { return ebml.Element{0x97, c} }
func CueRefNumber(c ...ebml.Object) ebml.Element { return ebml.Element{0x535F, c} }
func CueRefCodecState(c ...ebml.Object) ebml.Element { return ebml.Element{0xEB, c} }
func Attachments(c ...ebml.Object) ebml.Element { return ebml.Element{0x1941A469, c} }
func AttachedFile(c ...ebml.Object) ebml.Element { return ebml.Element{0x61A7, c} }
func FileDescription(c ...ebml.Object) ebml.Element { return ebml.Element{0x467E, c} }
func FileName(c ...ebml.Object) ebml.Element { return ebml.Element{0x466E, c} }
func FileMimeType(c ...ebml.Object) ebml.Element { return ebml.Element{0x4660, c} }
func FileData(c ...ebml.Object) ebml.Element { return ebml.Element{0x465C, c} }
func FileUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x46AE, c} }
func FileReferral(c ...ebml.Object) ebml.Element { return ebml.Element{0x4675, c} }
func FileUsedStartTime(c ...ebml.Object) ebml.Element { return ebml.Element{0x4661, c} }
func FileUsedEndTime(c ...ebml.Object) ebml.Element { return ebml.Element{0x4662, c} }
// Constructors for Chapters elements (editions, chapter atoms and
// chapter-process commands).
func Chapters(c ...ebml.Object) ebml.Element { return ebml.Element{0x1043A770, c} }
func EditionEntry(c ...ebml.Object) ebml.Element { return ebml.Element{0x45B9, c} }
func EditionUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x45BC, c} }
func EditionFlagHidden(c ...ebml.Object) ebml.Element { return ebml.Element{0x45BD, c} }
func EditionFlagDefault(c ...ebml.Object) ebml.Element { return ebml.Element{0x45DB, c} }
func EditionFlagOrdered(c ...ebml.Object) ebml.Element { return ebml.Element{0x45DD, c} }
func ChapterAtom(c ...ebml.Object) ebml.Element { return ebml.Element{0xB6, c} }
func ChapterUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x73C4, c} }
func ChapterStringUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x5654, c} }
func ChapterTimeStart(c ...ebml.Object) ebml.Element { return ebml.Element{0x91, c} }
func ChapterTimeEnd(c ...ebml.Object) ebml.Element { return ebml.Element{0x92, c} }
func ChapterFlagHidden(c ...ebml.Object) ebml.Element { return ebml.Element{0x98, c} }
func ChapterFlagEnabled(c ...ebml.Object) ebml.Element { return ebml.Element{0x4598, c} }
func ChapterSegmentUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x6E67, c} }
func ChapterSegmentEditionUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x6EBC, c} }
func ChapterPhysicalEquiv(c ...ebml.Object) ebml.Element { return ebml.Element{0x63C3, c} }
func ChapterTrack(c ...ebml.Object) ebml.Element { return ebml.Element{0x8F, c} }
func ChapterTrackNumber(c ...ebml.Object) ebml.Element { return ebml.Element{0x89, c} }
func ChapterDisplay(c ...ebml.Object) ebml.Element { return ebml.Element{0x80, c} }
func ChapString(c ...ebml.Object) ebml.Element { return ebml.Element{0x85, c} }
func ChapLanguage(c ...ebml.Object) ebml.Element { return ebml.Element{0x437C, c} }
func ChapCountry(c ...ebml.Object) ebml.Element { return ebml.Element{0x437E, c} }
func ChapProcess(c ...ebml.Object) ebml.Element { return ebml.Element{0x6944, c} }
func ChapProcessCodecID(c ...ebml.Object) ebml.Element { return ebml.Element{0x6955, c} }
func ChapProcessPrivate(c ...ebml.Object) ebml.Element { return ebml.Element{0x450D, c} }
func ChapProcessCommand(c ...ebml.Object) ebml.Element { return ebml.Element{0x6911, c} }
func ChapProcessTime(c ...ebml.Object) ebml.Element { return ebml.Element{0x6922, c} }
func ChapProcessData(c ...ebml.Object) ebml.Element { return ebml.Element{0x6933, c} }
// Constructors for Tags (metadata) elements.
func Tags(c ...ebml.Object) ebml.Element { return ebml.Element{0x1254C367, c} }
func Tag(c ...ebml.Object) ebml.Element { return ebml.Element{0x7373, c} }
func Targets(c ...ebml.Object) ebml.Element { return ebml.Element{0x63C0, c} }
func TargetTypeValue(c ...ebml.Object) ebml.Element { return ebml.Element{0x68CA, c} }
func TargetType(c ...ebml.Object) ebml.Element { return ebml.Element{0x63CA, c} }
func TagTrackUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x63C5, c} }
func TagEditionUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x63C9, c} }
func TagChapterUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x63C4, c} }
func TagAttachmentUID(c ...ebml.Object) ebml.Element { return ebml.Element{0x63C6, c} }
func SimpleTag(c ...ebml.Object) ebml.Element { return ebml.Element{0x67C8, c} }
func TagName(c ...ebml.Object) ebml.Element { return ebml.Element{0x45A3, c} }
func TagLanguage(c ...ebml.Object) ebml.Element { return ebml.Element{0x447A, c} }
func TagDefault(c ...ebml.Object) ebml.Element { return ebml.Element{0x4484, c} }
func TagString(c ...ebml.Object) ebml.Element { return ebml.Element{0x4487, c} }
func TagBinary(c ...ebml.Object) ebml.Element { return ebml.Element{0x4485, c} }
package school
import (
"strings"
"unicode"
"github.com/jinzhu/gorm"
"github.com/freitzzz/iped/model/canteen"
"github.com/freitzzz/iped/model/customerror"
)
// School is a model that offers canteens
// A school has a unique acronym, a descriptive name and needs to offer at least one canteen
// A UML overview of this model can be found at https://github.com/freitzzz/iped-documentation/wiki/Architecture#models-structure
type School struct {
	gorm.Model // embedded GORM base model
	// Acronym uniquely identifies the school (UNIQUE constraint in the database)
	Acronym string `gorm:"UNIQUE"`
	// Name is the school's descriptive name
	Name string
	// CanteensSlice holds the canteens offered by the school, persisted in the "canteens" column
	CanteensSlice []canteen.Canteen `gorm:"column:canteens"`
}
// New initializes a school model from its acronym, name and canteens.
// A FieldError is returned if the school acronym is empty or contains spaces,
// the name is empty, no canteens were provided, or a duplicated canteen was found.
func New(Acronym string, Name string, Canteens []canteen.Canteen) (School, *customerror.FieldError) {
	school := School{gorm.Model{}, Acronym, Name, Canteens}
	// Run every invariant check in order, stopping at the first violation.
	checks := []func() *customerror.FieldError{
		func() *customerror.FieldError { return grantSchoolAcronymIsNotEmpty(Acronym) },
		func() *customerror.FieldError { return grantSchoolAcronymDoesNotHaveSpacesBetweenLetters(Acronym) },
		func() *customerror.FieldError { return grantSchoolNameIsNotEmpty(Name) },
		func() *customerror.FieldError { return grantAtLeastOneCanteenIsProvided(Canteens) },
		func() *customerror.FieldError { return grantNoDuplicatedCanteensExist(Canteens) },
	}
	for _, check := range checks {
		if err := check(); err != nil {
			return school, err
		}
	}
	return school, nil
}
// Canteens returns a defensive copy of the canteens provided by the school,
// so callers cannot mutate the school's internal slice.
func (school School) Canteens() []canteen.Canteen {
	snapshot := make([]canteen.Canteen, len(school.CanteensSlice))
	copy(snapshot, school.CanteensSlice)
	return snapshot
}
// AddCanteen appends a canteen to the ones already provided by the school.
// A FieldError is returned, and the school left untouched, when an equal
// canteen already exists.
func (school *School) AddCanteen(canteen canteen.Canteen) *customerror.FieldError {
	extended := append(school.Canteens(), canteen)
	if err := grantNoDuplicatedCanteensExist(extended); err != nil {
		return err
	}
	school.CanteensSlice = extended
	return nil
}
// grantSchoolAcronymIsNotEmpty yields a FieldError when the given acronym is
// empty or made only of white space; nil otherwise.
func grantSchoolAcronymIsNotEmpty(acronym string) *customerror.FieldError {
	if strings.TrimSpace(acronym) != "" {
		return nil
	}
	return &customerror.FieldError{Field: "acronym", Model: "school", Explanation: "school acronym cannot be an empty string"}
}
// grantSchoolAcronymDoesNotHaveSpacesBetweenLetters grants that a school
// acronym contains no white space characters, returning a FieldError otherwise.
//
// Fix: the previous implementation walked the string byte by byte and converted
// each byte to a rune. On multi-byte UTF-8 acronyms that misclassifies
// continuation bytes (e.g. byte 0x85 reads as U+0085 NEL, a space) and misses
// genuinely multi-byte spaces such as U+2000. Ranging over the string decodes
// whole runes, so unicode.IsSpace is applied to real characters. It also
// replaces the sentinel "-2" loop-escape hack with an early return.
func grantSchoolAcronymDoesNotHaveSpacesBetweenLetters(acronym string) *customerror.FieldError {
	for _, r := range acronym {
		if unicode.IsSpace(r) {
			return &customerror.FieldError{Field: "acronym", Model: "school", Explanation: "school acronym cannot have spaces between letters"}
		}
	}
	return nil
}
// grantSchoolNameIsNotEmpty yields a FieldError when the given name is empty
// or made only of white space; nil otherwise.
func grantSchoolNameIsNotEmpty(name string) *customerror.FieldError {
	if strings.TrimSpace(name) != "" {
		return nil
	}
	return &customerror.FieldError{Field: "name", Model: "school", Explanation: "school name cannot be an empty string"}
}
// grantAtLeastOneCanteenIsProvided grants that the given slice holds at least
// one canteen. A FieldError is returned when the slice is nil or empty.
func grantAtLeastOneCanteenIsProvided(canteens []canteen.Canteen) *customerror.FieldError {
	// len of a nil slice is 0, so the previous explicit nil check was redundant.
	if len(canteens) == 0 {
		return &customerror.FieldError{Field: "canteens", Model: "school", Explanation: "school requires at least one canteen"}
	}
	return nil
}
// This function grants that all canteen given in a slice are unique
// If a canteen proves equality to any other canteen in the slice, a FieldError is returned
func grantNoDuplicatedCanteensExist(canteens []canteen.Canteen) *customerror.FieldError {
var err *customerror.FieldError
unique := true
canteensLength := len(canteens)
i := 0
for i < canteensLength {
j := i + 1
for j < canteensLength {
unique = !canteens[i].Equals(canteens[j])
j++
if !unique {
i = canteensLength
j = i
}
}
i++
}
if !unique {
err = &customerror.FieldError{Field: "canteens", Model: "school", Explanation: "school cannot have canteens with the same name"}
}
return err
} | model/school/school.go | 0.697094 | 0.412294 | school.go | starcoder |
package announcements
// Kind records the kind of an announcement.
type Kind string

// String satisfies fmt.Stringer by exposing the kind's underlying string value.
func (k Kind) String() string {
	return string(k)
}
const (
// ProxyUpdate is the event kind used to trigger an update to subscribed proxies
ProxyUpdate Kind = "proxy-update"
// PodAdded is the type of announcement emitted when we observe an addition of a Kubernetes Pod
PodAdded Kind = "pod-added"
// PodDeleted the type of announcement emitted when we observe the deletion of a Kubernetes Pod
PodDeleted Kind = "pod-deleted"
// PodUpdated is the type of announcement emitted when we observe an update to a Kubernetes Pod
PodUpdated Kind = "pod-updated"
// ---
// EndpointAdded is the type of announcement emitted when we observe an addition of a Kubernetes Endpoint
EndpointAdded Kind = "endpoint-added"
// EndpointDeleted the type of announcement emitted when we observe the deletion of a Kubernetes Endpoint
EndpointDeleted Kind = "endpoint-deleted"
// EndpointUpdated is the type of announcement emitted when we observe an update to a Kubernetes Endpoint
EndpointUpdated Kind = "endpoint-updated"
// ---
// NamespaceAdded is the type of announcement emitted when we observe an addition of a Kubernetes Namespace
NamespaceAdded Kind = "namespace-added"
// NamespaceDeleted the type of announcement emitted when we observe the deletion of a Kubernetes Namespace
NamespaceDeleted Kind = "namespace-deleted"
// NamespaceUpdated is the type of announcement emitted when we observe an update to a Kubernetes Namespace
NamespaceUpdated Kind = "namespace-updated"
// ---
// ServiceAdded is the type of announcement emitted when we observe an addition of a Kubernetes Service
ServiceAdded Kind = "service-added"
// ServiceDeleted the type of announcement emitted when we observe the deletion of a Kubernetes Service
ServiceDeleted Kind = "service-deleted"
// ServiceUpdated is the type of announcement emitted when we observe an update to a Kubernetes Service
ServiceUpdated Kind = "service-updated"
// ---
// ServiceAccountAdded is the type of announcement emitted when we observe an addition of a Kubernetes Service Account
ServiceAccountAdded Kind = "serviceaccount-added"
// ServiceAccountDeleted the type of announcement emitted when we observe the deletion of a Kubernetes Service Account
ServiceAccountDeleted Kind = "serviceaccount-deleted"
// ServiceAccountUpdated is the type of announcement emitted when we observe an update to a Kubernetes Service
ServiceAccountUpdated Kind = "serviceaccount-updated"
// ---
// TrafficSplitAdded is the type of announcement emitted when we observe an addition of a Kubernetes TrafficSplit
TrafficSplitAdded Kind = "trafficsplit-added"
// TrafficSplitDeleted the type of announcement emitted when we observe the deletion of a Kubernetes TrafficSplit
TrafficSplitDeleted Kind = "trafficsplit-deleted"
// TrafficSplitUpdated is the type of announcement emitted when we observe an update to a Kubernetes TrafficSplit
TrafficSplitUpdated Kind = "trafficsplit-updated"
// ---
// RouteGroupAdded is the type of announcement emitted when we observe an addition of a Kubernetes RouteGroup
RouteGroupAdded Kind = "routegroup-added"
// RouteGroupDeleted the type of announcement emitted when we observe the deletion of a Kubernetes RouteGroup
RouteGroupDeleted Kind = "routegroup-deleted"
// RouteGroupUpdated is the type of announcement emitted when we observe an update to a Kubernetes RouteGroup
RouteGroupUpdated Kind = "routegroup-updated"
// ---
// TCPRouteAdded is the type of announcement emitted when we observe an addition of a Kubernetes TCPRoute
TCPRouteAdded Kind = "tcproute-added"
// TCPRouteDeleted the type of announcement emitted when we observe the deletion of a Kubernetes TCPRoute
TCPRouteDeleted Kind = "tcproute-deleted"
// TCPRouteUpdated is the type of announcement emitted when we observe an update to a Kubernetes TCPRoute
TCPRouteUpdated Kind = "tcproute-updated"
// ---
// TrafficTargetAdded is the type of announcement emitted when we observe an addition of a Kubernetes TrafficTarget
TrafficTargetAdded Kind = "traffictarget-added"
// TrafficTargetDeleted the type of announcement emitted when we observe the deletion of a Kubernetes TrafficTarget
TrafficTargetDeleted Kind = "traffictarget-deleted"
// TrafficTargetUpdated is the type of announcement emitted when we observe an update to a Kubernetes TrafficTarget
TrafficTargetUpdated Kind = "traffictarget-updated"
// ---
// IngressAdded is the type of announcement emitted when we observe an addition of a Kubernetes Ingress
IngressAdded Kind = "ingress-added"
// IngressDeleted the type of announcement emitted when we observe the deletion of a Kubernetes Ingress
IngressDeleted Kind = "ingress-deleted"
// IngressUpdated is the type of announcement emitted when we observe an update to a Kubernetes Ingress
IngressUpdated Kind = "ingress-updated"
// ---
// CertificateRotated is the type of announcement emitted when a certificate is rotated by the certificate provider
CertificateRotated Kind = "certificate-rotated"
// ---
// MeshConfigAdded is the type of announcement emitted when we observe an addition of a Kubernetes MeshConfig
MeshConfigAdded Kind = "meshconfig-added"
// MeshConfigDeleted the type of announcement emitted when we observe the deletion of a Kubernetes MeshConfig
MeshConfigDeleted Kind = "meshconfig-deleted"
// MeshConfigUpdated is the type of announcement emitted when we observe an update to a Kubernetes MeshConfig
MeshConfigUpdated Kind = "meshconfig-updated"
// --- policy.openservicemesh.io API events
// EgressAdded is the type of announcement emitted when we observe an addition of egresses.policy.openservicemesh.io
EgressAdded Kind = "egress-added"
// EgressDeleted the type of announcement emitted when we observe a deletion of egresses.policy.openservicemesh.io
EgressDeleted Kind = "egress-deleted"
// EgressUpdated is the type of announcement emitted when we observe an update to egresses.policy.openservicemesh.io
EgressUpdated Kind = "egress-updated"
// IngressBackendAdded is the type of announcement emitted when we observe an addition of ingressbackends.policy.openservicemesh.io
IngressBackendAdded Kind = "ingressbackend-added"
// IngressBackendDeleted the type of announcement emitted when we observe a deletion of ingressbackends.policy.openservicemesh.io
IngressBackendDeleted Kind = "ingressbackend-deleted"
// IngressBackendUpdated is the type of announcement emitted when we observe an update to ingressbackends.policy.openservicemesh.io
IngressBackendUpdated Kind = "ingressbackend-updated"
// RetryPolicyAdded is the type of announcement emitted when we observe an addition of retries.policy.openservicemesh.io
RetryPolicyAdded Kind = "retry-added"
// RetryPolicyDeleted the type of announcement emitted when we observe a deletion of retries.policy.openservicemesh.io
RetryPolicyDeleted Kind = "retry-deleted"
// RetryPolicyUpdated is the type of announcement emitted when we observe an update to retries.policy.openservicemesh.io
RetryPolicyUpdated Kind = "retry-updated"
// UpstreamTrafficSettingAdded is the type of announcement emitted when we observe an addition of upstreamtrafficsettings.policy.openservicemesh.io
UpstreamTrafficSettingAdded Kind = "upstreamtrafficsetting-added"
// UpstreamTrafficSettingDeleted is the type of announcement emitted when we observe a deletion of upstreamtrafficsettings.policy.openservicemesh.io
UpstreamTrafficSettingDeleted Kind = "upstreamtrafficsetting-deleted"
// UpstreamTrafficSettingUpdated is the type of announcement emitted when we observe an update of upstreamtrafficsettings.policy.openservicemesh.io
UpstreamTrafficSettingUpdated Kind = "upstreamtrafficsetting-updated"
// ---
// MultiClusterServiceAdded is the type of announcement emitted when we observe an addition of a multiclusterservice.config.openservicemesh.io
MultiClusterServiceAdded Kind = "multiclusterservice-added"
// MultiClusterServiceDeleted is the type of announcement emitted when we observe an deletion of a multiclusterservice.config.openservicemesh.io
MultiClusterServiceDeleted Kind = "multiclusterservice-deleted"
// MultiClusterServiceUpdated is the type of announcement emitted when we observe an update of a multiclusterservice.config.openservicemesh.io
MultiClusterServiceUpdated Kind = "multiclusterservice-updated"
)
// Announcement is a struct for messages between various components of OSM signaling a need for a change in Sidecar proxy configuration
type Announcement struct {
Type Kind
ReferencedObjectID interface{}
} | pkg/announcements/types.go | 0.690663 | 0.578567 | types.go | starcoder |
package json
import (
"fmt"
"gopkg.in/mgo.v2/bson"
"reflect"
)
// BinData represents base-64 encoded binary data.
type BinData struct {
	Type   byte
	Base64 string
}

// Date represents the number of milliseconds since the Unix epoch.
type Date int64

// ISODate is a date stored as an ISO-format string.
type ISODate string

// ObjectId is a MongoDB ObjectId stored as a string.
type ObjectId string

// DBRef represents a reference to another document.
type DBRef struct {
	Collection string
	Id         interface{}
	Database   string // optional
}

// DBPointer refers to a document in some namespace by wrapping a string containing the namespace
// and the objectId in which the _id of the document is contained.
type DBPointer struct {
	Namespace string
	Id        bson.ObjectId
}

// MinKey represents the literal MinKey.
type MinKey struct{}

// MaxKey represents the literal MaxKey.
type MaxKey struct{}

// NumberInt represents a signed 32-bit integer.
type NumberInt int32

// NumberLong represents a signed 64-bit integer.
type NumberLong int64

// NumberFloat represents a signed 64-bit float.
type NumberFloat float64

// Decimal128 wraps bson's 128-bit decimal type.
type Decimal128 struct {
	bson.Decimal128
}

// RegExp represents a regular expression.
type RegExp struct {
	Pattern string
	Options string
}

// Timestamp represents a BSON timestamp value.
type Timestamp struct {
	Seconds   uint32
	Increment uint32
}

// JavaScript represents a code literal with an optional scope document.
type JavaScript struct {
	Code  string
	Scope interface{}
}

// Float is a 64-bit float.
type Float float64

// Undefined represents the literal undefined.
type Undefined struct{}
// Cached reflect.Type values used when decoding extended JSON literals into
// typed destinations, so reflect.TypeOf is not re-evaluated on every decode.
var (
	// primitive types
	byteType   = reflect.TypeOf(byte(0))
	stringType = reflect.TypeOf(string(""))
	uint32Type = reflect.TypeOf(uint32(0))

	// object types
	binDataType     = reflect.TypeOf(BinData{})
	dateType        = reflect.TypeOf(Date(0))
	isoDateType     = reflect.TypeOf(ISODate(""))
	dbRefType       = reflect.TypeOf(DBRef{})
	dbPointerType   = reflect.TypeOf(DBPointer{})
	maxKeyType      = reflect.TypeOf(MaxKey{})
	minKeyType      = reflect.TypeOf(MinKey{})
	numberIntType   = reflect.TypeOf(NumberInt(0))
	numberLongType  = reflect.TypeOf(NumberLong(0))
	numberFloatType = reflect.TypeOf(NumberFloat(0))
	objectIdType    = reflect.TypeOf(ObjectId(""))
	regexpType      = reflect.TypeOf(RegExp{})
	timestampType   = reflect.TypeOf(Timestamp{})
	undefinedType   = reflect.TypeOf(Undefined{})
	orderedBSONType = reflect.TypeOf(bson.D{})
	interfaceType   = reflect.TypeOf((*interface{})(nil))
)
// isFormatable reports whether the date value is below the formatting cutoff.
// 32535215999000 ms since the Unix epoch is approximately the year 3001;
// NOTE(review): presumably larger dates cannot be rendered as a calendar
// date — confirm against the encoder.
func (d Date) isFormatable() bool {
	return int64(d) < int64(32535215999000)
}
// stateBeginExtendedValue is the state at the beginning of a non-standard
// (MongoDB extended JSON) value; the leading character selects which literal
// or constructor the scanner expects next.
func stateBeginExtendedValue(s *scanner, c int) int {
	switch c {
	case 'u': // beginning of undefined
		s.step = stateU
	case 'B': // beginning of BinData or Boolean
		s.step = stateB
	case 'D': // beginning of Date
		s.step = stateD
	case 'I': // beginning of Infinity or ISODate
		s.step = stateI
	case 'M': // beginning of MinKey or MaxKey
		s.step = stateM
	case 'N': // beginning of NaN or NumberXX
		s.step = stateUpperN
	case 'O': // beginning of ObjectId
		s.step = stateO
	case 'R': // beginning of RegExp
		s.step = stateR
	case 'T': // beginning of Timestamp
		s.step = stateUpperT
	case '/': // beginning of /foo/i
		s.step = stateInRegexpPattern
	default:
		return s.error(c, "looking for beginning of value")
	}

	return scanBeginLiteral
}
// stateB is the state after reading `B`; the next character disambiguates
// BinData from Boolean.
func stateB(s *scanner, c int) int {
	switch c {
	case 'i':
		s.step = stateBi
		return scanContinue
	case 'o':
		s.step = stateBo
		return scanContinue
	}
	return s.error(c, "in literal BinData or Boolean (expecting 'i' or 'o')")
}
// stateUpperN is the state after reading `N`; the next character disambiguates
// NaN from NumberXX.
func stateUpperN(s *scanner, c int) int {
	switch c {
	case 'a':
		s.step = stateUpperNa
		return scanContinue
	case 'u':
		s.step = stateUpperNu
		return scanContinue
	}
	return s.error(c, "in literal NaN or Number (expecting 'a' or 'u')")
}
// stateM is the state after reading `M`; the next character disambiguates
// MaxKey from MinKey.
func stateM(s *scanner, c int) int {
	switch c {
	case 'a':
		s.step = stateUpperMa
		return scanContinue
	case 'i':
		s.step = stateUpperMi
		return scanContinue
	}
	return s.error(c, "in literal MaxKey or MinKey (expecting 'a' or 'i')")
}
// stateD is the state after reading `D`. The next character selects Date
// (`Da`), DBRef/DBPointer (`DB`), or the lowercase Dbref spelling (`Db`).
func stateD(s *scanner, c int) int {
	switch c {
	case 'a':
		s.step = stateDa
	case 'B':
		s.step = stateDB
	case 'b':
		s.step = stateDb
	default:
		// Bug fix: the 'b' branch is accepted above but was omitted from the
		// error message; list every valid continuation character.
		return s.error(c, "in literal Date or DBRef (expecting 'a', 'B' or 'b')")
	}

	return scanContinue
}
// stateDB is the state after reading `DB`; the next character disambiguates
// DBRef from DBPointer.
func stateDB(s *scanner, c int) int {
	if c == 'R' {
		s.step = stateDBR
		return scanContinue
	}
	if c == 'P' {
		s.step = stateDBP
		return scanContinue
	}
	// Bug fix: quotes were misplaced in the message ('R or P' -> 'R' or 'P').
	return s.error(c, "in state DB (expecting 'R' or 'P')")
}
// stateI is the state after reading `I`; the next character disambiguates
// Infinity from ISODate.
func stateI(s *scanner, c int) int {
	if c == 'n' {
		s.step = stateIn
		return scanContinue
	}
	if c == 'S' {
		s.step = stateIS
		return scanContinue
	}
	return s.error(c, "in literal Infinity or ISO (expecting 'n' or 'S')")
}
// storeExtendedLiteral decodes an extended JSON literal stored in item into v.
// Dispatch is on the first one or two characters of item; it returns true if
// item was recognized as an extended literal, false otherwise. Literals that
// only make sense untyped (undefined, MinKey, MaxKey, regexp) can only be
// stored into an interface destination.
func (d *decodeState) storeExtendedLiteral(item []byte, v reflect.Value, fromQuoted bool) bool {
	switch c := item[0]; c {
	case 'n':
		d.storeNewLiteral(v, fromQuoted)

	case 'u': // undefined
		switch kind := v.Kind(); kind {
		case reflect.Interface:
			v.Set(reflect.ValueOf(Undefined{}))
		default:
			d.error(fmt.Errorf("cannot store %v value into %v type", undefinedType, kind))
		}

	case 'B': // BinData or Boolean
		switch item[1] {
		case 'i': // BinData
			d.storeBinData(v)
		case 'o': // Boolean
			d.storeBoolean(v)
		}

	case 'D': // Date, DBRef, DBPointer, Dbpointer, or Dbref
		switch item[1] {
		case 'a': // Date
			d.storeDate(v)
		case 'b': // Dbref
			d.storeDBRef(v)
		case 'B': // DBRef or DBPointer
			switch item[2] {
			case 'R': // DBRef
				d.storeDBRef(v)
			case 'P': // DBPointer
				d.storeDBPointer(v)
			}
		}

	case 'I':
		switch item[1] {
		case 'S': // ISODate
			d.storeISODate(v)
		}

	case 'M': // MinKey or MaxKey
		switch item[1] {
		case 'i': // MinKey
			switch kind := v.Kind(); kind {
			case reflect.Interface:
				v.Set(reflect.ValueOf(MinKey{}))
			default:
				d.error(fmt.Errorf("cannot store %v value into %v type", minKeyType, kind))
			}
		case 'a': // MaxKey
			switch kind := v.Kind(); kind {
			case reflect.Interface:
				v.Set(reflect.ValueOf(MaxKey{}))
			default:
				d.error(fmt.Errorf("cannot store %v value into %v type", maxKeyType, kind))
			}
		}

	case 'O': // ObjectId
		d.storeObjectId(v)

	case 'N': // NumberInt or NumberLong
		// item[6] is the character following the shared "Number" prefix.
		// NOTE(review): NumberFloat ('F') is not dispatched here — confirm
		// it is handled elsewhere.
		switch item[6] {
		case 'I': // NumberInt
			d.storeNumberInt(v)
		case 'L': // NumberLong
			d.storeNumberLong(v)
		}

	case 'R': // RegExp constructor
		d.storeRegexp(v)

	case 'T': // Timestamp
		d.storeTimestamp(v)

	case '/': // regular expression literal
		op := d.scanWhile(scanSkipSpace)
		if op != scanRegexpPattern {
			d.error(fmt.Errorf("expected beginning of regular expression pattern"))
		}

		pattern, options, err := d.regexp()
		if err != nil {
			d.error(err)
		}
		switch kind := v.Kind(); kind {
		case reflect.Interface:
			v.Set(reflect.ValueOf(RegExp{pattern, options}))
		default:
			d.error(fmt.Errorf("cannot store %v value into %v type", regexpType, kind))
		}

	default:
		return false
	}

	return true
}
// Returns a literal from the underlying byte data.
func (d *decodeState) getExtendedLiteral(item []byte) (interface{}, bool) {
switch c := item[0]; c {
case 'n':
return d.getNewLiteral(), true
case 'u': // undefined
return Undefined{}, true
case 'B': // BinData or Boolean
switch item[1] {
case 'i': // BinData
return d.getBinData(), true
case 'o': // Boolean
return d.getBoolean(), true
}
case 'D': // Date, DBRef, or Dbref
switch item[1] {
case 'a': // Date
return d.getDate(), true
case 'b': // Dbref
return d.getDBRef(), true
case 'B': // DBRef or DBPoiner
switch item[2] {
case 'R': // DBRef
return d.getDBRef(), true
case 'P': // DBPointer
return d.getDBPointer(), true
}
}
case 'M': // MinKey or MaxKey
switch item[1] {
case 'i': // MinKey
return MinKey{}, true
case 'a': // MaxKey
return MaxKey{}, true
}
case 'O': // ObjectId
return d.getObjectId(), true
case 'N': // NumberInt or NumberLong
switch item[6] {
case 'I': // NumberInt
return d.getNumberInt(), true
case 'L': // NumberLong
return d.getNumberLong(), true
}
case 'R': // RegExp constructor
return d.getRegexp(), true
case 'T': // Timestamp
return d.getTimestamp(), true
case 'I': // ISO Date
switch item[1] {
case 'S': // ISODate
return d.getDate(), true
}
case '/': // regular expression literal
op := d.scanWhile(scanSkipSpace)
if op != scanRegexpPattern {
d.error(fmt.Errorf("expected beginning of regular expression pattern"))
}
pattern, options, err := d.regexp()
if err != nil {
d.error(err)
}
return RegExp{pattern, options}, true
}
return nil, false
} | src/mongo/gotools/common/json/mongo_extjson.go | 0.721449 | 0.441793 | mongo_extjson.go | starcoder |
package data
import (
"crypto/md5"
"encoding/binary"
"fmt"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/simpleiot/simpleiot/internal/pb"
"google.golang.org/protobuf/proto"
)
// Point is a flexible data structure that can be used to represent
// a sensor value or a configuration parameter.
// Type and Key uniquely identify a point in a device (see Points.Add,
// which matches on those two fields).
type Point struct {
	//-------------------------------------------------------
	// The first fields uniquely identify a point when receiving updates

	// Type of point (voltage, current, key, etc)
	Type string `json:"type,omitempty"`

	// Key is used to allow a group of points to represent a "map"
	Key string `json:"key,omitempty"`

	//-------------------------------------------------------
	// The following fields are the values for a point

	// Time the point was taken
	Time time.Time `json:"time,omitempty"`

	// Index is used to specify a position in an array such as
	// which pump, temp sensor, etc.
	Index float64 `json:"index,omitempty"`

	// Instantaneous analog or digital value of the point.
	// 0 and 1 are used to represent digital values
	Value float64 `json:"value,omitempty"`

	// Optional text value of the point for data that is best represented
	// as a string rather than a number.
	Text string `json:"text,omitempty"`

	// Data is a catchall field for data that does not fit into float or
	// string -- should be used sparingly
	Data []byte `json:"data,omitempty"`

	//-------------------------------------------------------
	// Metadata

	// Tombstone is used to indicate a point has been deleted. This value
	// is only ever incremented. Odd values mean point is deleted.
	Tombstone int `json:"tombstone,omitempty"`
}
// String renders the point as "T:<type> V:<value> I:<index> <rfc3339-time>",
// omitting the type and index segments when they are empty/zero. A non-empty
// Text is shown verbatim; otherwise Value is shown with three decimals.
func (p Point) String() string {
	out := ""
	if p.Type != "" {
		out += "T:" + p.Type + " "
	}

	if p.Text != "" {
		out += fmt.Sprintf("V:%v ", p.Text)
	} else {
		out += fmt.Sprintf("V:%.3f ", p.Value)
	}

	if p.Index != 0 {
		out += fmt.Sprintf("I:%v ", p.Index)
	}

	return out + p.Time.Format(time.RFC3339)
}
// IsMatch reports whether the point matches the given type and key.
// An empty typ acts as a wildcard; key must always match exactly.
func (p Point) IsMatch(typ, key string) bool {
	typeOK := typ == "" || typ == p.Type
	return typeOK && key == p.Key
}
// ToPb encodes the point in protobuf format.
// NOTE(review): ptypes is deprecated upstream in favor of timestamppb;
// left as-is here to avoid a new dependency.
func (p Point) ToPb() (pb.Point, error) {
	ts, err := ptypes.TimestampProto(p.Time)
	if err != nil {
		return pb.Point{}, err
	}

	// Value and Index are narrowed from float64 to float32 for the wire format.
	return pb.Point{
		Type:      p.Type,
		Index:     float32(p.Index),
		Key:       p.Key,
		Value:     float32(p.Value),
		Text:      p.Text,
		Time:      ts,
		Tombstone: int32(p.Tombstone),
	}, nil
}
// Bool returns the boolean interpretation of the point's numeric value:
// false for 0, true for anything else.
func (p *Point) Bool() bool {
	return p.Value != 0
}
// Points is a collection of Point values with helpers for lookup, merging,
// hashing, and sorting (it implements sort.Interface via Len/Less/Swap).
type Points []Point
// Desc returns a human-readable description for a set of points:
// "first last" name if a first name is present, otherwise the description
// point, otherwise "".
func (ps Points) Desc() string {
	firstName, _ := ps.Text(PointTypeFirstName, "")

	if firstName != "" {
		lastName, _ := ps.Text(PointTypeLastName, "")

		if lastName == "" {
			return firstName
		}

		return firstName + " " + lastName
	}

	desc, _ := ps.Text(PointTypeDescription, "")
	if desc != "" {
		return desc
	}

	return ""
}
// Find returns the first point matching the given type and key, plus a
// boolean reporting whether a match was found. An empty typ is a wildcard.
func (ps *Points) Find(typ, key string) (Point, bool) {
	for _, candidate := range *ps {
		if candidate.IsMatch(typ, key) {
			return candidate, true
		}
	}

	return Point{}, false
}
// Value fetches the numeric value of the point matching typ and key.
// An empty typ is treated as a wildcard; the boolean reports whether a
// matching point was found (0 is returned when not found).
func (ps *Points) Value(typ, key string) (float64, bool) {
	p, ok := ps.Find(typ, key)
	return p.Value, ok
}
// ValueInt returns the matching point's value truncated toward zero to an
// int, plus whether a match was found.
func (ps *Points) ValueInt(typ, key string) (int, bool) {
	f, ok := ps.Value(typ, key)
	return int(f), ok
}
// ValueBool returns the matching point's value as a bool (non-zero => true),
// plus whether a match was found.
func (ps *Points) ValueBool(typ, key string) (bool, bool) {
	f, ok := ps.Value(typ, key)
	return FloatToBool(f), ok
}
// Text fetches the text value of the point matching typ and key.
// An empty typ is treated as a wildcard; the boolean reports whether a
// matching point was found ("" is returned when not found).
func (ps *Points) Text(typ, key string) (string, bool) {
	p, ok := ps.Find(typ, key)
	return p.Text, ok
}
// LatestTime returns the most recent timestamp among the points
// (the zero time if the collection is empty).
func (ps *Points) LatestTime() time.Time {
	var latest time.Time
	for _, p := range *ps {
		if p.Time.After(latest) {
			latest = p.Time
		}
	}
	return latest
}
// ToPb encodes an array of points into protobuf wire bytes.
// Fails if any individual point fails to convert (e.g. an unrepresentable
// timestamp).
func (ps *Points) ToPb() ([]byte, error) {
	pbPoints := make([]*pb.Point, len(*ps))

	for i, s := range *ps {
		sPb, err := s.ToPb()
		if err != nil {
			return []byte{}, err
		}

		pbPoints[i] = &sPb
	}

	return proto.Marshal(&pb.Points{Points: pbPoints})
}
// question -- should be using []*Point instead of []Point?

// Hash returns an MD5 digest computed over the point timestamps
// (UnixNano, little-endian), in slice order.
// NOTE(review): only Time feeds the hash — two point sets with identical
// timestamps but different values/types hash the same, and the result is
// order-dependent; confirm both are intended.
func (ps *Points) Hash() []byte {
	h := md5.New()

	for _, p := range *ps {
		d := make([]byte, 8)
		binary.LittleEndian.PutUint64(d, uint64(p.Time.UnixNano()))
		h.Write(d)
	}

	return h.Sum(nil)
}
// Add takes a point and updates an existing array of points. An existing
// point (matched on Type and Key) is replaced only if the timestamp in pIn
// is newer than the existing timestamp. If the pIn timestamp is zero, the
// current time is used. The largest Tombstone value always wins regardless
// of timestamps, so deletions are never lost to an older update.
func (ps *Points) Add(pIn Point) {
	pFound := false

	if pIn.Time.IsZero() {
		pIn.Time = time.Now()
	}

	for i, p := range *ps {
		if p.Key == pIn.Key && p.Type == pIn.Type {
			pFound = true
			// largest tombstone value always wins
			tombstone := p.Tombstone
			if pIn.Tombstone > p.Tombstone {
				tombstone = pIn.Tombstone
			}
			// newer timestamp wins for the value fields
			if pIn.Time.After(p.Time) {
				(*ps)[i] = pIn
			}
			(*ps)[i].Tombstone = tombstone
		}
	}

	if !pFound {
		*ps = append(*ps, pIn)
	}
}
// Implement methods needed by sort.Interface

// Len reports the number of points in the collection (sort.Interface).
func (ps Points) Len() int {
	return len(ps)
}
// Less is required by sort.Interface; points are ordered by timestamp,
// oldest first.
func (ps Points) Less(i, j int) bool {
	return ps[i].Time.Before(ps[j].Time)
}
// Swap is required by sort.Interface; it exchanges the points at i and j.
func (ps Points) Swap(i, j int) {
	ps[i], ps[j] = ps[j], ps[i]
}
// PbToPoint converts a protobuf point to a Point, widening float32 wire
// values back to float64 and converting the protobuf timestamp.
func PbToPoint(sPb *pb.Point) (Point, error) {
	ts, err := ptypes.Timestamp(sPb.Time)
	if err != nil {
		return Point{}, err
	}

	ret := Point{
		Type:      sPb.Type,
		Text:      sPb.Text,
		Key:       sPb.Key,
		Index:     float64(sPb.Index),
		Value:     float64(sPb.Value),
		Time:      ts,
		Tombstone: int(sPb.Tombstone),
	}

	return ret, nil
}
// PbDecodePoints decodes protobuf-encoded wire bytes into a Points slice.
// Returns an empty slice (not nil contents) on any unmarshal or conversion
// error.
func PbDecodePoints(data []byte) (Points, error) {
	pbPoints := &pb.Points{}
	err := proto.Unmarshal(data, pbPoints)
	if err != nil {
		return []Point{}, err
	}

	ret := make([]Point, len(pbPoints.Points))

	for i, sPb := range pbPoints.Points {
		s, err := PbToPoint(sPb)
		if err != nil {
			return []Point{}, err
		}
		ret[i] = s
	}

	return ret, nil
}
// PointFilter is used to send points upstream. It only sends points whose
// data has changed, and at a maximum frequency.
type PointFilter struct {
	minSend          time.Duration // minimum interval between sends of changed points
	periodicSend     time.Duration // interval at which all points are re-sent
	points           []Point       // last-seen value for each tracked point
	lastSent         time.Time     // when changed points were last emitted
	lastPeriodicSend time.Time     // when a full periodic send last happened
}
// NewPointFilter is used to create a new point filter.
// Changed points are sent out no more often than the minSend interval;
// all points are periodically re-sent every periodicSend interval.
// Set minSend to 0 for things like config settings where you want updates
// sent whenever anything changes.
func NewPointFilter(minSend, periodicSend time.Duration) *PointFilter {
	return &PointFilter{
		minSend:      minSend,
		periodicSend: periodicSend,
	}
}
// add merges point into the saved points and reports whether its value
// changed (or the point was new). Identity here is (Key, Type, Index).
// NOTE(review): only Value is compared and updated — changes to Text, Time,
// or other fields are not detected or propagated; confirm that is intended.
func (sf *PointFilter) add(point Point) bool {
	for i, p := range sf.points {
		if point.Key == p.Key &&
			point.Type == p.Type &&
			point.Index == p.Index {
			if point.Value == p.Value {
				return false
			}

			sf.points[i].Value = point.Value
			return true
		}
	}

	// point not found, add to array
	sf.points = append(sf.points, point)
	return true
}
// Add adds points and returns the subset that should be sent now:
//   - if periodicSend has elapsed, all tracked points are returned;
//   - otherwise, nothing is returned until minSend has elapsed;
//   - otherwise, only the points whose values changed are returned.
func (sf *PointFilter) Add(points []Point) []Point {
	if time.Since(sf.lastPeriodicSend) > sf.periodicSend {
		// periodic interval elapsed: merge everything and send all points
		for _, s := range points {
			sf.add(s)
		}

		sf.lastPeriodicSend = time.Now()
		sf.lastSent = sf.lastPeriodicSend
		return sf.points
	}

	if sf.minSend != 0 && time.Since(sf.lastSent) < sf.minSend {
		// rate limited: don't return anything yet
		return []Point{}
	}

	// now check if anything has changed and just send what has changed
	// only
	var ret []Point

	for _, s := range points {
		if sf.add(s) {
			ret = append(ret, s)
		}
	}

	if len(ret) > 0 {
		sf.lastSent = time.Now()
	}

	return ret
}
// FloatToBool converts a float to a bool: any non-zero value maps to true.
func FloatToBool(v float64) bool {
	return v != 0
}
// BoolToFloat converts a bool to a float64: true -> 1, false -> 0.
// (Fix: stripped dataset-separator residue fused onto the closing brace line.)
func BoolToFloat(v bool) float64 {
	if v {
		return 1
	}
	return 0
}
package v1api
import (
"os"
"path"
"github.com/RumbleDiscovery/mustache/v2"
)
// ParseString compiles a mustache template string. The resulting output can
// be used to efficiently render the template multiple times with different data
// sources. Partials are resolved relative to the current working directory,
// and HTML escaping is enabled.
func ParseString(data string) (*mustache.Template, error) {
	return ParseStringRaw(data, false)
}
// ParseStringRaw compiles a mustache template string. The resulting output can
// be used to efficiently render the template multiple times with different data
// sources. When forceRaw is true, HTML escaping is disabled. Partials are
// resolved relative to the current working directory.
func ParseStringRaw(data string, forceRaw bool) (*mustache.Template, error) {
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}

	partials := &mustache.FileProvider{
		Paths: []string{cwd},
	}

	return ParseStringPartialsRaw(data, partials, forceRaw)
}
// ParseStringPartials compiles a mustache template string, retrieving any
// required partials from the given provider. The resulting output can be used
// to efficiently render the template multiple times with different data
// sources. HTML escaping is enabled.
func ParseStringPartials(data string, partials mustache.PartialProvider) (*mustache.Template, error) {
	return ParseStringPartialsRaw(data, partials, false)
}
// ParseStringPartialsRaw compiles a mustache template string, retrieving any
// required partials from the given provider. The resulting output can be used
// to efficiently render the template multiple times with different data
// sources. When forceRaw is true, HTML escaping is disabled.
func ParseStringPartialsRaw(data string, partials mustache.PartialProvider, forceRaw bool) (*mustache.Template, error) {
	escapeMode := mustache.EscapeHTML
	if forceRaw {
		escapeMode = mustache.Raw
	}
	return mustache.New().WithPartials(partials).WithEscapeMode(escapeMode).CompileString(data)
}
// ParseFile loads a mustache template string from a file and compiles it. The
// resulting output can be used to efficiently render the template multiple
// times with different data sources. Partials are resolved relative to the
// template file's directory.
func ParseFile(filename string) (*mustache.Template, error) {
	dirname, _ := path.Split(filename)
	partials := &mustache.FileProvider{
		Paths: []string{dirname},
	}

	return ParseFilePartials(filename, partials)
}
// ParseFilePartials loads a mustache template string from a file, retrieving any
// required partials from the given provider, and compiles it. The resulting
// output can be used to efficiently render the template multiple times with
// different data sources. HTML escaping is enabled.
func ParseFilePartials(filename string, partials mustache.PartialProvider) (*mustache.Template, error) {
	return ParseFilePartialsRaw(filename, false, partials)
}
// ParseFilePartialsRaw loads a mustache template string from a file, retrieving
// any required partials from the given provider, and compiles it. The resulting
// output can be used to efficiently render the template multiple times with
// different data sources.
// NOTE(review): parameter order (forceRaw before partials) differs from
// ParseStringPartialsRaw — kept for backward compatibility.
func ParseFilePartialsRaw(filename string, forceRaw bool, partials mustache.PartialProvider) (*mustache.Template, error) {
	escapeMode := mustache.EscapeHTML
	if forceRaw {
		escapeMode = mustache.Raw
	}
	return mustache.New().WithPartials(partials).WithEscapeMode(escapeMode).CompileFile(filename)
}
// Render compiles a mustache template string and uses the given data source
// - generally a map or struct - to render the template and return the output.
// HTML escaping is enabled.
func Render(data string, context ...interface{}) (string, error) {
	return RenderRaw(data, false, context...)
}
// RenderRaw compiles a mustache template string and uses the given data
// source - generally a map or struct - to render the template and return the
// output. When forceRaw is true, HTML escaping is disabled.
func RenderRaw(data string, forceRaw bool, context ...interface{}) (string, error) {
	return RenderPartialsRaw(data, nil, forceRaw, context...)
}
// RenderPartials compiles a mustache template string and uses the given partial
// provider and data source - generally a map or struct - to render the template
// and return the output. HTML escaping is enabled.
func RenderPartials(data string, partials mustache.PartialProvider, context ...interface{}) (string, error) {
	return RenderPartialsRaw(data, partials, false, context...)
}
// RenderPartialsRaw compiles a mustache template string and uses the given
// partial provider and data source - generally a map or struct - to render the
// template and return the output. A nil partials provider is allowed and
// skipped; forceRaw disables HTML escaping.
func RenderPartialsRaw(data string, partials mustache.PartialProvider, forceRaw bool, context ...interface{}) (string, error) {
	tmpl := mustache.New()
	if forceRaw {
		tmpl = tmpl.WithEscapeMode(mustache.Raw)
	}
	if partials != nil {
		tmpl = tmpl.WithPartials(partials)
	}

	renderer, err := tmpl.CompileString(data)
	if err != nil {
		return "", err
	}

	return renderer.Render(context...)
}
// RenderInLayout compiles a mustache template string and layout "wrapper" and
// uses the given data source - generally a map or struct - to render the
// compiled templates and return the output. No partial provider is used.
func RenderInLayout(data string, layoutData string, context ...interface{}) (string, error) {
	return RenderInLayoutPartials(data, layoutData, nil, context...)
}
// RenderInLayoutPartials compiles a mustache template string and layout
// "wrapper" and uses the given partial provider and data source - generally a
// map or struct - to render the compiled templates and return the output.
// A nil partials provider is allowed and skipped.
func RenderInLayoutPartials(data string, layoutData string, partials mustache.PartialProvider, context ...interface{}) (string, error) {
	layoutCmpl := mustache.New()
	if partials != nil {
		// Bug fix: the compiler returned by WithPartials was previously
		// discarded, so the partial provider never applied. Keep the result,
		// consistent with RenderPartialsRaw above.
		layoutCmpl = layoutCmpl.WithPartials(partials)
	}
	layoutTmpl, err := layoutCmpl.CompileString(layoutData)
	if err != nil {
		return "", err
	}

	cmpl := mustache.New()
	if partials != nil {
		// Same fix as above for the body template's compiler.
		cmpl = cmpl.WithPartials(partials)
	}
	tmpl, err := cmpl.CompileString(data)
	if err != nil {
		return "", err
	}

	return tmpl.RenderInLayout(layoutTmpl, context...)
}
// RenderFile loads a mustache template string from a file and compiles it, and
// then uses the given data source - generally a map or struct - to render
// the template and return the output. No partial provider is configured.
func RenderFile(filename string, context ...interface{}) (string, error) {
	tmpl, err := mustache.New().CompileFile(filename)
	if err != nil {
		return "", err
	}

	return tmpl.Render(context...)
}
// RenderFileInLayout loads a mustache template string and layout "wrapper"
// template string from files and compiles them, and then uses the the given
// data source - generally a map or struct - to render the compiled templates
// and return the output.
func RenderFileInLayout(filename string, layoutFile string, context ...interface{}) (string, error) {
layoutTmpl, err := mustache.New().CompileFile(layoutFile)
if err != nil {
return "", err
}
tmpl, err := mustache.New().CompileFile(filename)
if err != nil {
return "", err
}
return tmpl.RenderInLayout(layoutTmpl, context...)
} | v1api/v1api.go | 0.718002 | 0.420302 | v1api.go | starcoder |
package ir
import (
"github.com/umaumax/llvm/ir/value"
)
// --- [ Binary instructions ] -------------------------------------------------

// ~~~ [ add ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewAdd builds an add instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewAdd(x, y value.Value) *InstAdd {
	add := NewAdd(x, y)
	block.Insts = append(block.Insts, add)
	return add
}
// ~~~ [ fadd ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewFAdd builds an fadd instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewFAdd(x, y value.Value) *InstFAdd {
	fadd := NewFAdd(x, y)
	block.Insts = append(block.Insts, fadd)
	return fadd
}
// ~~~ [ sub ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewSub builds a sub instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewSub(x, y value.Value) *InstSub {
	sub := NewSub(x, y)
	block.Insts = append(block.Insts, sub)
	return sub
}
// ~~~ [ fsub ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewFSub builds an fsub instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewFSub(x, y value.Value) *InstFSub {
	fsub := NewFSub(x, y)
	block.Insts = append(block.Insts, fsub)
	return fsub
}
// ~~~ [ mul ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewMul builds a mul instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewMul(x, y value.Value) *InstMul {
	mul := NewMul(x, y)
	block.Insts = append(block.Insts, mul)
	return mul
}
// ~~~ [ fmul ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewFMul builds an fmul instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewFMul(x, y value.Value) *InstFMul {
	fmul := NewFMul(x, y)
	block.Insts = append(block.Insts, fmul)
	return fmul
}
// ~~~ [ udiv ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewUDiv builds a udiv instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewUDiv(x, y value.Value) *InstUDiv {
	udiv := NewUDiv(x, y)
	block.Insts = append(block.Insts, udiv)
	return udiv
}
// ~~~ [ sdiv ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewSDiv builds an sdiv instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewSDiv(x, y value.Value) *InstSDiv {
	sdiv := NewSDiv(x, y)
	block.Insts = append(block.Insts, sdiv)
	return sdiv
}
// ~~~ [ fdiv ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewFDiv builds an fdiv instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewFDiv(x, y value.Value) *InstFDiv {
	fdiv := NewFDiv(x, y)
	block.Insts = append(block.Insts, fdiv)
	return fdiv
}
// ~~~ [ urem ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewURem builds a urem instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewURem(x, y value.Value) *InstURem {
	urem := NewURem(x, y)
	block.Insts = append(block.Insts, urem)
	return urem
}
// ~~~ [ srem ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// NewSRem builds an srem instruction from the x and y operands, appends it to
// the basic block's instruction list, and returns it.
func (block *Block) NewSRem(x, y value.Value) *InstSRem {
	srem := NewSRem(x, y)
	block.Insts = append(block.Insts, srem)
	return srem
}
// ~~~ [ frem ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// NewFRem appends a new frem instruction to the basic block based on the given
// operands.
func (block *Block) NewFRem(x, y value.Value) *InstFRem {
inst := NewFRem(x, y)
block.Insts = append(block.Insts, inst)
return inst
} | ir/block_binary.go | 0.621426 | 0.409693 | block_binary.go | starcoder |
package google
import "github.com/hashicorp/terraform/helper/schema"
// scopeURLByAlias maps the short scope aliases used by the gcloud tool to the
// GCE auth endpoint URLs they stand for. Declared at package level so the map
// is built once instead of being reallocated on every call.
var scopeURLByAlias = map[string]string{
	"bigquery":              "https://www.googleapis.com/auth/bigquery",
	"cloud-platform":        "https://www.googleapis.com/auth/cloud-platform",
	"cloud-source-repos":    "https://www.googleapis.com/auth/source.full_control",
	"cloud-source-repos-ro": "https://www.googleapis.com/auth/source.read_only",
	"compute-ro":            "https://www.googleapis.com/auth/compute.readonly",
	"compute-rw":            "https://www.googleapis.com/auth/compute",
	"datastore":             "https://www.googleapis.com/auth/datastore",
	"logging-write":         "https://www.googleapis.com/auth/logging.write",
	"monitoring":            "https://www.googleapis.com/auth/monitoring",
	"monitoring-write":      "https://www.googleapis.com/auth/monitoring.write",
	"pubsub":                "https://www.googleapis.com/auth/pubsub",
	"service-control":       "https://www.googleapis.com/auth/servicecontrol",
	"service-management":    "https://www.googleapis.com/auth/service.management.readonly",
	"sql":                   "https://www.googleapis.com/auth/sqlservice",
	"sql-admin":             "https://www.googleapis.com/auth/sqlservice.admin",
	"storage-full":          "https://www.googleapis.com/auth/devstorage.full_control",
	"storage-ro":            "https://www.googleapis.com/auth/devstorage.read_only",
	"storage-rw":            "https://www.googleapis.com/auth/devstorage.read_write",
	"taskqueue":             "https://www.googleapis.com/auth/taskqueue",
	"trace-append":          "https://www.googleapis.com/auth/trace.append",
	"trace-ro":              "https://www.googleapis.com/auth/trace.readonly",
	"useraccounts-ro":       "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
	"useraccounts-rw":       "https://www.googleapis.com/auth/cloud.useraccounts",
	"userinfo-email":        "https://www.googleapis.com/auth/userinfo.email",
}

// canonicalizeServiceScope returns the full GCE auth endpoint URL for a short
// gcloud-style scope alias. Inputs that are not known aliases (e.g. already
// fully-qualified scope URLs) are returned unchanged.
func canonicalizeServiceScope(scope string) string {
	if matchedURL, ok := scopeURLByAlias[scope]; ok {
		return matchedURL
	}
	return scope
}
// canonicalizeServiceScopes maps every scope in the given slice through
// canonicalizeServiceScope and returns the resulting slice.
func canonicalizeServiceScopes(scopes []string) []string {
	out := make([]string, 0, len(scopes))
	for _, s := range scopes {
		out = append(out, canonicalizeServiceScope(s))
	}
	return out
}
// stringScopeHashcode hashes a scope value for use in a schema.Set, first
// canonicalizing it so that a short alias and its full URL hash identically.
func stringScopeHashcode(v interface{}) int {
	v = canonicalizeServiceScope(v.(string))
	return schema.HashString(v)
} | vendor/github.com/terraform-providers/terraform-provider-google/google/service_scope.go | 0.544801 | 0.401336 | service_scope.go | starcoder |
package iso20022
// Extract of trade data for an investment fund order.
type FundOrderData1 struct {
	// Account information of the individual order instruction for which the status is given.
	InvestmentAccountDetails *InvestmentAccount13 `xml:"InvstmtAcctDtls,omitempty"`
	// Financial instrument information of the individual order instruction for which the status is given.
	FinancialInstrumentDetails *FinancialInstrument10 `xml:"FinInstrmDtls,omitempty"`
	// Quantity of investment fund units subscribed or redeemed.
	UnitsNumber *FinancialInstrumentQuantity1 `xml:"UnitsNb,omitempty"`
	// Amount of money used to derive the quantity of investment fund units to be sold or subscribed to.
	NetAmount *ActiveOrHistoricCurrencyAndAmount `xml:"NetAmt,omitempty"`
	// Amount of money used to derive the quantity of investment fund units to be sold or subscribed to, including all charges, commissions, and tax.
	GrossAmount *ActiveOrHistoricCurrencyAndAmount `xml:"GrssAmt,omitempty"`
	// Portion of the investor's holdings, in a specific investment fund/ fund class, that is redeemed.
	HoldingsRedemptionRate *PercentageRate `xml:"HldgsRedRate,omitempty"`
	// Total amount of money paid /to be paid or received in exchange for the financial instrument in the individual order.
	SettlementAmount *ActiveCurrencyAndAmount `xml:"SttlmAmt,omitempty"`
	// Currency in which the rate of exchange is expressed in a currency exchange. In the example 1GBP = xxxCUR, the unit currency is GBP.
	UnitCurrency *ActiveOrHistoricCurrencyCode `xml:"UnitCcy,omitempty"`
	// Currency into which the base currency is converted, in a currency exchange.
	QuotedCurrency *ActiveOrHistoricCurrencyCode `xml:"QtdCcy,omitempty"`
}
// AddInvestmentAccountDetails allocates the InvestmentAccountDetails field and
// returns it so the caller can populate it.
func (f *FundOrderData1) AddInvestmentAccountDetails() *InvestmentAccount13 {
	f.InvestmentAccountDetails = new(InvestmentAccount13)
	return f.InvestmentAccountDetails
}
// AddFinancialInstrumentDetails allocates the FinancialInstrumentDetails field
// and returns it so the caller can populate it.
func (f *FundOrderData1) AddFinancialInstrumentDetails() *FinancialInstrument10 {
	f.FinancialInstrumentDetails = new(FinancialInstrument10)
	return f.FinancialInstrumentDetails
}
// AddUnitsNumber allocates the UnitsNumber field and returns it so the caller
// can populate it.
func (f *FundOrderData1) AddUnitsNumber() *FinancialInstrumentQuantity1 {
	f.UnitsNumber = new(FinancialInstrumentQuantity1)
	return f.UnitsNumber
}
// SetNetAmount builds the NetAmount from the given amount value and currency code.
func (f *FundOrderData1) SetNetAmount(value, currency string) {
	f.NetAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
// SetGrossAmount builds the GrossAmount from the given amount value and currency code.
func (f *FundOrderData1) SetGrossAmount(value, currency string) {
	f.GrossAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
// SetHoldingsRedemptionRate sets the redemption rate from its string representation.
func (f *FundOrderData1) SetHoldingsRedemptionRate(value string) {
	f.HoldingsRedemptionRate = (*PercentageRate)(&value)
}
// SetSettlementAmount builds the SettlementAmount from the given amount value and currency code.
func (f *FundOrderData1) SetSettlementAmount(value, currency string) {
	f.SettlementAmount = NewActiveCurrencyAndAmount(value, currency)
}
// SetUnitCurrency sets the unit currency code.
func (f *FundOrderData1) SetUnitCurrency(value string) {
	f.UnitCurrency = (*ActiveOrHistoricCurrencyCode)(&value)
}
// SetQuotedCurrency sets the quoted currency code.
func (f *FundOrderData1) SetQuotedCurrency(value string) {
	f.QuotedCurrency = (*ActiveOrHistoricCurrencyCode)(&value)
} | data/train/go/d7d67dc99e746faba70bacf12ec765abe4c12e4dFundOrderData1.go | 0.838779 | 0.568895 | d7d67dc99e746faba70bacf12ec765abe4c12e4dFundOrderData1.go | starcoder |
package config
import "github.com/pkg/errors"
// WorkflowDefinition is the consumer friendly data structure that hosts the loaded workflow definition
type WorkflowDefinition struct {
Flowit Flowit
}
// Flowit is the consumer friendly data structure that hosts the loaded workflow definition main body
type Flowit struct {
Version string
Config Config
Variables Variables
StateMachines []StateMachine
Workflows []Workflow
}
// Config is the consumer friendly data structure that hosts the loaded workflow definition configuration
type Config struct {
CheckpointExecution bool
Shell string
}
// Variables is the consumer friendly data structure that hosts the loaded workflow definition variables
type Variables map[string]interface{}
// StateMachine is the consumer friendly data structure that hosts
// the loaded workflow definition state machine
type StateMachine struct {
ID string
Stages []string
InitialStage string
FinalStages []string
Transitions []StateMachineTransition
}
// StateMachineTransition is the consumer friendly data structure that hosts
// the loaded workflow definition state machine transition
type StateMachineTransition struct {
From []string
To []string
}
// Stages is the consumer friendly data structure that hosts
// the loaded workflow definition tag stages
type Stages map[string][]string
// Workflow is the consumer friendly data structure that hosts
// the loaded workflow definition workflow
type Workflow struct {
ID string
StateMachine string
Stages []Stage
}
// Stage is the consumer friendly data structure that hosts
// the loaded workflow definition workflow stage
type Stage struct {
ID string
Args []string
Conditions []string
Actions []string
}
// Transition is the consumer friendly data structure that hosts
// the loaded workflow definition branch transition
type Transition struct {
From string
To []string
}
// StateMachine looks up the state machine with the given ID and returns it,
// or an error if no state machine with that ID exists.
func (wd WorkflowDefinition) StateMachine(stateMachineID string) (StateMachine, error) {
	for _, sm := range wd.Flowit.StateMachines {
		if sm.ID != stateMachineID {
			continue
		}
		return sm, nil
	}
	return StateMachine{}, errors.New("Invalid state machine ID: " + stateMachineID)
}
// Workflow looks up the workflow with the given ID and returns it, or an
// error if no workflow with that ID exists.
func (wd WorkflowDefinition) Workflow(workflowID string) (Workflow, error) {
	for _, w := range wd.Flowit.Workflows {
		if w.ID != workflowID {
			continue
		}
		return w, nil
	}
	return Workflow{}, errors.New("Invalid workflow ID: " + workflowID)
}
// Stages returns the stages of the workflow with the given ID, or an error if
// no workflow with that ID exists.
func (wd WorkflowDefinition) Stages(workflowID string) ([]Stage, error) {
	for i := range wd.Flowit.Workflows {
		if wd.Flowit.Workflows[i].ID == workflowID {
			return wd.Flowit.Workflows[i].Stages, nil
		}
	}
	return nil, errors.New("Invalid workflow ID: " + workflowID)
}
// Stage returns the loaded workflow definition stage for the specified workflowID and stage
// The two error cases are distinct: an unknown workflow ID is reported
// separately from an unknown stage ID within a known workflow.
func (wd WorkflowDefinition) Stage(workflowID, stageID string) (Stage, error) {
	for _, workflow := range wd.Flowit.Workflows {
		if workflow.ID == workflowID {
			for _, stage := range workflow.Stages {
				if stage.ID == stageID {
					return stage, nil
				}
			}
			return Stage{}, errors.New("Invalid stage ID: " + stageID)
		}
	}
	return Stage{}, errors.New("Invalid workflow ID: " + workflowID)
} | internal/config/model.go | 0.707304 | 0.465448 | model.go | starcoder |
package testdata
// GetPaymentResponse example
const GetPaymentResponse = `{
"resource": "payment",
"id": "tr_WDqYK6vllg",
"mode": "test",
"createdAt": "2018-03-20T13:13:37+00:00",
"amount": {
"value": "10.00",
"currency": "EUR"
},
"description": "Order #12345",
"method": null,
"metadata": {
"order_id": "12345"
},
"status": "open",
"isCancelable": false,
"locale": "nl_NL",
"restrictPaymentMethodsToCountry": "NL",
"expiresAt": "2018-03-20T13:28:37+00:00",
"details": null,
"profileId": "pfl_QkEhN94Ba",
"sequenceType": "oneoff",
"redirectUrl": "https://webshop.example.org/order/12345/",
"webhookUrl": "https://webshop.example.org/payments/webhook/",
"_links": {
"self": {
"href": "https://api.mollie.com/v2/payments/tr_WDqYK6vllg",
"type": "application/hal+json"
},
"checkout": {
"href": "https://www.mollie.com/payscreen/select-method/WDqYK6vllg",
"type": "text/html"
},
"dashboard": {
"href": "https://www.mollie.com/dashboard/org_12345678/payments/tr_WDqYK6vllg",
"type": "application/json"
},
"documentation": {
"href": "https://docs.mollie.com/reference/v2/payments-api/get-payment",
"type": "text/html"
}
}
}`
// CancelPaymentResponse example
const CancelPaymentResponse = `{
"resource": "payment",
"id": "tr_WDqYK6vllg",
"mode": "live",
"createdAt": "2018-03-19T10:18:33+00:00",
"amount": {
"value": "35.07",
"currency": "EUR"
},
"description": "Order 33",
"method": "banktransfer",
"metadata": null,
"status": "canceled",
"canceledAt": "2018-03-19T10:19:15+00:00",
"details": {
"bankName": "Stichting Mollie Payments",
"bankAccount": "NL53ABNA0627535577",
"bankBic": "ABNANL2A",
"transferReference": "RF12-3456-7890-1234"
},
"profileId": "pfl_QkEhN94Ba",
"sequenceType": "oneoff",
"redirectUrl": "https://webshop.example.org/order/33/",
"_links": {
"self": {
"href": "https://api.mollie.com/v2/payments/tr_WDqYK6vllg",
"type": "application/hal+json"
},
"documentation": {
"href": "https://docs.mollie.com/reference/v2/payments-api/cancel-payment",
"type": "text/html"
}
}
}`
// UpdatePaymentResponse example
const UpdatePaymentResponse = `{
"resource": "payment",
"id": "tr_7UhSN1zuXS",
"mode": "test",
"createdAt": "2018-03-20T09:13:37+00:00",
"amount": {
"value": "10.00",
"currency": "EUR"
},
"description": "Order #98765",
"method": null,
"metadata": {
"order_id": "98765"
},
"status": "open",
"isCancelable": false,
"expiresAt": "2018-03-20T09:28:37+00:00",
"details": null,
"profileId": "pfl_QkEhN94Ba",
"sequenceType": "oneoff",
"redirectUrl": "https://example.org/webshop/order/98765/",
"webhookUrl": "https://example.org/webshop/payments/webhook/",
"_links": {
"self": {
"href": "https://api.mollie.com/v2/payments/tr_7UhSN1zuXS",
"type": "application/json"
},
"checkout": {
"href": "https://www.mollie.com/payscreen/select-method/7UhSN1zuXS",
"type": "text/html"
},
"documentation": {
"href": "https://docs.mollie.com/reference/v2/payments-api/update-payment",
"type": "text/html"
}
}
}
`
// ListPaymentsResponse example
const ListPaymentsResponse = `{
"count": 5,
"_embedded": {
"payments": [
{
"resource": "payment",
"id": "tr_7UhSN1zuXS",
"mode": "test",
"createdAt": "2018-02-12T11:58:35.0Z",
"expiresAt": "2018-02-12T12:13:35.0Z",
"status": "open",
"isCancelable": false,
"amount": {
"value": "75.00",
"currency": "GBP"
},
"description": "Order #12345",
"method": "ideal",
"metadata": null,
"details": null,
"profileId": "pfl_QkEhN94Ba",
"redirectUrl": "https://webshop.example.org/order/12345/",
"_links": {
"checkout": {
"href": "https://www.mollie.com/paymentscreen/issuer/select/ideal/7UhSN1zuXS",
"type": "text/html"
},
"self": {
"href": "https://api.mollie.com/v2/payments/tr_7UhSN1zuXS",
"type": "application/hal+json"
}
}
}
]
},
"_links": {
"self": {
"href": "https://api.mollie.com/v2/payments?limit=5",
"type": "application/hal+json"
},
"previous": null,
"next": {
"href": "https://api.mollie.com/v2/payments?from=tr_SDkzMggpvx&limit=5",
"type": "application/hal+json"
},
"documentation": {
"href": "https://docs.mollie.com/reference/v2/payments-api/list-payments",
"type": "text/html"
}
}
}` | testdata/payments.go | 0.748995 | 0.451508 | payments.go | starcoder |
package bigfloat
import (
"math/big"
)
// zero is the shared reference value used by IsZero comparisons.
var zero = big.NewFloat(0)
// Comparison results mirroring big.Float.Cmp; cmpNil is the sentinel that Cmp
// returns when either operand is nil.
const (
	cmpLt  int = -1
	cmpEq  int = 0
	cmpGt  int = 1
	cmpNil int = -42
)
// Add returns the result of adding the values of params x and y.
// Notes:
// - If x == nil && y == nil, returns nil.
// - If x != nil && y == nil, returns x.
// - If x == nil && y != nil, returns y.
func Add(x, y *big.Float) *big.Float {
	if x == nil {
		// Covers both (nil, nil) -> nil and (nil, y) -> y.
		return y
	}
	if y == nil {
		return x
	}
	return newBigFloat().Add(x, y)
}
// Sub returns the result of subtracting the value of param y from the value of param x.
// Notes:
// - If x == nil, returns nil.
// - If x != nil && y == nil, returns x.
func Sub(x, y *big.Float) *big.Float {
	if x == nil {
		return nil
	}
	if y == nil {
		return x
	}
	return newBigFloat().Sub(x, y)
}
// Mul returns the result of multiplying the values of params x and y.
// Notes:
// - If x == nil, returns nil.
// - If x != nil && y == nil, returns x.
func Mul(x, y *big.Float) *big.Float {
	if x == nil {
		return nil
	}
	if y == nil {
		return x
	}
	return newBigFloat().Mul(x, y)
}
// Div returns the result of dividing the value of x by the value of y.
// Notes:
// - If x == nil, returns nil.
// - If x != nil && y == nil, returns x.
func Div(x, y *big.Float) *big.Float {
	if x == nil {
		return nil
	}
	if y == nil {
		return x
	}
	return newBigFloat().Quo(x, y)
}
// Cmp wraps the Float.Cmp() function, returning the same values as the function it wraps:
// - -1 if x < y
// - 0 if x == y
// - 1 if x > y
// Note: returns cmpNil (-42) if either x or y == nil.
func Cmp(x, y *big.Float) int {
	if x == nil || y == nil {
		return cmpNil
	}
	return x.Cmp(y)
}
// Eq returns true if x == y.
// Returns false if either x or y == nil.
// (A nil operand makes Cmp return cmpNil, which matches none of the
// comparison constants, so every predicate below reports false for nil.)
func Eq(x, y *big.Float) bool {
	return Cmp(x, y) == cmpEq
}
// Lt returns true if x < y.
// Returns false if either x or y == nil.
func Lt(x, y *big.Float) bool {
	return Cmp(x, y) == cmpLt
}
// Lte returns true if x <= y.
// Returns false if either x or y == nil.
func Lte(x, y *big.Float) bool {
	return Lt(x, y) || Eq(x, y)
}
// Gt returns true if x > y.
// Returns false if either x or y == nil.
func Gt(x, y *big.Float) bool {
	return Cmp(x, y) == cmpGt
}
// Gte returns true if x >= y.
// Returns false if either x or y == nil.
func Gte(x, y *big.Float) bool {
	return Gt(x, y) || Eq(x, y)
}
// IsZero returns true if n == 0.
// Returns false if n == nil (the comment's "nil" result is not possible:
// Eq yields a bool).
func IsZero(n *big.Float) bool {
	return Eq(zero, n)
}
// FromUint64 returns a new *big.Float with its value set to n.
func FromUint64(n uint64) *big.Float {
	return newBigFloat().SetUint64(n)
}
// FromBigInt returns a new *big.Float with its value set to the value of n.
func FromBigInt(n *big.Int) *big.Float {
	return newBigFloat().SetInt(n)
}
// FromFloat32 returns a new *big.Float with its value set to f.
func FromFloat32(f float32) *big.Float {
	return FromFloat64(float64(f))
}
// FromFloat64 returns a new *big.Float with its value set to f.
func FromFloat64(f float64) *big.Float {
	return big.NewFloat(f)
}
// newBigFloat allocates a fresh zero-valued big.Float for arithmetic results.
func newBigFloat() *big.Float {
	return new(big.Float)
} | bigfloat/bigfloat.go | 0.868381 | 0.741814 | bigfloat.go | starcoder |
package main
import "fmt"
// SimulateCovidSpread generates one Board per simulated day (plus the initial
// day) and returns them in chronological order.
func SimulateCovidSpread(initialBoard *Board, statePeriods Periods, lambda, gamma []float64, numDays int) []*Board {
	history := make([]*Board, numDays+1)
	history[0] = initialBoard
	for day := 1; day <= numDays; day++ {
		// Each day is derived from the previous day's board via the
		// disease-spread rules.
		history[day] = UpdateBoard(history[day-1], statePeriods, lambda, gamma)
	}
	return history
}
// UpdateBoard generates a new board based on the disease transmission rules
// The new board is a deep copy; all reads go through currentBoard so every
// cell is updated from the same (previous-day) snapshot.
func UpdateBoard(currentBoard *Board, statePeriods Periods, lambda, gamma []float64) *Board {
	newBoard := CopyBoard(currentBoard)
	for i, cell := range newBoard.cells {
		cell.UpdateCell(currentBoard, statePeriods, i, lambda, gamma)
	}
	return newBoard
}
// UpdateCell updates a cell(region)
// Updates the S, I and R class proportions of cell i in that order.
func (cell *Cell) UpdateCell(currentBoard *Board, statePeriods Periods, i int, lambda, gamma []float64) {
	cell.UpdateCellS(currentBoard, statePeriods, i, lambda)
	cell.UpdateCellI(currentBoard, statePeriods, i, lambda, gamma)
	cell.UpdateCellR(currentBoard, statePeriods, i, gamma)
}
// UpdateCellS updates the % susceptibles in a cell(region)
// S(t+1) = S(t) + (last recovered class re-entering S) - newly infected
// (internal + external terms).
func (cell *Cell) UpdateCellS(currentBoard *Board, statePeriods Periods, i int, lambda []float64) {
	term1 := currentBoard.cells[i].classProportions[0]
	T := statePeriods.Ti + statePeriods.Tp + statePeriods.Tl + statePeriods.Tr
	term2 := currentBoard.cells[i].classProportions[T]
	cell.classProportions[0] = term1 + term2 - InternalTerm(currentBoard, statePeriods, i, lambda) - ExternalTerm(currentBoard, statePeriods, i, lambda)
}
// UpdateCellI updates the % infected persons in a cell(region)
// Class 1 receives the newly infected; classes 2..T advance from the previous
// class, attenuated by the cure rate gamma.
func (cell *Cell) UpdateCellI(currentBoard *Board, statePeriods Periods, i int, lambda, gamma []float64) {
	cell.classProportions[1] = InternalTerm(currentBoard, statePeriods, i, lambda) + ExternalTerm(currentBoard, statePeriods, i, lambda)
	// NOTE(review): T here is Ti+Tp+Ti, but the PrintBoards comment defines
	// the infectious span as Ti+Tp+Tl — the second Ti looks like a typo for
	// Tl; confirm against the Periods definition.
	T := statePeriods.Ti + statePeriods.Tp + statePeriods.Ti
	for q := 2; q <= T; q++ {
		cell.classProportions[q] = (1 - gamma[q-1]) * currentBoard.cells[i].classProportions[q-1]
	}
}
// UpdateCellR updates the % recovered persons in a cell(region)
// First recovered class = last infectious class + newly cured; later
// recovered classes simply advance by one day.
func (cell *Cell) UpdateCellR(currentBoard *Board, statePeriods Periods, i int, gamma []float64) {
	// NOTE(review): same Ti+Tp+Ti expression as in UpdateCellI — see note there.
	T := statePeriods.Ti + statePeriods.Tp + statePeriods.Ti
	cell.classProportions[T+1] = currentBoard.cells[i].classProportions[T] + InfectiveCuredTerm(currentBoard, statePeriods, i, gamma)
	for q := T + 2; q <= T+statePeriods.Tr; q++ {
		cell.classProportions[q] = currentBoard.cells[i].classProportions[q-1]
	}
}
//InternalTerm calculates term in updation of % Susceptible that relates to infection from within the region
// Computes sum over infectious classes q of lambda[q] * roi * S_i * I_{i,q},
// where roi is the susceptible density of region i (persons per 1000 area units).
func InternalTerm(currentBoard *Board, statePeriods Periods, i int, lambda []float64) float64 {
	var v float64
	// NOTE(review): Ti+Tp+Ti — PrintBoards documents the infectious span as
	// Ti+Tp+Tl; the second Ti may be a typo for Tl. Confirm.
	T := statePeriods.Ti + statePeriods.Tp + statePeriods.Ti
	roi := currentBoard.cells[i].classProportions[0] * float64(currentBoard.cells[i].population) / (currentBoard.cells[i].area * 1000.0)
	for q := 1; q <= T; q++ {
		v = v + lambda[q]*roi*currentBoard.cells[i].classProportions[0]*currentBoard.cells[i].classProportions[q]
	}
	return v
}
//ExternalTerm calculates term in updation of proportion of Susceptible infected due others regions
// Same structure as InternalTerm but sums over all regions j, weighted by the
// reciprocity (coupling) coefficients Ci[j] of region i.
func ExternalTerm(currentBoard *Board, statePeriods Periods, i int, lambda []float64) float64 {
	var v float64
	T := statePeriods.Ti + statePeriods.Tp + statePeriods.Ti
	roi := currentBoard.cells[i].classProportions[0] * float64(currentBoard.cells[i].population) / (currentBoard.cells[i].area * 1000.0)
	Ci := currentBoard.cells[i].reciprocity
	for j := 0; j < len(currentBoard.cells); j++ {
		for q := 1; q <= T; q++ {
			v = v + Ci[j]*lambda[q]*roi*currentBoard.cells[i].classProportions[0]*currentBoard.cells[j].classProportions[q]
		}
	}
	return v
}
// InfectiveCuredTerm calculates the term in the update of R that accounts for
// newly cured persons: the sum over infectious classes q of gamma[q] times the
// proportion of region i currently in class q.
func InfectiveCuredTerm(currentBoard *Board, statePeriods Periods, i int, gamma []float64) float64 {
	// NOTE(review): T = Ti+Tp+Ti mirrors the sibling helpers, but PrintBoards
	// documents the infectious span as Ti+Tp+Tl; confirm whether the second Ti
	// should be Tl. Also confirm the loop bound: siblings iterate q <= T,
	// this one stops at T-1.
	T := statePeriods.Ti + statePeriods.Tp + statePeriods.Ti
	var v float64
	for q := 1; q <= T-1; q++ {
		// Accumulate each class's cured fraction. The original assignment
		// (v = gamma[q]*...) discarded all but the last term of the sum,
		// unlike the accumulation pattern used in InternalTerm/ExternalTerm.
		v += gamma[q] * currentBoard.cells[i].classProportions[q]
	}
	return v
}
// CopyBoard copies the current board
// Produces a deep copy: every cell (and the slices inside it) is duplicated.
func CopyBoard(currentBoard *Board) *Board {
	var newBoard Board
	newBoard.cells = make([]*Cell, len(currentBoard.cells))
	for i := range newBoard.cells {
		newBoard.cells[i] = CopyCell(currentBoard.cells[i])
	}
	return &newBoard
}
// CopyCell copies the current cell
// classProportions and reciprocity are copied into fresh slices; population
// and area are plain values copied by assignment.
func CopyCell(currentCell *Cell) *Cell {
	var newCell Cell
	newCell.classProportions = make([]float64, len(currentCell.classProportions))
	newCell.reciprocity = make([]float64, len(currentCell.reciprocity))
	copy(newCell.classProportions, currentCell.classProportions)
	newCell.population = currentCell.population
	newCell.area = currentCell.area
	copy(newCell.reciprocity, currentCell.reciprocity)
	return &newCell
}
//PrintBoards prints S, I and R of all regions(Cells) on the board for all days- one board for each day
// q is total states till the end of infection period- one state for each day. q= Ti+Tp+Tl
func PrintBoards(boards []*Board, q int) {
	for i := range boards {
		PrintBoard(boards[i], q)
		fmt.Printf("%v the day\n", i)
	}
}
//PrintBoard prints cells on a Board
// One line per cell: S (class 0), total I (classes 1..q), total R (classes q+1..).
func PrintBoard(board *Board, q int) {
	for _, cell := range board.cells {
		fmt.Printf("%.2f ", cell.classProportions[0])
		fmt.Printf("%.2f ", SumSlice(cell.classProportions[1:q+1]))
		fmt.Printf("%.2f ", SumSlice(cell.classProportions[q+1:]))
		fmt.Printf("\n")
	}
}
//ClassSum returns the percentage of total persons in different classProportions namely S, I and R for each region/cell
//It takes cell and q=Ti+Tp+Tl, time/days when the infection phase ends
func ClassSum(cell *Cell, q int) (float64, float64, float64) {
	var s, i, r float64
	s = cell.classProportions[0]
	i = SumSlice(cell.classProportions[1 : q+1])
	r = SumSlice(cell.classProportions[q+1:])
	return s, i, r
}
//SumSlice determines the sum of elements in a slice
func SumSlice(a []float64) float64 {
	var v float64
	for i := range a {
		v = v + a[i]
	}
	return v
} | covid1/functions.go | 0.684159 | 0.601564 | functions.go | starcoder |
package sctp
import (
"github.com/pkg/errors"
)
/*
chunkHeartbeat represents an SCTP Chunk of type HEARTBEAT
An endpoint should send this chunk to its peer endpoint to probe the
reachability of a particular destination transport address defined in
the present association.
The parameter field contains the Heartbeat Information, which is a
variable-length opaque data structure understood only by the sender.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type = 4 | Chunk Flags | Heartbeat Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Heartbeat Information TLV (Variable-Length) |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Defined as a variable-length parameter using the format described
in Section 3.2.1, i.e.:
Variable Parameters Status Type Value
-------------------------------------------------------------
heartbeat Info Mandatory 1
*/
// chunkHeartbeat holds the parsed chunk header plus the heartbeat's parameter
// list (which, per unmarshal below, must contain exactly one heartbeatInfo param).
type chunkHeartbeat struct {
	chunkHeader
	params []param
}
// unmarshal parses a HEARTBEAT chunk from raw bytes. It validates the chunk
// type, that the payload is long enough to hold a parameter, and that the
// single parameter is of type heartbeatInfo before appending it to h.params.
func (h *chunkHeartbeat) unmarshal(raw []byte) error {
	if err := h.chunkHeader.unmarshal(raw); err != nil {
		return err
	} else if h.typ != ctHeartbeat {
		return errors.Errorf("ChunkType is not of type HEARTBEAT, actually is %s", h.typ.String())
	}
	if len(raw) <= chunkHeaderSize {
		return errors.Errorf("Heartbeat is not long enough to contain Heartbeat Info %d", len(raw))
	}
	pType, err := parseParamType(raw[chunkHeaderSize:])
	if err != nil {
		return errors.Wrap(err, "failed to parse param type")
	}
	if pType != heartbeatInfo {
		return errors.Errorf("Heartbeat should only have HEARTBEAT param, instead have %s", pType.String())
	}
	p, err := buildParam(pType, raw[chunkHeaderSize:])
	if err != nil {
		return errors.Wrap(err, "Failed unmarshalling param in Heartbeat Chunk")
	}
	h.params = append(h.params, p)
	return nil
}
// Marshal is not implemented for heartbeat chunks and always returns an error.
func (h *chunkHeartbeat) Marshal() ([]byte, error) {
	return nil, errors.Errorf("Unimplemented")
}
// check always reports the chunk as valid (no abort, no error).
func (h *chunkHeartbeat) check() (abort bool, err error) {
	return false, nil
} | trunk/3rdparty/srs-bench/vendor/github.com/pion/sctp/chunk_heartbeat.go | 0.564819 | 0.40031 | chunk_heartbeat.go | starcoder |
package vision
import (
"image"
"math"
"sync"
)
// Grad computes the grad and returns its magnitude and angle.
// Applies a 3x3 Sobel operator to each pixel (rows processed concurrently,
// one goroutine per row) and writes the rescaled magnitude and angle into two
// grayscale images of the same bounds.
func Grad(gray *image.Gray) (mag, ang *image.Gray) {
	// Sobel kernels: dx responds to horizontal gradients, dy to vertical.
	dx := [][]float64{{1, 0, -1}, {2, 0, -2}, {1, 0, -1}}
	dy := [][]float64{{1, 2, 1}, {0, 0, 0}, {-1, -2, -1}}
	mb, nb := gray.Bounds().Dy(), gray.Bounds().Dx()
	//Extend image signal at borders
	// signal reads pixel (x, y), clamping out-of-bounds coordinates to the
	// nearest edge/corner pixel (replicate-border extension).
	signal := func(x, y int) float64 {
		if x >= 0 && x <= nb-1 && y >= 0 && y <= mb-1 {
			return float64(gray.Pix[y*nb+x]) //Inside
		} else if x < 0 && y >= 0 && y <= mb-1 {
			return float64(gray.Pix[y*nb]) //Left
		} else if y < 0 && x >= 0 && x <= nb-1 {
			return float64(gray.Pix[x]) //Top
		} else if x > nb-1 && y >= 0 && y <= mb-1 {
			return float64(gray.Pix[y*nb+(nb-1)]) //Right
		} else if y > mb-1 && x >= 0 && x <= nb-1 {
			return float64(gray.Pix[(mb-1)*nb+x]) //Bottom
		} else if x < 0 && y > mb-1 {
			return float64(gray.Pix[(mb-1)*nb]) //Bottom left corner
		} else if x < 0 && y < 0 {
			return float64(gray.Pix[0]) //Top left corner
		} else if x > nb-1 && y < 0 {
			return float64(gray.Pix[nb-1]) //Top right corner
		} else {
			return float64(gray.Pix[(mb-1)*nb+(nb-1)]) //Bottom right corner
		}
	}
	// sobel maps a kernel offset (x, y) in [-1,1]x[-1,1] to the pair of
	// kernel coefficients (dx, dy).
	// NOTE(review): the two out-of-range guards below are unreachable —
	// conv only calls sobel with offsets in [-1,1], so m,n are always in
	// [0,2]. Returning (1, 1) rather than (0, 0) for out-of-kernel taps
	// would be wrong for a convolution; confirm this dead code is intended.
	sobel := func(x, y int) (sobelX float64, sobelY float64) {
		m, n := -y+1, -x+1
		if n < 0 || m < 0 {
			return 1, 1
		}
		if n >= 3 || m >= 3 {
			return 1, 1
		}
		return dx[m][n], dy[m][n]
	}
	// conv evaluates both 3x3 kernel responses centered at (x, y).
	conv := func(x, y int) (convX float64, convY float64) {
		y0, y1 := y-1, y+1
		x0, x1 := x-1, x+1
		convSumX := 0.
		convSumY := 0.
		for j := y0; j <= y1; j++ {
			for k := x0; k <= x1; k++ {
				h1, h2 := sobel(x-k, y-j)
				s := signal(k, j)
				convSumX += s * h1
				convSumY += s * h2
			}
		}
		return convSumX, convSumY
	}
	mag = image.NewGray(gray.Bounds())
	ang = image.NewGray(gray.Bounds())
	wg := sync.WaitGroup{}
	for y := 0; y < mb; y++ {
		//Proccess lines concurrently
		wg.Add(1)
		go func(y int, mag, ang *image.Gray, wg *sync.WaitGroup) {
			for x := 0; x < nb; x++ {
				convX, convY := conv(x, y)
				// Magnitude rescaled from an assumed [0, 1530] range and the
				// atan2 angle from [-pi, pi], both into [0, 255].
				mag.Pix[y*nb+x] = uint8(rescale(math.Hypot(convX, convY), 0, 1530, 0, 255))
				ang.Pix[y*nb+x] = uint8(rescale(math.Atan2(convY, convX), -math.Pi, math.Pi, 0, 255))
			}
			wg.Done()
		}(y, mag, ang, &wg)
	}
	wg.Wait()
	return mag, ang
} | grad.go | 0.703549 | 0.713706 | grad.go | starcoder |
package chassis
import (
"fmt"
"sync"
)
// Attitude represents chassis attitude information.
type Attitude struct {
	// m guards pitch, roll and yaw for concurrent access.
	m sync.RWMutex
	pitch float64
	roll float64
	yaw float64
}
// NewAttitude returns a new Attitude instance with the given pitch, roll and
// yaw values (in degrees).
func NewAttitude(pitch, roll, yaw float64) *Attitude {
	return &Attitude{
		sync.RWMutex{},
		pitch,
		roll,
		yaw,
	}
}
// NewAttitudeFromData returns a new Attitude instance based on the given data
// (which usually comes from push events) and a nil error on success and a
// non-nil error on failure.
func NewAttitudeFromData(data string) (*Attitude, error) {
	// Start from a zeroed attitude and delegate parsing to UpdateFromData;
	// any error it reports is propagated to the caller.
	a := &Attitude{
		sync.RWMutex{},
		0.0,
		0.0,
		0.0,
	}
	err := a.UpdateFromData(data)
	if err != nil {
		return nil, err
	}
	return a, nil
}
// Update updates the Attitude instance with the given pitch, roll and yaw angle
// information (in degrees).
func (a *Attitude) Update(pitch, roll, yaw float64) {
	a.m.Lock()
	a.pitch = pitch
	a.roll = roll
	a.yaw = yaw
	a.m.Unlock()
}
// UpdateFromData updates the Attitude based on the given data (which usually
// comes from push events), expected as three space-separated floats
// "pitch roll yaw" (in degrees). Returns a nil error on success and a
// non-nil error on failure.
func (a *Attitude) UpdateFromData(data string) error {
	var pitch, roll, yaw float64
	n, err := fmt.Sscanf(data, "%f %f %f", &pitch, &roll, &yaw)
	if err != nil {
		// The original discarded this fmt.Errorf result, so parse failures
		// were silently ignored and the method always returned nil.
		return fmt.Errorf("error parsing data: %w", err)
	}
	if n != 3 {
		// Defensive: Sscanf returns a non-nil error when fewer than three
		// items match, so this branch should be unreachable. (The original
		// also wrapped a nil err here with %w, which is invalid.)
		return fmt.Errorf("unexpected number of entries in data: %d", n)
	}
	a.m.Lock()
	a.pitch = pitch
	a.roll = roll
	a.yaw = yaw
	a.m.Unlock()
	return nil
}
// Pitch returns the attitude instance pitch information (in degrees).
// The read lock guards against concurrent Update/UpdateFromData calls.
func (a *Attitude) Pitch() float64 {
	a.m.RLock()
	defer a.m.RUnlock()
	return a.pitch
}
// Roll returns the attitude instance roll information (in degrees).
func (a *Attitude) Roll() float64 {
	a.m.RLock()
	defer a.m.RUnlock()
	return a.roll
}
// Yaw returns the attitude instance yaw information (in degrees).
func (a *Attitude) Yaw() float64 {
	a.m.RLock()
	defer a.m.RUnlock()
	return a.yaw
} | sdk/modules/chassis/attitude.go | 0.766206 | 0.538194 | attitude.go | starcoder |
package p381
import (
"math/rand"
)
/**
Design a data structure that supports all following operations in average O(1) time.
Note: Duplicate elements are allowed.
insert(val): Inserts an item val to the collection.
remove(val): Removes an item val from the collection if present.
getRandom: Returns a random element from current collection of elements. The probability of each element being returned is linearly related to the number of same value the collection contains.
*/
// valLoc is one entry of the flat vector: the stored value plus the index of
// this entry's slot inside locs[val].locs (a back-pointer used by Remove).
type valLoc struct {
	val int
	loc int
}
// locsLen tracks, for one value, the vec positions of all its copies.
// Invariant: size == len(locs); kept explicitly so truncation stays simple.
type locsLen struct {
	locs []int
	size int
}
// RandomizedCollection stores duplicates in a flat vector (for O(1) uniform
// sampling) plus a per-value index of vector positions (for O(1) removal via
// swap-with-last).
type RandomizedCollection struct {
	vec []*valLoc
	locs map[int]*locsLen
}
/** Initialize your data structure here. */
func Constructor() RandomizedCollection {
	return RandomizedCollection{
		vec: make([]*valLoc, 0),
		locs: make(map[int]*locsLen),
	}
}
/** Inserts a value to the collection. Returns true if the collection did not already contain the specified element. */
func (this *RandomizedCollection) Insert(val int) bool {
	v, ok := this.locs[val]
	if !ok {
		this.locs[val] = &locsLen{locs: make([]int, 0), size: 0}
		v = this.locs[val]
	}
	// Record the new copy's position at the tail of vec, and its slot index
	// (v.size, pre-increment) within the per-value location list.
	tail := len(this.vec)
	v.locs = append(v.locs, tail)
	this.vec = append(this.vec, &valLoc{val: val, loc: v.size})
	v.size++
	return !ok
}
/** Removes a value from the collection. Returns true if the collection contained the specified element. */
func (this *RandomizedCollection) Remove(val int) bool {
	v, ok := this.locs[val]
	if ok {
		// Remove the most recently recorded copy of val.
		v.size--
		vpos := v.locs[v.size]
		v.locs = v.locs[:v.size]
		if v.size == 0 {
			delete(this.locs, val)
		}
		if vpos != len(this.vec)-1 {
			// Fill the hole with the last vec entry and fix up that entry's
			// back-pointer in its own per-value location list.
			tail := this.vec[len(this.vec)-1]
			this.vec[vpos] = tail
			this.locs[tail.val].locs[tail.loc] = vpos
		}
		this.vec = this.vec[:len(this.vec)-1]
	}
	return ok
}
/** Get a random element from the collection. */
func (this *RandomizedCollection) GetRandom() int {
	return this.vec[rand.Intn(len(this.vec))].val
}
/**
 * Your RandomizedCollection object will be instantiated and called as such:
 * obj := Constructor();
 * param_1 := obj.Insert(val);
 * param_2 := obj.Remove(val);
 * param_3 := obj.GetRandom();
 */ | algorithms/p381/381.go | 0.74158 | 0.659652 | 381.go | starcoder |
package scheme
import (
"fmt"
)
var (
builtinSyntaxes = Binding{
"actor": NewSyntax(actorSyntax),
"and": NewSyntax(andSyntax),
"begin": NewSyntax(beginSyntax),
"cond": NewSyntax(condSyntax),
"define": NewSyntax(defineSyntax),
"define-macro": NewSyntax(defineMacroSyntax),
"do": NewSyntax(doSyntax),
"if": NewSyntax(ifSyntax),
"lambda": NewSyntax(lambdaSyntax),
"let": NewSyntax(letSyntax),
"let*": NewSyntax(letStarSyntax),
"letrec": NewSyntax(letrecSyntax),
"or": NewSyntax(orSyntax),
"quote": NewSyntax(quoteSyntax),
"set!": NewSyntax(setSyntax),
}
)
// Syntax represents a special form: an object whose function is applied
// to the raw, unevaluated argument list of the expression it heads.
type Syntax struct {
	ObjectBase
	// function implements the form; it receives the Syntax itself (for
	// error reporting helpers) and the unevaluated arguments.
	function func(*Syntax, Object) Object
}
// NewSyntax wraps function as a Syntax object with no parent scope.
func NewSyntax(function func(*Syntax, Object) Object) *Syntax {
	syntax := &Syntax{function: function}
	syntax.ObjectBase = ObjectBase{parent: nil}
	return syntax
}
// Invoke runs the syntax's implementation on the raw (unevaluated)
// argument list and returns the resulting object.
func (s *Syntax) Invoke(arguments Object) Object {
	return s.function(s, arguments)
}
// String renders the syntax for display, e.g. "#<syntax if>".
func (s *Syntax) String() string {
	return fmt.Sprintf("#<syntax %s>", s.Bounder())
}
// isSyntax reports that this object is a syntax form (overriding the
// ObjectBase default).
func (s *Syntax) isSyntax() bool {
	return true
}
// malformedError raises a syntax error naming this form and the whole
// expression it appears in.
func (s *Syntax) malformedError() {
	syntaxError("malformed %s: %s", s.Bounder(), s.Bounder().Parent())
}
// assertListEqual raises a malformed-form error unless arguments is a
// list of exactly the given length.
func (s *Syntax) assertListEqual(arguments Object, length int) {
	if arguments.isList() && arguments.(*Pair).ListLength() == length {
		return
	}
	s.malformedError()
}
// assertListMinimum raises a malformed-form error unless arguments is a
// list with at least minimum elements.
func (s *Syntax) assertListMinimum(arguments Object, minimum int) {
	if arguments.isList() && arguments.(*Pair).ListLength() >= minimum {
		return
	}
	s.malformedError()
}
// assertListRange raises a malformed-form error unless arguments is a
// list whose length equals one of the values in lengthRange.
func (s *Syntax) assertListRange(arguments Object, lengthRange []int) {
	if !arguments.isList() {
		s.malformedError()
	}
	for _, accepted := range lengthRange {
		if arguments.(*Pair).ListLength() == accepted {
			return
		}
	}
	s.malformedError()
}
// elementsMinimum returns the elements of list (unwrapping an
// application to a plain list first), raising a form-specific syntax
// error unless it is a list with at least minimum members.
func (s *Syntax) elementsMinimum(list Object, minimum int) []Object {
	target := list
	if target.isApplication() {
		target = target.(*Application).toList()
	}
	s.assertListMinimum(target, minimum)
	return target.(*Pair).Elements()
}
// elementsExact returns the elements of list (unwrapping an application
// to a plain list first), raising a form-specific syntax error unless
// it is a list with exactly value members.
func (s *Syntax) elementsExact(list Object, value int) []Object {
	target := list
	if target.isApplication() {
		target = target.(*Application).toList()
	}
	s.assertListEqual(target, value)
	return target.(*Pair).Elements()
}
// evalAll evaluates each object in order and returns the value of the
// last one, or #<undef> when objects is empty.
func evalAll(objects []Object) Object {
	result := undef
	for i := range objects {
		result = objects[i].Eval()
	}
	return result
}
// actorSyntax implements the (actor ...) form. Each clause looks like
// (("message-name" params...) body...): it registers under the message
// name a handler that binds the evaluated message arguments to the
// clause parameters and then evaluates the clause body.
func actorSyntax(s *Syntax, arguments Object) Object {
	elements := s.elementsMinimum(arguments, 0)
	// Insert over the application to override scope
	application := arguments.Parent()
	actor := NewActor(application.Parent())
	application.setParent(actor)
	for _, element := range elements {
		// caseElements/caseArguments are fresh variables each iteration,
		// so every handler closure captures its own clause.
		caseElements := s.elementsMinimum(element, 1)
		caseArguments := s.elementsMinimum(caseElements[0], 1)
		assertObjectType(caseArguments[0], "string")
		actor.functions[caseArguments[0].(*String).text] = func(objects []Object) {
			if len(caseArguments[1:]) != len(objects) {
				runtimeError("invalid message argument length: requires %d, but got %d", len(caseArguments[1:]), len(objects))
			}
			// Bind each message argument (evaluated) to its parameter.
			for index, variable := range caseArguments[1:] {
				actor.tryDefine(variable, objects[index].Eval())
			}
			// Handler body is evaluated for effect; result is discarded.
			evalAll(caseElements[1:])
		}
	}
	return actor
}
// andSyntax implements (and ...): operands are evaluated left to right;
// the first #f value short-circuits to #f, otherwise the value of the
// last operand is returned (#t for an empty form).
func andSyntax(s *Syntax, arguments Object) Object {
	s.assertListMinimum(arguments, 0)
	result := Object(NewBoolean(true))
	for _, operand := range arguments.(*Pair).Elements() {
		result = operand.Eval()
		if result.isBoolean() && !result.(*Boolean).value {
			return NewBoolean(false)
		}
	}
	return result
}
// beginSyntax implements (begin ...): each expression is evaluated in
// order and the last value is returned (#<undef> for an empty body).
func beginSyntax(s *Syntax, arguments Object) Object {
	return evalAll(s.elementsMinimum(arguments, 0))
}
// condSyntax implements (cond (test expr ...) ... [(else expr ...)]).
// Clauses are tried in order: the first whose test does not evaluate to
// #f has its body evaluated, and the last body value is returned. With
// no matching clause the result is #<undef>.
func condSyntax(s *Syntax, arguments Object) Object {
	elements := s.elementsMinimum(arguments, 0)
	if len(elements) == 0 {
		syntaxError("at least one clause is required for cond")
	}
	// First: syntax check — each clause must be an application, and an
	// 'else' clause may only appear as the final clause.
	elseExists := false
	for _, element := range elements {
		if elseExists {
			syntaxError("'else' clause followed by more clauses")
		} else if element.isApplication() && element.(*Application).procedure.isVariable() &&
			element.(*Application).procedure.(*Variable).identifier == "else" {
			elseExists = true
		}
		if element.isNull() || !element.isApplication() {
			syntaxError("bad clause in cond")
		}
	}
	// Second: eval cases
	for _, element := range elements {
		lastResult := undef
		application := element.(*Application)
		isElse := application.procedure.isVariable() && application.procedure.(*Variable).identifier == "else"
		if !isElse {
			lastResult = application.procedure.Eval()
		}
		// first element is 'else' or not '#f'
		if isElse || !lastResult.isBoolean() || lastResult.(*Boolean).value == true {
			// A clause with no body yields the value of its test.
			for _, object := range application.arguments.(*Pair).Elements() {
				lastResult = object.Eval()
			}
			return lastResult
		}
	}
	return undef
}
// defineSyntax implements both definition forms:
//
//	(define variable expression)      — binds the evaluated expression
//	(define (name params...) body...) — binds name to a new closure
//
// It returns the defined name, or raises a syntax error for any other
// shape.
func defineSyntax(s *Syntax, arguments Object) Object {
	elements := s.elementsExact(arguments, 2)
	if elements[0].isVariable() {
		variable := elements[0].(*Variable)
		s.Bounder().define(variable.identifier, elements[1].Eval())
		return NewSymbol(variable.identifier)
	} else if elements[0].isApplication() {
		// Function shorthand: (name params...) heads the definition.
		closure := WrapClosure(arguments)
		defineElements := s.elementsMinimum(elements[0], 1)
		funcName := defineElements[0]
		closure.DefineFunction(s, defineElements[1:], elements[1:])
		if funcName.isVariable() {
			s.Bounder().define(funcName.(*Variable).identifier, closure)
			return funcName
		}
	}
	return syntaxError("%s", s.Bounder().Parent())
}
// defineMacroSyntax implements (define-macro (name args...) body...).
// NOTE(review): only the macro's name is validated and registered here;
// the parameter list and body (elements[1:]) are never attached to the
// new Macro — presumably expansion support is unfinished. Confirm
// before relying on this form.
func defineMacroSyntax(s *Syntax, arguments Object) Object {
	elements := s.elementsMinimum(arguments, 2)
	macroElements := s.elementsMinimum(elements[0], 1)
	assertObjectType(macroElements[0], "variable")
	macro := NewMacro()
	s.Bounder().define(macroElements[0].(*Variable).identifier, macro)
	return undef
}
// doSyntax implements the iteration form
//
//	(do ((var init step) ...) (test result...) body...)
//
// Each variable is bound to its evaluated init; then, while test
// evaluates to #f, the body is run for effect and every variable with a
// step expression is updated. When test is true, the result expressions
// are evaluated and the last value is returned (the test's own value if
// there are none).
//
// Fixes relative to the previous revision: the unreachable
// `return undef` after the infinite loop is removed (flagged by go
// vet), and all step expressions are now evaluated BEFORE any variable
// is rebound — Scheme's do updates its variables simultaneously, so a
// step may refer to another variable's previous value (R7RS 4.2.4; this
// mirrors the collect-then-bind pattern already used by letSyntax).
func doSyntax(s *Syntax, arguments Object) Object {
	closure := WrapClosure(arguments.Parent())
	// Parse the iterator specs and bind each variable to its init value.
	elements := s.elementsMinimum(arguments, 2)
	iteratorBodies := s.elementsMinimum(elements[0], 0)
	for _, iteratorBody := range iteratorBodies {
		iteratorElements := s.elementsMinimum(iteratorBody, 2)
		if len(iteratorElements) > 3 {
			compileError("bad update expr in %s: %s", s.Bounder(), s.Bounder().Parent())
		}
		closure.tryDefine(iteratorElements[0], iteratorElements[1].Eval())
	}
	// eval test ->
	//   true:  eval result expressions and return the last value
	//   false: eval body, then apply the step updates
	testElements := s.elementsMinimum(elements[1], 1)
	for {
		testResult := testElements[0].Eval()
		if !testResult.isBoolean() || testResult.(*Boolean).value == true {
			for _, element := range testElements[1:] {
				testResult = element.Eval()
			}
			return testResult
		}
		// eval continueBody (for effect only)
		evalAll(elements[2:])
		// Evaluate every step expression before rebinding any variable,
		// so steps see the previous iteration's values.
		variables := make([]Object, 0, len(iteratorBodies))
		steps := make([]Object, 0, len(iteratorBodies))
		for _, iteratorBody := range iteratorBodies {
			iteratorElements := s.elementsMinimum(iteratorBody, 2)
			if len(iteratorElements) == 3 {
				variables = append(variables, iteratorElements[0])
				steps = append(steps, iteratorElements[2].Eval())
			}
		}
		for index, variable := range variables {
			closure.tryDefine(variable, steps[index])
		}
	}
}
// ifSyntax implements (if test then [else]): every value except #f
// counts as true; a missing else branch yields #<undef>.
func ifSyntax(s *Syntax, arguments Object) Object {
	s.assertListRange(arguments, []int{2, 3})
	elements := arguments.(*Pair).Elements()
	condition := elements[0].Eval()
	isFalse := condition.isBoolean() && !condition.(*Boolean).value
	if !isFalse {
		return elements[1].Eval()
	}
	if len(elements) == 3 {
		return elements[2].Eval()
	}
	return undef
}
// lambdaSyntax implements (lambda (params...) body...): it builds a
// closure over the enclosing scope with the given parameter list and
// body expressions.
func lambdaSyntax(s *Syntax, arguments Object) Object {
	closure := WrapClosure(arguments.Parent())
	elements := s.elementsMinimum(arguments, 1)
	parameters := s.elementsMinimum(elements[0], 0)
	closure.DefineFunction(s, parameters, elements[1:])
	return closure
}
// letSyntax implements (let ((var init) ...) body...): every init
// expression is evaluated in the enclosing scope before any variable is
// bound, then the body runs inside the new scope.
func letSyntax(s *Syntax, arguments Object) Object {
	closure := WrapClosure(arguments.Parent())
	elements := s.elementsMinimum(arguments, 1)
	bindings := s.elementsMinimum(elements[0], 0)
	names := make([]Object, 0, len(bindings))
	values := make([]Object, 0, len(bindings))
	for _, binding := range bindings {
		pair := s.elementsExact(binding, 2)
		// Init expressions must see the outer scope, not the new bindings.
		pair[1].setParent(closure.Parent())
		names = append(names, pair[0])
		values = append(values, pair[1].Eval())
	}
	for index, name := range names {
		closure.tryDefine(name, values[index])
	}
	// eval body
	return evalAll(elements[1:])
}
// letStarSyntax implements (let* ((var init) ...) body...): bindings
// are created sequentially, so each init expression may refer to the
// variables bound before it.
func letStarSyntax(s *Syntax, arguments Object) Object {
	closure := WrapClosure(arguments.Parent())
	elements := s.elementsMinimum(arguments, 1)
	for _, binding := range s.elementsMinimum(elements[0], 0) {
		pair := s.elementsExact(binding, 2)
		value := pair[1].Eval()
		closure.tryDefine(pair[0], value)
		value.setParent(closure.Parent())
	}
	// eval body
	return evalAll(elements[1:])
}
// letrecSyntax implements (letrec ((var init) ...) body...): every init
// expression is evaluated before any variable is bound, then the body
// runs inside the new scope.
func letrecSyntax(s *Syntax, arguments Object) Object {
	closure := WrapClosure(arguments.Parent())
	elements := s.elementsMinimum(arguments, 1)
	bindings := s.elementsMinimum(elements[0], 0)
	names := make([]Object, 0, len(bindings))
	values := make([]Object, 0, len(bindings))
	for _, binding := range bindings {
		pair := s.elementsExact(binding, 2)
		names = append(names, pair[0])
		values = append(values, pair[1].Eval())
	}
	for index, name := range names {
		closure.tryDefine(name, values[index])
	}
	// eval body
	return evalAll(elements[1:])
}
// orSyntax implements (or ...): operands are evaluated left to right
// and the first value that is not #f is returned; otherwise the value
// of the last operand (#f for an empty form).
func orSyntax(s *Syntax, arguments Object) Object {
	s.assertListMinimum(arguments, 0)
	result := Object(NewBoolean(false))
	for _, operand := range arguments.(*Pair).Elements() {
		result = operand.Eval()
		if !result.isBoolean() || result.(*Boolean).value {
			return result
		}
	}
	return result
}
// quoteSyntax implements (quote obj): the argument's printed form is
// re-parsed as a quoted datum bound to this syntax's scope.
func quoteSyntax(s *Syntax, arguments Object) Object {
	s.assertListEqual(arguments, 1)
	quoted := arguments.(*Pair).ElementAt(0)
	parser := NewParser(quoted.String())
	parser.Peek()
	return parser.parseQuotedObject(s.Bounder())
}
func setSyntax(s *Syntax, arguments Object) Object {
elements := s.elementsExact(arguments, 2)
variable := elements[0]
if !variable.isVariable() {
s.malformedError()
}
value := elements[1].Eval()
s.Bounder().set(variable.(*Variable).identifier, value)
return value
} | scheme/syntax.go | 0.574634 | 0.433082 | syntax.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.