package main
import (
"image"
"image/color"
"github.com/disintegration/imaging"
)
// lut is a per-channel lookup table mapping an 8-bit input level to an
// 8-bit output level.
type lut [256]uint8

// rgbLut bundles one lookup table per RGB channel.
type rgbLut struct {
	r lut
	g lut
	b lut
}

// histogram counts occurrences of each 8-bit intensity level.
type histogram [256]uint32

// rgbHistogram bundles one histogram per RGB channel.
type rgbHistogram struct {
	r histogram
	g histogram
	b histogram
}
// generateRgbHistogramFromImage builds one 256-bucket histogram per RGB
// channel from every pixel of input.
func generateRgbHistogramFromImage(input image.Image) rgbHistogram {
	var hist rgbHistogram
	// Hoist the bounds: the original re-evaluated input.Bounds() in every
	// loop condition. Also rename the local, which shadowed the
	// rgbHistogram type name.
	bounds := input.Bounds()
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			r, g, b, _ := input.At(x, y).RGBA()
			// RGBA() returns 16-bit channel values; shift down to 8-bit
			// bucket indices.
			hist.r[r>>8]++
			hist.g[g>>8]++
			hist.b[b>>8]++
		}
	}
	return hist
}
// convertToCumulativeRgbHistogram returns the running (prefix-sum)
// version of input, computed independently per channel, so that
// bucket i holds the count of all levels <= i.
func convertToCumulativeRgbHistogram(input rgbHistogram) rgbHistogram {
	var cumulative rgbHistogram
	cumulative.r[0] = input.r[0]
	cumulative.g[0] = input.g[0]
	cumulative.b[0] = input.b[0]
	for level := 1; level < 256; level++ {
		cumulative.r[level] = cumulative.r[level-1] + input.r[level]
		cumulative.g[level] = cumulative.g[level-1] + input.g[level]
		cumulative.b[level] = cumulative.b[level-1] + input.b[level]
	}
	return cumulative
}
// generateRgbLutFromRgbHistograms builds a histogram-matching lookup
// table that remaps levels of the "current" distribution onto the
// "target" distribution, channel by channel.
func generateRgbLutFromRgbHistograms(current rgbHistogram, target rgbHistogram) rgbLut {
	currentCumulativeRgbHistogram := convertToCumulativeRgbHistogram(current)
	targetCumulativeRgbHistogram := convertToCumulativeRgbHistogram(target)
	// Scale the target CDF so its total (bucket 255) matches the current
	// CDF's total; 0.5 is added for round-to-nearest on the uint32 cast.
	var ratio [3]float64
	ratio[0] = float64(currentCumulativeRgbHistogram.r[255]) / float64(targetCumulativeRgbHistogram.r[255])
	ratio[1] = float64(currentCumulativeRgbHistogram.g[255]) / float64(targetCumulativeRgbHistogram.g[255])
	ratio[2] = float64(currentCumulativeRgbHistogram.b[255]) / float64(targetCumulativeRgbHistogram.b[255])
	for i := 0; i < 256; i++ {
		targetCumulativeRgbHistogram.r[i] = uint32(0.5 + float64(targetCumulativeRgbHistogram.r[i])*ratio[0])
		targetCumulativeRgbHistogram.g[i] = uint32(0.5 + float64(targetCumulativeRgbHistogram.g[i])*ratio[1])
		targetCumulativeRgbHistogram.b[i] = uint32(0.5 + float64(targetCumulativeRgbHistogram.b[i])*ratio[2])
	}
	//Generate LUT
	// For each input level i, advance p until the target CDF first
	// reaches the current CDF's value; that target level becomes the
	// mapping for i. Both CDFs are monotone, so p only moves forward.
	// NOTE(review): p is uint8, so this relies on the scaled target CDF
	// reaching current[...][255] by bucket 255 (true after the ratio
	// scaling above) — otherwise p would wrap around.
	var lut rgbLut
	var p [3]uint8
	for i := 0; i < 256; i++ {
		for targetCumulativeRgbHistogram.r[p[0]] < currentCumulativeRgbHistogram.r[i] {
			p[0]++
		}
		for targetCumulativeRgbHistogram.g[p[1]] < currentCumulativeRgbHistogram.g[i] {
			p[1]++
		}
		for targetCumulativeRgbHistogram.b[p[2]] < currentCumulativeRgbHistogram.b[i] {
			p[2]++
		}
		lut.r[i] = p[0]
		lut.g[i] = p[1]
		lut.b[i] = p[2]
	}
	return lut
}
func applyRgbLutToImage(input image.Image, lut rgbLut) image.Image {
result := imaging.AdjustFunc(input, func(c color.NRGBA) color.NRGBA {
c.R = uint8(lut.r[c.R])
c.G = uint8(lut.g[c.G])
c.B = uint8(lut.b[c.B])
return c
})
return result
} | histogram.go | 0.594198 | 0.598342 | histogram.go | starcoder |
package runtime
import (
"fmt"
"strconv"
"strings"
)
// BinaryTree groups (de)serialization and construction helpers for
// binary trees of int values. It carries no state of its own.
type BinaryTree struct {
}

// NewBinaryTree returns a ready-to-use BinaryTree helper.
func NewBinaryTree() BinaryTree {
	return BinaryTree{}
}
// unserialize1 rebuilds a tree from the comma-separated preorder form
// produced by serialize1, where an absent child is written as "null".
func (bt *BinaryTree) unserialize1(data string) *TreeNode {
	tokens := strings.Split(data, ",")
	var build func() *TreeNode
	build = func() *TreeNode {
		// Consume the next token from the front of the stream.
		head := tokens[0]
		tokens = tokens[1:]
		if head == "null" {
			return nil
		}
		n, _ := strconv.Atoi(head)
		node := &TreeNode{Val: n}
		node.Left = build()
		node.Right = build()
		return node
	}
	return build()
}
// serialize1 encodes the tree in preorder as comma-separated values,
// writing "null" for absent children (no trailing comma).
func (bt *BinaryTree) serialize1(root *TreeNode) string {
	// Use a strings.Builder instead of repeated string concatenation:
	// `res += ...` copied the whole string on every visit (O(n^2)).
	var b strings.Builder
	var dfs func(root *TreeNode)
	dfs = func(root *TreeNode) {
		if root == nil {
			b.WriteString("null,")
			return
		}
		fmt.Fprintf(&b, "%d,", root.Val)
		dfs(root.Left)
		dfs(root.Right)
	}
	dfs(root)
	s := b.String()
	// Drop the trailing comma.
	return s[:len(s)-1]
}
// unserialize2 rebuilds a tree from the level-order form produced by
// serialize2 ("null" marks an absent child, values are comma-separated).
func (bt *BinaryTree) unserialize2(data string) *TreeNode {
	if data == "null" {
		return nil
	}
	values := strings.Split(data, ",")
	rootVal, _ := strconv.Atoi(values[0])
	root := &TreeNode{Val: rootVal}
	queue := []*TreeNode{root}
	n := len(values)
	i := 1
	// Each dequeued node consumes the next two tokens: its left then its
	// right child. NOTE(review): values[i] after the first i++ assumes a
	// well-formed input (odd token count, as serialize2 emits) — a
	// truncated string would index out of range.
	for i < n {
		node := queue[0]
		queue = queue[1:]
		LeftVal := values[i]
		if LeftVal != "null" {
			val, _ := strconv.Atoi(LeftVal)
			node.Left = &TreeNode{Val: val}
			queue = append(queue, node.Left)
		}
		i++
		rightVal := values[i]
		if rightVal != "null" {
			val, _ := strconv.Atoi(rightVal)
			node.Right = &TreeNode{Val: val}
			queue = append(queue, node.Right)
		}
		i++
	}
	return root
}
// serialize2 encodes the tree in level order, emitting "null" for every
// absent child (including trailing ones), joined by commas.
func (bt *BinaryTree) serialize2(root *TreeNode) string {
	parts := make([]string, 0)
	pending := []*TreeNode{root}
	for len(pending) > 0 {
		cur := pending[0]
		pending = pending[1:]
		if cur == nil {
			parts = append(parts, "null")
			continue
		}
		parts = append(parts, strconv.Itoa(cur.Val))
		// Enqueue both children; nils are kept so their slots are encoded.
		pending = append(pending, cur.Left, cur.Right)
	}
	return strings.Join(parts, ",")
}
// buildFromPreorderAndInorder1 reconstructs a binary tree from its
// preorder and inorder traversals (values are assumed distinct, as in
// the original map-based lookup).
func (bt *BinaryTree) buildFromPreorderAndInorder1(preorder []int, inorder []int) *TreeNode {
	if len(preorder) == 0 || len(inorder) == 0 {
		return nil
	}
	// Build the value -> inorder-index map once. The previous version
	// rebuilt it on every recursive call, making construction O(n^2).
	indexMap := make(map[int]int, len(inorder))
	for i, v := range inorder {
		indexMap[v] = i
	}
	// build reconstructs the subtree whose preorder slice is pre and
	// whose inorder range is [inLo, inHi) in the original inorder slice.
	var build func(pre []int, inLo, inHi int) *TreeNode
	build = func(pre []int, inLo, inHi int) *TreeNode {
		if len(pre) == 0 || inLo >= inHi {
			return nil
		}
		val := pre[0] // preorder head is the subtree root
		mid := indexMap[val]
		leftSize := mid - inLo
		root := &TreeNode{Val: val}
		root.Left = build(pre[1:leftSize+1], inLo, mid)
		root.Right = build(pre[leftSize+1:], mid+1, inHi)
		return root
	}
	return build(preorder, 0, len(inorder))
}
// buildFromPreorderAndInorder2 reconstructs a binary tree from its
// preorder and inorder traversals iteratively, using an explicit stack
// of ancestors whose right subtree has not been placed yet.
func (bt *BinaryTree) buildFromPreorderAndInorder2(preorder []int, inorder []int) *TreeNode {
	if len(preorder) == 0 || len(inorder) == 0 {
		return nil
	}
	root := &TreeNode{Val: preorder[0]}
	stack := []*TreeNode{root}
	inorderIndex := 0
	for i := 1; i < len(preorder); i++ {
		preorderVal := preorder[i]
		node := stack[len(stack)-1]
		if node.Val != inorder[inorderIndex] {
			// Still descending a left spine: the new value is the left
			// child of the current stack top.
			node.Left = &TreeNode{Val: preorderVal}
			stack = append(stack, node.Left)
		} else {
			// The stack top matches the inorder cursor: pop every
			// ancestor whose left subtree is finished, then attach the
			// new value as the right child of the last one popped.
			for len(stack) != 0 && stack[len(stack)-1].Val == inorder[inorderIndex] {
				node = stack[len(stack)-1]
				stack = stack[:len(stack)-1]
				inorderIndex++
			}
			node.Right = &TreeNode{Val: preorderVal}
			stack = append(stack, node.Right)
		}
	}
	return root
}
// buildFromInorderAndPostorder1 reconstructs a binary tree from its
// inorder and postorder traversals recursively. The last postorder
// value is the root; the inorder index of that value splits both
// traversals into left and right subtrees.
// NOTE(review): indexMap is rebuilt on every recursive call, which is
// O(n^2) overall — compare with buildFromPreorderAndInorder1.
func (bt *BinaryTree) buildFromInorderAndPostorder1(inorder []int, postorder []int) *TreeNode {
	if len(inorder) == 0 || len(postorder) == 0 {
		return nil
	}
	indexMap := make(map[int]int, 0)
	for i, v := range inorder {
		indexMap[v] = i
	}
	val := postorder[len(postorder)-1]
	mid := indexMap[val]
	root := &TreeNode{Val: val}
	// Left subtree spans the first mid entries of both traversals; the
	// right subtree takes the rest (minus the root at the postorder end).
	root.Left = bt.buildFromInorderAndPostorder1(inorder[:mid], postorder[:mid])
	root.Right = bt.buildFromInorderAndPostorder1(inorder[mid+1:], postorder[mid:len(postorder)-1])
	return root
}
// buildFromInorderAndPostorder2 is a placeholder for an iterative
// variant of buildFromInorderAndPostorder1.
// TODO: not implemented — always returns nil.
func (bt *BinaryTree) buildFromInorderAndPostorder2(inorder []int, postorder []int) *TreeNode {
	return nil
}
// buildFromPreorderAndPostorder1 reconstructs one binary tree consistent
// with the given preorder and postorder traversals (the pair does not
// identify a unique tree when a node has a single child).
func (bt *BinaryTree) buildFromPreorderAndPostorder1(preorder []int, postorder []int) *TreeNode {
	if len(preorder) == 0 || len(postorder) == 0 {
		return nil
	}
	val := preorder[0]
	root := &TreeNode{Val: val}
	if len(postorder) == 1 {
		return root
	}
	// preorder[1] is the left subtree's root; its position in postorder
	// marks where the left subtree ends, so mid is the left subtree size.
	mid := 0
	for i := 0; i < len(postorder); i++ {
		if postorder[i] == preorder[1] {
			mid = i + 1
			break
		}
	}
	root.Left = bt.buildFromPreorderAndPostorder1(preorder[1:mid+1], postorder[:mid])
	root.Right = bt.buildFromPreorderAndPostorder1(preorder[mid+1:], postorder[mid:len(postorder)-1])
	return root
}
// buildFromPreorderAndPostorder2 is a placeholder for an iterative
// variant of buildFromPreorderAndPostorder1.
// TODO: not implemented — always returns nil.
func (bt *BinaryTree) buildFromPreorderAndPostorder2(preorder []int, postorder []int) *TreeNode {
	return nil
}
/*
Preorder traversal visits: root -> left -> right (recursive version).
*/
func (bt *BinaryTree) displayPreorder1(root *TreeNode) []int {
	out := make([]int, 0)
	var walk func(node *TreeNode)
	walk = func(node *TreeNode) {
		if node == nil {
			return
		}
		out = append(out, node.Val)
		walk(node.Left)
		walk(node.Right)
	}
	walk(root)
	return out
}
/*
Preorder traversal visits: root -> left -> right (iterative version).
Approach:
1. From the current node, walk down the left spine to the leftmost leaf,
   recording each node's value and pushing each node onto a stack.
2. When the spine ends, pop a node from the stack and continue from its
   right child.
3. Repeat steps 1-2 while the current node is non-nil or the stack is
   non-empty.
*/
func (bt *BinaryTree) displayPreorder2(root *TreeNode) []int {
	res := make([]int, 0)
	stack := make([]*TreeNode, 0)
	for root != nil || len(stack) > 0 {
		for root != nil {
			res = append(res, root.Val)
			stack = append(stack, root)
			root = root.Left
		}
		if len(stack) > 0 {
			root = stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			root = root.Right
		}
	}
	return res
}
/*
Inorder traversal visits: left -> root -> right (recursive version).
*/
func (bt *BinaryTree) displayInorder1(root *TreeNode) []int {
	out := make([]int, 0)
	var walk func(node *TreeNode)
	walk = func(node *TreeNode) {
		if node == nil {
			return
		}
		walk(node.Left)
		out = append(out, node.Val)
		walk(node.Right)
	}
	walk(root)
	return out
}
/*
Inorder traversal visits: left -> root -> right (iterative version).
Approach:
1. From the current node, walk down the left spine, pushing each node
   onto a stack.
2. When the spine ends, pop a node, record its value, and continue from
   its right child.
3. Repeat steps 1-2 while the current node is non-nil or the stack is
   non-empty.
*/
func (bt *BinaryTree) displayInorder2(root *TreeNode) []int {
	res := make([]int, 0)
	stack := make([]*TreeNode, 0)
	for root != nil || len(stack) > 0 {
		for root != nil {
			stack = append(stack, root)
			root = root.Left
		}
		if len(stack) > 0 {
			root = stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			res = append(res, root.Val)
			root = root.Right
		}
	}
	return res
}
/*
Postorder traversal visits: left -> right -> root (recursive version).
*/
func (bt *BinaryTree) displayPostorder1(root *TreeNode) []int {
	out := make([]int, 0)
	var walk func(node *TreeNode)
	walk = func(node *TreeNode) {
		if node == nil {
			return
		}
		walk(node.Left)
		walk(node.Right)
		out = append(out, node.Val)
	}
	walk(root)
	return out
}
/*
Postorder traversal visits: left -> right -> root (iterative version).
Approach: perform a "root -> right -> left" traversal with an explicit
stack, then reverse the collected values — the reversal yields
left -> right -> root.
*/
func (bt *BinaryTree) displayPostorder2(root *TreeNode) []int {
	res := make([]int, 0)
	stack := make([]*TreeNode, 0)
	for root != nil || len(stack) > 0 {
		if root != nil {
			// Collect in visit order and reverse once at the end: the
			// previous version prepended each value, copying the slice
			// on every visit (O(n^2)).
			res = append(res, root.Val)
			stack = append(stack, root)
			root = root.Right
		} else {
			root = stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			root = root.Left
		}
	}
	// In-place reversal.
	for i, j := 0, len(res)-1; i < j; i, j = i+1, j-1 {
		res[i], res[j] = res[j], res[i]
	}
	return res
}
// DisplayLevelOrder1 returns the node values grouped by depth, each
// level left to right, using depth-first recursion that appends to the
// slice for the node's level.
/*
Level-order traversal: each level from left to right.
*/
func (bt *BinaryTree) DisplayLevelOrder1(root *TreeNode) [][]int {
	res := make([][]int, 0)
	var dfs func(root *TreeNode, level int)
	dfs = func(root *TreeNode, level int) {
		if root == nil {
			return
		}
		// First visit at this depth: open a new level slice.
		if len(res) == level {
			res = append(res, []int{})
		}
		res[level] = append(res[level], root.Val)
		if root.Left != nil {
			dfs(root.Left, level+1)
		}
		if root.Right != nil {
			dfs(root.Right, level+1)
		}
	}
	dfs(root, 0)
	return res
}
// DisplayLevelOrder2 /*
/* 层序遍历顺序为:每一层从左到右
解题思路:
1、初始化临时队列,将根节点入队
2、遍历临时队列,每次出队一个节点,获取当前节点值,并将当前节点左右叶子节点依次压入临时队列
3、当临时队列不为空时,循环1、2步
*/
func (bt *BinaryTree) DisplayLevelOrder2(root *TreeNode) [][]int {
res := make([][]int, 0)
queue := []*TreeNode{root}
for level := 0; len(queue) > 0; level++ {
length := len(queue)
res = append(res, []int{})
for i := 0; i < length; i++ {
node := queue[0]
queue = queue[1:]
res[level] = append(res[level], node.Val)
if node.Left != nil {
queue = append(queue, node.Left)
}
if node.Right != nil {
queue = append(queue, node.Right)
}
}
//fmt.Println(res[level])
}
return res
} | runtime/BinaryTree.go | 0.650578 | 0.421433 | BinaryTree.go | starcoder |
package influxql
import (
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/influxql/internal"
)
// FloatPoint represents a point with a float64 value.
type FloatPoint struct {
	Name string
	Tags Tags
	Time int64
	Nil  bool
	Value float64
	Aux []interface{}
}

// Accessors implementing the untyped point interface.
func (v *FloatPoint) name() string { return v.Name }
func (v *FloatPoint) tags() Tags { return v.Tags }
func (v *FloatPoint) time() int64 { return v.Time }
func (v *FloatPoint) nil() bool { return v.Nil }

// value returns the point's value, or nil when the point is marked Nil.
func (v *FloatPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *FloatPoint) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
// The Aux slice is copied so the clone does not alias the original.
func (v *FloatPoint) Clone() *FloatPoint {
	if v == nil {
		return nil
	}
	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
	return &other
}

// encodeFloatPoint converts p to its protobuf wire representation.
func encodeFloatPoint(p *FloatPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		FloatValue: proto.Float64(p.Value),
	}
}

// decodeFloatPoint converts a protobuf point back into a FloatPoint.
func decodeFloatPoint(pb *internal.Point) *FloatPoint {
	return &FloatPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Value: pb.GetFloatValue(),
	}
}

// floatPoints represents a slice of points sortable by time.
type floatPoints []FloatPoint
func (a floatPoints) Len() int { return len(a) }
func (a floatPoints) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a floatPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// floatPointsByValue represents a slice of points sortable by value.
type floatPointsByValue []FloatPoint
func (a floatPointsByValue) Len() int { return len(a) }
func (a floatPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a floatPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// floatPointsByFunc represents a slice of points sortable by a function.
// Push/Pop additionally make it usable as a heap.Interface.
type floatPointsByFunc struct {
	points []FloatPoint
	cmp func(a, b *FloatPoint) bool
}
func (a *floatPointsByFunc) Len() int { return len(a.points) }
func (a *floatPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *floatPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *floatPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(FloatPoint))
}
func (a *floatPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

// floatPointsSortBy wraps points with the given comparison function.
func floatPointsSortBy(points []FloatPoint, cmp func(a, b *FloatPoint) bool) *floatPointsByFunc {
	return &floatPointsByFunc{
		points: points,
		cmp: cmp,
	}
}
// IntegerPoint represents a point with a int64 value.
type IntegerPoint struct {
	Name string
	Tags Tags
	Time int64
	Nil  bool
	Value int64
	Aux []interface{}
}

// Accessors implementing the untyped point interface.
func (v *IntegerPoint) name() string { return v.Name }
func (v *IntegerPoint) tags() Tags { return v.Tags }
func (v *IntegerPoint) time() int64 { return v.Time }
func (v *IntegerPoint) nil() bool { return v.Nil }

// value returns the point's value, or nil when the point is marked Nil.
func (v *IntegerPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *IntegerPoint) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
// The Aux slice is copied so the clone does not alias the original.
func (v *IntegerPoint) Clone() *IntegerPoint {
	if v == nil {
		return nil
	}
	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
	return &other
}

// encodeIntegerPoint converts p to its protobuf wire representation.
func encodeIntegerPoint(p *IntegerPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		IntegerValue: proto.Int64(p.Value),
	}
}

// decodeIntegerPoint converts a protobuf point back into an IntegerPoint.
func decodeIntegerPoint(pb *internal.Point) *IntegerPoint {
	return &IntegerPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Value: pb.GetIntegerValue(),
	}
}

// integerPoints represents a slice of points sortable by time.
type integerPoints []IntegerPoint
func (a integerPoints) Len() int { return len(a) }
func (a integerPoints) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a integerPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// integerPointsByValue represents a slice of points sortable by value.
type integerPointsByValue []IntegerPoint
func (a integerPointsByValue) Len() int { return len(a) }
func (a integerPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a integerPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// integerPointsByFunc represents a slice of points sortable by a function.
// Push/Pop additionally make it usable as a heap.Interface.
type integerPointsByFunc struct {
	points []IntegerPoint
	cmp func(a, b *IntegerPoint) bool
}
func (a *integerPointsByFunc) Len() int { return len(a.points) }
func (a *integerPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *integerPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *integerPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(IntegerPoint))
}
func (a *integerPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

// integerPointsSortBy wraps points with the given comparison function.
func integerPointsSortBy(points []IntegerPoint, cmp func(a, b *IntegerPoint) bool) *integerPointsByFunc {
	return &integerPointsByFunc{
		points: points,
		cmp: cmp,
	}
}
// StringPoint represents a point with a string value.
type StringPoint struct {
	Name string
	Tags Tags
	Time int64
	Nil  bool
	Value string
	Aux []interface{}
}

// Accessors implementing the untyped point interface.
func (v *StringPoint) name() string { return v.Name }
func (v *StringPoint) tags() Tags { return v.Tags }
func (v *StringPoint) time() int64 { return v.Time }
func (v *StringPoint) nil() bool { return v.Nil }

// value returns the point's value, or nil when the point is marked Nil.
func (v *StringPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *StringPoint) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
// The Aux slice is copied so the clone does not alias the original.
func (v *StringPoint) Clone() *StringPoint {
	if v == nil {
		return nil
	}
	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
	return &other
}

// encodeStringPoint converts p to its protobuf wire representation.
func encodeStringPoint(p *StringPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		StringValue: proto.String(p.Value),
	}
}

// decodeStringPoint converts a protobuf point back into a StringPoint.
func decodeStringPoint(pb *internal.Point) *StringPoint {
	return &StringPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Value: pb.GetStringValue(),
	}
}

// stringPoints represents a slice of points sortable by time.
type stringPoints []StringPoint
func (a stringPoints) Len() int { return len(a) }
func (a stringPoints) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a stringPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// stringPointsByValue represents a slice of points sortable by value.
type stringPointsByValue []StringPoint
func (a stringPointsByValue) Len() int { return len(a) }
func (a stringPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a stringPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// stringPointsByFunc represents a slice of points sortable by a function.
// Push/Pop additionally make it usable as a heap.Interface.
type stringPointsByFunc struct {
	points []StringPoint
	cmp func(a, b *StringPoint) bool
}
func (a *stringPointsByFunc) Len() int { return len(a.points) }
func (a *stringPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *stringPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *stringPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(StringPoint))
}
func (a *stringPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

// stringPointsSortBy wraps points with the given comparison function.
func stringPointsSortBy(points []StringPoint, cmp func(a, b *StringPoint) bool) *stringPointsByFunc {
	return &stringPointsByFunc{
		points: points,
		cmp: cmp,
	}
}
// BooleanPoint represents a point with a bool value.
type BooleanPoint struct {
	Name string
	Tags Tags
	Time int64
	Nil  bool
	Value bool
	Aux []interface{}
}

// Accessors implementing the untyped point interface.
func (v *BooleanPoint) name() string { return v.Name }
func (v *BooleanPoint) tags() Tags { return v.Tags }
func (v *BooleanPoint) time() int64 { return v.Time }
func (v *BooleanPoint) nil() bool { return v.Nil }

// value returns the point's value, or nil when the point is marked Nil.
func (v *BooleanPoint) value() interface{} {
	if v.Nil {
		return nil
	}
	return v.Value
}
func (v *BooleanPoint) aux() []interface{} { return v.Aux }

// Clone returns a copy of v.
// The Aux slice is copied so the clone does not alias the original.
func (v *BooleanPoint) Clone() *BooleanPoint {
	if v == nil {
		return nil
	}
	other := *v
	if v.Aux != nil {
		other.Aux = make([]interface{}, len(v.Aux))
		copy(other.Aux, v.Aux)
	}
	return &other
}

// encodeBooleanPoint converts p to its protobuf wire representation.
func encodeBooleanPoint(p *BooleanPoint) *internal.Point {
	return &internal.Point{
		Name: proto.String(p.Name),
		Tags: proto.String(p.Tags.ID()),
		Time: proto.Int64(p.Time),
		Nil: proto.Bool(p.Nil),
		Aux: encodeAux(p.Aux),
		BooleanValue: proto.Bool(p.Value),
	}
}

// decodeBooleanPoint converts a protobuf point back into a BooleanPoint.
func decodeBooleanPoint(pb *internal.Point) *BooleanPoint {
	return &BooleanPoint{
		Name: pb.GetName(),
		Tags: newTagsID(pb.GetTags()),
		Time: pb.GetTime(),
		Nil: pb.GetNil(),
		Aux: decodeAux(pb.Aux),
		Value: pb.GetBooleanValue(),
	}
}

// booleanPoints represents a slice of points sortable by time.
type booleanPoints []BooleanPoint
func (a booleanPoints) Len() int { return len(a) }
func (a booleanPoints) Less(i, j int) bool { return a[i].Time < a[j].Time }
func (a booleanPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// booleanPointsByValue represents a slice of points sortable by value
// (false sorts before true).
type booleanPointsByValue []BooleanPoint
func (a booleanPointsByValue) Len() int { return len(a) }
// Less must define a strict weak ordering for sort.Sort. The previous
// body, `!a[i].Value`, reported two equal false elements as mutually
// less-than, violating the sort contract.
func (a booleanPointsByValue) Less(i, j int) bool { return !a[i].Value && a[j].Value }
func (a booleanPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// booleanPointsByFunc represents a slice of points sortable by a function.
// Push/Pop additionally make it usable as a heap.Interface.
type booleanPointsByFunc struct {
	points []BooleanPoint
	cmp func(a, b *BooleanPoint) bool
}
func (a *booleanPointsByFunc) Len() int { return len(a.points) }
func (a *booleanPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }
func (a *booleanPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] }
func (a *booleanPointsByFunc) Push(x interface{}) {
	a.points = append(a.points, x.(BooleanPoint))
}
func (a *booleanPointsByFunc) Pop() interface{} {
	p := a.points[len(a.points)-1]
	a.points = a.points[:len(a.points)-1]
	return p
}

// booleanPointsSortBy wraps points with the given comparison function.
func booleanPointsSortBy(points []BooleanPoint, cmp func(a, b *BooleanPoint) bool) *booleanPointsByFunc {
	return &booleanPointsByFunc{
		points: points,
		cmp: cmp,
	}
}
package pathfinding
import (
"github.com/ThudPoland/Man-Pac/basic"
)
//BreadthFirstSearch is a struct for making pathfinding.
//It caches the result of the most recent search: the start and goal
//nodes, the first step to take, and whether the goal was reachable.
type BreadthFirstSearch struct {
	lastNode  *Node           // goal node found by the last search
	firstNode *Node           // node the last search started from
	direction basic.Direction // first step along the found path
	reachable bool            // true when the last search reached its goal
}
//GetSearchResult runs a breadth-first search over source from
//(startX, startY) toward (endX, endY). On success it records the goal
//node, marks it reachable and caches the first step of the path (via
//getFirstNode); otherwise reachable stays false.
//NOTE(review): node states in source are mutated and never reset here,
//so a Graph appears to be single-use per search — confirm callers
//rebuild or reset it between searches.
func (searchData *BreadthFirstSearch) GetSearchResult(startX, startY, endX, endY int, source *Graph) {
	searchData.reachable = false
	node := source.nodes[startY][startX]
	searchData.firstNode = node
	nodesSlice := []*Node{node}
	// Standard BFS over a FIFO frontier.
	for len(nodesSlice) > 0 {
		actualNode := nodesSlice[0]
		nodesSlice = nodesSlice[1:]
		neighbours := searchData.getNeighbours(actualNode.x, actualNode.y, source)
		for element := range neighbours {
			actualNeighbour := neighbours[element]
			if actualNeighbour.state == Unvisited {
				actualNeighbour.state = Visited
				actualNeighbour.parent = actualNode
				actualNeighbour.distance = actualNeighbour.parent.distance + 1
				// Stop at the first hit: BFS guarantees it lies on a
				// shortest path.
				if actualNeighbour.x == endX && actualNeighbour.y == endY {
					searchData.reachable = true
					searchData.lastNode = actualNeighbour
					searchData.getFirstNode(source, actualNeighbour)
					return
				}
				nodesSlice = append(nodesSlice, actualNeighbour)
			}
		}
		actualNode.state = Explored
	}
}
// getNeighbours returns the 4-connected neighbours of (startX, startY)
// that lie inside the grid and are still Unvisited.
func (searchData *BreadthFirstSearch) getNeighbours(startX, startY int, source *Graph) []*Node {
	neighbours := []*Node{}
	coordinates := []struct{ x, y int }{{startX - 1, startY}, {startX + 1, startY}, {startX, startY - 1}, {startX, startY + 1}}
	for element := range coordinates {
		// Skip a candidate when EITHER coordinate is out of range. The
		// previous nested check only skipped when both were out of range,
		// so e.g. x == -1 with a valid y indexed source.nodes out of
		// bounds and panicked.
		if coordinates[element].x < 0 || coordinates[element].x >= source.width ||
			coordinates[element].y < 0 || coordinates[element].y >= source.height {
			continue
		}
		node := source.nodes[coordinates[element].y][coordinates[element].x]
		if node.state == Unvisited {
			neighbours = append(neighbours, node)
		}
	}
	return neighbours
}
// getFirstNode walks parent links back from the goal node until it finds
// the node whose parent is the search start, then stores the direction
// of that first step in searchData.direction.
// sourceNode is currently unused; it is kept for signature stability.
// NOTE(review): deltaY > 0 maps to Up and deltaY < 0 to Down here; with
// screen coordinates where y grows downward this looks inverted —
// confirm against basic.Direction semantics.
func (searchData *BreadthFirstSearch) getFirstNode(source *Graph, sourceNode *Node) {
	// Guard clause replaces the previous wrapping `if ... == true` block.
	if searchData.lastNode == nil || searchData.firstNode == nil || !searchData.reachable {
		return
	}
	actualNode := searchData.lastNode
	for {
		if actualNode == searchData.firstNode {
			// Goal equals start: no step to take.
			searchData.direction = basic.No
			return
		}
		if actualNode.parent == searchData.firstNode {
			deltaX := actualNode.x - searchData.firstNode.x
			deltaY := actualNode.y - searchData.firstNode.y
			switch {
			case deltaY > 0:
				searchData.direction = basic.Up
			case deltaY < 0:
				searchData.direction = basic.Down
			case deltaX > 0:
				searchData.direction = basic.Right
			case deltaX < 0:
				searchData.direction = basic.Left
			default:
				searchData.direction = basic.No
			}
			return
		}
		actualNode = actualNode.parent
	}
}
//GetDirection gets direction for next item
func (searchData *BreadthFirstSearch) GetDirection() basic.Direction {
if searchData.reachable == false {
return basic.No
}
return searchData.direction
} | pathfinding/breadthfirstsearch.go | 0.544559 | 0.468851 | breadthfirstsearch.go | starcoder |
package main
import (
"fmt"
)
// main demonstrates Go's six relational operators on a pair of ints.
// Each operator yields a bool that is printed.
func main() {
	num1 := 20
	num2 := 10

	// == : true when both operands are equal.
	r1 := num1 == num2
	fmt.Println("Result 1 =", r1)

	// != : true when the operands differ.
	r2 := num1 != num2
	fmt.Println("Result 2 =", r2)

	// > : true when the left operand is greater.
	r3 := num1 > num2
	fmt.Println("Result 3 =", r3)

	// < : true when the left operand is smaller.
	r4 := num1 < num2
	fmt.Println("Result 4 =", r4)

	// >= : true when the left operand is greater than or equal.
	r5 := num1 >= num2
	fmt.Println("Result 5 =", r5)

	// <= : true when the left operand is less than or equal.
	r6 := num1 <= num2
	fmt.Println("Result 6 =", r6)
}
package expr
import (
"strings"
"bosun.org/cmd/bosun/expr/parse"
"bosun.org/models"
)
// elasticTagQuery derives the tag-key set for an elastic query function
// from its second argument, a comma-separated list of keys.
func elasticTagQuery(args []parse.Node) (parse.Tags, error) {
	str := args[1].(*parse.StringNode)
	tags := make(parse.Tags)
	for _, key := range strings.Split(str.Text, ",") {
		tags[key] = struct{}{}
	}
	return tags, nil
}
// Elastic holds the functions that query an elasticsearch instance.
// They are only loaded when the elastic hosts are set in the config file.
var Elastic = map[string]parse.Func{
	// Funcs for querying elastic
	"escount": {
		Args: []models.FuncType{models.TypeESIndexer, models.TypeString, models.TypeESQuery, models.TypeString, models.TypeString, models.TypeString},
		Return: models.TypeSeriesSet,
		Tags: elasticTagQuery,
		F: ESCount,
		PrefixEnabled: true,
	},
	"esstat": {
		Args: []models.FuncType{models.TypeESIndexer, models.TypeString, models.TypeESQuery, models.TypeString, models.TypeString, models.TypeString, models.TypeString, models.TypeString},
		Return: models.TypeSeriesSet,
		Tags: elasticTagQuery,
		F: ESStat,
		PrefixEnabled: true,
	},
	// Funcs to create elastic index names (ESIndexer type)
	"esindices": {
		Args: []models.FuncType{models.TypeString, models.TypeString},
		VArgs: true,
		VArgsPos: 1,
		Return: models.TypeESIndexer,
		F: ESIndicies,
	},
	"esdaily": {
		Args: []models.FuncType{models.TypeString, models.TypeString, models.TypeString},
		Return: models.TypeESIndexer,
		F: ESDaily,
	},
	"esmonthly": {
		Args: []models.FuncType{models.TypeString, models.TypeString, models.TypeString},
		Return: models.TypeESIndexer,
		F: ESMonthly,
	},
	"esls": {
		Args: []models.FuncType{models.TypeString},
		Return: models.TypeESIndexer,
		F: ESLS,
	},
	// Funcs for generate elastic queries (ESQuery Type) to further filter results
	"esall": {
		Args: []models.FuncType{},
		Return: models.TypeESQuery,
		F: ESAll,
	},
	"esregexp": {
		Args: []models.FuncType{models.TypeString, models.TypeString},
		Return: models.TypeESQuery,
		F: ESRegexp,
	},
	"esquery": {
		Args: []models.FuncType{models.TypeString, models.TypeString},
		Return: models.TypeESQuery,
		F: ESQueryString,
	},
	"esexists": {
		Args: []models.FuncType{models.TypeString},
		Return: models.TypeESQuery,
		F: ESExists,
	},
	// Boolean combinators over ESQuery values.
	"esand": {
		Args: []models.FuncType{models.TypeESQuery},
		VArgs: true,
		Return: models.TypeESQuery,
		F: ESAnd,
	},
	"esor": {
		Args: []models.FuncType{models.TypeESQuery},
		VArgs: true,
		Return: models.TypeESQuery,
		F: ESOr,
	},
	"esnot": {
		Args: []models.FuncType{models.TypeESQuery},
		Return: models.TypeESQuery,
		F: ESNot,
	},
	// Range comparisons on a numeric field.
	"esgt": {
		Args: []models.FuncType{models.TypeString, models.TypeScalar},
		Return: models.TypeESQuery,
		F: ESGT,
	},
	"esgte": {
		Args: []models.FuncType{models.TypeString, models.TypeScalar},
		Return: models.TypeESQuery,
		F: ESGTE,
	},
	"eslt": {
		Args: []models.FuncType{models.TypeString, models.TypeScalar},
		Return: models.TypeESQuery,
		F: ESLT,
	},
	"eslte": {
		Args: []models.FuncType{models.TypeString, models.TypeScalar},
		Return: models.TypeESQuery,
		F: ESLTE,
	},
}
package design
import (
"io"
"reflect"
"github.com/gregoryv/go-design/shape"
)
// NewSequenceDiagram returns a sequence diagram with default column
// width.
func NewSequenceDiagram() *SequenceDiagram {
	return &SequenceDiagram{
		Diagram: NewDiagram(),
		ColWidth: 190,
		VMargin: 10,
	}
}

// SequenceDiagram defines columns and links between columns.
type SequenceDiagram struct {
	Diagram
	ColWidth int
	VMargin int // top margin for each horizontal lane
	columns []string // column labels, left-to-right order
	links []*Link // arrows between columns, top-to-bottom order
}
// WriteSvg renders the diagram as SVG to the given writer.
// It lays out one vertical lifeline per column (centered under the
// column label), then draws each link top to bottom as a labelled arrow.
func (d *SequenceDiagram) WriteSvg(w io.Writer) error {
	var (
		colWidth = d.ColWidth
		top = d.top()
		x = d.Pad.Left
		y1 = top + d.TextPad.Bottom + d.Font.LineHeight // below label
		y2 = d.Height()
	)
	// Column labels and lifelines.
	lines := make([]*shape.Line, len(d.columns))
	for i, column := range d.columns {
		label := shape.NewLabel(column)
		label.Font = d.Font
		label.Pad = d.Pad
		label.SetX(i * colWidth)
		label.SetY(top)
		firstColumn := i == 0
		if firstColumn {
			// Shift the running x so the first lifeline sits under the
			// middle of its label.
			x += label.Width() / 2
		}
		line := shape.NewLine(x, y1, x, y2)
		line.SetClass("column-line")
		lines[i] = line
		x += colWidth
		d.VAlignCenter(lines[i], label)
		d.Place(lines[i], label)
	}
	// Links, advancing y one lane per link.
	y := y1 + d.plainHeight()
	for _, lnk := range d.links {
		fromX := lines[lnk.fromIndex].Start.X
		toX := lines[lnk.toIndex].Start.X
		label := shape.NewLabel(lnk.text)
		label.Font = d.Font
		label.Pad = d.Pad
		label.SetX(fromX)
		label.SetY(y - 3 - d.Font.LineHeight)
		if lnk.toSelf() {
			// Self link: a small out-and-back hook with an arrow pointing
			// back at the same lifeline.
			margin := 15
			// add two lines + arrow
			l1 := shape.NewLine(fromX, y, fromX+margin, y)
			l1.SetClass(lnk.class())
			l2 := shape.NewLine(fromX+margin, y, fromX+margin, y+d.Font.LineHeight*2)
			l2.SetClass(lnk.class())
			d.HAlignCenter(l2, label)
			label.SetX(fromX + l1.Width() + d.TextPad.Left)
			label.SetY(y + 3)
			arrow := shape.NewArrow(
				l2.End.X,
				l2.End.Y,
				l1.Start.X,
				l2.End.Y,
			)
			arrow.SetClass(lnk.class())
			d.Place(l1, l2, arrow, label)
			y += d.selfHeight()
		} else {
			// Plain link: a single horizontal arrow between two lifelines.
			arrow := shape.NewArrow(
				fromX,
				y,
				toX,
				y,
			)
			arrow.SetClass(lnk.class())
			d.VAlignCenter(arrow, label)
			d.Place(arrow, label)
			y += d.plainHeight()
		}
	}
	return d.Diagram.WriteSvg(w)
}
// Width returns the total width of the diagram: an explicit SVG width
// when one is set, otherwise the combined width of all columns.
func (d *SequenceDiagram) Width() int {
	if w := d.Svg.Width; w != 0 {
		return w
	}
	return len(d.columns) * d.ColWidth
}
// Height returns the total height of the diagram: an explicit SVG
// height when one is set, otherwise the label lane plus one lane per
// link (self links take a taller lane).
func (d *SequenceDiagram) Height() int {
	if d.Svg.Height != 0 {
		return d.Svg.Height
	}
	if len(d.columns) == 0 {
		return 0
	}
	height := d.top() + d.plainHeight()
	for _, lnk := range d.links {
		if lnk.toSelf() {
			height += d.selfHeight()
			continue
		}
		height += d.plainHeight()
	}
	return height
}
// selfHeight is the height of a self referencing link's lane.
func (d *SequenceDiagram) selfHeight() int {
	return 3*d.Font.LineHeight + d.Pad.Bottom
}

// plainHeight returns the height of an arrow and label lane.
func (d *SequenceDiagram) plainHeight() int {
	return d.Font.LineHeight + d.Pad.Bottom + d.VMargin
}

// top returns the y offset at which the column labels are drawn.
func (d *SequenceDiagram) top() int {
	return d.Pad.Top
}
func (d *SequenceDiagram) AddColumns(names ...string) {
d.columns = append(d.columns, names...)
}
func (d *SequenceDiagram) SaveAs(filename string) error {
return saveAs(d, d.Style, filename)
}
func (d *SequenceDiagram) AddStruct(obj interface{}) string {
name := reflect.TypeOf(obj).String()
d.AddColumns(name)
return name
} | seqdia.go | 0.727975 | 0.455078 | seqdia.go | starcoder |
package core
import (
"sync"
uuid "github.com/satori/go.uuid"
)
// instance is the lazily created process-wide manager; once guards its
// one-time initialization.
var instance *PortfolioStateManager
var once sync.Once
// SharedPortfolioManager returns the process-wide singleton state manager,
// creating it (with an empty state map) on first use.
func SharedPortfolioManager() *PortfolioStateManager {
	once.Do(func() {
		instance = &PortfolioStateManager{}
		instance.States = make(map[string]PortfolioState)
	})
	return instance
}
// PortfolioStateManager keeps a history of portfolio states keyed by
// StateID and remembers the most recently pushed one.
// NOTE(review): the States map is not guarded by a lock even though the
// manager is a shared singleton — confirm all mutation happens from a
// single goroutine.
type PortfolioStateManager struct {
	States map[string]PortfolioState
	LastStateID string
}
// PortfolioState is one snapshot of all positions
// (exchange -> currency -> amount), identified by a unique StateID.
type PortfolioState struct {
	StateID string `json:"stateID"`
	Positions map[string]map[Currency]float64 `json:"positions"`
}
// PortfolioStateSlice holds the positions of a single exchange.
type PortfolioStateSlice struct {
	Exch string
	Positions map[Currency]float64
}
// Portfolio wraps all your positions
type Portfolio struct {
	Positions map[string]map[Currency]float64
}
// PushState stores the state in the history and marks it as the latest.
func (m *PortfolioStateManager) PushState(state PortfolioState) {
	m.LastStateID = state.StateID
	m.States[state.StateID] = state
}
// NewPortfolioStateFromPositions builds a state with a fresh StateID
// around the given positions map (the map is adopted, not copied).
func NewPortfolioStateFromPositions(positions map[string]map[Currency]float64) PortfolioState {
	state := NewPortfolioState()
	state.Positions = positions
	return state
}
// NewPortfolioState returns an empty state with a freshly generated
// UUIDv4 as its StateID.
func NewPortfolioState() PortfolioState {
	state := PortfolioState{}
	uuid := (uuid.NewV4()).String()
	state.StateID = uuid
	state.Positions = make(map[string]map[Currency]float64)
	return state
}
// LastPositions returns the positions of the most recently pushed state.
func (m *PortfolioStateManager) LastPositions() map[string]map[Currency]float64 {
	return m.States[m.LastStateID].Positions
}
// Position returns the amount held for a currency on an exchange in the
// given historical state (the float64 zero value when unknown).
func (m *PortfolioStateManager) Position(stateID, exch string, curr Currency) float64 {
	return m.States[stateID].Positions[exch][curr]
}
// CurrentPosition returns the amount held for a currency on an exchange
// in the latest state (the float64 zero value when unknown).
func (m *PortfolioStateManager) CurrentPosition(exch string, curr Currency) float64 {
	return m.States[m.LastStateID].Positions[exch][curr]
}
// UpdateWithNewState records state in the history. With override (or when
// there is no history yet) the state is pushed verbatim. Otherwise a new
// state is created that carries over the last known positions and then
// overlays the positions of the incoming state.
func (m *PortfolioStateManager) UpdateWithNewState(state PortfolioState, override bool) {
	if override || len(m.States) == 0 {
		m.PushState(state)
		return
	}
	last := m.States[m.LastStateID]
	// NewPortfolioState already assigns a fresh UUID StateID, so there is
	// no need to generate a second one as the old code did.
	merged := NewPortfolioState()
	// Carry over every position from the previous state. (The old code
	// ranged over state.Positions[exch] here, so nothing was actually
	// carried over — every copied entry was immediately overwritten below.)
	for exch := range last.Positions {
		merged.Positions[exch] = make(map[Currency]float64, len(last.Positions[exch]))
		for currency, amount := range last.Positions[exch] {
			merged.Positions[exch][currency] = amount
		}
	}
	// Overlay the incoming positions, creating maps for exchanges the
	// previous state did not know about (the old code wrote into a nil
	// map here and panicked for new exchanges).
	for exch := range state.Positions {
		if merged.Positions[exch] == nil {
			merged.Positions[exch] = make(map[Currency]float64, len(state.Positions[exch]))
		}
		for currency, amount := range state.Positions[exch] {
			merged.Positions[exch][currency] = amount
		}
	}
	m.PushState(merged)
}
// UpdateWithNewPosition forks the latest state, applies the given position
// to the fork and pushes it (under a fresh StateID) as the new head.
func (m *PortfolioStateManager) UpdateWithNewPosition(exch string, c Currency, amount float64) {
	current := m.States[m.LastStateID]
	next := current.Copy()
	next.UpdatePosition(exch, c, amount)
	uuid := (uuid.NewV4()).String()
	next.StateID = uuid
	m.PushState(next)
}
// ForkCurrentState returns a deep copy of the latest state under a fresh
// StateID without pushing it into the history.
func (m *PortfolioStateManager) ForkCurrentState() PortfolioState {
	current := m.States[m.LastStateID]
	fork := current.Copy()
	uuid := (uuid.NewV4()).String()
	fork.StateID = uuid
	return fork
}
// UpdatePosition sets the amount (truncated to 8 decimals by Trunc8) held
// for a currency on an exchange, allocating the nested maps on first use.
func (s *PortfolioState) UpdatePosition(exch string, c Currency, amount float64) {
	if s.Positions == nil {
		s.Positions = make(map[string]map[Currency]float64)
	}
	if s.Positions[exch] == nil {
		s.Positions[exch] = make(map[Currency]float64)
	}
	s.Positions[exch][c] = Trunc8(amount)
}
// Copy returns a deep copy of the state's positions. The StateID is NOT
// copied; callers assign a fresh one (see ForkCurrentState).
func (s *PortfolioState) Copy() PortfolioState {
	// Named dup rather than shadowing the builtin copy; maps are pre-sized.
	dup := PortfolioState{
		Positions: make(map[string]map[Currency]float64, len(s.Positions)),
	}
	for exch, positions := range s.Positions {
		dup.Positions[exch] = make(map[Currency]float64, len(positions))
		for currency, amount := range positions {
			dup.Positions[exch][currency] = amount
		}
	}
	return dup
}
// UpdatePosition sets the amount (truncated to 8 decimals by Trunc8) held
// for a currency on an exchange, allocating the nested maps on first use.
func (s *Portfolio) UpdatePosition(exch string, c Currency, amount float64) {
	if s.Positions == nil {
		s.Positions = make(map[string]map[Currency]float64)
	}
	if s.Positions[exch] == nil {
		s.Positions[exch] = make(map[Currency]float64)
	}
	s.Positions[exch][c] = Trunc8(amount)
}
package parser
// IsConstant tells if the category is constant.
func (p Category) IsConstant() bool {
	return p == Category_Constant
}
// IsBool tells if the category is bool.
func (p Category) IsBool() bool {
	return p == Category_Bool
}
// IsByte tells if the category is byte.
func (p Category) IsByte() bool {
	return p == Category_Byte
}
// IsI16 tells if the category is i16.
func (p Category) IsI16() bool {
	return p == Category_I16
}
// IsI32 tells if the category is i32.
func (p Category) IsI32() bool {
	return p == Category_I32
}
// IsI64 tells if the category is i64.
func (p Category) IsI64() bool {
	return p == Category_I64
}
// IsDouble tells if the category is double.
func (p Category) IsDouble() bool {
	return p == Category_Double
}
// IsString tells if the category is string.
func (p Category) IsString() bool {
	return p == Category_String
}
// IsBinary tells if the category is binary.
func (p Category) IsBinary() bool {
	return p == Category_Binary
}
// IsMap tells if the category is map.
func (p Category) IsMap() bool {
	return p == Category_Map
}
// IsList tells if the category is list.
func (p Category) IsList() bool {
	return p == Category_List
}
// IsSet tells if the category is set.
func (p Category) IsSet() bool {
	return p == Category_Set
}
// IsEnum tells if the category is enum.
func (p Category) IsEnum() bool {
	return p == Category_Enum
}
// IsStruct tells if the category is struct.
func (p Category) IsStruct() bool {
	return p == Category_Struct
}
// IsUnion tells if the category is union.
func (p Category) IsUnion() bool {
	return p == Category_Union
}
// IsException tells if the category is exception.
func (p Category) IsException() bool {
	return p == Category_Exception
}
// IsTypedef tells if the category is typedef.
func (p Category) IsTypedef() bool {
	return p == Category_Typedef
}
// IsService tells if the category is service.
func (p Category) IsService() bool {
	return p == Category_Service
}
// IsBaseType tells if the category is one of the base types
// (relies on Category_Bool..Category_Binary being declared contiguously).
func (p Category) IsBaseType() bool {
	return int64(Category_Bool) <= int64(p) && int64(p) <= int64(Category_Binary)
}
// IsContainerType tells if the category is one of the container types.
func (p Category) IsContainerType() bool {
	return p == Category_Map || p == Category_Set || p == Category_List
}
// IsStructLike tells if the category is a struct-like (struct, union or exception).
func (p Category) IsStructLike() bool {
	return p == Category_Struct || p == Category_Union || p == Category_Exception
}
package value
import (
"errors"
)
// TypeInfo identifies the container kind of a Value.
type TypeInfo int64
// These constants describe the container type of a Value.
const (
	TypeInfoScalar TypeInfo = iota + 1
	TypeInfoSlice
	TypeInfoMap
)
// Value is a "generic" type to store different types into flags
// Inspired by https://golang.org/src/flag/flag.go .
// There are two underlying "type" families designed to fit in Value:
// scalar types (Int, String, ...) and container types (IntSlice, StringMap, ...).
type Value interface {
	// Description of the type. useful for help messages. Should not be used as an ID.
	Description() string
	// Get returns the underlying value. It's meant to be type asserted against
	// Example: myInt := v.(int)
	Get() interface{}
	// Len returns 0 for scalar Values and len(underlyingValue) for container Values.
	// TODO: I think this will be useful when/if I start enforcing flag grouping (like grabbits subreddit params).
	// Those should all have the same length and the same source. I don't think I *need* it now, so leaving it out.
	// Len() int
	// ReplaceFromInterface replaces a value with one found in an interface.
	// Useful to update a Value from a config.
	ReplaceFromInterface(interface{}) error
	// String returns a string ready to be printed!
	String() string
	// StringSlice returns a []string ready to be printed for slice values and nil for others
	StringSlice() []string
	// TypeInfo specifies whether what "overall" type of value this is - scalar, slice, etc.
	TypeInfo() TypeInfo
	// Update appends to container type Values from a string (useful for CLI flags, env vars, default values)
	// and replaces scalar Values
	Update(string) error
	// UpdateFromInterface updates a container type Value from an interface (useful for configs)
	// and replaces scalar values (for scalar values, UpdateFromInterface is the same as ReplaceFromInterface).
	// Note that UpdateFromInterface must be called with the "contained" type for container type Values
	// For example, the StringSlice.UpdateFromInterface
	// must be called with a string, not a []string
	// It returns ErrIncompatibleInterface if the interface can't be decoded
	UpdateFromInterface(interface{}) error
}
// EmptyConstructor just builds a new Value.
// Useful to create new values as well as initialize them
type EmptyConstructor = func() (Value, error)
// ErrIncompatibleInterface is returned by UpdateFromInterface when the
// given interface cannot be decoded into the Value.
var ErrIncompatibleInterface = errors.New("could not decode interface into Value")
package gorrect
import "github.com/racerxdl/gorrect/correctwrap"
// ConvolutionCoder wraps a libcorrect convolutional encoder/decoder with
// rate r, order k and the generator polynomials poly.
type ConvolutionCoder struct {
	r int
	k int
	poly []uint16
	cc correctwrap.Correct_convolutional
}
// MakeConvolutionCoder creates a new Convolution Decoder / Encoder for r => rate, k => order and specified polys.
// It panics when len(poly) != r.
func MakeConvolutionCoder(r, k int, poly []uint16) *ConvolutionCoder {
	if len(poly) != r {
		panic("The number of polys should match the rate")
	}
	return &ConvolutionCoder{
		r: r,
		k: k,
		poly: poly,
		cc: correctwrap.Correct_convolutional_create(int64(r), int64(k), &poly[0]),
	}
}
// EncodedSize returns the number of encoded bits produced for dataLength bytes of data.
func (cc *ConvolutionCoder) EncodedSize(dataLength int) int {
	return int(correctwrap.Correct_convolutional_encode_len(cc.cc, int64(dataLength)))
}
// DecodedSize returns the number of decoded bytes produced from numBits encoded bits.
func (cc *ConvolutionCoder) DecodedSize(numBits int) int {
	return numBits / (8 * cc.r)
}
// EncodeSoft encodes the byte array to "soft" symbols, one output byte per
// encoded bit, taking bits MSB-first from each encoded byte.
// NOTE(review): the original comment said "0 for 0 and 255 for 1", but the
// code below emits 255 when the bit is 0 and 0 when it is 1 — confirm the
// intended soft-symbol polarity against the decoder's expectation.
func (cc *ConvolutionCoder) EncodeSoft(data []byte) (output []byte) {
	encoded := cc.Encode(data)
	output = make([]byte, cc.EncodedSize(len(data)))
	bl := len(encoded)
	for i := 0; i < bl && i*8 < len(output); i++ {
		d := encoded[i]
		for z := 7; z >= 0; z-- { // walk bits MSB-first
			output[i*8+(7-z)] = 0
			if d&(1<<uint(z)) == 0 {
				output[i*8+(7-z)] = 255
			}
		}
	}
	return output
}
// Encode convolution-encodes the byte array; each output byte carries 8
// encoded bits.
func (cc *ConvolutionCoder) Encode(data []byte) (output []byte) {
	frameBits := cc.EncodedSize(len(data))
	// Ceiling division: just enough bytes to hold frameBits bits.
	output = make([]byte, (frameBits+7)/8)
	correctwrap.Correct_convolutional_encode(cc.cc, &data[0], int64(len(data)), &output[0])
	return output
}
// Decode decodes a byte array containing 8 hard symbols per byte
func (cc *ConvolutionCoder) Decode(data []byte) (output []byte) {
	frameBits := int64(len(data)) * 8
	output = make([]byte, int(frameBits)/(8*cc.r))
	correctwrap.Correct_convolutional_decode(cc.cc, &data[0], frameBits, &output[0])
	return output
}
// DecodeSoft decodes a byte array containing one bit per byte as soft symbol (0 as 0, 1 as 255)
func (cc *ConvolutionCoder) DecodeSoft(data []byte) (output []byte) {
	// Each input byte is one soft bit, so frameBits == len(data).
	frameBits := int64(len(data))
	output = make([]byte, int(frameBits)/(8*cc.r))
	correctwrap.Correct_convolutional_decode_soft(cc.cc, &data[0], frameBits, &output[0])
	return output
}
// Close cleans the Convolution Coder native resources.
// The coder must not be used after Close.
func (cc *ConvolutionCoder) Close() {
	correctwrap.Correct_convolutional_destroy(cc.cc)
}
package datadog
import (
"encoding/json"
"fmt"
)
// NotebookDistributionCellAttributes The attributes of a notebook `distribution` cell.
// Time uses a Nullable wrapper so an explicit JSON null can be told apart
// from an absent field.
type NotebookDistributionCellAttributes struct {
	Definition DistributionWidgetDefinition `json:"definition"`
	GraphSize *NotebookGraphSize `json:"graph_size,omitempty"`
	SplitBy *NotebookSplitBy `json:"split_by,omitempty"`
	Time NullableNotebookCellTime `json:"time,omitempty"`
}
// NewNotebookDistributionCellAttributes instantiates a new NotebookDistributionCellAttributes object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewNotebookDistributionCellAttributes(definition DistributionWidgetDefinition) *NotebookDistributionCellAttributes {
	this := NotebookDistributionCellAttributes{}
	this.Definition = definition
	return &this
}
// NewNotebookDistributionCellAttributesWithDefaults instantiates a new NotebookDistributionCellAttributes object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewNotebookDistributionCellAttributesWithDefaults() *NotebookDistributionCellAttributes {
	this := NotebookDistributionCellAttributes{}
	return &this
}
// GetDefinition returns the Definition field value
func (o *NotebookDistributionCellAttributes) GetDefinition() DistributionWidgetDefinition {
	if o == nil {
		var ret DistributionWidgetDefinition
		return ret
	}
	return o.Definition
}
// GetDefinitionOk returns a tuple with the Definition field value
// and a boolean to check if the value has been set.
func (o *NotebookDistributionCellAttributes) GetDefinitionOk() (*DistributionWidgetDefinition, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Definition, true
}
// SetDefinition sets field value
func (o *NotebookDistributionCellAttributes) SetDefinition(v DistributionWidgetDefinition) {
	o.Definition = v
}
// GetGraphSize returns the GraphSize field value if set, zero value otherwise.
func (o *NotebookDistributionCellAttributes) GetGraphSize() NotebookGraphSize {
	if o == nil || o.GraphSize == nil {
		var ret NotebookGraphSize
		return ret
	}
	return *o.GraphSize
}
// GetGraphSizeOk returns a tuple with the GraphSize field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NotebookDistributionCellAttributes) GetGraphSizeOk() (*NotebookGraphSize, bool) {
	if o == nil || o.GraphSize == nil {
		return nil, false
	}
	return o.GraphSize, true
}
// HasGraphSize returns a boolean if a field has been set.
func (o *NotebookDistributionCellAttributes) HasGraphSize() bool {
	if o != nil && o.GraphSize != nil {
		return true
	}
	return false
}
// SetGraphSize gets a reference to the given NotebookGraphSize and assigns it to the GraphSize field.
func (o *NotebookDistributionCellAttributes) SetGraphSize(v NotebookGraphSize) {
	o.GraphSize = &v
}
// GetSplitBy returns the SplitBy field value if set, zero value otherwise.
func (o *NotebookDistributionCellAttributes) GetSplitBy() NotebookSplitBy {
	if o == nil || o.SplitBy == nil {
		var ret NotebookSplitBy
		return ret
	}
	return *o.SplitBy
}
// GetSplitByOk returns a tuple with the SplitBy field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NotebookDistributionCellAttributes) GetSplitByOk() (*NotebookSplitBy, bool) {
	if o == nil || o.SplitBy == nil {
		return nil, false
	}
	return o.SplitBy, true
}
// HasSplitBy returns a boolean if a field has been set.
func (o *NotebookDistributionCellAttributes) HasSplitBy() bool {
	if o != nil && o.SplitBy != nil {
		return true
	}
	return false
}
// SetSplitBy gets a reference to the given NotebookSplitBy and assigns it to the SplitBy field.
func (o *NotebookDistributionCellAttributes) SetSplitBy(v NotebookSplitBy) {
	o.SplitBy = &v
}
// Time is nullable: the accessors below distinguish "unset" from an
// explicit null (see SetTimeNil/UnsetTime).
// GetTime returns the Time field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *NotebookDistributionCellAttributes) GetTime() NotebookCellTime {
	if o == nil || o.Time.Get() == nil {
		var ret NotebookCellTime
		return ret
	}
	return *o.Time.Get()
}
// GetTimeOk returns a tuple with the Time field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *NotebookDistributionCellAttributes) GetTimeOk() (*NotebookCellTime, bool) {
	if o == nil {
		return nil, false
	}
	return o.Time.Get(), o.Time.IsSet()
}
// HasTime returns a boolean if a field has been set.
func (o *NotebookDistributionCellAttributes) HasTime() bool {
	if o != nil && o.Time.IsSet() {
		return true
	}
	return false
}
// SetTime gets a reference to the given NullableNotebookCellTime and assigns it to the Time field.
func (o *NotebookDistributionCellAttributes) SetTime(v NotebookCellTime) {
	o.Time.Set(&v)
}
// SetTimeNil sets the value for Time to be an explicit nil
func (o *NotebookDistributionCellAttributes) SetTimeNil() {
	o.Time.Set(nil)
}
// UnsetTime ensures that no value is present for Time, not even an explicit nil
func (o *NotebookDistributionCellAttributes) UnsetTime() {
	o.Time.Unset()
}
// MarshalJSON serializes only the fields that are set; "definition" is
// always emitted (the `if true` is generator boilerplate for a required field).
func (o NotebookDistributionCellAttributes) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if true {
		toSerialize["definition"] = o.Definition
	}
	if o.GraphSize != nil {
		toSerialize["graph_size"] = o.GraphSize
	}
	if o.SplitBy != nil {
		toSerialize["split_by"] = o.SplitBy
	}
	if o.Time.IsSet() {
		toSerialize["time"] = o.Time.Get()
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON deserializes the attributes, first checking that the
// required "definition" field is present, then decoding all fields.
// The struct tags of the `all` helper were garbled (a stray `}` inside
// every tag, e.g. `json:"definition"}`); they are restored here.
func (o *NotebookDistributionCellAttributes) UnmarshalJSON(bytes []byte) (err error) {
	required := struct {
		Definition *DistributionWidgetDefinition `json:"definition"`
	}{}
	all := struct {
		Definition DistributionWidgetDefinition `json:"definition"`
		GraphSize  *NotebookGraphSize           `json:"graph_size,omitempty"`
		SplitBy    *NotebookSplitBy             `json:"split_by,omitempty"`
		Time       NullableNotebookCellTime     `json:"time,omitempty"`
	}{}
	err = json.Unmarshal(bytes, &required)
	if err != nil {
		return err
	}
	if required.Definition == nil {
		return fmt.Errorf("Required field definition missing")
	}
	err = json.Unmarshal(bytes, &all)
	if err != nil {
		return err
	}
	o.Definition = all.Definition
	o.GraphSize = all.GraphSize
	o.SplitBy = all.SplitBy
	o.Time = all.Time
	return nil
}
// NullableNotebookDistributionCellAttributes distinguishes between an
// absent value and an explicit JSON null.
type NullableNotebookDistributionCellAttributes struct {
	value *NotebookDistributionCellAttributes
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableNotebookDistributionCellAttributes) Get() *NotebookDistributionCellAttributes {
	return v.value
}
// Set stores val and marks the wrapper as set (even when val is nil).
func (v *NullableNotebookDistributionCellAttributes) Set(val *NotebookDistributionCellAttributes) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set has been called.
func (v NullableNotebookDistributionCellAttributes) IsSet() bool {
	return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullableNotebookDistributionCellAttributes) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableNotebookDistributionCellAttributes wraps val as a set nullable.
func NewNullableNotebookDistributionCellAttributes(val *NotebookDistributionCellAttributes) *NullableNotebookDistributionCellAttributes {
	return &NullableNotebookDistributionCellAttributes{value: val, isSet: true}
}
// MarshalJSON emits the wrapped value (JSON null when the value is nil).
func (v NullableNotebookDistributionCellAttributes) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapper and marks it as set.
func (v *NullableNotebookDistributionCellAttributes) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package onshape
import (
"encoding/json"
)
// BTMSketch151 struct for BTMSketch151
// It embeds BTMFeature134 and adds the sketch's constraints and geometry.
type BTMSketch151 struct {
	BTMFeature134
	BtType *string `json:"btType,omitempty"`
	Constraints *[]BTMSketchConstraint2 `json:"constraints,omitempty"`
	Entities *[]BTMSketchGeomEntity5 `json:"entities,omitempty"`
}
// NewBTMSketch151 instantiates a new BTMSketch151 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTMSketch151() *BTMSketch151 {
	this := BTMSketch151{}
	return &this
}
// NewBTMSketch151WithDefaults instantiates a new BTMSketch151 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTMSketch151WithDefaults() *BTMSketch151 {
	this := BTMSketch151{}
	return &this
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTMSketch151) GetBtType() string {
	if o == nil || o.BtType == nil {
		var ret string
		return ret
	}
	return *o.BtType
}
// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTMSketch151) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}
// HasBtType returns a boolean if a field has been set.
func (o *BTMSketch151) HasBtType() bool {
	if o != nil && o.BtType != nil {
		return true
	}
	return false
}
// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTMSketch151) SetBtType(v string) {
	o.BtType = &v
}
// GetConstraints returns the Constraints field value if set, zero value otherwise.
func (o *BTMSketch151) GetConstraints() []BTMSketchConstraint2 {
	if o == nil || o.Constraints == nil {
		var ret []BTMSketchConstraint2
		return ret
	}
	return *o.Constraints
}
// GetConstraintsOk returns a tuple with the Constraints field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTMSketch151) GetConstraintsOk() (*[]BTMSketchConstraint2, bool) {
	if o == nil || o.Constraints == nil {
		return nil, false
	}
	return o.Constraints, true
}
// HasConstraints returns a boolean if a field has been set.
func (o *BTMSketch151) HasConstraints() bool {
	if o != nil && o.Constraints != nil {
		return true
	}
	return false
}
// SetConstraints gets a reference to the given []BTMSketchConstraint2 and assigns it to the Constraints field.
func (o *BTMSketch151) SetConstraints(v []BTMSketchConstraint2) {
	o.Constraints = &v
}
// GetEntities returns the Entities field value if set, zero value otherwise.
func (o *BTMSketch151) GetEntities() []BTMSketchGeomEntity5 {
	if o == nil || o.Entities == nil {
		var ret []BTMSketchGeomEntity5
		return ret
	}
	return *o.Entities
}
// GetEntitiesOk returns a tuple with the Entities field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTMSketch151) GetEntitiesOk() (*[]BTMSketchGeomEntity5, bool) {
	if o == nil || o.Entities == nil {
		return nil, false
	}
	return o.Entities, true
}
// HasEntities returns a boolean if a field has been set.
func (o *BTMSketch151) HasEntities() bool {
	if o != nil && o.Entities != nil {
		return true
	}
	return false
}
// SetEntities gets a reference to the given []BTMSketchGeomEntity5 and assigns it to the Entities field.
func (o *BTMSketch151) SetEntities(v []BTMSketchGeomEntity5) {
	o.Entities = &v
}
// MarshalJSON flattens the embedded BTMFeature134 fields into the same
// JSON object and then adds the sketch-specific fields that are set.
func (o BTMSketch151) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	// Serialize the embedded struct and merge its keys into toSerialize.
	serializedBTMFeature134, errBTMFeature134 := json.Marshal(o.BTMFeature134)
	if errBTMFeature134 != nil {
		return []byte{}, errBTMFeature134
	}
	errBTMFeature134 = json.Unmarshal([]byte(serializedBTMFeature134), &toSerialize)
	if errBTMFeature134 != nil {
		return []byte{}, errBTMFeature134
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.Constraints != nil {
		toSerialize["constraints"] = o.Constraints
	}
	if o.Entities != nil {
		toSerialize["entities"] = o.Entities
	}
	return json.Marshal(toSerialize)
}
// NullableBTMSketch151 distinguishes between an absent value and an
// explicit JSON null.
type NullableBTMSketch151 struct {
	value *BTMSketch151
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableBTMSketch151) Get() *BTMSketch151 {
	return v.value
}
// Set stores val and marks the wrapper as set (even when val is nil).
func (v *NullableBTMSketch151) Set(val *BTMSketch151) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set has been called.
func (v NullableBTMSketch151) IsSet() bool {
	return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullableBTMSketch151) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableBTMSketch151 wraps val as a set nullable.
func NewNullableBTMSketch151(val *BTMSketch151) *NullableBTMSketch151 {
	return &NullableBTMSketch151{value: val, isSet: true}
}
// MarshalJSON emits the wrapped value (JSON null when the value is nil).
func (v NullableBTMSketch151) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapper and marks it as set.
func (v *NullableBTMSketch151) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package trie
import (
	"errors"
	"fmt"
	"reflect"
)
// tree is a trie mapping string keys to arbitrary values.
type tree struct {
	root node
}
// NewTree returns an empty trie.
func NewTree() *tree {
	return &tree{root: node{}}
}
/// Finds associated value for key
/// returns error if key was not found in trie
func (t *tree) Find(key string) (interface{}, error) {
	return t.root.find(key)
}
/// Insert key-value-pair into trie (an existing value for key is overwritten)
func (t *tree) Insert(key string, value interface{}) {
	t.root.insert(key, value)
}
/// If the tree contains the given value, the associated value is removed from the trie
/// returns true if key was deleted successfully
/// NOTE(review): node.delete clears the stored value, but its pruning of
/// empty child nodes relies on removeIthChar/removeIthTrie which never
/// matched anything — confirm pruning actually works after the fix.
func (t *tree) Delete(key string) bool {
	return t.root.delete(key)
}
// Checks if the given key is associated with a (non-nil) value in the trie
func (t *tree) Contains(key string) bool {
	v, e := t.root.find(key)
	return e == nil && v != nil
}
/// Calls given function for every key-value-pair
/// argument:
///  - f func(string,interface{}) : function, which is called with key and value as arguments
func (t *tree) ForEach(f func(string, interface{})) {
	t.root.iterate("", f)
}
/// Returns all existing keys in trie as array of strings
func (t *tree) Keys() []string {
	return t.root.iterate("", nil)
}
/// Returns all key-value-pairs as map (golang type)
func (t *tree) ToMap() map[string]interface{} {
	m := map[string]interface{}{}
	t.root.iterate("", func(key string, value interface{}) {
		m[key] = value
	})
	return m
}
/// Returns all existing values in trie
func (t *tree) Values() []interface{} {
	values := []interface{}{}
	t.root.iterate("", func(key string, value interface{}) {
		values = append(values, value)
	})
	return values
}
/// Returns the amount of key-value-pairs in the trie as integer
func (t *tree) Size() int {
	return len(t.Keys())
}
// Converts trie to string e.g {test=2,go=test,key3={2,5}}
// Note: Size() walks the whole trie a second time just to place commas.
func (t *tree) String() string {
	result := "{"
	i := 0
	mLen := t.Size()
	t.ForEach(func(key string, value interface{}) {
		result += key + "=" + fmt.Sprintf("%v", value)
		if i+1 < mLen {
			result += ","
		}
		i++
	})
	return result + "}"
}
// node is a single trie node: chars[i] is the edge label that leads to
// child tries[i]; Value is non-nil when a key ends at this node.
type node struct {
	Value interface{} //abstract value stored for a key ending here
	chars []byte // edge labels, kept sorted (see sort)
	tries []node // children, parallel to chars
}
// find walks the trie along key and returns the value stored at its end.
// It returns a non-nil error when the key is not present.
func (t *node) find(key string) (interface{}, error) {
	if len(key) == 0 {
		return t.Value, nil
	}
	for i := 0; i < len(t.chars); i++ {
		if t.chars[i] == key[0] {
			return t.tries[i].find(key[1:])
		}
	}
	// Return nil (not a boxed 0) so callers get the zero interface value
	// on a miss.
	return nil, errors.New("Key not found")
}
// sorts a node's chars and tries alphabetically with bubble sort
// (adjacent-swap passes; the original comment said selection sort).
// The value receiver is fine: the swaps go through the slice headers,
// which share their backing arrays with the original node.
func (t node) sort() {
	for j := 0; j < len(t.chars); j++ {
		for i := 0; i < len(t.chars)-1; i++ {
			if t.chars[i] > t.chars[i+1] {
				t.chars[i], t.chars[i+1] = t.chars[i+1], t.chars[i]
				t.tries[i], t.tries[i+1] = t.tries[i+1], t.tries[i]
			}
		}
	}
}
// insert walks/extends the trie along key and stores value at its end.
func (t *node) insert(key string, value interface{}) {
	if len(key) == 0 {
		t.Value = value
	} else {
		// Descend into an existing child edge if one matches.
		for i := 0; i < len(t.chars); i++ {
			if t.chars[i] == key[0] {
				t.tries[i].insert(string(key[1:]), value)
				return
			}
		}
		// No matching edge: create a new child and keep the edges sorted.
		newTrie := node{}
		newTrie.insert(string(key[1:]), value)
		t.chars = append(t.chars, key[0])
		t.tries = append(t.tries, newTrie)
		t.sort()
	}
}
// delete removes the value stored under key. It returns true when the
// traversal reached the end of key (the stored value, if any, is cleared)
// and false when key is not present. Children left empty by the removal
// are pruned from their parent.
func (t *node) delete(key string) bool {
	if len(key) == 0 {
		t.Value = nil
		return true
	}
	for i := 0; i < len(t.chars); i++ {
		if t.chars[i] == key[0] {
			result := t.tries[i].delete(string(key[1:]))
			if t.tries[i].isEmpty() {
				// Prune in place. The previous code handed t.chars/t.tries
				// to helpers that never matched and discarded the append
				// result, so empty nodes were never actually removed.
				t.chars = append(t.chars[:i], t.chars[i+1:]...)
				t.tries = append(t.tries[:i], t.tries[i+1:]...)
			}
			return result
		}
	}
	return false
}
/// iterates recursively through tree and calls the given function for every found key-value-pair in trie
/// arguments:
///  - pre string: the key prefix accumulated on the way down to this node
///  - f func(string,interface{}): function called with key and value (may be nil)
/// returns all full keys found at or beneath this node (used by Keys)
func (t *node) iterate(pre string, f func(string, interface{})) []string {
	arr := []string{}
	if t.Value != nil {
		arr = append(arr, pre)
		if f != nil {
			f(pre, t.Value)
		}
	}
	if len(t.chars) > 0 {
		for i, e := range t.chars {
			arr = append(arr, t.tries[i].iterate(pre+string(e), f)...)
		}
	}
	return arr
}
// Checks if node is not filled yet (no stored value and no children)
func (t *node) isEmpty() bool {
	return t.Value == nil && len(t.chars) == 0
}
// removeIthChar removes the first occurrence of val from list and returns
// the shrunk slice; callers must use the return value. The original
// version compared addresses (&val == &e, never equal), skipped a match
// at index 0 (index > 0) and discarded the append result, so it was a
// complete no-op. Adding the return value is backward-compatible: Go
// allows existing expression-statement call sites to discard it.
func removeIthChar(list []byte, val byte) []byte {
	for i, e := range list {
		if e == val {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}
//removes first occurrence of value in array
func removeIthTrie(list []node, val node) {
index := -1
for i, e := range list {
if &val == &e {
index = i
break
}
}
if index > 0 {
list = append(list[:index], list[index+1:]...)
}
} | src/trees/trie/trie.go | 0.712332 | 0.435181 | trie.go | starcoder |
package cluster
import (
"fmt"
"math/rand"
)
// Kmeans configuration/option struct
type Kmeans struct {
k int
// deltaThreshold (in percent between 0.0 and 0.1) aborts processing if
// less than n% of data points shifted clusters in the last iteration
deltaThreshold float64
// iterationThreshold aborts processing when the specified amount of
// algorithm iterations was reached
iterationThreshold int
}
// New returns a Kmeans configuration struct with default settings:
// a 1% delta threshold and at most 96 iterations (k must still be set).
func NewKmeans() *Kmeans {
	return &Kmeans{
		deltaThreshold: 0.01,
		iterationThreshold: 96,
	}
}
// SetK sets the number of clusters to partition the dataset into.
func (m *Kmeans) SetK(k int) {
	m.k = k
}
// SetMaxIter caps the number of algorithm iterations.
func (m *Kmeans) SetMaxIter(iter int) {
	m.iterationThreshold = iter
}
// SetDeltaThreshold sets the relative convergence threshold; it must be
// strictly between 0.0 and 1.0.
func (m *Kmeans) SetDeltaThreshold(deltaThreshold float64) error {
	if deltaThreshold <= 0.0 || deltaThreshold >= 1.0 {
		return fmt.Errorf("threshold is out of bounds (must be >0.0 and <1.0, in percent)")
	}
	m.deltaThreshold = deltaThreshold
	return nil
}
// Clusterize executes the k-means algorithm on the given dataset and
// partitions it into k clusters
func (m *Kmeans) Clusterize(dataset Observations) (Clusters, error) {
	if m.k > len(dataset) {
		return nil, fmt.Errorf("the size of the data set must at least equal k")
	}
	// Seed k cluster centers from random data points.
	cc, err := RandClusters(m.k, dataset)
	if err != nil {
		return cc, err
	}
	// points[p] remembers which cluster data point p was last assigned to.
	points := make([]int, len(dataset))
	changes := 1
	for i := 0; changes > 0; i++ {
		changes = 0
		cc.Reset()
		// Assignment step: attach every point to its nearest center and
		// count how many points switched cluster.
		for p, point := range dataset {
			ci := cc.Nearest(point)
			cc[ci].Append(point)
			if points[p] != ci {
				points[p] = ci
				changes++
			}
		}
		for ci := 0; ci < len(cc); ci++ {
			if len(cc[ci].Observations) == 0 {
				// During the iterations, if any of the cluster centers has no
				// data points associated with it, assign a random data point
				// to it.
				// Also see: http://user.ceng.metu.edu.tr/~tcan/ceng465_f1314/Schedule/KMeansEmpty.html
				var ri int
				for {
					// find a cluster with at least two data points, otherwise
					// we're just emptying one cluster to fill another
					ri = rand.Intn(len(dataset))
					if len(cc[points[ri]].Observations) > 1 {
						break
					}
				}
				cc[ci].Append(dataset[ri])
				points[ri] = ci
				// Ensure that we always see at least one more iteration after
				// randomly assigning a data point to a cluster
				changes = len(dataset)
			}
		}
		// Update step: move each center to the mean of its members.
		if changes > 0 {
			cc.Recenter()
		}
		// Stop at the iteration cap or once fewer than deltaThreshold
		// percent of the points changed cluster.
		if i == m.iterationThreshold ||
			changes < int(float64(len(dataset))*m.deltaThreshold) {
			// fmt.Println("Aborting:", changes, int(float64(len(dataset))*m.TerminationThreshold))
			break
		}
	}
	return cc, nil
}
package api
import (
. "github.com/gocircuit/circuit/gocircuit.org/render"
)
// RenderSubscriptionPage renders the "Using subscriptions" documentation
// page as a complete HTML document.
func RenderSubscriptionPage() string {
	return RenderHtml("Using subscriptions", Render(subscriptionBody, nil))
}
// subscriptionBody is the raw HTML body template for the "Using subscriptions"
// documentation page; it is rendered by RenderSubscriptionPage.
const subscriptionBody = `
<h2>Using subscriptions</h2>
<p>Subscriptions are a way of receiving notifications about events of a given type.
Presently, the circuit provides two types of subscriptions:
<ul>
<li>Subscriptions for notifications about hosts joining the cluster, and
<li>Subscriptions for notifications about hosts leaving the cluster.
</ul>
<p>Just like process and other elements, a subscription is an element (a persistent object) that
is created and attached to an anchor. The methods of a subscription allow the user
to read events, one by one, in order of appearance, at the user's convenience.
<p>To create a new subscription element, use one of the following two <code>Anchor</code>
methods:
<pre>
	MakeOnJoin() (Subscription, error)
	MakeOnLeave() (Subscription, error)
</pre>
<p><code>MakeOnJoin</code> subscribes to the stream of ‘host joined the cluster’ events,
while <code>MakeOnLeave</code> subscribes to the stream of ‘host left the cluster’ events.
An application error will occur only if the underlying anchor is not free (i.e. it has an element
already attached to it).
<p>The subscription element is represented by the following Go interface:
<pre>
	type Subscription interface {
		Consume() (interface{}, bool)
		Peek() SubscriptionStat
		Scrub()
	}
</pre>
<p>Subscriptions can be closed and discarded using the <code>Scrub</code> method of the
subscription element or of its anchor.
<h3>Consuming events</h3>
<p>Events are consumed using <code>Consume</code>. The first return value of <code>Consume</code>
holds the description of the event that was popped from the queue.
The second return value is true if an event was successfully retrieved. Otherwise, the end-of-stream
has been reached permanently and the first return value will be nil.
<p>If the stream is still open and there are no events to be consumed, <code>Consume</code> will block.
<p>Host join and leave subscriptions return <code>string</code> events that
hold the textual path of the host that joined or left the network. These strings will
look like <code>/X36f63a7e4ae9df92</code>
<p>After a join-subscription is created, it will produce all the hosts that are currently
in the cluster as events, and then it will continue producing new events as new hosts join later.
<p>After a leave-subscription is created, it will produce events only for hosts leaving the
network after the subscription was created. Some leave events may be reported more than once.
<h3>Status of subscription queue</h3>
<p>The status of a subscription queue can be queried asynchronously using <code>Peek</code>.
The returned structure describes the type of the subscription, the number of pending (not yet consumed)
events, and whether the subscription has already been closed (by the user).
<pre>
	type SubscriptionStat struct {
		Source string
		Pending int
		Closed bool
	}
</pre>
<h3>Example</h3>
<p>Subscriptions, for join or leave events, are intended to be used via the following programming
pattern:
<pre>
	join, err := MakeOnJoin()
	if err != nil {
		…
	}
	for {
		event, ok := join.Consume()
		if !ok {
			…
		}
		host := event.(string)
		…
	}
</pre>
`
package condition
import (
"encoding/json"
"fmt"
"reflect"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/x/docs"
"github.com/Jeffail/gabs/v2"
)
//------------------------------------------------------------------------------
// init registers the JSON condition constructor and its user-facing
// documentation in the global Constructors table under TypeJSON.
func init() {
	Constructors[TypeJSON] = TypeSpec{
		constructor: NewJSON,
		Summary: `
Checks JSON messages against a logical [operator](#operators) and an argument.`,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("operator", "A logical [operator](#operators) to check with.").HasOptions(
				"exists", "equals", "contains",
			),
			docs.FieldCommon("path", "The [path](/docs/configuration/field_paths) of a specific field within JSON documents to check."),
			docs.FieldCommon("arg", "An argument to check against. May not be applicable for all operators."),
			partFieldSpec,
		},
		Footnotes: `
## Operators
### ` + "`exists`" + `
Checks whether the target path exists within a document. If the path is the root
(empty or '.') then it simply checks that the document is valid JSON.
### ` + "`equals`" + `
Checks whether the target path exists and matches the argument.
### ` + "`contains`" + `
Checks whether the target path is an array containing the argument.`,
	}
}
// JSONConfig is a configuration struct containing fields for the JSON
// condition.
type JSONConfig struct {
	Operator string      `json:"operator" yaml:"operator"` // one of: exists, equals, contains
	Part     int         `json:"part" yaml:"part"`         // index of the message part to inspect
	Path     string      `json:"path" yaml:"path"`         // dot path of the JSON field to check
	Arg      interface{} `json:"arg" yaml:"arg"`           // comparison argument (unused by "exists")
}
// NewJSONConfig returns a JSONConfig with default values.
func NewJSONConfig() JSONConfig {
	conf := JSONConfig{}
	conf.Operator = "exists"
	conf.Part = 0
	conf.Path = ""
	conf.Arg = ""
	return conf
}
// jsonOperator evaluates one configured check against a parsed JSON document.
type jsonOperator func(c *gabs.Container) bool

// jsonExistOperator builds an operator reporting whether path exists in the
// document; the root path ("" or ".") always passes, since a parsed document
// trivially exists.
func jsonExistOperator(path string) jsonOperator {
	return func(c *gabs.Container) bool {
		if path == "" || path == "." {
			return true
		}
		return c.ExistsP(path)
	}
}
func toFloat64(v interface{}) (float64, bool) {
var argF float64
switch t := v.(type) {
case int:
argF = float64(t)
case int64:
argF = float64(t)
case float64:
argF = float64(t)
case json.Number:
var err error
if argF, err = t.Float64(); err != nil {
argI, _ := t.Int64()
argF = float64(argI)
}
default:
return 0, false
}
return argF, true
}
// jsonContainsOperator builds an operator reporting whether the array at path
// contains arg. Numeric arguments are matched by float64 value so e.g. 1 and
// 1.0 compare equal; everything else falls back to deep equality.
func jsonContainsOperator(path string, arg interface{}) jsonOperator {
	if argF, isNum := toFloat64(arg); isNum {
		return func(c *gabs.Container) bool {
			for _, child := range c.Path(path).Children() {
				childF, ok := toFloat64(child.Data())
				if ok && childF == argF {
					return true
				}
			}
			return false
		}
	}
	return func(c *gabs.Container) bool {
		for _, child := range c.Path(path).Children() {
			if reflect.DeepEqual(child.Data(), arg) {
				return true
			}
		}
		return false
	}
}
// jsonEqualsOperator builds an operator reporting whether the value at path
// equals arg. Numeric arguments compare by float64 value; all other types use
// deep equality.
func jsonEqualsOperator(path string, arg interface{}) jsonOperator {
	if argF, isNum := toFloat64(arg); isNum {
		return func(c *gabs.Container) bool {
			valueF, ok := toFloat64(c.Path(path).Data())
			return ok && valueF == argF
		}
	}
	return func(c *gabs.Container) bool {
		return reflect.DeepEqual(c.Path(path).Data(), arg)
	}
}
// strToJSONOperator resolves an operator name from configuration into its
// jsonOperator closure, or errors on an unknown name.
func strToJSONOperator(op, path string, arg interface{}) (jsonOperator, error) {
	var operator jsonOperator
	switch op {
	case "exists":
		operator = jsonExistOperator(path)
	case "equals":
		operator = jsonEqualsOperator(path, arg)
	case "contains":
		operator = jsonContainsOperator(path, arg)
	default:
		return nil, fmt.Errorf("unrecognised json operator: %v", op)
	}
	return operator, nil
}
// JSON is a condition that checks JSON against a simple logic.
type JSON struct {
	stats    metrics.Type // metrics aggregator that owns the counters below
	operator jsonOperator // pre-built check closure (exists/equals/contains)
	part     int          // index of the message part to inspect
	mCount   metrics.StatCounter // total checks performed
	mTrue    metrics.StatCounter // checks that evaluated true
	mFalse   metrics.StatCounter // checks that evaluated false
}
// NewJSON returns a JSON condition.
func NewJSON(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	// Build the operator closure once up front; an unknown operator name is
	// a configuration error and fails construction.
	op, err := strToJSONOperator(conf.JSON.Operator, conf.JSON.Path, conf.JSON.Arg)
	if err != nil {
		return nil, fmt.Errorf("operator '%v': %v", conf.JSON.Operator, err)
	}
	return &JSON{
		stats:    stats,
		operator: op,
		part:     conf.JSON.Part,
		mCount:   stats.GetCounter("count"),
		mTrue:    stats.GetCounter("true"),
		mFalse:   stats.GetCounter("false"),
	}, nil
}
// Check attempts to check a message part against a configured condition.
func (c *JSON) Check(msg types.Message) bool {
c.mCount.Incr(1)
index := c.part
lParts := msg.Len()
if lParts == 0 {
c.mFalse.Incr(1)
return false
}
msgPart, err := msg.Get(index).JSON()
if err != nil {
c.mFalse.Incr(1)
return false
}
res := c.operator(gabs.Wrap(msgPart))
if res {
c.mTrue.Incr(1)
} else {
c.mFalse.Incr(1)
}
return res
} | lib/condition/json.go | 0.748444 | 0.404331 | json.go | starcoder |
package eaopt
import (
"math/rand"
"sort"
)
// Type specific mutations for slices
// CrossUniformFloat64 crossover combines two individuals (the parents) into one
// (the offspring). Each parent's contribution to the Genome is determined by
// the value of a probability p. Each offspring receives a proportion of both of
// it's parents genomes. The new values are located in the hyper-rectangle
// defined between both parent's position in Cartesian space.
func CrossUniformFloat64(p1 []float64, p2 []float64, rng *rand.Rand) {
for i := range p1 {
var p = rng.Float64()
p1[i] = p*p1[i] + (1-p)*p2[i]
p2[i] = (1-p)*p1[i] + p*p2[i]
}
}
// Generic mutations for slices
// Contains the deterministic part of the GNX method for testing purposes.
// The sorted cut points in indexes partition [0, n); consecutive segments are
// copied into the offspring from alternating parents, and both parents are
// finally overwritten in place with their offspring.
func gnx(p1, p2 Slice, indexes []int) {
	n := p1.Len()
	o1 := p1.Copy()
	o2 := p2.Copy()
	// Bracket the cut points with the genome boundaries.
	bounds := make([]int, 0, len(indexes)+2)
	bounds = append(bounds, 0)
	bounds = append(bounds, indexes...)
	bounds = append(bounds, n)
	fromFirst := true
	for i := 0; i+1 < len(bounds); i++ {
		lo, hi := bounds[i], bounds[i+1]
		if fromFirst {
			o1.Slice(lo, hi).Replace(p1.Slice(lo, hi))
			o2.Slice(lo, hi).Replace(p2.Slice(lo, hi))
		} else {
			o1.Slice(lo, hi).Replace(p2.Slice(lo, hi))
			o2.Slice(lo, hi).Replace(p1.Slice(lo, hi))
		}
		fromFirst = !fromFirst // alternate the source parent per segment
	}
	p1.Replace(o1)
	p2.Replace(o2)
}
// CrossGNX (Generalized N-point Crossover) draws n distinct crossover points
// on the genome and swaps the mirroring segments between both parents, which
// are modified in place. n must not exceed the number of genes per parent.
func CrossGNX(p1 Slice, p2 Slice, n uint, rng *rand.Rand) {
	cuts := randomInts(n, 1, p1.Len(), rng)
	sort.Ints(cuts)
	gnx(p1, p2, cuts)
}
// CrossGNXInt calls CrossGNX on two int slices.
// Both slices are modified in place through the IntSlice adapter.
func CrossGNXInt(s1 []int, s2 []int, n uint, rng *rand.Rand) {
	CrossGNX(IntSlice(s1), IntSlice(s2), n, rng)
}

// CrossGNXFloat64 calls CrossGNX on two float64 slices.
// Both slices are modified in place through the Float64Slice adapter.
func CrossGNXFloat64(s1 []float64, s2 []float64, n uint, rng *rand.Rand) {
	CrossGNX(Float64Slice(s1), Float64Slice(s2), n, rng)
}

// CrossGNXString calls CrossGNX on two string slices.
// Both slices are modified in place through the StringSlice adapter.
func CrossGNXString(s1 []string, s2 []string, n uint, rng *rand.Rand) {
	CrossGNX(StringSlice(s1), StringSlice(s2), n, rng)
}
// Contains the deterministic part of the PMX method for testing purposes.
// a and b delimit the crossover segment [a, b): genes inside it stay in place
// while displaced genes are relocated through the partial mapping so that each
// offspring keeps every gene exactly once.
func pmx(p1, p2 Slice, a, b int) {
	var (
		n  = p1.Len()
		o1 = p1.Copy()
		o2 = p2.Copy()
	)
	// Create lookup maps to quickly see if a gene has been visited
	var (
		p1Visited, p2Visited = make(set), make(set)
		o1Visited, o2Visited = make(set), make(set)
	)
	// Mark the crossover segment: its gene values and its index positions.
	for i := a; i < b; i++ {
		p1Visited[p1.At(i)] = true
		p2Visited[p2.At(i)] = true
		o1Visited[i] = true
		o2Visited[i] = true
	}
	for i := a; i < b; i++ {
		// Find the element in the second parent that has not been copied in the first offspring
		if !p1Visited[p2.At(i)] {
			var j = i
			// Follow the mapping chain until an unoccupied position is found.
			for o1Visited[j] {
				j, _ = search(o1.At(j), p2)
			}
			o1.Set(j, p2.At(i))
			o1Visited[j] = true
		}
		// Find the element in the first parent that has not been copied in the second offspring
		if !p2Visited[p1.At(i)] {
			var j = i
			for o2Visited[j] {
				j, _ = search(o2.At(j), p1)
			}
			o2.Set(j, p1.At(i))
			o2Visited[j] = true
		}
	}
	// Fill in the offspring's missing values with the opposite parent's values
	for i := 0; i < n; i++ {
		if !o1Visited[i] {
			o1.Set(i, p2.At(i))
		}
		if !o2Visited[i] {
			o2.Set(i, p1.At(i))
		}
	}
	// Both parents are overwritten in place with their offspring.
	p1.Replace(o1)
	p2.Replace(o2)
}
// CrossPMX (Partially Mapped Crossover) picks two random crossover points and
// exchanges the enclosed segment between both parents, relocating displaced
// genes through the partial mapping so gene uniqueness is preserved. Both
// parents are modified in place.
func CrossPMX(p1 Slice, p2 Slice, rng *rand.Rand) {
	cuts := randomInts(2, 1, p1.Len(), rng)
	sort.Ints(cuts)
	pmx(p1, p2, cuts[0], cuts[1])
}
// CrossPMXInt calls CrossPMX on an int slice.
// Both slices are modified in place through the IntSlice adapter.
func CrossPMXInt(s1 []int, s2 []int, rng *rand.Rand) {
	CrossPMX(IntSlice(s1), IntSlice(s2), rng)
}

// CrossPMXFloat64 calls CrossPMX on a float64 slice.
// Both slices are modified in place through the Float64Slice adapter.
func CrossPMXFloat64(s1 []float64, s2 []float64, rng *rand.Rand) {
	CrossPMX(Float64Slice(s1), Float64Slice(s2), rng)
}

// CrossPMXString calls CrossPMX on a string slice.
// Both slices are modified in place through the StringSlice adapter.
func CrossPMXString(s1 []string, s2 []string, rng *rand.Rand) {
	CrossPMX(StringSlice(s1), StringSlice(s2), rng)
}
// Contains the deterministic part of the OX method for testing purposes.
// a and b delimit the preserved segment [a, b); the remaining positions are
// filled from the opposite parent in wrap-around order starting at b, keeping
// gene uniqueness.
func ox(p1, p2 Slice, a, b int) {
	var (
		n  = p1.Len()
		o1 = p1.Copy()
		o2 = p2.Copy()
	)
	// Create lookup maps to quickly see if a gene has been copied from a parent or not
	// The range [b, a+n) (mod n) is exactly the complement of the preserved
	// segment: these are the genes still owed to each offspring.
	var p1Occurences, p2Occurences = make(setInt), make(setInt)
	for i := b; i < a+n; i++ {
		var k = i % n
		p1Occurences[p1.At(k)]++
		p2Occurences[p2.At(k)]++
	}
	// Keep two indicators to know where to fill the offsprings
	var j1, j2 = b, b
	// Walk the full genome once, wrapping modulo n, consuming owed genes.
	for i := b; i < b+n; i++ {
		var k = i % n
		if p1Occurences[p2.At(k)] > 0 {
			p1Occurences[p2.At(k)]--
			o1.Set(j1%n, p2.At(k))
			j1++
		}
		if p2Occurences[p1.At(k)] > 0 {
			p2Occurences[p1.At(k)]--
			o2.Set(j2%n, p1.At(k))
			j2++
		}
	}
	// Both parents are overwritten in place with their offspring.
	p1.Replace(o1)
	p2.Replace(o2)
}
// CrossOX (Ordered Crossover) preserves a randomly chosen segment of each
// parent and fills the remaining positions, in order, with the other parent's
// genes that are not yet present — preserving gene uniqueness. Both parents
// are modified in place.
func CrossOX(p1 Slice, p2 Slice, rng *rand.Rand) {
	cuts := randomInts(2, 1, p1.Len(), rng)
	sort.Ints(cuts)
	ox(p1, p2, cuts[0], cuts[1])
}
// CrossOXInt calls CrossOX on a int slice.
// Both slices are modified in place through the IntSlice adapter.
func CrossOXInt(s1 []int, s2 []int, rng *rand.Rand) {
	CrossOX(IntSlice(s1), IntSlice(s2), rng)
}

// CrossOXFloat64 calls CrossOX on a float64 slice.
// Both slices are modified in place through the Float64Slice adapter.
func CrossOXFloat64(s1 []float64, s2 []float64, rng *rand.Rand) {
	CrossOX(Float64Slice(s1), Float64Slice(s2), rng)
}

// CrossOXString calls CrossOX on a string slice.
// Both slices are modified in place through the StringSlice adapter.
func CrossOXString(s1 []string, s2 []string, rng *rand.Rand) {
	CrossOX(StringSlice(s1), StringSlice(s2), rng)
}
// CrossCX (Cycle Crossover) identifies the cycles between both parents and
// copies each cycle onto the offspring from alternating parents. The method is
// deterministic, preserves gene uniqueness, and modifies both parents in place.
func CrossCX(p1, p2 Slice) {
	o1 := p1.Copy()
	o2 := p2.Copy()
	fromFirst := true
	for _, cycle := range getCycles(p1, p2) {
		for _, j := range cycle {
			if fromFirst {
				o1.Set(j, p1.At(j))
				o2.Set(j, p2.At(j))
			} else {
				o1.Set(j, p2.At(j))
				o2.Set(j, p1.At(j))
			}
		}
		fromFirst = !fromFirst // alternate the source parent per cycle
	}
	p1.Replace(o1)
	p2.Replace(o2)
}
// CrossCXInt calls CrossCX on an int slice.
// Both slices are modified in place through the IntSlice adapter.
func CrossCXInt(s1 []int, s2 []int) {
	CrossCX(IntSlice(s1), IntSlice(s2))
}

// CrossCXFloat64 calls CrossCX on a float64 slice.
// Both slices are modified in place through the Float64Slice adapter.
func CrossCXFloat64(s1 []float64, s2 []float64) {
	CrossCX(Float64Slice(s1), Float64Slice(s2))
}

// CrossCXString calls CrossCX on a string slice.
// Both slices are modified in place through the StringSlice adapter.
func CrossCXString(s1 []string, s2 []string) {
	CrossCX(StringSlice(s1), StringSlice(s2))
}
// CrossERX (Edge Recombination Crossover).
// Each offspring is grown gene by gene, always extending with the candidate
// gene that has the fewest remaining neighbours in the merged adjacency sets
// of both parents. Both parents are modified in place.
func CrossERX(p1, p2 Slice) {
	var (
		n          = p1.Len()
		o1         = p1.Copy()
		o2         = p2.Copy()
		parents    = []Slice{p1, p2}
		offsprings = []Slice{o1, o2}
		p1Neighbours = getNeighbours(p1)
		p2Neighbours = getNeighbours(p2)
		pNeighbours  = make(map[interface{}]set)
	)
	// Merge the neighbours of each parent whilst ignoring duplicates
	for i := range p1Neighbours {
		pNeighbours[i] = union(p1Neighbours[i], p2Neighbours[i])
	}
	// Hold two copies of the parent neighbours (one for each offspring)
	// NOTE(review): this is a shallow copy — both maps share the same inner
	// set values, so deletions below affect both offspring; confirm against
	// upstream eaopt if surprising.
	var neighbours = []map[interface{}]set{pNeighbours, nil}
	neighbours[1] = make(map[interface{}]set)
	for k, v := range pNeighbours {
		neighbours[1][k] = v
	}
	// Set the first element of each offspring to be the one of the
	// corresponding parent
	o1.Set(0, p1.At(0))
	o2.Set(0, p2.At(0))
	// Delete the neighbour from the adjacency set
	for i := range neighbours {
		delete(neighbours[i], parents[i].At(0))
		for j := range neighbours[i] {
			if neighbours[i][j][parents[i].At(0)] {
				delete(neighbours[i][j], parents[i].At(0))
			}
		}
	}
	for o := range offsprings {
		for i := 1; i < n; i++ {
			// Find the gene with the least neighbours
			var (
				j   interface{}
				min = 5 // There can't be more than 5 neighbours between 2 parents
			)
			for k, v := range neighbours[o] {
				if len(v) < min {
					j = k
					min = len(v)
				}
			}
			// Append the chosen gene and remove it from the candidate pool
			// and from every remaining adjacency set.
			offsprings[o].Set(i, j)
			delete(neighbours[o], j)
			for k := range neighbours[o] {
				if neighbours[o][k][j] {
					delete(neighbours[o][k], j)
				}
			}
		}
	}
	// Both parents are overwritten in place with their offspring.
	p1.Replace(o1)
	p2.Replace(o2)
}
// CrossERXInt calls CrossERX on an int slice.
// Both slices are modified in place through the IntSlice adapter.
func CrossERXInt(s1 []int, s2 []int) {
	CrossERX(IntSlice(s1), IntSlice(s2))
}

// CrossERXFloat64 callsCrossERX on a float64 slice.
// Both slices are modified in place through the Float64Slice adapter.
func CrossERXFloat64(s1 []float64, s2 []float64) {
	CrossERX(Float64Slice(s1), Float64Slice(s2))
}

// CrossERXString calls CrossERX on a string slice.
// Both slices are modified in place through the StringSlice adapter.
func CrossERXString(s1 []string, s2 []string) {
	CrossERX(StringSlice(s1), StringSlice(s2))
}
package workitem
import (
"fmt"
"reflect"
"strconv"
"time"
"github.com/almighty/almighty-core/convert"
"github.com/almighty/almighty-core/rendering"
"github.com/asaskevich/govalidator"
"github.com/pkg/errors"
)
// SimpleType is an unstructured FieldType
type SimpleType struct {
	Kind Kind // the concrete kind (string, integer, instant, ...) this type represents
}

// Ensure SimpleType implements the Equaler interface
var _ convert.Equaler = SimpleType{}
var _ convert.Equaler = (*SimpleType)(nil)
// Equal returns true if two SimpleType objects are equal; otherwise false is returned.
// A value of any other concrete type is never equal to a SimpleType.
func (self SimpleType) Equal(u convert.Equaler) bool {
	if other, ok := u.(SimpleType); ok {
		return self.Kind == other.Kind
	}
	return false
}
// GetKind implements FieldType
// It exposes the simple type's underlying kind.
func (self SimpleType) GetKind() Kind {
	return self.Kind
}
// timeType caches the reflect.Type of the concrete struct time.Time, used by
// the conversion methods below for runtime type checks.
var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
// ConvertToModel implements the FieldType interface.
// It validates an API-layer value against the simple type's kind and converts
// it to the representation stored in the model, returning an error when the
// dynamic type of value does not match the kind. nil is always accepted.
func (fieldType SimpleType) ConvertToModel(value interface{}) (interface{}, error) {
	if value == nil {
		return nil, nil
	}
	valueType := reflect.TypeOf(value)
	switch fieldType.GetKind() {
	case KindString, KindUser, KindIteration, KindArea:
		if valueType.Kind() != reflect.String {
			return nil, fmt.Errorf("value %v should be %s, but is %s", value, "string", valueType.Name())
		}
		return value, nil
	case KindURL:
		if valueType.Kind() == reflect.String && govalidator.IsURL(value.(string)) {
			return value, nil
		}
		return nil, fmt.Errorf("value %v should be %s, but is %s", value, "URL", valueType.Name())
	case KindFloat:
		if valueType.Kind() != reflect.Float64 {
			return nil, fmt.Errorf("value %v should be %s, but is %s", value, "float64", valueType.Name())
		}
		return value, nil
	case KindInteger, KindDuration:
		if valueType.Kind() != reflect.Int {
			return nil, fmt.Errorf("value %v should be %s, but is %s", value, "int", valueType.Name())
		}
		return value, nil
	case KindInstant:
		// BUGFIX: reflect.Type.Implements panics when its argument is not an
		// interface type, and timeType is the concrete struct type time.Time,
		// so the previous `valueType.Implements(timeType)` check panicked at
		// runtime for every KindInstant value. AssignableTo performs the
		// intended concrete-type check safely.
		if !valueType.AssignableTo(timeType) {
			return nil, fmt.Errorf("value %v should be %s, but is %s", value, "time.Time", valueType.Name())
		}
		// NOTE(review): the original comment said "instant == milliseconds"
		// but UnixNano stores nanoseconds — confirm which unit readers expect.
		return value.(time.Time).UnixNano(), nil
	case KindWorkitemReference:
		if valueType.Kind() != reflect.String {
			return nil, fmt.Errorf("value %v should be %s, but is %s", value, "string", valueType.Name())
		}
		idValue, err := strconv.Atoi(value.(string))
		return idValue, errors.WithStack(err)
	case KindList:
		if (valueType.Kind() != reflect.Array) && (valueType.Kind() != reflect.Slice) {
			return nil, fmt.Errorf("value %v should be %s, but is %s,", value, "array/slice", valueType.Kind())
		}
		return value, nil
	case KindEnum:
		// to be done yet | not sure what to write here as of now.
		return value, nil
	case KindMarkup:
		// 'markup' is just a string in the API layer for now:
		// it corresponds to the MarkupContent.Content field. The MarkupContent.Markup is set to the default value
		switch value.(type) {
		case rendering.MarkupContent:
			markupContent := value.(rendering.MarkupContent)
			return markupContent.ToMap(), nil
		default:
			return nil, errors.Errorf("value %v should be %s, but is %s", value, "MarkupContent", valueType)
		}
	default:
		return nil, errors.Errorf("unexpected type constant: '%s'", fieldType.GetKind())
	}
}
// ConvertFromModel implements the FieldType interface
// It converts a value loaded from the model layer back into its API-layer
// representation. nil passes through unchanged.
func (fieldType SimpleType) ConvertFromModel(value interface{}) (interface{}, error) {
	if value == nil {
		return nil, nil
	}
	valueType := reflect.TypeOf(value)
	switch fieldType.GetKind() {
	case KindString, KindURL, KindUser, KindInteger, KindFloat, KindDuration, KindIteration, KindArea:
		// Scalar kinds are stored verbatim in the model; pass them through.
		return value, nil
	case KindInstant:
		// ConvertToModel stores instants as int64 returned by UnixNano.
		return time.Unix(0, value.(int64)), nil
	case KindWorkitemReference:
		// NOTE(review): this branch looks inconsistent — it requires the value
		// to be a string, then immediately asserts it to uint64 (which would
		// panic for a string), while ConvertToModel stores an int via Atoi.
		// Confirm the actual model representation before relying on it.
		if valueType.Kind() != reflect.String {
			return nil, fmt.Errorf("value %v should be %s, but is %s", value, "string", valueType.Name())
		}
		return strconv.FormatUint(value.(uint64), 10), nil
	case KindMarkup:
		// ConvertToModel stores markup as a map; rebuild the MarkupContent.
		if valueType.Kind() != reflect.Map {
			return nil, errors.Errorf("value %v should be %s, but is %s", value, reflect.Map, valueType.Name())
		}
		markupContent := rendering.NewMarkupContentFromMap(value.(map[string]interface{}))
		return markupContent, nil
	default:
		return nil, errors.Errorf("unexpected field type: %s", fieldType.GetKind())
	}
}
package list
import (
"github.com/genkami/dogs/classes/algebra"
"github.com/genkami/dogs/classes/cmp"
"github.com/genkami/dogs/types/iterator"
"github.com/genkami/dogs/types/pair"
)
// Some packages are unused depending on -include CLI option.
// This prevents compile error when corresponding functions are not defined.
var _ = (algebra.Monoid[int])(nil)
var _ = (cmp.Ord[int])(nil)
var _ = (iterator.Iterator[int])(nil)
var _ = (*pair.Pair[int, int])(nil)

// Filter returns a collection that only returns elements that satisfies given predicate.
// It delegates to iterator.Filter over xs.Iter() and materializes a new List.
func Filter[T any](xs *List[T], fn func(T) bool) *List[T] {
	return FromIterator[T](iterator.Filter[T](xs.Iter(), fn))
}

// Find returns a first element in xs that satisfies the given predicate fn.
// It returns false as a second return value if no elements are found.
func Find[T any](xs *List[T], fn func(T) bool) (T, bool) {
	return iterator.Find[T](xs.Iter(), fn)
}

// FindElem returns a first element in xs that equals to e in the sense of given Eq.
// It returns false as a second return value if no elements are found.
// Note the curried form: the Eq instance is supplied first.
func FindElem[T any](eq cmp.Eq[T]) func(xs *List[T], e T) (T, bool) {
	return func(xs *List[T], e T) (T, bool) {
		return iterator.FindElem[T](eq)(xs.Iter(), e)
	}
}

// Fold accumulates every element in a collection by applying fn.
// init is the starting accumulator value.
func Fold[T any, U any](init T, xs *List[U], fn func(T, U) T) T {
	return iterator.Fold[T, U](init, xs.Iter(), fn)
}

// ForEach applies fn to each element in xs.
func ForEach[T any](xs *List[T], fn func(T)) {
	iterator.ForEach[T](xs.Iter(), fn)
}

// Map returns a collection that applies fn to each element of xs.
// The result is materialized as a new List.
func Map[T, U any](xs *List[T], fn func(T) U) *List[U] {
	return FromIterator[U](iterator.Map[T, U](xs.Iter(), fn))
}
// Max returns the largest element with respect to the given Ord.
// It returns <zero value>, false if the collection is empty.
func Max[T any](ord cmp.Ord[T]) func(xs *List[T]) (T, bool) {
	return func(xs *List[T]) (T, bool) {
		return iterator.Max(ord)(xs.Iter())
	}
}

// MaxBy returns the smallest element with respect to the given function.
// It returns <zero value>, false if the collection is empty.
func MaxBy[T any](xs *List[T], less func(T, T) bool) (T, bool) {
	return iterator.MaxBy(xs.Iter(), less)
}

// Min returns the smallest element with respect to the given Ord.
// It returns <zero value>, false if the collection is empty.
func Min[T any](ord cmp.Ord[T]) func(xs *List[T]) (T, bool) {
	return func(xs *List[T]) (T, bool) {
		return iterator.Min(ord)(xs.Iter())
	}
}

// MinBy returns the smallest element with respect to the given function.
// It returns <zero value>, false if the collection is empty.
func MinBy[T any](xs *List[T], less func(T, T) bool) (T, bool) {
	return iterator.MinBy(xs.Iter(), less)
}

// Sum sums up all values in xs.
// It returns m.Empty() when xs is empty.
func Sum[T any](m algebra.Monoid[T]) func(xs *List[T]) T {
	return func(xs *List[T]) T {
		// Reuse SumWithInit with the monoid's identity as the seed.
		var s algebra.Semigroup[T] = m
		return SumWithInit[T](s)(m.Empty(), xs)
	}
}

// SumWithInit sums up init and all values in xs.
// Values are combined left-to-right via the semigroup's Combine.
func SumWithInit[T any](s algebra.Semigroup[T]) func(init T, xs *List[T]) T {
	return func(init T, xs *List[T]) T {
		return Fold[T, T](init, xs, s.Combine)
	}
}
package matcha
import (
"fmt"
"reflect"
"regexp"
"strings"
snakecase "github.com/segmentio/go-snakecase"
. "github.com/smartystreets/goconvey/convey"
)
// CapturedValues is a map of a slice of values
// (keyed by capture tag or derived field name, in order of appearance).
type CapturedValues map[string][]interface{}

// Matcher compares decoded JSON/XML response bodies against expected Go
// struct types, optionally capturing tagged values along the way.
type Matcher struct {
	format         string // Should be 'json' or 'xml'
	capturedValues CapturedValues // nil disables value capturing entirely
}

const (
	success = "" // goconvey uses an empty string to signal success
)
// TypeErrorString formats the standard type-mismatch message for a field,
// naming the expected and the actually observed type.
func TypeErrorString(fieldName string, expectedType string, actualType string) string {
	const format = "Expected '%v' to be: '%v' (but was: '%v')!"
	return fmt.Sprintf(format, fieldName, expectedType, actualType)
}
// getFieldName resolves the wire name of a struct field: the tag matching the
// matcher's format ("json" or "xml") wins, otherwise the field name is
// converted to snake_case.
func (m *Matcher) getFieldName(field reflect.StructField) string {
	if tagged, ok := field.Tag.Lookup(m.format); ok {
		return tagged
	}
	// No format-specific tag: derive the name from the struct field name.
	return snakecase.Snakecase(field.Name)
}
// shouldMatchPattern enforces an optional `pattern` tag on a string field:
// the actual value must be a string matching the tag's regular expression.
// Fields without the tag always pass. Returns "" on success, otherwise a
// human-readable error message.
func (m *Matcher) shouldMatchPattern(actual interface{}, expectedField reflect.StructField) string {
	pattern, hasPattern := expectedField.Tag.Lookup("pattern")
	if !hasPattern {
		return success
	}
	// A pattern only makes sense on string fields, both expected and actual.
	if expectedField.Type.Kind() != reflect.String {
		return fmt.Sprintf("'pattern' tag cannot be used on non-string fields: %v", expectedField.Name)
	}
	actualString, isString := actual.(string)
	if !isString {
		return fmt.Sprintf("Expected a string value for field: %v but instead got %v", expectedField.Name, reflect.TypeOf(actual))
	}
	matched, err := regexp.MatchString(pattern, actualString)
	if err != nil {
		return fmt.Sprintf("Received invalid regular expression: %v", pattern)
	}
	if !matched {
		return fmt.Sprintf("%v: '%v' does not match expected pattern: %v", expectedField.Name, actualString, pattern)
	}
	return success
}
// shouldMatchExpectedArray checks that actual is an array whose every element
// matches the expected slice's element type, accumulating all element errors.
// Returns "" on success, otherwise newline-joined error messages.
func (m *Matcher) shouldMatchExpectedArray(actual interface{}, expectedType reflect.Type, fieldName string) string {
	var errorList []string
	actualSlice, ok := actual.([]interface{})
	if !ok {
		// In XML, with the absence of a schema, it is impossible to distinguish between a single
		// field and an array with one element, so we convert to a slice and try again
		if m.format == "xml" {
			actualSlice = make([]interface{}, 1)
			actualSlice[0] = actual
		} else {
			return fmt.Sprintf("Was expecting an array for field: %v", fieldName)
		}
	}
	// Get the expected type of each element in the array
	expectedArrayElementType := expectedType.Elem()
	// Compare each element in slice
	for _, newActualField := range actualSlice {
		// Array fields don't have names, so use something intuitive
		newFieldName := fmt.Sprintf("%v array values", fieldName)
		equal := m.shouldMatchExpectedField(newActualField, expectedArrayElementType, newFieldName)
		if equal != success {
			errorList = append(errorList, equal)
		}
	}
	if errorList != nil {
		return strings.Join(errorList, "\n")
	}
	return success
}
func (m *Matcher) captureValue(expectedField reflect.StructField, value interface{}) {
// If we're not interested in capturing any values, just return
if m.capturedValues == nil {
return
}
captureKey, ok := expectedField.Tag.Lookup("capture")
if ok {
if captureKey == "" {
captureKey = m.getFieldName(expectedField)
}
m.capturedValues[captureKey] = append(m.capturedValues[captureKey], value)
}
}
// shouldMatchExpectedStructField matches one expected struct field against the
// decoded object: it resolves the wire name, captures the value when a
// `capture` tag is present, applies any `pattern` tag, then recurses on the
// field's type. Returns "" on success, otherwise an error message.
func (m *Matcher) shouldMatchExpectedStructField(actual map[string]interface{}, expectedField reflect.StructField) string {
	fieldName := m.getFieldName(expectedField)
	expectedFieldType := expectedField.Type
	actualField, ok := actual[fieldName]
	if !ok {
		return fmt.Sprintf("No field '%v' found in response", fieldName)
	}
	// Capture happens before validation, so even mismatching values are recorded.
	m.captureValue(expectedField, actualField)
	equal := m.shouldMatchPattern(actualField, expectedField)
	if equal != success {
		return equal
	}
	return m.shouldMatchExpectedField(actualField, expectedFieldType, fieldName)
}
// shouldMatchExpectedObject checks every field of the expected struct type
// against the decoded object, accumulating all field errors. Returns "" on
// success, otherwise newline-joined error messages.
func (m *Matcher) shouldMatchExpectedObject(actual interface{}, expectedType reflect.Type, fieldName string) string {
	var errorList []string
	actualMap, ok := actual.(map[string]interface{})
	if !ok {
		return fmt.Sprintf("Was expecting an object for field: %v, but got %v", fieldName, reflect.TypeOf(actual).Kind())
	}
	for i := 0; i < expectedType.NumField(); i++ {
		newField := expectedType.Field(i)
		equal := m.shouldMatchExpectedStructField(actualMap, newField)
		if equal != success {
			errorList = append(errorList, equal)
		}
	}
	if errorList != nil {
		return strings.Join(errorList, "\n")
	}
	return success
}
func (m *Matcher) shouldMatchExpectedField(actual interface{}, expectedType reflect.Type, fieldName string) string {
expectedKind := expectedType.Kind()
actualType := reflect.TypeOf(actual)
switch expectedKind {
case reflect.String:
if equal := ShouldEqual(expectedType, actualType); equal != success {
return TypeErrorString(fieldName, expectedType.String(), actualType.String())
}
case reflect.Float64:
if equal := ShouldEqual(expectedType, actualType); equal != success {
return TypeErrorString(fieldName, expectedType.String(), actualType.String())
}
case reflect.Bool:
if equal := ShouldEqual(expectedType, actualType); equal != success {
return TypeErrorString(fieldName, expectedType.String(), actualType.String())
}
case reflect.Slice:
return m.shouldMatchExpectedArray(actual, expectedType, fieldName)
case reflect.Struct:
// Type is a JSON object
return m.shouldMatchExpectedObject(actual, expectedType, fieldName)
default:
return fmt.Sprintf("'%v' is of a type I don't know how to handle", expectedType)
}
return success
} | matcha/matcher.go | 0.709221 | 0.549278 | matcher.go | starcoder |
package favicon
import (
"bytes"
"encoding/binary"
)
// https://en.wikipedia.org/wiki/BMP_file_format
// BitmapFileHeader is the 14-byte BMP file header. Field order and sizes must
// match the on-disk layout exactly, since it is serialized with binary.Write.
type BitmapFileHeader struct {
	Signature [2]byte // The header field used to identify the BMP and DIB file is 0x42 0x4D in hexadecimal, same as BM in ASCII.
	FileSize  uint32  // The size of the BMP file in bytes
	_         uint16  // Reserved; actual value depends on the application that creates the image
	_         uint16  // Reserved; actual value depends on the application that creates the image
	Offset    uint32  // The offset, i.e. starting address, of the byte where the bitmap image data (pixel array) can be found.
}
// Windows BITMAPINFOHEADER - there are other possibilities, but this looks to be
// the most common possibility.
type DIBHeader struct {
Size uint32 // the size of this header, in bytes (40)
Width uint32 // the bitmap width in pixels (signed integer)
Height uint32 // the bitmap height in pixels (signed integer)
ColorPlanes uint16 // the number of color planes (must be 1)
BitsPerPixel uint16 // the number of bits per pixel, which is the color depth of the image. Typical values are 1, 4, 8, 16, 24 and 32.
Compression uint32 // the compression method being used. See the next table for a list of possible values
ImageSize uint32 // the image size. This is the size of the raw bitmap data; a dummy 0 can be given for BI_RGB bitmaps.
HorizontalResolution uint32 // the horizontal resolution of the image. (pixel per metre, signed integer)
VerticalResolution uint32 // the vertical resolution of the image. (pixel per metre, signed integer)
ColorCount uint32 // the number of colors in the color palette, or 0 to default to 2^n
ImportantColorCount uint32 // the number of important colors used, or 0 when every color is important; generally ignored
}
func isBMP(icoBytes []byte) (bool, error) {
r := bytes.NewReader(icoBytes)
r.Seek(0, 0)
dibHeader := DIBHeader{}
err := binary.Read(r, binary.LittleEndian, &dibHeader)
if err != nil {
return false, err
}
if dibHeader.Size == 40 && dibHeader.ColorPlanes == 1 {
return true, nil
}
return false, nil
}
func appendBitmapFileHeader(entry *IconDirEntry, icoBytes []byte) (*bytes.Buffer, error) {
r := bytes.NewReader(icoBytes)
r.Seek(0, 0)
bitmapFileHeader := BitmapFileHeader{
Signature: [2]byte{'B', 'M'},
FileSize: 14 + entry.Size, // header + current file size
Offset: 14 + 40, // header + dib
}
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.LittleEndian, bitmapFileHeader); err != nil {
return nil, err
}
outz := append(buf.Bytes(), icoBytes...)
buf2 := new(bytes.Buffer)
if err := binary.Write(buf2, binary.LittleEndian, outz); err != nil {
return nil, err
}
return buf2, nil
} | favicon/bitmap.go | 0.710528 | 0.467149 | bitmap.go | starcoder |
package tomgjson
import (
"encoding/json"
"fmt"
"log"
"math"
"strconv"
"strings"
"time"
)
// maxInt returns the larger of two ints (math.Max only handles floats).
func maxInt(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// sides splits the absolute value of n into its integer and decimal parts,
// both returned as strings. A number without a decimal part yields "0" as
// the decimal side. Panics if strconv produces an unparseable result
// (which should be impossible).
func sides(n float64) (string, string) {
	text := strconv.FormatFloat(math.Abs(n), 'f', -1, 64)
	parts := strings.SplitN(text, ".", 2)
	switch len(parts) {
	case 2:
		return parts[0], parts[1]
	case 1:
		return parts[0], "0"
	}
	log.Panicf("Badly formatted float: %v %v", n, parts)
	return "", "" // unreachable
}
// Make sure float values are within mgJSON's valid values
const largestMgjsonNum = 2147483648.0

// validValue clamps v to mgJSON's legal numeric range and maps NaN to 0.
func validValue(v float64) float64 {
	switch {
	case math.IsNaN(v):
		return 0
	case v > largestMgjsonNum:
		return largestMgjsonNum
	case v < -largestMgjsonNum:
		return -largestMgjsonNum
	}
	return v
}
// Stream contains a slice of values or strings and their label
// The slices must be of the same length as the timing slice in their parent's FormattedData
// Only one of the slices must be present, not both
type Stream struct {
	Label   string    // display name used for the mgJSON data outline
	Values  []float64 // numeric samples (emitted as "numberString")
	Strings []string  // textual samples (emitted as "paddedString")
}

// FormattedData is the struct accepted by ToMgjson.
// It consists of a slice of timestamps and a slice with all the streams of labelled values (floats for now)
type FormattedData struct {
	Timing  []time.Time // one timestamp per sample, shared by every stream
	Streams []Stream    // each stream's slice must match len(Timing)
}
// mgJSON structure. For now, only the fields we are using are specified

// utcInfo describes timestamp precision for dynamic data.
type utcInfo struct {
	PrecisionLength int  `json:"precisionLength"`
	IsGMT           bool `json:"isGMT"`
}

// dynamicDataInfo is the top-level timing metadata block.
type dynamicDataInfo struct {
	UseTimecodeB bool    `json:"useTimecodeB"`
	UtcInfo      utcInfo `json:"utcInfo"`
}

// pattern describes how numberString samples are zero-padded.
type pattern struct {
	DigitsInteger int  `json:"digitsInteger"`
	DigitsDecimal int  `json:"digitsDecimal"`
	IsSigned      bool `json:"isSigned"`
}

type minmax struct {
	Min float64 `json:"min"`
	Max float64 `json:"max"`
}

// mRange holds the observed ("occuring") and permitted ("legal") value ranges.
type mRange struct {
	Occuring minmax `json:"occuring"`
	Legal    minmax `json:"legal"`
}

type numberStringProperties struct {
	Pattern pattern `json:"pattern"`
	Range   mRange  `json:"range"`
}

type paddedStringProperties struct {
	MaxLen               int  `json:"maxLen"`
	MaxDigitsInStrLength int  `json:"maxDigitsInStrLength"`
	EventMarkerB         bool `json:"eventMarkerB"`
}

// dataType selects between "numberString" and "paddedString" samples; only
// the properties block matching Type is meaningful.
type dataType struct {
	Type                   string                 `json:"type"`
	NumberStringProperties numberStringProperties `json:"numberStringProperties"`
	PaddedStringProperties paddedStringProperties `json:"paddedStringProperties"`
}

// singleDataOutline declares one stream in the mgJSON data outline.
type singleDataOutline struct {
	ObjectType    string   `json:"objectType"`
	DisplayName   string   `json:"displayName"`
	SampleSetID   string   `json:"sampleSetID"`
	DataType      dataType `json:"dataType"`
	Interpolation string   `json:"interpolation"`
	// NOTE(review): the tag spelling "Frequecy" looks misspelled, but it may
	// deliberately match the mgJSON consumer's expected key — confirm against
	// the After Effects mgJSON spec before changing it.
	HasExpectedFrequencyB bool `json:"hasExpectedFrequecyB"`
	SampleCount           int  `json:"sampleCount"`
	MatchName             string `json:"matchName"`
}

// paddedStringValue is the sample payload for paddedString streams.
type paddedStringValue struct {
	Length string `json:"length"`
	Str    string `json:"str"`
}

// sample pairs a formatted timestamp with either a padded numeric string or
// a paddedStringValue.
type sample struct {
	Time  string      `json:"time"`
	Value interface{} `json:"value"`
}

type dataDynamicSample struct {
	SampleSetID string   `json:"sampleSetID"`
	Samples     []sample `json:"samples"`
}

// mgjson is the document root serialized by ToMgjson.
type mgjson struct {
	Version                string              `json:"version"`
	Creator                string              `json:"creator"`
	DynamicSamplesPresentB bool                `json:"dynamicSamplesPresentB"`
	DynamicDataInfo        dynamicDataInfo     `json:"dynamicDataInfo"`
	DataOutline            []singleDataOutline `json:"dataOutline"`
	DataDynamicSamples     []dataDynamicSample `json:"dataDynamicSamples"`
}
// ToMgjson receives a formatted source data (FormattedData) and a creator or author name
// and returns formatted mgjson ready to write to a file
// compatible with Adobe After Effects data-driven animations (or an error)
//
// Each stream is processed in two passes: a statistics pass that computes
// padding widths (digit counts for numeric streams, max length for string
// streams) and an emission pass that produces padded samples. Per the Stream
// doc, only one of Values/Strings should be populated; both emission loops
// run unconditionally, so a stream with both set would emit both.
func ToMgjson(sd FormattedData, creator string) ([]byte, error) {
	if len(sd.Streams) < 1 {
		return nil, fmt.Errorf("No streams found")
	}
	if len(sd.Timing) < 1 {
		return nil, fmt.Errorf("No timing data")
	}
	//Hardcode non configurable values (for now)
	data := mgjson{
		Version:                "MGJSON2.0.0",
		Creator:                creator,
		DynamicSamplesPresentB: true,
		DynamicDataInfo: dynamicDataInfo{
			UseTimecodeB: false,
			UtcInfo: utcInfo{
				PrecisionLength: 3,
				IsGMT:           true,
			},
		},
		DataOutline:        []singleDataOutline{},
		DataDynamicSamples: []dataDynamicSample{},
	}
	for i, stream := range sd.Streams {
		sName := fmt.Sprintf("Stream%d", i)
		// Statistics pass: track occurring range and padding widths.
		min := largestMgjsonNum
		max := -largestMgjsonNum
		digitsInteger := 0
		digitsDecimal := 0
		maxLen := 0
		maxDigitsInStrLength := 0
		for _, v := range stream.Values {
			v = validValue(v)
			min = math.Min(min, v)
			max = math.Max(max, v)
			integer, decimal := sides(v)
			digitsInteger = maxInt(digitsInteger, len(integer))
			digitsDecimal = maxInt(digitsDecimal, len(decimal))
		}
		for _, v := range stream.Strings {
			maxLen = maxInt(maxLen, len(v))
			maxDigitsInStrLength = len(strconv.Itoa(maxLen))
		}
		// Build the data outline entry for whichever slice is populated.
		var thisDataType dataType
		var thisInterpolation string
		var thisSampleCount int
		if len(stream.Values) > 0 {
			thisDataType = dataType{
				Type: "numberString",
				NumberStringProperties: numberStringProperties{
					Pattern: pattern{
						DigitsInteger: digitsInteger,
						DigitsDecimal: digitsDecimal,
						IsSigned:      true,
					},
					Range: mRange{
						Occuring: minmax{min, max},
						Legal:    minmax{-largestMgjsonNum, largestMgjsonNum},
					},
				},
			}
			thisInterpolation = "linear"
			thisSampleCount = len(stream.Values)
		} else if len(stream.Strings) > 0 {
			thisDataType = dataType{
				Type: "paddedString",
				PaddedStringProperties: paddedStringProperties{
					MaxLen:               maxLen,
					MaxDigitsInStrLength: maxDigitsInStrLength,
					EventMarkerB:         false,
				},
			}
			thisInterpolation = "hold"
			thisSampleCount = len(stream.Strings)
		}
		if len(sd.Timing) != thisSampleCount {
			return nil, fmt.Errorf("Timing data does not match slice length")
		}
		data.DataOutline = append(data.DataOutline, singleDataOutline{
			ObjectType:            "dataDynamic",
			DisplayName:           stream.Label,
			SampleSetID:           sName,
			DataType:              thisDataType,
			Interpolation:         thisInterpolation,
			HasExpectedFrequencyB: false,
			SampleCount:           thisSampleCount,
			MatchName:             sName,
		})
		// Emission pass. Note: the inner `i` shadows the stream index; it is
		// the per-sample index used to look up the matching timestamp.
		streamSamples := []sample{}
		for i, v := range stream.Values {
			v = validValue(v)
			// Width = sign + integer digits + decimal point + decimal digits.
			paddedValue := fmt.Sprintf("%+0*.*f", digitsInteger+digitsDecimal+2, digitsDecimal, v)
			timeStr := sd.Timing[i].Format("2006-01-02T15:04:05.000Z")
			streamSamples = append(streamSamples, sample{
				Time:  timeStr,
				Value: paddedValue,
			})
		}
		for i, v := range stream.Strings {
			// Strings are right-padded to maxLen; the true length is recorded
			// (zero-padded) so consumers can strip the padding.
			stringValue := paddedStringValue{
				Length: fmt.Sprintf("%0*d", maxDigitsInStrLength, len(v)),
				Str:    fmt.Sprintf("%-*v", maxLen, v),
			}
			timeStr := sd.Timing[i].Format("2006-01-02T15:04:05.000Z")
			streamSamples = append(streamSamples, sample{
				Time:  timeStr,
				Value: stringValue,
			})
		}
		data.DataDynamicSamples = append(data.DataDynamicSamples, dataDynamicSample{
			SampleSetID: sName,
			Samples:     streamSamples,
		})
	}
	doc, err := json.Marshal(data)
	if err != nil {
		return nil, err
	}
	return doc, nil
}
package goraph
import (
"fmt"
"math"
)
// ID uniquely identify a vertex. Any comparable value may be used.
type ID interface{}

// Vertex interface represents a vertex with edges connected to it.
type Vertex interface {
	// ID get the unique id of the vertex.
	ID() ID

	// Edges get all the edges connected to the vertex
	Edges() []Edge
}

// Edge interface represents an edge connecting two vertices.
type Edge interface {
	// Get returns the edge's inbound vertex, outbound vertex and weight.
	Get() (from ID, to ID, weight float64)
}
// Graph is made up of vertices and edges.
// Vertices in the graph must have an unique id.
// Each edges in the graph connects two vertices directed with a weight.
//
// egress[from][to] and ingress[to][from] hold the SAME *edge pointer, so a
// mutation seen through one view is visible through the other.
type Graph struct {
	vertices map[ID]*vertex
	egress   map[ID]map[ID]*edge // outgoing adjacency: from -> to -> edge
	ingress  map[ID]map[ID]*edge // incoming adjacency: to -> from -> edge
}

// vertex wraps the caller's vertex value plus an enable flag used by the
// Disable*/Reset machinery.
type vertex struct {
	self   interface{}
	enable bool
}

// edge wraps the caller's edge value, its weight and bookkeeping flags.
// NOTE(review): `changed` is set to false on creation but never read or
// written by any code visible here — confirm its purpose before relying on it.
type edge struct {
	self    interface{}
	weight  float64
	enable  bool
	changed bool
}

// getWeight returns the edge's weight.
func (edge *edge) getWeight() float64 {
	return edge.weight
}
// NewGraph creates a new empty graph.
func NewGraph() *Graph {
	return &Graph{
		vertices: make(map[ID]*vertex),
		egress:   make(map[ID]map[ID]*edge),
		ingress:  make(map[ID]map[ID]*edge),
	}
}
// GetVertex get a vertex by input id.
// Try to get a vertex not in the graph will get an error.
func (graph *Graph) GetVertex(id ID) (vertex interface{}, err error) {
	stored, ok := graph.vertices[id]
	if !ok {
		return nil, fmt.Errorf("Vertex %v is not found", id)
	}
	return stored.self, nil
}
// GetEdge gets the edge between the two vertices by input ids.
// Try to get the edge from or to a vertex not in the graph will get an error.
// Try to get the edge between two disconnected vertices will get an error.
func (graph *Graph) GetEdge(from ID, to ID) (interface{}, error) {
	if _, ok := graph.vertices[from]; !ok {
		return nil, fmt.Errorf("Vertex(from) %v is not found", from)
	}
	if _, ok := graph.vertices[to]; !ok {
		return nil, fmt.Errorf("Vertex(to) %v is not found", to)
	}
	found, ok := graph.egress[from][to]
	if !ok {
		return nil, fmt.Errorf("Edge from %v to %v is not found", from, to)
	}
	return found.self, nil
}
// GetEdgeWeight gets the weight of the edge between the two vertices by input ids.
// Try to get the weight of the edge from or to a vertex not in the graph will get an error.
// Try to get the weight of the edge between two disconnected vertices will get +Inf.
func (graph *Graph) GetEdgeWeight(from ID, to ID) (float64, error) {
	if _, ok := graph.vertices[from]; !ok {
		return math.Inf(1), fmt.Errorf("Vertex(from) %v is not found", from)
	}
	if _, ok := graph.vertices[to]; !ok {
		return math.Inf(1), fmt.Errorf("Vertex(to) %v is not found", to)
	}
	found, ok := graph.egress[from][to]
	if !ok {
		return math.Inf(1), nil
	}
	return found.weight, nil
}
// AddVertex adds a new vertex into the graph.
// Try to add a duplicate vertex will get an error.
func (graph *Graph) AddVertex(id ID, v interface{}) error {
	if _, ok := graph.vertices[id]; ok {
		return fmt.Errorf("Vertex %v is duplicate", id)
	}
	graph.vertices[id] = &vertex{self: v, enable: true}
	graph.egress[id] = map[ID]*edge{}
	graph.ingress[id] = map[ID]*edge{}
	return nil
}
// AddEdge adds a new edge between the vertices by the input ids.
// Try to add an edge with -Inf weight will get an error.
// Try to add an edge from or to a vertex not in the graph will get an error.
// Try to add a duplicate edge will get an error.
func (graph *Graph) AddEdge(from ID, to ID, weight float64, e interface{}) error {
	if weight == math.Inf(-1) {
		return fmt.Errorf("-inf weight is reserved for internal usage")
	}
	if _, ok := graph.vertices[from]; !ok {
		return fmt.Errorf("Vertex(from) %v is not found", from)
	}
	if _, ok := graph.vertices[to]; !ok {
		return fmt.Errorf("Vertex(to) %v is not found", to)
	}
	if _, ok := graph.egress[from][to]; ok {
		return fmt.Errorf("Edge from %v to %v is duplicate", from, to)
	}
	// Both adjacency views share the same *edge value.
	created := &edge{self: e, weight: weight, enable: true}
	graph.egress[from][to] = created
	graph.ingress[to][from] = created
	return nil
}
// UpdateEdgeWeight updates the weight of the edge between vertices by the input ids.
// Try to update an edge with -Inf weight will get an error.
// Try to update an edge from or to a vertex not in the graph will get an error.
// Try to update an edge between disconnected vertices will get an error.
func (graph *Graph) UpdateEdgeWeight(from ID, to ID, weight float64) error {
	if weight == math.Inf(-1) {
		return fmt.Errorf("-inf weight is reserved for internal usage")
	}
	if _, ok := graph.vertices[from]; !ok {
		return fmt.Errorf("Vertex(from) %v is not found", from)
	}
	if _, ok := graph.vertices[to]; !ok {
		return fmt.Errorf("Vertex(to) %v is not found", to)
	}
	target, ok := graph.egress[from][to]
	if !ok {
		return fmt.Errorf("Edge from %v to %v is not found", from, to)
	}
	target.weight = weight
	return nil
}
// DeleteVertex deletes a vertex from the graph and gets the value of the vertex.
// Try to delete a vertex not in the graph will get an nil.
func (graph *Graph) DeleteVertex(id ID) interface{} {
	removed, ok := graph.vertices[id]
	if !ok {
		return nil
	}
	// Remove the mirror entries held by this vertex's neighbors before
	// dropping its own adjacency maps.
	for to := range graph.egress[id] {
		delete(graph.ingress[to], id)
	}
	for from := range graph.ingress[id] {
		delete(graph.egress[from], id)
	}
	delete(graph.egress, id)
	delete(graph.ingress, id)
	delete(graph.vertices, id)
	return removed.self
}
// DeleteEdge deletes the edge between the vertices by the input id from the graph and gets the value of edge.
// Try to delete an edge from or to a vertex not in the graph will get an error.
// Try to delete an edge between disconnected vertices will get a nil.
func (graph *Graph) DeleteEdge(from ID, to ID) interface{} {
	if _, ok := graph.vertices[from]; !ok {
		return nil
	}
	if _, ok := graph.vertices[to]; !ok {
		return nil
	}
	removed, ok := graph.egress[from][to]
	if !ok {
		return nil
	}
	delete(graph.egress[from], to)
	delete(graph.ingress[to], from)
	return removed.self
}
// AddVertexWithEdges adds a vertex value which implements Vertex interface.
// AddVertexWithEdges adds edges connected to the vertex at the same time, due to the Vertex interface can get the Edges.
//
// Unlike AddVertex/AddEdge, this method is idempotent about pre-existing
// entries: it only creates the vertex/adjacency maps that are missing and
// overwrites any existing edge entries. It does NOT create vertex records for
// the far endpoints of the edges — CheckIntegrity exists to detect dangling
// endpoints afterwards.
func (graph *Graph) AddVertexWithEdges(v Vertex) error {
	if graph.vertices[v.ID()] == nil {
		graph.vertices[v.ID()] = &vertex{v, true}
	}
	if graph.egress[v.ID()] == nil {
		graph.egress[v.ID()] = make(map[ID]*edge)
	}
	if graph.ingress[v.ID()] == nil {
		graph.ingress[v.ID()] = make(map[ID]*edge)
	}

	for _, eachEdge := range v.Edges() {
		from, to, weight := eachEdge.Get()
		if weight == math.Inf(-1) {
			return fmt.Errorf("-inf weight is reserved for internal usage")
		}
		// Every edge must touch the vertex being added.
		if from != v.ID() && to != v.ID() {
			return fmt.Errorf("Edge from %v to %v is unrelated to the vertex %v", from, to, v.ID())
		}
		// Ensure adjacency maps exist for both endpoints before inserting.
		if _, exists := graph.egress[to]; !exists {
			graph.egress[to] = make(map[ID]*edge)
		}
		if _, exists := graph.egress[from]; !exists {
			graph.egress[from] = make(map[ID]*edge)
		}
		if _, exists := graph.ingress[from]; !exists {
			graph.ingress[from] = make(map[ID]*edge)
		}
		if _, exists := graph.ingress[to]; !exists {
			graph.ingress[to] = make(map[ID]*edge)
		}
		// egress and ingress share one *edge value (see Graph doc).
		graph.egress[from][to] = &edge{eachEdge, weight, true, false}
		graph.ingress[to][from] = graph.egress[from][to]
	}
	return nil
}
// CheckIntegrity checks if any edge connects to or from unknown vertex.
// If the graph is integrate, nil is returned. Otherwise an error is returned.
func (graph *Graph) CheckIntegrity() error {
	known := func(id ID) bool {
		_, ok := graph.vertices[id]
		return ok
	}
	// Every endpoint referenced by either adjacency view must be a vertex.
	for from, out := range graph.egress {
		if !known(from) {
			return fmt.Errorf("Vertex %v is not found", from)
		}
		for to := range out {
			if !known(to) {
				return fmt.Errorf("Vertex %v is not found", to)
			}
		}
	}
	for to, in := range graph.ingress {
		if !known(to) {
			return fmt.Errorf("Vertex %v is not found", to)
		}
		for from := range in {
			if !known(from) {
				return fmt.Errorf("Vertex %v is not found", from)
			}
		}
	}
	return nil
}
// GetPathWeight gets the total weight along the path by input ids.
// It will get -Inf if the input path is nil or empty.
// It will get -Inf if the path contains vertex not in the graph.
// It will get +Inf if the path contains vertices not connected.
//
// NOTE(review): the edge's enable flag is not consulted here, so disabled
// edges still contribute their weight — confirm whether that is intended.
func (graph *Graph) GetPathWeight(path []ID) (totalWeight float64) {
	if len(path) == 0 {
		return math.Inf(-1)
	}
	if _, exists := graph.vertices[path[0]]; !exists {
		return math.Inf(-1)
	}
	// Walk consecutive pairs, accumulating edge weights.
	for i := 0; i < len(path)-1; i++ {
		if _, exists := graph.vertices[path[i+1]]; !exists {
			return math.Inf(-1)
		}
		if edge, exists := graph.egress[path[i]][path[i+1]]; exists {
			totalWeight += edge.getWeight()
		} else {
			return math.Inf(1)
		}
	}
	return totalWeight
}
// DisableEdge disables the edge for further calculation.
// NOTE(review): indexing a missing entry yields a nil *edge, so calling this
// with endpoints not in the graph panics — confirm callers always validate.
func (graph *Graph) DisableEdge(from, to ID) {
	graph.egress[from][to].enable = false
}

// DisableVertex disables the vertex for further calculation.
// Only the edges LEAVING the vertex are disabled; edges pointing into it
// remain enabled (ingress entries of other vertices share the same *edge
// values, so those mirrored views are disabled too).
func (graph *Graph) DisableVertex(vertex ID) {
	for _, edge := range graph.egress[vertex] {
		edge.enable = false
	}
}

// DisablePath disables all the vertices in the path for further calculation.
func (graph *Graph) DisablePath(path []ID) {
	for _, vertex := range path {
		graph.DisableVertex(vertex)
	}
}

// Reset enables all vertices and edges for further calculation.
// Iterating egress alone covers every edge, since each edge is stored in
// exactly one egress map (ingress holds the same pointers).
func (graph *Graph) Reset() {
	for _, out := range graph.egress {
		for _, edge := range out {
			edge.enable = true
		}
	}
}
package analysis
import (
"fmt"
"sort"
"time"
"github.com/fogleman/gg"
"gsa.gov/18f/internal/state"
"gsa.gov/18f/internal/structs"
)
// isInDurationRange reports whether a session length in minutes falls within
// the configured half-open window [minimum, maximum) that qualifies a device
// as a patron session.
func isInDurationRange(diff int) bool {
	cfg := state.GetConfig()
	if diff < cfg.GetMinimumMinutes() {
		return false
	}
	return diff < cfg.GetMaximumMinutes()
}
// DrawPatronSessions renders a PNG timeline of patron wifi sessions for one
// day and saves it to outputPath. The x axis is the 24-hour day (1440 px, one
// pixel per minute); each session whose length passes isInDurationRange gets
// its own row showing a rectangle from start time to end time, annotated with
// the start time and duration. A legend, hour gridlines and summary counts
// are drawn around the data. Returns early (logging an error) when there are
// no durations; calls cfg.Log().Fatal if the PNG cannot be saved.
func DrawPatronSessions(durations []structs.Duration, outputPath string) {
	cfg := state.GetConfig()
	if len(durations) == 0 {
		cfg.Log().Error("DrawPatronSessions was passed zero durations to draw.")
		cfg.Log().Error("Wanted to draw to the output path ", outputPath)
		return
	}
	// Capture the data about the session while running in a `counter` structure.
	// First pass: count sessions in range so the image height can be sized.
	durationsInRange := 0
	sort.Sort(structs.ByStart(durations))
	cfg.Log().Debug("about to iterate over [", len(durations), "] durations")
	for _, d := range durations {
		st := time.Unix(d.Start, 0).In(time.Local)
		et := time.Unix(d.End, 0).In(time.Local)
		diff := int(et.Sub(st).Minutes())
		// log.Println("st", st, "et", et, "diff", diff)
		if isInDurationRange(diff) {
			// log.Println("KEEP id", d.PatronID, "diff", diff)
			durationsInRange += 1
		}
	}

	cfg.Log().Info("durations to write to the image [", durationsInRange, "]")
	// 1440 px wide = one pixel per minute of the day.
	WIDTH := 1440
	hourWidth := WIDTH / 24
	HEIGHT := 24 * (durationsInRange + 2)
	cfg.Log().Info("image dimensions (WxH) ", WIDTH, " x ", HEIGHT)
	dc := gg.NewContext(WIDTH, HEIGHT)
	dc.SetRGBA(0.5, 0.5, 0, 0.5)
	dc.SetLineWidth(1)

	ystep := 0
	totalMinutes := 0
	totalPatrons := 0

	// White background.
	dc.SetRGB(1, 1, 1)
	dc.Push()
	dc.DrawRectangle(0, 0, float64(WIDTH), float64(HEIGHT))
	dc.Fill()
	dc.Stroke()
	dc.Pop()

	// Second pass: draw one row per in-range session.
	for _, d := range durations {
		// lw.Debug("duration ", d)
		st := time.Unix(d.Start, 0).In(time.Local)
		et := time.Unix(d.End, 0).In(time.Local)
		diff := int(et.Sub(st).Minutes())
		totalPatrons += 1
		totalMinutes += diff

		if isInDurationRange(diff) {
			ystep += 1
			// Draw the hour lines
			// NOTE(review): this gridline loop runs once per kept session, so
			// the translucent lines are over-drawn repeatedly — confirm
			// whether it should be hoisted out of the loop.
			for hour := 1; hour <= 24; hour++ {
				x := hourWidth * hour
				if hour == 12 {
					dc.SetRGBA(0.9, 0.1, 0.1, 0.2)
					dc.SetLineWidth(2)
					dc.DrawLine(float64(x), 0, float64(x), float64(HEIGHT))
					dc.DrawStringAnchored("noon", float64(x+10), float64(10), 0, 0)
				} else {
					dc.SetRGBA(0.9, 0.9, 0.9, 0.2)
					dc.SetLineWidth(0.5)
					dc.DrawLine(float64(x), 0, float64(x), float64(HEIGHT))
				}
				dc.Stroke()
			}

			// Draw the duration block
			// 1440 minutes in a day
			dc.SetRGB(0.7, 0.2, 0.2)
			dc.SetLineWidth(1)
			// Therefore...
			// log.Println("eod", eod(st))
			// Minutes elapsed since midnight = x coordinate of the box.
			stInMinutes := 1440 - int(eod(st).Sub(st).Minutes())
			x := stInMinutes
			y := 20 + (ystep * 20)
			// log.Println("start time", st, "end time", et)
			// log.Println("rect", x, y, diff, 20)
			dc.DrawRectangle(float64(x), float64(y), float64(diff), 20)
			dc.Stroke()

			// Position the start time string
			dc.SetRGB(0.2, 0.2, 0.2)
			if st.Hour() < 1 {
				dc.DrawStringAnchored(fmt.Sprintf("%v:%v", st.Hour(), pad(st.Minute())), float64(x+diff), float64(y), -0.5, 1)
			} else {
				dc.DrawStringAnchored(fmt.Sprintf("%v:%v", st.Hour(), pad(st.Minute())), float64(x), float64(y), 1.1, 1)
			}

			// Position the duration string
			duration := ""
			if diff < 60 {
				duration = fmt.Sprintf("%vm", pad(diff))
			} else {
				// log.Println("diff", diff)
				hours := (diff / 60)
				minutes := diff - ((diff / 60) * 60)
				duration = fmt.Sprintf("%vh%vm", hours, pad(minutes))
				// log.Println(duration)
			}

			// For short diffs, position the duration to the right...
			// A lot of conditions for such a seemingly simple thing...
			if diff < 60 {
				// cfg.Log().Debug("drawing a short box")
				if x < 100 {
					// If we are too far to the left, put it to the right of the box.
					dc.DrawStringAnchored(duration, float64(x+diff), float64(y), -2.25, 1)
				} else if x > (WIDTH - 100) {
					// If we are too far to the right, go left.
					dc.DrawStringAnchored(duration, float64(x+diff), float64(y), 3.25, 1)
				} else {
					// Otherwise, just *mostly* to the right...
					dc.DrawStringAnchored(duration, float64(x+diff), float64(y), -1.25, 1)
				}
			} else {
				dc.DrawStringAnchored(duration, float64(x+diff), float64(y), 1.25, 1)
			}
			dc.Stroke()
		}
	}

	// Summary text: header line at the top, counts block at the bottom.
	day := time.Unix(durations[0].Start, 0).In(time.Local)
	summaryD := fmt.Sprintf("Patron sessions from %v %v, %v - %v %v", day.Month(), day.Day(), day.Year(), cfg.GetFCFSSeqID(), cfg.GetDeviceTag())
	summaryA := fmt.Sprintf("%v devices seen", totalPatrons)
	summaryP := fmt.Sprintf("%v patron devices", durationsInRange)
	summaryM := fmt.Sprintf("%v minutes served", totalMinutes)
	// Top string
	dc.DrawStringAnchored(summaryD, float64(20), float64(20), 0, 0)
	// Bottom block
	firstLineY := float64(HEIGHT) - 50
	dc.DrawStringAnchored(summaryA, float64(20), float64(firstLineY), 0, 0)
	dc.DrawStringAnchored(summaryP, float64(20), float64(firstLineY+15), 0, 0)
	dc.DrawStringAnchored(summaryM, float64(20), float64(firstLineY+30), 0, 0)

	// LEGEND
	xpos := float64(WIDTH - 300)
	dc.SetRGB(0.9, 0.1, 0.1)
	dc.DrawRectangle(xpos, 7.5, 120, 20)
	dc.Stroke()
	dc.SetRGB(0.0, 0.0, 0.0)
	dc.DrawStringAnchored("LEGEND", xpos-100, 7.5, 1, 1)
	dc.DrawStringAnchored("LEGEND", xpos-99, 7.5, 1, 1)
	w, _ := dc.MeasureString("LEGEND")
	dc.DrawLine(xpos-100-w, 35, xpos+120, 35)
	dc.Stroke()
	dc.DrawStringAnchored("start time", xpos, 7.5, 1.15, 1)
	dc.DrawStringAnchored("duration", xpos, 7.5, -0.95, 1)

	// Hours
	dc.SetRGB(0.7, 0.7, 0.7)
	for hour := 1; hour <= 23; hour++ {
		x := float64(hourWidth * hour)
		dc.Push()
		//gg.Translate(x, float64(HEIGHT-20))
		///dc.Rotate(gg.Degrees(90))
		dc.DrawStringAnchored(fmt.Sprintf("%v:00", hour), x, float64(HEIGHT-10), 0.5, 0)
		dc.Pop()
	}

	//baseFilename := fmt.Sprint(filepath.Join(outdir, fmt.Sprintf("%v-%v-%v", sid, seqId, dt)))
	cfg.Log().Debug("writing summary image to ", outputPath)
	err := dc.SavePNG(outputPath)
	if err != nil {
		cfg.Log().Info("drawing: failed to save png")
		cfg.Log().Fatal(err.Error())
	}
}
// pad left-pads a non-negative int to two digits ("7" -> "07").
// Uses the %02d verb, which — unlike the previous string concatenation — also
// behaves sensibly for negative values ("-5" instead of "0-5").
func pad(n int) string {
	return fmt.Sprintf("%02d", n)
}
func eod(t time.Time) time.Time {
year, month, day := t.Date()
return time.Date(year, month, day, 23, 59, 59, 0, t.Location())
} | imls-raspberry-pi/internal/analysis/drawing.go | 0.596433 | 0.550064 | drawing.go | starcoder |
package golcas
import (
"regexp"
"strings"
)
// uuidRegex matches a canonical 8-4-4-4-12 hexadecimal UUID.
// Compiled once at package init: the previous lazy nil-check initialization
// was not safe for concurrent callers of FindUUID (unsynchronized write to a
// package-level variable).
var uuidRegex = regexp.MustCompile(
	"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}")

// FindUUID returns the UUID from the given path or an empty string if it cannot
// find it.
func FindUUID(path string) string {
	return uuidRegex.FindString(path)
}
// The Is*Path predicates below all delegate to isJSONInFolder: matching is
// case-insensitive, and the folder name may appear anywhere in the path.

// IsCategoryPath returns true if the given path describes a JSON file in the
// folder where data sets of type `Category` are stored.
func IsCategoryPath(path string) bool {
	return isJSONInFolder(path, "categories")
}

// IsSourcePath returns true if the given path describes a JSON file in the
// folder where data sets of type `Source` are stored.
func IsSourcePath(path string) bool {
	return isJSONInFolder(path, "sources")
}

// IsActorPath returns true if the given path describes a JSON file in the
// folder where data sets of type `Actor` are stored.
func IsActorPath(path string) bool {
	return isJSONInFolder(path, "actors")
}

// IsUnitGroupPath returns true if the given path describes a JSON file in the
// folder where data sets of type `UnitGroup` are stored.
func IsUnitGroupPath(path string) bool {
	return isJSONInFolder(path, "unit_groups")
}

// IsFlowPropertyPath returns true if the given path describes a JSON file in the
// folder where data sets of type `FlowProperty` are stored.
func IsFlowPropertyPath(path string) bool {
	return isJSONInFolder(path, "flow_properties")
}

// IsFlowPath returns true if the given path describes a JSON file in the
// folder where data sets of type `Flow` are stored.
func IsFlowPath(path string) bool {
	return isJSONInFolder(path, "flows")
}

// IsProcessPath returns true if the given path describes a JSON file in the
// folder where data sets of type `Process` are stored.
func IsProcessPath(path string) bool {
	return isJSONInFolder(path, "processes")
}

// IsProductSystemPath returns true if the given path describes a JSON file in the
// folder where data sets of type `ProductSystem` are stored.
func IsProductSystemPath(path string) bool {
	return isJSONInFolder(path, "product_systems")
}

// IsCurrencyPath returns true if the given path describes a JSON file in the
// folder where data sets of type `Currency` are stored.
func IsCurrencyPath(path string) bool {
	return isJSONInFolder(path, "currencies")
}

// IsImpactCategoryPath returns true if the given path describes a JSON file in the
// folder where data sets of type `ImpactCategory` are stored.
func IsImpactCategoryPath(path string) bool {
	return isJSONInFolder(path, "impact_categories")
}

// IsImpactMethodPath returns true if the given path describes a JSON file in the
// folder where data sets of type `ImpactMethod` are stored.
func IsImpactMethodPath(path string) bool {
	return isJSONInFolder(path, "impact_methods")
}

// IsLocationPath returns true if the given path describes a JSON file in the
// folder where data sets of type `Location` are stored.
func IsLocationPath(path string) bool {
	return isJSONInFolder(path, "locations")
}

// IsNwSetPath returns true if the given path describes a JSON file in the
// folder where data sets of type `NwSet` are stored.
func IsNwSetPath(path string) bool {
	return isJSONInFolder(path, "nw_sets")
}

// IsParameterPath returns true if the given path describes a JSON file in the
// folder where data sets of type `Parameter` are stored.
func IsParameterPath(path string) bool {
	return isJSONInFolder(path, "parameters")
}

// IsProjectPath returns true if the given path describes a JSON file in the
// folder where data sets of type `Project` are stored.
func IsProjectPath(path string) bool {
	return isJSONInFolder(path, "projects")
}

// IsSocialIndicatorPath returns true if the given path describes a JSON file in the
// folder where data sets of type `SocialIndicator` are stored.
func IsSocialIndicatorPath(path string) bool {
	return isJSONInFolder(path, "social_indicators")
}
// Returns true if the given path describes a JSON file in the given folder.
// Matching is case-insensitive; the folder name may appear anywhere in the
// path, not only as a complete path segment.
func isJSONInFolder(path, folder string) bool {
	lowered := strings.ToLower(path)
	return strings.Contains(lowered, folder) && strings.HasSuffix(lowered, ".json")
}
package qdb
/*
#include <qdb/ts.h>
#include <stdlib.h>
*/
import "C"
import (
"math"
"time"
"unsafe"
)
// TsSymbolPoint : timestamped symbol
type TsSymbolPoint struct {
timestamp time.Time
content string
}
// Timestamp : return data point timestamp
func (t TsSymbolPoint) Timestamp() time.Time {
return t.timestamp
}
// Content : return data point content
func (t TsSymbolPoint) Content() string {
return t.content
}
// NewTsSymbolPoint : Create new timeseries symbol point
func NewTsSymbolPoint(timestamp time.Time, value string) TsSymbolPoint {
return TsSymbolPoint{timestamp, value}
}
// :: internals

// toStructC converts the Go point into its C representation.
// convertToCharStar allocates C memory for the content; the caller is
// responsible for freeing it (releaseSymbolPointArray does so for batches).
func (t TsSymbolPoint) toStructC() C.qdb_ts_symbol_point {
	dataSize := C.qdb_size_t(len(t.content))
	data := convertToCharStar(string(t.content))
	return C.qdb_ts_symbol_point{toQdbTimespec(t.timestamp), data, dataSize}
}

// toStructG converts a C point back into a Go TsSymbolPoint, copying the
// C string into Go-managed memory.
func (t C.qdb_ts_symbol_point) toStructG() TsSymbolPoint {
	return TsSymbolPoint{t.timestamp.toStructG(), C.GoStringN(t.content, C.int(t.content_length))}
}

// symbolPointArrayToC converts a batch of Go points to a contiguous array of
// C structs (nil for an empty batch). Each element owns a C string allocation;
// pair with releaseSymbolPointArray.
func symbolPointArrayToC(pts ...TsSymbolPoint) *C.qdb_ts_symbol_point {
	if len(pts) == 0 {
		return nil
	}
	points := make([]C.qdb_ts_symbol_point, len(pts))
	for idx, pt := range pts {
		points[idx] = pt.toStructC()
	}
	return &points[0]
}

// releaseSymbolPointArray frees the per-point C strings allocated by
// symbolPointArrayToC.
func releaseSymbolPointArray(points *C.qdb_ts_symbol_point, length int) {
	if length > 0 {
		slice := symbolPointArrayToSlice(points, length)
		for _, s := range slice {
			C.free(unsafe.Pointer(s.content))
		}
	}
}

// symbolPointArrayToSlice reinterprets a C array as a Go slice WITHOUT
// copying, via the max-size-array cast idiom.
func symbolPointArrayToSlice(points *C.qdb_ts_symbol_point, length int) []C.qdb_ts_symbol_point {
	// See https://github.com/mattn/go-sqlite3/issues/238 for details.
	return (*[(math.MaxInt32 - 1) / unsafe.Sizeof(C.qdb_ts_symbol_point{})]C.qdb_ts_symbol_point)(unsafe.Pointer(points))[:length:length]
}

// symbolPointArrayToGo copies a C point array into a fresh Go slice.
func symbolPointArrayToGo(points *C.qdb_ts_symbol_point, pointsCount C.qdb_size_t) []TsSymbolPoint {
	length := int(pointsCount)
	output := make([]TsSymbolPoint, length)
	if length > 0 {
		slice := symbolPointArrayToSlice(points, length)
		for i, s := range slice {
			output[i] = s.toStructG()
		}
	}
	return output
}
// TsSymbolColumn : a time series symbol column
type TsSymbolColumn struct {
	tsColumn
}

// SymbolColumn : create a column object (the symbol table name is not set)
func (entry TimeseriesEntry) SymbolColumn(columnName string, symtableName string) TsSymbolColumn {
	return TsSymbolColumn{tsColumn{NewSymbolColumnInfo(columnName, symtableName), entry}}
}

// Insert symbol points into a timeseries
//
// The Go points are converted to a temporary C array whose per-point string
// allocations are freed after the qdb call returns.
func (column TsSymbolColumn) Insert(points ...TsSymbolPoint) error {
	alias := convertToCharStar(column.parent.alias)
	defer releaseCharStar(alias)
	columnName := convertToCharStar(column.name)
	defer releaseCharStar(columnName)
	contentCount := C.qdb_size_t(len(points))
	content := symbolPointArrayToC(points...)
	defer releaseSymbolPointArray(content, len(points))
	err := C.qdb_ts_symbol_insert(column.parent.handle, alias, columnName, content, contentCount)
	return makeErrorOrNil(err)
}
// EraseRanges : erase all points in the specified ranges
// Returns the number of points erased.
func (column TsSymbolColumn) EraseRanges(rgs ...TsRange) (uint64, error) {
	alias := convertToCharStar(column.parent.alias)
	defer releaseCharStar(alias)
	columnName := convertToCharStar(column.name)
	defer releaseCharStar(columnName)
	ranges := rangeArrayToC(rgs...)
	rangesCount := C.qdb_size_t(len(rgs))
	erasedCount := C.qdb_uint_t(0)
	err := C.qdb_ts_erase_ranges(column.parent.handle, alias, columnName, ranges, rangesCount, &erasedCount)
	return uint64(erasedCount), makeErrorOrNil(err)
}

// GetRanges : Retrieves symbols in the specified range of the time series column.
// It is an error to call this function on a non existing time-series.
//
// On success the qdb-allocated result array is copied into Go memory and
// released via column.parent.Release before returning.
func (column TsSymbolColumn) GetRanges(rgs ...TsRange) ([]TsSymbolPoint, error) {
	alias := convertToCharStar(column.parent.alias)
	defer releaseCharStar(alias)
	columnName := convertToCharStar(column.name)
	defer releaseCharStar(columnName)
	ranges := rangeArrayToC(rgs...)
	rangesCount := C.qdb_size_t(len(rgs))
	var points *C.qdb_ts_symbol_point
	var pointsCount C.qdb_size_t
	err := C.qdb_ts_symbol_get_ranges(column.parent.handle, alias, columnName, ranges, rangesCount, &points, &pointsCount)

	if err == 0 {
		defer column.parent.Release(unsafe.Pointer(points))
		return symbolPointArrayToGo(points, pointsCount), nil
	}
	return nil, ErrorType(err)
}
// TsSymbolAggregation : Aggregation of double type
// Holds the aggregation kind, the range it applies to, and (after execution)
// the number of points aggregated plus the resulting point.
type TsSymbolAggregation struct {
	kind  TsAggregationType
	rng   TsRange
	count int64
	point TsSymbolPoint
}

// Type : returns the type of the aggregation
func (t TsSymbolAggregation) Type() TsAggregationType {
	return t.kind
}

// Range : returns the range of the aggregation
func (t TsSymbolAggregation) Range() TsRange {
	return t.rng
}

// Count : returns the number of points aggregated into the result
func (t TsSymbolAggregation) Count() int64 {
	return t.count
}

// Result : result of the aggregation
func (t TsSymbolAggregation) Result() TsSymbolPoint {
	return t.point
}

// NewSymbolAggregation : Create new timeseries string aggregation
// count and point start zeroed; they are filled in when the aggregation runs.
func NewSymbolAggregation(kind TsAggregationType, rng TsRange) *TsSymbolAggregation {
	return &TsSymbolAggregation{kind, rng, 0, TsSymbolPoint{}}
}
// :: internals

// toStructC converts the Go aggregation descriptor into its C counterpart.
func (t TsSymbolAggregation) toStructC() C.qdb_ts_symbol_aggregation_t {
	var cAgg C.qdb_ts_symbol_aggregation_t
	cAgg._type = C.qdb_ts_aggregation_type_t(t.kind)
	cAgg._range = t.rng.toStructC()
	cAgg.count = C.qdb_size_t(t.count)
	cAgg.result = t.point.toStructC()
	return cAgg
}

// toStructG converts a C aggregation (including its result) back into Go form.
func (t C.qdb_ts_symbol_aggregation_t) toStructG() TsSymbolAggregation {
	var gAgg TsSymbolAggregation
	gAgg.kind = TsAggregationType(t._type)
	gAgg.rng = t._range.toStructG()
	gAgg.count = int64(t.count)
	gAgg.point = t.result.toStructG()
	return gAgg
}
// symbolAggregationArrayToC converts aggregation descriptors to a contiguous
// C-compatible array and returns a pointer to its first element, or nil when
// there are no aggregations.
func symbolAggregationArrayToC(ags ...*TsSymbolAggregation) *C.qdb_ts_symbol_aggregation_t {
	if len(ags) == 0 {
		return nil
	}
	// Pre-size the slice: the element count is known up front, so the
	// original append-growth loop (with its repeated reallocations) is
	// unnecessary.
	symbolAggregations := make([]C.qdb_ts_symbol_aggregation_t, len(ags))
	for i, ag := range ags {
		symbolAggregations[i] = ag.toStructC()
	}
	return &symbolAggregations[0]
}

// symbolAggregationArrayToSlice reinterprets a C array of aggregations as a
// Go slice without copying. length must match the C array's element count.
func symbolAggregationArrayToSlice(aggregations *C.qdb_ts_symbol_aggregation_t, length int) []C.qdb_ts_symbol_aggregation_t {
	// See https://github.com/mattn/go-sqlite3/issues/238 for details.
	return (*[(math.MaxInt32 - 1) / unsafe.Sizeof(C.qdb_ts_symbol_aggregation_t{})]C.qdb_ts_symbol_aggregation_t)(unsafe.Pointer(aggregations))[:length:length]
}
// symbolAggregationArrayToGo converts aggregation results back to Go,
// writing each result both into the caller-supplied aggs (in place) and into
// the returned slice.
func symbolAggregationArrayToGo(aggregations *C.qdb_ts_symbol_aggregation_t, aggregationsCount C.qdb_size_t, aggs []*TsSymbolAggregation) []TsSymbolAggregation {
	length := int(aggregationsCount)
	output := make([]TsSymbolAggregation, length)
	if length > 0 {
		slice := symbolAggregationArrayToSlice(aggregations, length)
		for i := range slice {
			// Convert once and reuse; the original called toStructG twice
			// per element.
			g := slice[i].toStructG()
			*aggs[i] = g
			output[i] = g
		}
	}
	return output
}
// Aggregate : Aggregate a sub-part of the time series.
// It is an error to call this function on a non existing time-series.
//
// On success each input aggregation in aggs is updated in place with its
// result and a copy of all results is returned as a slice; on failure the
// returned slice is nil.
func (column TsSymbolColumn) Aggregate(aggs ...*TsSymbolAggregation) ([]TsSymbolAggregation, error) {
	alias := convertToCharStar(column.parent.alias)
	defer releaseCharStar(alias)
	columnName := convertToCharStar(column.name)
	defer releaseCharStar(columnName)
	aggregations := symbolAggregationArrayToC(aggs...)
	aggregationsCount := C.qdb_size_t(len(aggs))
	var output []TsSymbolAggregation
	err := C.qdb_ts_symbol_aggregate(column.parent.handle, alias, columnName, aggregations, aggregationsCount)
	if err == 0 {
		output = symbolAggregationArrayToGo(aggregations, aggregationsCount, aggs)
	}
	return output, makeErrorOrNil(err)
}
// Symbol : adds a symbol in row transaction
//
// The value is staged at the current column index. Once an error has been
// recorded on the bulk, later calls skip the C call but still advance the
// index, keeping column positions aligned. Returns the receiver for chaining.
func (t *TsBulk) Symbol(content string) *TsBulk {
	contentSize := C.qdb_size_t(len(content))
	contentPtr := convertToCharStar(content)
	defer releaseCharStar(contentPtr)
	if t.err == nil {
		t.err = makeErrorOrNil(C.qdb_ts_row_set_symbol(t.table, C.qdb_size_t(t.index), contentPtr, contentSize))
	}
	t.index++
	return t
}
// GetSymbol : gets a symbol in row and advances the read cursor.
func (t *TsBulk) GetSymbol() (string, error) {
	var content *C.char
	// Release inside a closure so the value content holds *after* the C call
	// is used. The original `defer t.h.Release(unsafe.Pointer(content))`
	// evaluated its argument at defer time, capturing the initial nil and
	// leaking the C allocation on every successful read.
	defer func() {
		t.h.Release(unsafe.Pointer(content))
	}()
	var contentLength C.qdb_size_t
	err := C.qdb_ts_row_get_symbol(t.table, C.qdb_size_t(t.index), &content, &contentLength)
	t.index++
	// GoStringN copies into Go memory before the deferred release runs.
	return C.GoStringN(content, C.int(contentLength)), makeErrorOrNil(err)
}
// RowSetSymbol : Set symbol at specified index in current row
func (t *TsBatch) RowSetSymbol(index int64, content string) error {
	valueIndex := C.qdb_size_t(index)
	contentSize := C.qdb_size_t(len(content))
	contentPtr := convertToCharStar(content)
	defer releaseCharStar(contentPtr)
	return makeErrorOrNil(C.qdb_ts_batch_row_set_symbol(t.table, valueIndex, contentPtr, contentSize))
}

// RowSetSymbolNoCopy : Set symbol at specified index in current row without copying it
//
// NOTE(review): convertToCharStar still makes a C copy here and the deferred
// release frees it as soon as this function returns; if the "_no_copy" C API
// retains the pointer until the batch is pushed, this is a use-after-free —
// confirm against the qdb C API documentation.
func (t *TsBatch) RowSetSymbolNoCopy(index int64, content string) error {
	valueIndex := C.qdb_size_t(index)
	contentSize := C.qdb_size_t(len(content))
	contentPtr := convertToCharStar(content)
	defer releaseCharStar(contentPtr)
	return makeErrorOrNil(C.qdb_ts_batch_row_set_symbol_no_copy(t.table, valueIndex, contentPtr, contentSize))
}
package parser
import (
"github.com/jxwr/php-parser/ast"
"github.com/jxwr/php-parser/lexer"
"github.com/jxwr/php-parser/token"
)
// parseFunctionStmt parses a named function declaration: the signature
// followed by the statement block forming its body.
func (p *Parser) parseFunctionStmt() *ast.FunctionStmt {
	// Composite-literal operands are evaluated left to right, so the
	// definition is consumed from the token stream before the body.
	return &ast.FunctionStmt{
		FunctionDefinition: p.parseFunctionDefinition(),
		Body:               p.parseBlock(),
	}
}
// parseFunctionDefinition parses a function signature: an optional
// by-reference marker, the function name, and the parenthesized parameter
// list. The body is not consumed here.
func (p *Parser) parseFunctionDefinition() *ast.FunctionDefinition {
	def := &ast.FunctionDefinition{}
	if p.peek().Typ == token.AmpersandOperator {
		// This is a function returning a reference ... ignore this for now
		p.next()
	}
	if !p.accept(token.Identifier) {
		p.next()
		// Some keywords may double as function/method names; anything else
		// is reported as a bad name.
		if !lexer.IsKeyword(p.current.Typ, p.current.Val) {
			p.errorf("bad function name", p.current.Val)
		}
	}
	def.Name = p.current.Val
	def.Arguments = make([]ast.FunctionArgument, 0)
	p.expect(token.OpenParen)
	// Empty parameter list.
	if p.peek().Typ == token.CloseParen {
		p.expect(token.CloseParen)
		return def
	}
	def.Arguments = append(def.Arguments, p.parseFunctionArgument())
	// Remaining parameters are comma separated until the closing paren.
	for {
		switch p.peek().Typ {
		case token.Comma:
			p.expect(token.Comma)
			def.Arguments = append(def.Arguments, p.parseFunctionArgument())
		case token.CloseParen:
			p.expect(token.CloseParen)
			return def
		default:
			p.errorf("unexpected argument separator:", p.current)
			return def
		}
	}
}
// parseFunctionArgument parses a single parameter: an optional type hint,
// an optional by-reference marker, the variable itself, and an optional
// default value after '='.
func (p *Parser) parseFunctionArgument() ast.FunctionArgument {
	arg := ast.FunctionArgument{}
	switch p.peek().Typ {
	case token.Identifier, token.Array, token.Self:
		p.next()
		arg.TypeHint = p.current.Val
	}
	if p.peek().Typ == token.AmpersandOperator {
		// By-reference marker is consumed but not recorded on the argument.
		p.next()
	}
	p.expect(token.VariableOperator)
	p.next()
	arg.Variable = ast.NewVariable(p.current.Val)
	if p.peek().Typ == token.AssignmentOperator {
		p.expect(token.AssignmentOperator)
		p.next()
		arg.Default = p.parseExpression()
	}
	return arg
}
// parseFunctionCall builds a call expression for the given callable and
// consumes its parenthesized argument list.
func (p *Parser) parseFunctionCall(callable ast.Expression) *ast.FunctionCallExpression {
	call := &ast.FunctionCallExpression{FunctionName: callable}
	return p.parseFunctionArguments(call)
}
// parseFunctionArguments parses the parenthesized, comma-separated argument
// list of a call expression into expr and returns expr.
func (p *Parser) parseFunctionArguments(expr *ast.FunctionCallExpression) *ast.FunctionCallExpression {
	expr.Arguments = make([]ast.Expression, 0)
	p.expect(token.OpenParen)
	// Zero-argument call.
	if p.peek().Typ == token.CloseParen {
		p.expect(token.CloseParen)
		return expr
	}
	expr.Arguments = append(expr.Arguments, p.parseNextExpression())
	for p.peek().Typ != token.CloseParen {
		p.expect(token.Comma)
		arg := p.parseNextExpression()
		if arg == nil {
			// A malformed argument yields nil; bail out rather than loop
			// forever waiting for the closing paren.
			break
		}
		expr.Arguments = append(expr.Arguments, arg)
	}
	p.expect(token.CloseParen)
	return expr
}
// parseAnonymousFunction parses a closure: a parenthesized parameter list,
// an optional `use (vars)` closure-variable list, and the body block.
func (p *Parser) parseAnonymousFunction() ast.Expression {
	f := &ast.AnonymousFunction{}
	f.Arguments = make([]ast.FunctionArgument, 0)
	f.ClosureVariables = make([]ast.FunctionArgument, 0)
	p.expect(token.OpenParen)
	if p.peek().Typ != token.CloseParen {
		f.Arguments = append(f.Arguments, p.parseFunctionArgument())
	}
Loop:
	for {
		switch p.peek().Typ {
		case token.Comma:
			p.expect(token.Comma)
			f.Arguments = append(f.Arguments, p.parseFunctionArgument())
		case token.CloseParen:
			break Loop
		default:
			p.errorf("unexpected argument separator:", p.current)
			return f
		}
	}
	p.expect(token.CloseParen)
	// Closure variables
	if p.peek().Typ == token.Use {
		p.expect(token.Use)
		p.expect(token.OpenParen)
		// NOTE(review): this assumes at least one closure variable; an empty
		// `use ()` would feed ')' into parseFunctionArgument — confirm the
		// grammar rules that out upstream.
		f.ClosureVariables = append(f.ClosureVariables, p.parseFunctionArgument())
	ClosureLoop:
		for {
			switch p.peek().Typ {
			case token.Comma:
				p.expect(token.Comma)
				f.ClosureVariables = append(f.ClosureVariables, p.parseFunctionArgument())
			case token.CloseParen:
				break ClosureLoop
			default:
				p.errorf("unexpected argument separator:", p.current)
				return f
			}
		}
		p.expect(token.CloseParen)
	}
	f.Body = p.parseBlock()
	return f
}
package list
// LinkedNode is a record of a linked list.
type LinkedNode struct {
	Data interface{}
	next *LinkedNode
}

// LinkedList is a singly linked, linear collection of data nodes.
type LinkedList struct {
	head   *LinkedNode
	length uint64
}

// NewLinkedList returns an empty linked list.
func NewLinkedList() *LinkedList {
	return &LinkedList{}
}

// Add inserts data at the given index and returns the new node.
// It returns nil when the index is out of range (> Length) or data is nil.
func (l *LinkedList) Add(index uint64, data interface{}) *LinkedNode {
	if index > l.length || data == nil {
		return nil
	}
	if index == 0 {
		return l.AddHead(data)
	}
	if index == l.length {
		return l.AddTail(data)
	}
	// Walk to the node just before the insertion point.
	current := l.head
	for i := uint64(1); i < index; i++ {
		current = current.next
	}
	l.length++
	node := &LinkedNode{Data: data}
	node.next = current.next
	current.next = node
	return node
}

// Remove deletes and returns the node at the given index, or nil when the
// index is out of range.
func (l *LinkedList) Remove(index uint64) *LinkedNode {
	if index >= l.length {
		return nil
	}
	if index == 0 {
		return l.RemoveHead()
	}
	if index == l.length-1 {
		return l.RemoveTail()
	}
	l.length--
	prevCurrent := l.head
	current := l.head
	for i := uint64(0); i < index; i++ {
		prevCurrent = current
		current = current.next
	}
	prevCurrent.next = current.next
	// Detach so the removed node cannot reach the remaining list.
	current.next = nil
	return current
}

// AddHead prepends data to the linked list and returns the new node,
// or nil when data is nil.
func (l *LinkedList) AddHead(data interface{}) *LinkedNode {
	if data == nil {
		return nil
	}
	l.length++
	node := &LinkedNode{Data: data}
	if l.head == nil {
		l.head = node
	} else {
		node.next = l.head
		l.head = node
	}
	return node
}

// RemoveHead removes and returns the first node, or nil when the list is empty.
func (l *LinkedList) RemoveHead() *LinkedNode {
	if l.head == nil {
		return nil
	}
	l.length--
	prevHead := l.head
	l.head = prevHead.next
	prevHead.next = nil
	return prevHead
}

// AddTail appends data to the linked list and returns the new node,
// or nil when data is nil. This walks the whole list (O(n)): there is no
// tail pointer.
func (l *LinkedList) AddTail(data interface{}) *LinkedNode {
	if data == nil {
		return nil
	}
	l.length++
	node := &LinkedNode{Data: data}
	if l.head == nil {
		l.head = node
	} else {
		tail := l.head
		for tail.next != nil {
			tail = tail.next
		}
		tail.next = node
	}
	return node
}

// RemoveTail removes and returns the last node, or nil when the list is empty.
func (l *LinkedList) RemoveTail() *LinkedNode {
	if l.head == nil {
		return nil
	}
	l.length--
	prevTail := l.head
	tail := l.head
	for tail.next != nil {
		prevTail = tail
		tail = tail.next
	}
	if tail == l.head {
		// Single-element list: clear the head too. The original code left
		// l.head pointing at the removed node, so the list reported length 0
		// while still holding (and later resurrecting) the stale node.
		l.head = nil
	} else {
		prevTail.next = nil
	}
	return tail
}

// Length returns the number of nodes in the list.
func (l *LinkedList) Length() uint64 {
	return l.length
}
package ast
import (
"bytes"
"github.com/TurnsCoffeeIntoScripts/git-log-issue-finder/pkg/interpreter/gitoken"
"strings"
)
// Node is an interface that needs to be implemented by every element that the AST will contain
type Node interface {
TokenLiteral() string
String() string
}
// Statement is an interface enclosing a node and to be implemented by the language's statements
type Statement interface {
Node
statementNode()
}
// Expression is an interface enclosing a node and to be implemented by the language's expression
type Expression interface {
Node
expressionNode()
}
// Program is a collection (slice) of statements that represents the input program
type Program struct {
	Statements []Statement
}

// Identifier is an identifier node
type Identifier struct {
	Token gitoken.Token
	Value string
}

// LetStatement is an ast node representing a statement of the form: 'let <IDENT> = <EXPR>
type LetStatement struct {
	Token gitoken.Token
	Name  *Identifier
	Value Expression
}

// SetStatement is an ast node representing a statement of the form: 'set <KEYWORD> <LITTERAL>
type SetStatement struct {
	Token gitoken.Token
	Name  *Identifier
	Value Expression
}

// ReturnStatement is an ast node representing a statement of the form: 'return <EXPR>'
type ReturnStatement struct {
	Token       gitoken.Token
	ReturnValue Expression
}

// ExpressionStatement is an ast node representing a statement of the form: '<EXPR>'
type ExpressionStatement struct {
	Token      gitoken.Token
	Expression Expression
}

// BlockStatement is an ast node representing a collection of statement and an initiating token
type BlockStatement struct {
	Token      gitoken.Token
	Statements []Statement
}

// FunctionLiteral is an ast node representing a function of the form: 'fn(<PARAMS>) ast.BlockStatement'
type FunctionLiteral struct {
	Token      gitoken.Token
	Parameters []*Identifier
	Body       *BlockStatement
}

// ArrayLiteral is an ast node representing an array of the form: '[<ELEM1>, <ELEME2>, ..., <ELEMn>]'
type ArrayLiteral struct {
	Token    gitoken.Token
	Elements []Expression
}

// HashLiteral is an ast node representing a hash of the form: '{<KEY1>: <VAL1>, <KEY2>: <VAL2>, ..., <KEYn>: <VALn>}'
type HashLiteral struct {
	Token gitoken.Token
	Pairs map[Expression]Expression
}

// IntegerLiteral is an ast node representing an integer value
type IntegerLiteral struct {
	Token gitoken.Token
	Value int64
}

// StringLiteral is an ast node representing a string value
type StringLiteral struct {
	Token gitoken.Token
	Value string
}

// Boolean is an ast node representing a boolean value
type Boolean struct {
	Token gitoken.Token
	Value bool
}

// PrefixExpression is an ast node representing an expression of the form: '<OPERATOR><EXPR>'
type PrefixExpression struct {
	Token    gitoken.Token
	Operator string
	Right    Expression
}

// InfixExpression is an ast node representing an expression of the form: '<EXPR> <OPERATOR> <EXPR>'
type InfixExpression struct {
	Token    gitoken.Token
	Left     Expression
	Operator string
	Right    Expression
}

// IfExpression is an ast node representing an expression of the form: 'if(<EXPR>) ast.BlockStatement else ast.BlockStatement '
// Alternative is nil when there is no else branch.
type IfExpression struct {
	Token       gitoken.Token
	Condition   Expression
	Consequence *BlockStatement
	Alternative *BlockStatement
}

// CallExpression is an ast node representing an expression of the form: 'FNIDENT(PARAMS)'
type CallExpression struct {
	Token     gitoken.Token
	Function  Expression
	Arguments []Expression
}

// IndexExpression is an ast node representing an expression of the form: 'ARRAY[<EXPR>]'
type IndexExpression struct {
	Token gitoken.Token
	Left  Expression
	Index Expression
}
// TokenLiteral returns the literal of the program's first statement, or the
// empty string when the program has no statements.
func (p *Program) TokenLiteral() string {
	if len(p.Statements) == 0 {
		return ""
	}
	return p.Statements[0].TokenLiteral()
}

// String renders every statement in order and concatenates the results.
func (p *Program) String() string {
	var sb strings.Builder
	for _, s := range p.Statements {
		sb.WriteString(s.String())
	}
	return sb.String()
}

// expressionNode marks Identifier as an Expression.
func (i *Identifier) expressionNode() {}

// TokenLiteral returns the literal string of the token
func (i *Identifier) TokenLiteral() string {
	return i.Token.Literal
}

// String renders the identifier as its value.
func (i *Identifier) String() string {
	return i.Value
}
// statementNode marks LetStatement as a Statement.
func (ls *LetStatement) statementNode() {
}

// String renders the statement as "let <name> = <value>;".
func (ls *LetStatement) String() string {
	var out bytes.Buffer
	out.WriteString(ls.TokenLiteral() + " ")
	out.WriteString(ls.Name.String())
	out.WriteString(" = ")
	if ls.Value != nil {
		out.WriteString(ls.Value.String())
	}
	out.WriteString(";")
	return out.String()
}

// TokenLiteral returns the literal string of the token
func (ls *SetStatement) TokenLiteral() string {
	return ls.Token.Literal
}

// statementNode marks SetStatement as a Statement.
func (ls *SetStatement) statementNode() {
}

// String renders the statement as: set <name> "<value>";
// Note the value is re-quoted on output.
func (ls *SetStatement) String() string {
	var out bytes.Buffer
	out.WriteString(ls.TokenLiteral() + " ")
	out.WriteString(ls.Name.String())
	out.WriteString(" ")
	if ls.Value != nil {
		out.WriteString("\"")
		out.WriteString(ls.Value.String())
		out.WriteString("\"")
	}
	out.WriteString(";")
	return out.String()
}

// TokenLiteral returns the literal string of the token
func (ls *LetStatement) TokenLiteral() string {
	return ls.Token.Literal
}

// statementNode marks ReturnStatement as a Statement.
func (rs *ReturnStatement) statementNode() {
}

// TokenLiteral returns the literal string of the token
func (rs *ReturnStatement) TokenLiteral() string {
	return rs.Token.Literal
}

// String renders the statement as "return <value>;".
func (rs *ReturnStatement) String() string {
	var out bytes.Buffer
	out.WriteString(rs.TokenLiteral() + " ")
	if rs.ReturnValue != nil {
		out.WriteString(rs.ReturnValue.String())
	}
	out.WriteString(";")
	return out.String()
}
// statementNode marks ExpressionStatement as a Statement.
func (es *ExpressionStatement) statementNode() {
}

// TokenLiteral returns the literal string of the token
func (es *ExpressionStatement) TokenLiteral() string {
	return es.Token.Literal
}

// String renders the wrapped expression, or "" when there is none.
func (es *ExpressionStatement) String() string {
	if es.Expression != nil {
		return es.Expression.String()
	}
	return ""
}

// statementNode marks BlockStatement as a Statement.
func (bs *BlockStatement) statementNode() {
}

// TokenLiteral returns the literal string of the token
func (bs *BlockStatement) TokenLiteral() string {
	return bs.Token.Literal
}

// String renders the block's statements concatenated, without braces.
func (bs *BlockStatement) String() string {
	var out bytes.Buffer
	for _, s := range bs.Statements {
		out.WriteString(s.String())
	}
	return out.String()
}

// expressionNode marks FunctionLiteral as an Expression.
func (fl *FunctionLiteral) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (fl *FunctionLiteral) TokenLiteral() string {
	return fl.Token.Literal
}

// String renders the function as "<fn-token>(<params>)<body>".
func (fl *FunctionLiteral) String() string {
	var out bytes.Buffer
	var params []string
	for _, p := range fl.Parameters {
		params = append(params, p.String())
	}
	out.WriteString(fl.TokenLiteral())
	out.WriteString("(")
	out.WriteString(strings.Join(params, ", "))
	out.WriteString(")")
	out.WriteString(fl.Body.String())
	return out.String()
}
// expressionNode marks ArrayLiteral as an Expression.
func (al *ArrayLiteral) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (al *ArrayLiteral) TokenLiteral() string {
	return al.Token.Literal
}

// String renders the array as "[e1, e2, ...]".
func (al *ArrayLiteral) String() string {
	var out bytes.Buffer
	var elements []string
	for _, el := range al.Elements {
		elements = append(elements, el.String())
	}
	out.WriteString("[")
	out.WriteString(strings.Join(elements, ", "))
	out.WriteString("]")
	return out.String()
}

// expressionNode marks HashLiteral as an Expression.
func (hl *HashLiteral) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (hl *HashLiteral) TokenLiteral() string {
	return hl.Token.Literal
}

// String renders the hash as "{k:v, ...}".
// Pairs is a map, so the rendered pair order is nondeterministic across runs.
func (hl *HashLiteral) String() string {
	var out bytes.Buffer
	var pairs []string
	for key, value := range hl.Pairs {
		pairs = append(pairs, key.String()+":"+value.String())
	}
	out.WriteString("{")
	out.WriteString(strings.Join(pairs, ", "))
	out.WriteString("}")
	return out.String()
}

// expressionNode marks IntegerLiteral as an Expression.
func (il *IntegerLiteral) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (il *IntegerLiteral) TokenLiteral() string {
	return il.Token.Literal
}

// String renders the integer exactly as it appeared in the source token.
func (il *IntegerLiteral) String() string {
	return il.Token.Literal
}

// expressionNode marks StringLiteral as an Expression.
func (sl *StringLiteral) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (sl *StringLiteral) TokenLiteral() string {
	return sl.Token.Literal
}

// String renders the string exactly as it appeared in the source token.
func (sl *StringLiteral) String() string {
	return sl.Token.Literal
}

// expressionNode marks Boolean as an Expression.
func (b *Boolean) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (b *Boolean) TokenLiteral() string {
	return b.Token.Literal
}

// String renders the boolean exactly as it appeared in the source token.
func (b *Boolean) String() string {
	return b.Token.Literal
}
// expressionNode marks PrefixExpression as an Expression.
func (pe *PrefixExpression) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (pe *PrefixExpression) TokenLiteral() string {
	return pe.Token.Literal
}

// String renders the expression fully parenthesized: "(<op><right>)".
func (pe *PrefixExpression) String() string {
	var out bytes.Buffer
	out.WriteString("(")
	out.WriteString(pe.Operator)
	out.WriteString(pe.Right.String())
	out.WriteString(")")
	return out.String()
}

// expressionNode marks InfixExpression as an Expression.
func (ie *InfixExpression) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (ie *InfixExpression) TokenLiteral() string {
	return ie.Token.Literal
}

// String renders the expression fully parenthesized: "(<left> <op> <right>)".
func (ie *InfixExpression) String() string {
	var out bytes.Buffer
	out.WriteString("(")
	out.WriteString(ie.Left.String())
	out.WriteString(" " + ie.Operator + " ")
	out.WriteString(ie.Right.String())
	out.WriteString(")")
	return out.String()
}

// expressionNode marks IfExpression as an Expression.
func (ie *IfExpression) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (ie *IfExpression) TokenLiteral() string {
	return ie.Token.Literal
}

// String renders "if<cond> <consequence>" plus "else<alternative>" when an
// else branch is present.
func (ie *IfExpression) String() string {
	var out bytes.Buffer
	out.WriteString("if")
	out.WriteString(ie.Condition.String())
	out.WriteString(" ")
	out.WriteString(ie.Consequence.String())
	if ie.Alternative != nil {
		out.WriteString("else")
		out.WriteString(ie.Alternative.String())
	}
	return out.String()
}

// expressionNode marks CallExpression as an Expression.
func (ce *CallExpression) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (ce *CallExpression) TokenLiteral() string {
	return ce.Token.Literal
}

// String renders the call as "<fn>(a1, a2, ...)".
func (ce *CallExpression) String() string {
	var out bytes.Buffer
	var args []string
	for _, a := range ce.Arguments {
		args = append(args, a.String())
	}
	out.WriteString(ce.Function.String())
	out.WriteString("(")
	out.WriteString(strings.Join(args, ", "))
	out.WriteString(")")
	return out.String()
}

// expressionNode marks IndexExpression as an Expression.
func (ie *IndexExpression) expressionNode() {
}

// TokenLiteral returns the literal string of the token
func (ie *IndexExpression) TokenLiteral() string {
	return ie.Token.Literal
}

// String renders the indexing as "(<left>[<index>])".
func (ie *IndexExpression) String() string {
	var out bytes.Buffer
	out.WriteString("(")
	out.WriteString(ie.Left.String())
	out.WriteString("[")
	out.WriteString(ie.Index.String())
	out.WriteString("]")
	out.WriteString(")")
	return out.String()
}
package onshape
import (
"encoding/json"
)
// BTVector3d389 struct for BTVector3d389.
// Generated-style API model: optional fields are pointers so that "unset"
// can be distinguished from the zero value; keep edits to comments only so
// regeneration diffs stay clean.
type BTVector3d389 struct {
	BtType *string  `json:"btType,omitempty"`
	X      *float64 `json:"x,omitempty"`
	Y      *float64 `json:"y,omitempty"`
	Z      *float64 `json:"z,omitempty"`
}

// NewBTVector3d389 instantiates a new BTVector3d389 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTVector3d389() *BTVector3d389 {
	this := BTVector3d389{}
	return &this
}

// NewBTVector3d389WithDefaults instantiates a new BTVector3d389 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTVector3d389WithDefaults() *BTVector3d389 {
	this := BTVector3d389{}
	return &this
}

// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTVector3d389) GetBtType() string {
	if o == nil || o.BtType == nil {
		var ret string
		return ret
	}
	return *o.BtType
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTVector3d389) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType returns a boolean if a field has been set.
func (o *BTVector3d389) HasBtType() bool {
	if o != nil && o.BtType != nil {
		return true
	}
	return false
}

// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTVector3d389) SetBtType(v string) {
	o.BtType = &v
}

// GetX returns the X field value if set, zero value otherwise.
func (o *BTVector3d389) GetX() float64 {
	if o == nil || o.X == nil {
		var ret float64
		return ret
	}
	return *o.X
}

// GetXOk returns a tuple with the X field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTVector3d389) GetXOk() (*float64, bool) {
	if o == nil || o.X == nil {
		return nil, false
	}
	return o.X, true
}

// HasX returns a boolean if a field has been set.
func (o *BTVector3d389) HasX() bool {
	if o != nil && o.X != nil {
		return true
	}
	return false
}

// SetX gets a reference to the given float64 and assigns it to the X field.
func (o *BTVector3d389) SetX(v float64) {
	o.X = &v
}

// GetY returns the Y field value if set, zero value otherwise.
func (o *BTVector3d389) GetY() float64 {
	if o == nil || o.Y == nil {
		var ret float64
		return ret
	}
	return *o.Y
}

// GetYOk returns a tuple with the Y field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTVector3d389) GetYOk() (*float64, bool) {
	if o == nil || o.Y == nil {
		return nil, false
	}
	return o.Y, true
}

// HasY returns a boolean if a field has been set.
func (o *BTVector3d389) HasY() bool {
	if o != nil && o.Y != nil {
		return true
	}
	return false
}

// SetY gets a reference to the given float64 and assigns it to the Y field.
func (o *BTVector3d389) SetY(v float64) {
	o.Y = &v
}

// GetZ returns the Z field value if set, zero value otherwise.
func (o *BTVector3d389) GetZ() float64 {
	if o == nil || o.Z == nil {
		var ret float64
		return ret
	}
	return *o.Z
}

// GetZOk returns a tuple with the Z field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTVector3d389) GetZOk() (*float64, bool) {
	if o == nil || o.Z == nil {
		return nil, false
	}
	return o.Z, true
}

// HasZ returns a boolean if a field has been set.
func (o *BTVector3d389) HasZ() bool {
	if o != nil && o.Z != nil {
		return true
	}
	return false
}

// SetZ gets a reference to the given float64 and assigns it to the Z field.
func (o *BTVector3d389) SetZ(v float64) {
	o.Z = &v
}

// MarshalJSON serializes only the fields that have been set.
func (o BTVector3d389) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.X != nil {
		toSerialize["x"] = o.X
	}
	if o.Y != nil {
		toSerialize["y"] = o.Y
	}
	if o.Z != nil {
		toSerialize["z"] = o.Z
	}
	return json.Marshal(toSerialize)
}

// NullableBTVector3d389 wraps a BTVector3d389 pointer together with an
// explicit "set" flag, so JSON null can be represented and round-tripped.
type NullableBTVector3d389 struct {
	value *BTVector3d389
	isSet bool
}

func (v NullableBTVector3d389) Get() *BTVector3d389 {
	return v.value
}

func (v *NullableBTVector3d389) Set(val *BTVector3d389) {
	v.value = val
	v.isSet = true
}

func (v NullableBTVector3d389) IsSet() bool {
	return v.isSet
}

func (v *NullableBTVector3d389) Unset() {
	v.value = nil
	v.isSet = false
}

func NewNullableBTVector3d389(val *BTVector3d389) *NullableBTVector3d389 {
	return &NullableBTVector3d389{value: val, isSet: true}
}

func (v NullableBTVector3d389) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

func (v *NullableBTVector3d389) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package animation
// Code to support mapping from logical 'universes' to physical pixel layout.
import (
"fmt"
"image/color"
"math"
)
// {board, strand, pixel} tuple identifying a physical pixel
type location struct {
board, strand, pixel uint
}
// Mapping captures mapping from logical to physical layer
type Mapping struct {
// Buffer of data mapping to physical pixels
// Three levels of indexing:
// 1. Controller board number
// 2. Strand number within controller board
// 3. Pixel number within strand
physBuf [][][]color.RGBA
// Mapping from 'universes' (logical view of pixels) to physical pixels.
// Two levels of indexing:
// 1. Universe number
// 2. Pixel number within universe
universes [][]location
// Mapping from universe name to universe ID
uniNameToIndex map[string]int
}
// PhysicalRange defines a range of physical pixels within asingle strand
type PhysicalRange struct {
Board, Strand, StartPixel, Size uint
}
// NewMapping creates a new Mapping, using the provided dimensions.
// Size of outer array governs the number of controller boards
// Sizes of inner arrays govern the number of strands within each board
// Values in inner array govern the number of pixels in the strand
func NewMapping(dimension [][]int) Mapping {
// Make the triply-nested physical buffer structure based on the provided dimensions
// Allocate space for a reasonable number of universes
m := Mapping{
physBuf: make([][][]color.RGBA, len(dimension)),
universes: make([][]location, 0, 16),
uniNameToIndex: make(map[string]int),
}
for boardIdx := range dimension {
m.physBuf[boardIdx] = make([][]color.RGBA, len(dimension[boardIdx]))
for strandIdx := range dimension[boardIdx] {
m.physBuf[boardIdx][strandIdx] = make([]color.RGBA, dimension[boardIdx][strandIdx])
}
}
return m
}
// AddUniverse adds a universe mapping with the given name.
// The provided set of physical ranges identifies the set of physical pixels
// corresponding to the universe. The order of physical pixels presented defines
// the logical ordering of the universe, and the size of the universe is equal
// to the number of physical pixels provided
// Returns true if the universe was successfully added; returns false if the
// universe name already exists or a specified physical pixel doesn't exist.
func (m *Mapping) AddUniverse(name string, ranges []PhysicalRange) bool {
if _, exists := m.uniNameToIndex[name]; exists {
return false
}
// Figure out the size
size := uint(0)
for _, r := range ranges {
size += r.Size
}
// Allocate locations array for universe
locs := make([]location, size)
// Populate locations array from pixel ranges
unidx := 0
for _, r := range ranges {
for idx := r.StartPixel; idx < r.StartPixel+r.Size; idx++ {
locs[unidx] = location{r.Board, r.Strand, idx}
unidx++
}
}
// Add the universe to the structure
m.universes = append(m.universes, locs)
m.uniNameToIndex[name] = len(m.universes) - 1
return true
}
// IDForUniverse gets the internal ID associated with the given universe name.
// Returns error and large invalid ID if universe name is not found
func (m *Mapping) IDForUniverse(universeName string) (uint, error) {
id, ok := m.uniNameToIndex[universeName]
if !ok {
return math.MaxUint32, fmt.Errorf("\"%s\" is not a known universe", universeName)
}
return uint(id), nil
}
// UpdateUniverse updates physical pixel color values for pixels corresponding
// to the provided universe.
func (m *Mapping) UpdateUniverse(id uint, rgbData []color.RGBA) (err error) {
u := m.universes[id]
for idx, l := range u {
if idx >= len(rgbData) {
return fmt.Errorf("RGB values (%d) not long enough for universe %d (%+v)", len(rgbData), id, l)
}
m.physBuf[l.board][l.strand][l.pixel] = rgbData[idx]
}
return nil
}
// GetStrandData returns color data for a physical strand. The slice returned
// references the master buffer for the strand and so can be changed by further
// calls to UpdateUniverse. If the caller needs to retain the data, a copy
// should be made
// The strand in question is identified by the board and strand indices provided.
// Returns an empty slice and an error if an invalid strand is specified
func (m *Mapping) GetStrandData(board, strand uint) ([]color.RGBA, error) {
if int(board) >= len(m.physBuf) {
return nil, fmt.Errorf("%d is an invalid board index", board)
}
if int(strand) >= len(m.physBuf[board]) {
return nil, fmt.Errorf("%d is an invalid strand number for board %d",
strand, board)
}
return m.physBuf[board][strand], nil
} | vendor/github.com/TeamNorCal/animation/universe.go | 0.809088 | 0.58053 | universe.go | starcoder |
package plaid
import (
"encoding/json"
)
// SignalAddressData Data about the components comprising an address.
// Generated-style API model: optional fields are pointers / Nullable wrappers
// so "unset" and JSON null can be distinguished from the zero value.
type SignalAddressData struct {
	// The full city name
	City *string `json:"city,omitempty"`
	// The region or state Example: `\"NC\"`
	Region NullableString `json:"region,omitempty"`
	// The full street address Example: `\"564 Main Street, APT 15\"`
	Street *string `json:"street,omitempty"`
	// The postal code
	PostalCode NullableString `json:"postal_code,omitempty"`
	// The ISO 3166-1 alpha-2 country code
	Country NullableString `json:"country,omitempty"`
	AdditionalProperties map[string]interface{}
}

// _SignalAddressData is a shadow type used to avoid infinite recursion in
// custom (un)marshalling of SignalAddressData.
type _SignalAddressData SignalAddressData

// NewSignalAddressData instantiates a new SignalAddressData object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewSignalAddressData() *SignalAddressData {
	this := SignalAddressData{}
	return &this
}

// NewSignalAddressDataWithDefaults instantiates a new SignalAddressData object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewSignalAddressDataWithDefaults() *SignalAddressData {
	this := SignalAddressData{}
	return &this
}
// GetCity returns the City field value if set, zero value otherwise.
func (o *SignalAddressData) GetCity() string {
if o == nil || o.City == nil {
var ret string
return ret
}
return *o.City
}
// GetCityOk returns a tuple with the City field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SignalAddressData) GetCityOk() (*string, bool) {
if o == nil || o.City == nil {
return nil, false
}
return o.City, true
}
// HasCity returns a boolean if a field has been set.
func (o *SignalAddressData) HasCity() bool {
if o != nil && o.City != nil {
return true
}
return false
}
// SetCity gets a reference to the given string and assigns it to the City field.
func (o *SignalAddressData) SetCity(v string) {
o.City = &v
}
// GetRegion returns the Region field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *SignalAddressData) GetRegion() string {
	if o == nil {
		return ""
	}
	if v := o.Region.Get(); v != nil {
		return *v
	}
	return ""
}

// GetRegionOk returns a tuple with the Region field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *SignalAddressData) GetRegionOk() (*string, bool) {
	if o != nil {
		return o.Region.Get(), o.Region.IsSet()
	}
	return nil, false
}

// HasRegion returns a boolean if a field has been set.
func (o *SignalAddressData) HasRegion() bool {
	return o != nil && o.Region.IsSet()
}

// SetRegion gets a reference to the given NullableString and assigns it to the Region field.
func (o *SignalAddressData) SetRegion(v string) {
	o.Region.Set(&v)
}

// SetRegionNil sets the value for Region to be an explicit nil
func (o *SignalAddressData) SetRegionNil() {
	o.Region.Set(nil)
}

// UnsetRegion ensures that no value is present for Region, not even an explicit nil
func (o *SignalAddressData) UnsetRegion() {
	o.Region.Unset()
}
// GetStreet returns the Street field value if set, zero value otherwise.
func (o *SignalAddressData) GetStreet() string {
	if o != nil && o.Street != nil {
		return *o.Street
	}
	return ""
}

// GetStreetOk returns a tuple with the Street field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SignalAddressData) GetStreetOk() (*string, bool) {
	if o != nil && o.Street != nil {
		return o.Street, true
	}
	return nil, false
}

// HasStreet returns a boolean if a field has been set.
func (o *SignalAddressData) HasStreet() bool {
	return o != nil && o.Street != nil
}

// SetStreet gets a reference to the given string and assigns it to the Street field.
func (o *SignalAddressData) SetStreet(v string) {
	o.Street = &v
}
// GetPostalCode returns the PostalCode field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *SignalAddressData) GetPostalCode() string {
	if o == nil {
		return ""
	}
	if v := o.PostalCode.Get(); v != nil {
		return *v
	}
	return ""
}

// GetPostalCodeOk returns a tuple with the PostalCode field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *SignalAddressData) GetPostalCodeOk() (*string, bool) {
	if o != nil {
		return o.PostalCode.Get(), o.PostalCode.IsSet()
	}
	return nil, false
}

// HasPostalCode returns a boolean if a field has been set.
func (o *SignalAddressData) HasPostalCode() bool {
	return o != nil && o.PostalCode.IsSet()
}

// SetPostalCode gets a reference to the given NullableString and assigns it to the PostalCode field.
func (o *SignalAddressData) SetPostalCode(v string) {
	o.PostalCode.Set(&v)
}

// SetPostalCodeNil sets the value for PostalCode to be an explicit nil
func (o *SignalAddressData) SetPostalCodeNil() {
	o.PostalCode.Set(nil)
}

// UnsetPostalCode ensures that no value is present for PostalCode, not even an explicit nil
func (o *SignalAddressData) UnsetPostalCode() {
	o.PostalCode.Unset()
}
// GetCountry returns the Country field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *SignalAddressData) GetCountry() string {
	if o == nil {
		return ""
	}
	if v := o.Country.Get(); v != nil {
		return *v
	}
	return ""
}

// GetCountryOk returns a tuple with the Country field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *SignalAddressData) GetCountryOk() (*string, bool) {
	if o != nil {
		return o.Country.Get(), o.Country.IsSet()
	}
	return nil, false
}

// HasCountry returns a boolean if a field has been set.
func (o *SignalAddressData) HasCountry() bool {
	return o != nil && o.Country.IsSet()
}

// SetCountry gets a reference to the given NullableString and assigns it to the Country field.
func (o *SignalAddressData) SetCountry(v string) {
	o.Country.Set(&v)
}

// SetCountryNil sets the value for Country to be an explicit nil
func (o *SignalAddressData) SetCountryNil() {
	o.Country.Set(nil)
}

// UnsetCountry ensures that no value is present for Country, not even an explicit nil
func (o *SignalAddressData) UnsetCountry() {
	o.Country.Unset()
}
// MarshalJSON serializes only the fields that are set; nullable fields marked
// as set encode as explicit null, and any additional properties are merged in.
func (o SignalAddressData) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	if o.City != nil {
		out["city"] = o.City
	}
	if o.Region.IsSet() {
		out["region"] = o.Region.Get()
	}
	if o.Street != nil {
		out["street"] = o.Street
	}
	if o.PostalCode.IsSet() {
		out["postal_code"] = o.PostalCode.Get()
	}
	if o.Country.IsSet() {
		out["country"] = o.Country.Get()
	}
	for key, value := range o.AdditionalProperties {
		out[key] = value
	}
	return json.Marshal(out)
}
// UnmarshalJSON decodes the known fields via the _SignalAddressData alias
// (avoiding recursion into this method) and stores any unknown keys in
// AdditionalProperties.
func (o *SignalAddressData) UnmarshalJSON(bytes []byte) (err error) {
	varSignalAddressData := _SignalAddressData{}
	if err = json.Unmarshal(bytes, &varSignalAddressData); err != nil {
		// Bug fix: previously this error was silently overwritten by the
		// second Unmarshal below, so malformed input could be reported as
		// success. Surface it to the caller instead.
		return err
	}
	*o = SignalAddressData(varSignalAddressData)
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
		// Remove the known keys so only true extras remain.
		delete(additionalProperties, "city")
		delete(additionalProperties, "region")
		delete(additionalProperties, "street")
		delete(additionalProperties, "postal_code")
		delete(additionalProperties, "country")
		o.AdditionalProperties = additionalProperties
	}
	return err
}
// NullableSignalAddressData wraps a *SignalAddressData with an explicit
// "set" flag so an explicit JSON null can be distinguished from an absent value.
type NullableSignalAddressData struct {
	value *SignalAddressData
	isSet bool
}
// Get returns the wrapped value (may be nil even when set, i.e. explicit null).
func (v NullableSignalAddressData) Get() *SignalAddressData {
	return v.value
}
// Set stores val (nil means explicit null) and marks the wrapper as set.
func (v *NullableSignalAddressData) Set(val *SignalAddressData) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set has been called (including with nil).
func (v NullableSignalAddressData) IsSet() bool {
	return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullableSignalAddressData) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableSignalAddressData returns a wrapper already marked as set.
func NewNullableSignalAddressData(val *SignalAddressData) *NullableSignalAddressData {
	return &NullableSignalAddressData{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableSignalAddressData) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullableSignalAddressData) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
func (v *NullableSignalAddressData) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_signal_address_data.go | 0.828384 | 0.421016 | model_signal_address_data.go | starcoder |
package plan
import (
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/types"
"github.com/pingcap/tipb/go-tipb"
)
// ExpressionsToPB converts expression to tipb.Expr.
// Each convertible expression is appended to pushed and AND-ed into the single
// returned pbExpr; expressions that cannot be converted (or that the store does
// not support) are returned in remained for local evaluation.
func ExpressionsToPB(sc *variable.StatementContext, exprs []expression.Expression, client kv.Client) (pbExpr *tipb.Expr, pushed []expression.Expression, remained []expression.Expression) {
	pc := pbConverter{client: client, sc: sc}
	for _, expr := range exprs {
		v := pc.exprToPB(expr)
		if v == nil {
			// Not push-down-able; the caller must evaluate it.
			remained = append(remained, expr)
			continue
		}
		pushed = append(pushed, expr)
		if pbExpr == nil {
			pbExpr = v
		} else {
			// merge multiple converted pb expression into an AND expression.
			pbExpr = &tipb.Expr{
				Tp:       tipb.ExprType_And,
				Children: []*tipb.Expr{pbExpr, v}}
		}
	}
	return
}
// pbConverter converts expressions to tipb protobuf form, consulting client to
// check which expression types the storage layer supports.
type pbConverter struct {
	client kv.Client
	sc     *variable.StatementContext
}
// exprToPB dispatches on the concrete expression type. It returns nil when the
// expression cannot be converted, which callers treat as "keep it local".
func (pc pbConverter) exprToPB(expr expression.Expression) *tipb.Expr {
	switch x := expr.(type) {
	case *expression.Constant:
		return pc.datumToPBExpr(x.Value)
	case *expression.Column:
		return pc.columnToPBExpr(x)
	case *expression.ScalarFunction:
		return pc.scalarFuncToPBExpr(x)
	}
	return nil
}
// datumToPBExpr encodes a constant datum as a tipb.Expr. It returns nil for
// datum kinds with no protobuf mapping or when the store does not support the
// resulting expression type.
func (pc pbConverter) datumToPBExpr(d types.Datum) *tipb.Expr {
	var tp tipb.ExprType
	var val []byte
	switch d.Kind() {
	case types.KindNull:
		tp = tipb.ExprType_Null
	case types.KindInt64:
		tp = tipb.ExprType_Int64
		val = codec.EncodeInt(nil, d.GetInt64())
	case types.KindUint64:
		tp = tipb.ExprType_Uint64
		val = codec.EncodeUint(nil, d.GetUint64())
	case types.KindString:
		tp = tipb.ExprType_String
		val = d.GetBytes()
	case types.KindBytes:
		tp = tipb.ExprType_Bytes
		val = d.GetBytes()
	case types.KindFloat32:
		tp = tipb.ExprType_Float32
		// Float32 datums are carried in the float64 slot; encode that value.
		val = codec.EncodeFloat(nil, d.GetFloat64())
	case types.KindFloat64:
		tp = tipb.ExprType_Float64
		val = codec.EncodeFloat(nil, d.GetFloat64())
	case types.KindMysqlDuration:
		tp = tipb.ExprType_MysqlDuration
		// Durations travel as their nanosecond count encoded as int64.
		val = codec.EncodeInt(nil, int64(d.GetMysqlDuration().Duration))
	case types.KindMysqlDecimal:
		tp = tipb.ExprType_MysqlDecimal
		val = codec.EncodeDecimal(nil, d)
	default:
		return nil
	}
	if !pc.client.SupportRequestType(kv.ReqTypeSelect, int64(tp)) {
		return nil
	}
	return &tipb.Expr{Tp: tp, Val: val}
}
// columnToPBExpr encodes a column reference as a tipb.Expr carrying the
// column ID. It returns nil for column types and IDs that cannot be pushed down.
func (pc pbConverter) columnToPBExpr(column *expression.Column) *tipb.Expr {
	if !pc.client.SupportRequestType(kv.ReqTypeSelect, int64(tipb.ExprType_ColumnRef)) {
		return nil
	}
	// These column types have no push-down representation.
	switch column.GetType().Tp {
	case mysql.TypeBit, mysql.TypeSet, mysql.TypeEnum, mysql.TypeGeometry, mysql.TypeUnspecified:
		return nil
	}
	id := column.ID
	// Zero Column ID is not a column from table, can not support for now.
	if id == 0 || id == -1 {
		return nil
	}
	return &tipb.Expr{
		Tp:  tipb.ExprType_ColumnRef,
		Val: codec.EncodeInt(nil, id)}
}
// scalarFuncToPBExpr dispatches a scalar function to the converter for its
// category (comparison, arithmetic, logical, bitwise, or builtin). Unknown
// functions yield nil and stay local.
func (pc pbConverter) scalarFuncToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	switch expr.FuncName.L {
	case ast.LT, ast.LE, ast.EQ, ast.NE, ast.GE, ast.GT,
		ast.NullEQ, ast.In, ast.Like:
		return pc.compareOpsToPBExpr(expr)
	case ast.Plus, ast.Minus, ast.Mul, ast.Div, ast.Mod, ast.IntDiv:
		return pc.arithmeticalOpsToPBExpr(expr)
	case ast.AndAnd, ast.OrOr, ast.UnaryNot, ast.LogicXor:
		return pc.logicalOpsToPBExpr(expr)
	case ast.And, ast.Or, ast.BitNeg, ast.Xor, ast.LeftShift, ast.RightShift:
		return pc.bitwiseFuncToPBExpr(expr)
	case ast.Case, ast.Coalesce, ast.If, ast.Ifnull, ast.IsNull, ast.Nullif:
		return pc.builtinFuncToPBExpr(expr)
	default:
		return nil
	}
}
// compareOpsToPBExpr maps comparison operators to their tipb expression types.
// IN and LIKE need special argument handling and are delegated.
func (pc pbConverter) compareOpsToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	var tp tipb.ExprType
	switch expr.FuncName.L {
	case ast.LT:
		tp = tipb.ExprType_LT
	case ast.LE:
		tp = tipb.ExprType_LE
	case ast.EQ:
		tp = tipb.ExprType_EQ
	case ast.NE:
		tp = tipb.ExprType_NE
	case ast.GE:
		tp = tipb.ExprType_GE
	case ast.GT:
		tp = tipb.ExprType_GT
	case ast.NullEQ:
		tp = tipb.ExprType_NullEQ
	case ast.In:
		return pc.inToPBExpr(expr)
	case ast.Like:
		return pc.likeToPBExpr(expr)
	}
	return pc.convertToPBExpr(expr, tp)
}
// likeToPBExpr converts a LIKE expression. Only simple constant patterns are
// supported: the escape character must be the default backslash, and the
// pattern may contain '%' only as its first and/or last character, with no
// '\\' or '_' anywhere. Anything else returns nil and is evaluated locally.
func (pc pbConverter) likeToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	if !pc.client.SupportRequestType(kv.ReqTypeSelect, int64(tipb.ExprType_Like)) {
		return nil
	}
	// Only patterns like 'abc', '%abc', 'abc%', '%abc%' can be converted to *tipb.Expr for now.
	escape := expr.GetArgs()[2].(*expression.Constant).Value
	if escape.IsNull() || byte(escape.GetInt64()) != '\\' {
		return nil
	}
	pattern, ok := expr.GetArgs()[1].(*expression.Constant)
	if !ok || pattern.Value.Kind() != types.KindString {
		return nil
	}
	for i, b := range pattern.Value.GetString() {
		switch b {
		case '\\', '_':
			return nil
		case '%':
			// '%' is a single byte, so byte index comparison against the
			// string length correctly detects first/last position.
			if i != 0 && i != len(pattern.Value.GetString())-1 {
				return nil
			}
		}
	}
	expr0 := pc.exprToPB(expr.GetArgs()[0])
	if expr0 == nil {
		return nil
	}
	expr1 := pc.exprToPB(expr.GetArgs()[1])
	if expr1 == nil {
		return nil
	}
	return &tipb.Expr{
		Tp:       tipb.ExprType_Like,
		Children: []*tipb.Expr{expr0, expr1}}
}
// arithmeticalOpsToPBExpr maps arithmetic operators to tipb expression types.
func (pc pbConverter) arithmeticalOpsToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	var tp tipb.ExprType
	switch expr.FuncName.L {
	case ast.Plus:
		tp = tipb.ExprType_Plus
	case ast.Minus:
		tp = tipb.ExprType_Minus
	case ast.Mul:
		tp = tipb.ExprType_Mul
	case ast.Div:
		tp = tipb.ExprType_Div
	case ast.Mod:
		tp = tipb.ExprType_Mod
	case ast.IntDiv:
		tp = tipb.ExprType_IntDiv
	}
	return pc.convertToPBExpr(expr, tp)
}
// logicalOpsToPBExpr maps logical operators (AND/OR/XOR/NOT) to tipb types.
func (pc pbConverter) logicalOpsToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	var tp tipb.ExprType
	switch expr.FuncName.L {
	case ast.AndAnd:
		tp = tipb.ExprType_And
	case ast.OrOr:
		tp = tipb.ExprType_Or
	case ast.LogicXor:
		tp = tipb.ExprType_Xor
	case ast.UnaryNot:
		tp = tipb.ExprType_Not
	}
	return pc.convertToPBExpr(expr, tp)
}
// bitwiseFuncToPBExpr maps bitwise operators to tipb types.
func (pc pbConverter) bitwiseFuncToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	var tp tipb.ExprType
	switch expr.FuncName.L {
	case ast.And:
		tp = tipb.ExprType_BitAnd
	case ast.Or:
		tp = tipb.ExprType_BitOr
	case ast.Xor:
		tp = tipb.ExprType_BitXor
	case ast.LeftShift:
		tp = tipb.ExprType_LeftShift
	case ast.RightShift:
		// Note: "RighShift" (sic) is the spelling in the tipb definition.
		tp = tipb.ExprType_RighShift
	case ast.BitNeg:
		tp = tipb.ExprType_BitNeg
	}
	return pc.convertToPBExpr(expr, tp)
}
// inToPBExpr converts `x IN (v1, v2, ...)`: the first argument becomes one
// child and the value list (constants only) becomes the second child.
func (pc pbConverter) inToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	if !pc.client.SupportRequestType(kv.ReqTypeSelect, int64(tipb.ExprType_In)) {
		return nil
	}
	pbExpr := pc.exprToPB(expr.GetArgs()[0])
	if pbExpr == nil {
		return nil
	}
	listExpr := pc.constListToPB(expr.GetArgs()[1:])
	if listExpr == nil {
		return nil
	}
	return &tipb.Expr{
		Tp:       tipb.ExprType_In,
		Children: []*tipb.Expr{pbExpr, listExpr}}
}
// constListToPB builds a ValueList expression from the IN list. Every element
// must be a constant whose datum is itself convertible; otherwise nil.
func (pc pbConverter) constListToPB(list []expression.Expression) *tipb.Expr {
	if !pc.client.SupportRequestType(kv.ReqTypeSelect, int64(tipb.ExprType_ValueList)) {
		return nil
	}
	// Only list of *expression.Constant can be push down.
	datums := make([]types.Datum, 0, len(list))
	for _, expr := range list {
		v, ok := expr.(*expression.Constant)
		if !ok {
			return nil
		}
		// The converted expr is only used as a supportability probe here.
		d := pc.datumToPBExpr(v.Value)
		if d == nil {
			return nil
		}
		datums = append(datums, v.Value)
	}
	return pc.datumsToValueList(datums)
}
// datumsToValueList sorts the datums and encodes them as a single ValueList
// expression. Lists mixing non-null datum kinds are rejected (nil).
func (pc pbConverter) datumsToValueList(datums []types.Datum) *tipb.Expr {
	// Don't push value list that has different datum kind.
	prevKind := types.KindNull
	for _, d := range datums {
		// Latch the first non-null kind seen; nulls are always allowed.
		if prevKind == types.KindNull {
			prevKind = d.Kind()
		}
		if !d.IsNull() && d.Kind() != prevKind {
			return nil
		}
	}
	err := types.SortDatums(pc.sc, datums)
	if err != nil {
		log.Error(err.Error())
		return nil
	}
	val, err := codec.EncodeValue(nil, datums...)
	if err != nil {
		log.Error(err.Error())
		return nil
	}
	return &tipb.Expr{Tp: tipb.ExprType_ValueList, Val: val}
}
// groupByItemToPB converts a GROUP BY expression to a tipb.ByItem, or nil if
// the expression cannot be pushed down.
func groupByItemToPB(sc *variable.StatementContext, client kv.Client, expr expression.Expression) *tipb.ByItem {
	pc := pbConverter{client: client, sc: sc}
	e := pc.exprToPB(expr)
	if e == nil {
		return nil
	}
	return &tipb.ByItem{Expr: e}
}
// sortByItemToPB converts an ORDER BY expression (with direction) to a
// tipb.ByItem, or nil if the expression cannot be pushed down.
func sortByItemToPB(sc *variable.StatementContext, client kv.Client, expr expression.Expression, desc bool) *tipb.ByItem {
	pc := pbConverter{client: client, sc: sc}
	e := pc.exprToPB(expr)
	if e == nil {
		return nil
	}
	return &tipb.ByItem{Expr: e, Desc: desc}
}
// aggFuncToPBExpr converts an aggregate function call to a tipb.Expr whose
// children are the converted arguments. It returns nil if the store does not
// support the aggregate or any argument fails to convert.
func aggFuncToPBExpr(sc *variable.StatementContext, client kv.Client, aggFunc expression.AggregationFunction) *tipb.Expr {
	pc := pbConverter{client: client, sc: sc}
	var tp tipb.ExprType
	switch aggFunc.GetName() {
	case ast.AggFuncCount:
		tp = tipb.ExprType_Count
	case ast.AggFuncFirstRow:
		tp = tipb.ExprType_First
	case ast.AggFuncGroupConcat:
		tp = tipb.ExprType_GroupConcat
	case ast.AggFuncMax:
		tp = tipb.ExprType_Max
	case ast.AggFuncMin:
		tp = tipb.ExprType_Min
	case ast.AggFuncSum:
		tp = tipb.ExprType_Sum
	case ast.AggFuncAvg:
		tp = tipb.ExprType_Avg
	}
	if !client.SupportRequestType(kv.ReqTypeSelect, int64(tp)) {
		return nil
	}
	children := make([]*tipb.Expr, 0, len(aggFunc.GetArgs()))
	for _, arg := range aggFunc.GetArgs() {
		pbArg := pc.exprToPB(arg)
		if pbArg == nil {
			return nil
		}
		children = append(children, pbArg)
	}
	return &tipb.Expr{Tp: tp, Children: children}
}
// builtinFuncToPBExpr dispatches the supported builtin functions to the
// control-flow or misc converters below; anything else stays local (nil).
func (pc pbConverter) builtinFuncToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	switch expr.FuncName.L {
	case ast.Case, ast.If, ast.Ifnull, ast.Nullif:
		return pc.controlFuncsToPBExpr(expr)
	case ast.Coalesce, ast.IsNull:
		return pc.otherFuncsToPBExpr(expr)
	default:
		return nil
	}
}
// otherFuncsToPBExpr maps COALESCE and ISNULL to their tipb types.
func (pc pbConverter) otherFuncsToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	var tp tipb.ExprType
	switch expr.FuncName.L {
	case ast.Coalesce:
		tp = tipb.ExprType_Coalesce
	case ast.IsNull:
		tp = tipb.ExprType_IsNull
	}
	return pc.convertToPBExpr(expr, tp)
}
// controlFuncsToPBExpr maps IF/IFNULL/CASE/NULLIF to their tipb types.
func (pc pbConverter) controlFuncsToPBExpr(expr *expression.ScalarFunction) *tipb.Expr {
	var tp tipb.ExprType
	switch expr.FuncName.L {
	case ast.If:
		tp = tipb.ExprType_If
	case ast.Ifnull:
		tp = tipb.ExprType_IfNull
	case ast.Case:
		tp = tipb.ExprType_Case
	case ast.Nullif:
		tp = tipb.ExprType_NullIf
	}
	return pc.convertToPBExpr(expr, tp)
}
func (pc pbConverter) convertToPBExpr(expr *expression.ScalarFunction, tp tipb.ExprType) *tipb.Expr {
if !pc.client.SupportRequestType(kv.ReqTypeSelect, int64(tp)) {
return nil
}
children := make([]*tipb.Expr, 0, len(expr.GetArgs()))
for _, arg := range expr.GetArgs() {
pbArg := pc.exprToPB(arg)
if pbArg == nil {
return nil
}
children = append(children, pbArg)
}
return &tipb.Expr{Tp: tp, Children: children}
} | plan/expr_to_pb.go | 0.506836 | 0.420957 | expr_to_pb.go | starcoder |
package bls12381
import (
"errors"
"math/big"
)
// fp2Temp holds pre-allocated base-field temporaries reused by fp2 methods to
// avoid per-operation allocation. Methods using them are therefore not safe
// for concurrent use on the same fp2 instance.
type fp2Temp struct {
	t [4]*fe
}
// fp2 implements arithmetic over the quadratic extension field Fp2.
type fp2 struct {
	fp2Temp
}
// newFp2Temp allocates the four scratch elements.
func newFp2Temp() fp2Temp {
	t := [4]*fe{}
	for i := 0; i < len(t); i++ {
		t[i] = &fe{}
	}
	return fp2Temp{t}
}
// newFp2 constructs an fp2 context with its own scratch space.
func newFp2() *fp2 {
	t := newFp2Temp()
	return &fp2{t}
}
// fromBytes deserializes a field element from exactly 96 bytes: the first 48
// bytes encode c1 and the last 48 bytes encode c0.
func (e *fp2) fromBytes(in []byte) (*fe2, error) {
	if len(in) != 96 {
		// Bug fix: the previous message ("should be larger than 96 bytes")
		// misstated the requirement — the input must be exactly 96 bytes.
		return nil, errors.New("input string length must be equal to 96 bytes")
	}
	c1, err := fromBytes(in[:48])
	if err != nil {
		return nil, err
	}
	c0, err := fromBytes(in[48:])
	if err != nil {
		return nil, err
	}
	return &fe2{*c0, *c1}, nil
}
// toBytes serializes a to 96 bytes, c1 first then c0 (inverse of fromBytes).
func (e *fp2) toBytes(a *fe2) []byte {
	out := make([]byte, 96)
	copy(out[:48], toBytes(&a[1]))
	copy(out[48:], toBytes(&a[0]))
	return out
}
// new returns a fresh zero element (same as zero; kept for API symmetry).
func (e *fp2) new() *fe2 {
	return new(fe2).zero()
}
// zero returns a fresh zero element.
func (e *fp2) zero() *fe2 {
	return new(fe2).zero()
}
// one returns a fresh multiplicative identity.
func (e *fp2) one() *fe2 {
	return new(fe2).one()
}
// fromMont converts both components out of Montgomery form: c = aR⁻¹.
func (e *fp2) fromMont(c, a *fe2) {
	fromMont(&c[0], &a[0])
	fromMont(&c[1], &a[1])
}
// add sets c = a + b, component-wise.
func (e *fp2) add(c, a, b *fe2) {
	add(&c[0], &a[0], &b[0])
	add(&c[1], &a[1], &b[1])
}
// addAssign sets a = a + b.
func (e *fp2) addAssign(a, b *fe2) {
	addAssign(&a[0], &b[0])
	addAssign(&a[1], &b[1])
}
// ladd is addition using the base field's lazy (reduction-relaxed) add —
// assumed from the `l` prefix; confirm against the fe implementation.
func (e *fp2) ladd(c, a, b *fe2) {
	ladd(&c[0], &a[0], &b[0])
	ladd(&c[1], &a[1], &b[1])
}
// double sets c = 2a.
func (e *fp2) double(c, a *fe2) {
	double(&c[0], &a[0])
	double(&c[1], &a[1])
}
// doubleAssign sets a = 2a.
func (e *fp2) doubleAssign(a *fe2) {
	doubleAssign(&a[0])
	doubleAssign(&a[1])
}
// ldouble is doubling via the base field's lazy doubling.
func (e *fp2) ldouble(c, a *fe2) {
	ldouble(&c[0], &a[0])
	ldouble(&c[1], &a[1])
}
// sub sets c = a - b, component-wise.
func (e *fp2) sub(c, a, b *fe2) {
	sub(&c[0], &a[0], &b[0])
	sub(&c[1], &a[1], &b[1])
}
// subAssign delegates to the base-field subAssign on each component; the
// operand roles follow the base field's convention.
func (e *fp2) subAssign(c, a *fe2) {
	subAssign(&c[0], &a[0])
	subAssign(&c[1], &a[1])
}
// neg sets c = -a.
func (e *fp2) neg(c, a *fe2) {
	neg(&c[0], &a[0])
	neg(&c[1], &a[1])
}
// conjugate sets c = (a0, -a1), the Fp2 conjugate of a = a0 + a1·u.
func (e *fp2) conjugate(c, a *fe2) {
	c[0].set(&a[0])
	neg(&c[1], &a[1])
}
// mul sets c = a * b using three base-field multiplications
// (Karatsuba-style): with u² = -1,
//   c0 = a0·b0 - a1·b1,  c1 = (a0+a1)(b0+b1) - a0·b0 - a1·b1.
// The temporaries make it safe for c to alias a or b.
func (e *fp2) mul(c, a, b *fe2) {
	t := e.t
	mul(t[1], &a[0], &b[0])
	mul(t[2], &a[1], &b[1])
	add(t[0], &a[0], &a[1])
	add(t[3], &b[0], &b[1])
	sub(&c[0], t[1], t[2])
	addAssign(t[1], t[2])
	mul(t[0], t[0], t[3])
	sub(&c[1], t[0], t[1])
}
// mulAssign sets a = a * b (same schedule as mul with c = a).
func (e *fp2) mulAssign(a, b *fe2) {
	t := e.t
	mul(t[1], &a[0], &b[0])
	mul(t[2], &a[1], &b[1])
	add(t[0], &a[0], &a[1])
	add(t[3], &b[0], &b[1])
	sub(&a[0], t[1], t[2])
	addAssign(t[1], t[2])
	mul(t[0], t[0], t[3])
	sub(&a[1], t[0], t[1])
}
// square sets c = a² via complex squaring:
//   c0 = (a0+a1)(a0-a1),  c1 = 2·a0·a1.
func (e *fp2) square(c, a *fe2) {
	t := e.t
	ladd(t[0], &a[0], &a[1])
	sub(t[1], &a[0], &a[1])
	ldouble(t[2], &a[0])
	mul(&c[0], t[0], t[1])
	mul(&c[1], t[2], &a[1])
}
// squareAssign sets a = a² (square with c = a; note a[1] is read by the last
// mul before being overwritten, so the order here is load-bearing).
func (e *fp2) squareAssign(a *fe2) {
	t := e.t
	ladd(t[0], &a[0], &a[1])
	sub(t[1], &a[0], &a[1])
	ldouble(t[2], &a[0])
	mul(&a[0], t[0], t[1])
	mul(&a[1], t[2], &a[1])
}
// mulByNonResidue sets c = a * (1 + u):
//   c0 = a0 - a1,  c1 = a0 + a1.
// Safe when c aliases a thanks to the temporary.
func (e *fp2) mulByNonResidue(c, a *fe2) {
	t := e.t
	sub(t[0], &a[0], &a[1])
	add(&c[1], &a[0], &a[1])
	c[0].set(t[0])
}
// mulByB sets c = a * 4(1+u): c0 = 4(a0 - a1), c1 = 4(a0 + a1).
// Presumably 4(1+u) is the curve's b coefficient — confirm against the G2
// curve parameters.
func (e *fp2) mulByB(c, a *fe2) {
	t := e.t
	double(t[0], &a[0])
	double(t[1], &a[1])
	doubleAssign(t[0])
	doubleAssign(t[1])
	sub(&c[0], t[0], t[1])
	add(&c[1], t[0], t[1])
}
// inverse sets c = a⁻¹ via the norm: with n = a0² + a1²,
//   c0 = a0/n,  c1 = -a1/n.
// Behavior for a == 0 follows the base-field inverse's convention.
func (e *fp2) inverse(c, a *fe2) {
	t := e.t
	square(t[0], &a[0])
	square(t[1], &a[1])
	addAssign(t[0], t[1])
	inverse(t[0], t[0])
	mul(&c[0], &a[0], t[0])
	mul(t[0], t[0], &a[1])
	neg(&c[1], t[0])
}
// mulByFq scales both components by the base-field element b.
func (e *fp2) mulByFq(c, a *fe2, b *fe) {
	mul(&c[0], &a[0], b)
	mul(&c[1], &a[1], b)
}
// exp sets c = a^s by MSB-first square-and-multiply. Not constant-time in s;
// callers must only use it with public exponents.
func (e *fp2) exp(c, a *fe2, s *big.Int) {
	z := e.one()
	for i := s.BitLen() - 1; i >= 0; i-- {
		e.square(z, z)
		if s.Bit(i) == 1 {
			e.mul(z, z, a)
		}
	}
	c.set(z)
}
// frobeniousMap sets c = a^(p^power): odd powers conjugate (negate c1),
// even powers are the identity.
func (e *fp2) frobeniousMap(c, a *fe2, power uint) {
	c[0].set(&a[0])
	if power%2 == 1 {
		neg(&c[1], &a[1])
		return
	}
	c[1].set(&a[1])
}
// frobeniousMapAssign applies frobeniousMap in place.
func (e *fp2) frobeniousMapAssign(a *fe2, power uint) {
	if power%2 == 1 {
		neg(&a[1], &a[1])
		return
	}
}
// sqrt attempts to set c = √a, returning true on success (a is a square) and
// false otherwise. The exponent constants (pMinus3Over4, pMinus1Over2) and
// negativeOne2 are package-level values; the structure matches a complex-method
// square root for extension fields — confirm against those constants.
func (e *fp2) sqrt(c, a *fe2) bool {
	u, x0, a1, alpha := &fe2{}, &fe2{}, &fe2{}, &fe2{}
	u.set(a)
	e.exp(a1, a, pMinus3Over4)
	e.square(alpha, a1)
	e.mul(alpha, alpha, a)
	e.mul(x0, a1, a)
	if alpha.equal(negativeOne2) {
		// Special case alpha = -1: the root is x0 multiplied by u (i.e. i).
		neg(&c[0], &x0[1])
		c[1].set(&x0[0])
		return true
	}
	e.add(alpha, alpha, e.one())
	e.exp(alpha, alpha, pMinus1Over2)
	e.mul(c, alpha, x0)
	// Verify the candidate: c² must equal the original input (saved in u).
	e.square(alpha, c)
	return alpha.equal(u)
}
func (e *fp2) isQuadraticNonResidue(a *fe2) bool {
c0, c1 := new(fe), new(fe)
square(c0, &a[0])
square(c1, &a[1])
add(c1, c1, c0)
return isQuadraticNonResidue(c1)
} | fp2.go | 0.510008 | 0.455683 | fp2.go | starcoder |
package swamppack
import (
"bytes"
"encoding/binary"
"fmt"
"io"
raff "github.com/piot/raff-go/src"
)
// ConstantType represents the type of constant stored.
type ConstantType uint8
const (
	ConstantTypeString              ConstantType = iota // plain string literal
	ConstantTypeResourceName                            // name referencing an external resource
	ConstantTypeInteger                                 // 32-bit signed integer
	ConstantTypeBoolean                                 // true/false
	ConstantTypeExternalFunc                            // reference to an engine-provided function
	ConstantTypeFunctionDeclaration                     // forward declaration of a pack function
)
// TypeRef is an index referring to a type signature.
type TypeRef uint16
// Function has the name, signature and opcodes for the function.
type Function struct {
	name           string
	signature      TypeRef
	parameterCount uint
	variableCount  uint
	constants      []*Constant
	opcodes        []byte
}
// NewFunction creates a new function. It panics if any constant slot is nil,
// since that indicates a compiler bug upstream.
func NewFunction(name string, signature TypeRef, parameterCount uint, variableCount uint,
	constants []*Constant, opcodes []byte) *Function {
	for index, constant := range constants {
		if constant == nil {
			panic(fmt.Sprintf("you sent in bad constants at index %v %v", index, constants))
		}
	}
	fn := &Function{
		name:           name,
		signature:      signature,
		parameterCount: parameterCount,
		variableCount:  variableCount,
		constants:      constants,
		opcodes:        opcodes,
	}
	return fn
}
// String gives a debug summary of the function header.
func (f *Function) String() string {
	return fmt.Sprintf("[fun %s signature:%v parameter:%d varcount:%d constant count:%d", f.name, f.signature,
		f.parameterCount, f.variableCount, len(f.constants))
}
// ExternalFunction represents a engine built in and external function (to the compiler).
type ExternalFunction struct {
	name           string
	signature      TypeRef
	parameterCount uint
}
// NewExternalFunction creates a new external function constant.
// NOTE(review): the signature field is never assigned here and stays at its
// zero value — confirm whether that is intentional.
func NewExternalFunction(name string, parameterCount uint) *ExternalFunction {
	return &ExternalFunction{name: name, parameterCount: parameterCount}
}
// String gives a debug summary of the external function header.
func (f *ExternalFunction) String() string {
	return fmt.Sprintf("[fun %s signature:%v parameter:%d", f.name, f.signature, f.parameterCount)
}
// FunctionDeclaration holds the function declaration header.
type FunctionDeclaration struct {
	name           string
	signature      TypeRef
	parameterCount uint
}
// NewFunctionDeclaration creates a new function declaration.
func NewFunctionDeclaration(name string, signature TypeRef, parameterCount uint) *FunctionDeclaration {
	return &FunctionDeclaration{name: name, signature: signature, parameterCount: parameterCount}
}
// String gives a debug summary of the declaration header.
func (f *FunctionDeclaration) String() string {
	return fmt.Sprintf("[fundeclaration %s signature:%v parameter:%d", f.name, f.signature, f.parameterCount)
}
// IndexPositionInFile is the position a constant was assigned when serialized.
type IndexPositionInFile = uint16
// Constant is a union of all the constant types; constantType selects which
// of the other fields is meaningful.
type Constant struct {
	v                   int32               // valid for ConstantTypeInteger
	boolean             bool                // valid for ConstantTypeBoolean
	indexPositionInFile IndexPositionInFile // set when the constant is written out
	constantType        ConstantType
	externalFunction    *ExternalFunction    // valid for ConstantTypeExternalFunc
	functionDeclaration *FunctionDeclaration // valid for ConstantTypeFunctionDeclaration
	str                 string               // valid for String / ResourceName
}
// String renders the active union member; it panics on an unknown type tag.
func (c *Constant) String() string {
	switch c.constantType {
	case ConstantTypeBoolean:
		return fmt.Sprintf("%v", c.boolean)
	case ConstantTypeInteger:
		return fmt.Sprintf("int: %v", c.v)
	case ConstantTypeExternalFunc:
		return fmt.Sprintf("externalfunc %v", c.externalFunction)
	case ConstantTypeFunctionDeclaration:
		return fmt.Sprintf("declarefunc %v", c.functionDeclaration)
	case ConstantTypeString:
		return fmt.Sprintf("'%v'", c.str)
	case ConstantTypeResourceName:
		return fmt.Sprintf("resource name '%v'", c.str)
	}
	panic(fmt.Errorf("unknown constant type %v", c.constantType))
}
// NewStringConstant creates a new string constant.
func NewStringConstant(str string) *Constant {
	return &Constant{constantType: ConstantTypeString, str: str}
}
// NewResourceNameConstant creates a new resource-name constant.
func NewResourceNameConstant(str string) *Constant {
	return &Constant{constantType: ConstantTypeResourceName, str: str}
}
// NewIntegerConstant creates a new integer constant.
func NewIntegerConstant(v int32) *Constant {
	return &Constant{constantType: ConstantTypeInteger, v: v}
}
// NewBooleanConstant creates a new boolean constant.
func NewBooleanConstant(b bool) *Constant {
	return &Constant{constantType: ConstantTypeBoolean, boolean: b}
}
// NewExternalFuncConstant creates a new external function constant reference.
func NewExternalFuncConstant(externalFunction *ExternalFunction) *Constant {
	return &Constant{constantType: ConstantTypeExternalFunc, externalFunction: externalFunction}
}
// NewFunctionDeclarationConstant creates a new function declaration constant.
func NewFunctionDeclarationConstant(functionDeclaration *FunctionDeclaration) *Constant {
	return &Constant{constantType: ConstantTypeFunctionDeclaration, functionDeclaration: functionDeclaration}
}
// FunctionRefIndex is an index into the repo's function declaration list.
type FunctionRefIndex uint16
// ConstantRepo contains all the constants, deduplicated per pool.
type ConstantRepo struct {
	stringConstants              []*Constant
	resourceNameConstants        []*Constant
	integerConstants             []*Constant
	functions                    []*Function
	externalFuncConstants        []*Constant
	functionDeclarationConstants []*Constant
	booleanConstants             []*Constant
}
// NewConstantRepo creates a new constant repo.
func NewConstantRepo() *ConstantRepo {
	return &ConstantRepo{}
}
// String gives a debug dump. Note it only covers strings, integers, functions
// and booleans — resource names, external funcs and declarations are omitted.
func (s *ConstantRepo) String() string {
	return fmt.Sprintf("%v\n%v\n%v\n%v\n", s.stringConstants, s.integerConstants, s.functions, s.booleanConstants)
}
// findString returns the interned string constant equal to str, or nil.
func (s *ConstantRepo) findString(str string) *Constant {
	for _, candidate := range s.stringConstants {
		if candidate.str == str {
			return candidate
		}
	}
	return nil
}

// AddString interns str: the existing constant is returned when present,
// otherwise a new one is created and registered.
func (s *ConstantRepo) AddString(str string) *Constant {
	if existing := s.findString(str); existing != nil {
		return existing
	}
	created := NewStringConstant(str)
	s.stringConstants = append(s.stringConstants, created)
	return created
}
// findResourceName returns the interned resource-name constant equal to str, or nil.
func (s *ConstantRepo) findResourceName(str string) *Constant {
	for _, candidate := range s.resourceNameConstants {
		if candidate.str == str {
			return candidate
		}
	}
	return nil
}

// AddResourceName interns a resource name, reusing an existing constant when possible.
func (s *ConstantRepo) AddResourceName(str string) *Constant {
	if existing := s.findResourceName(str); existing != nil {
		return existing
	}
	created := NewResourceNameConstant(str)
	s.resourceNameConstants = append(s.resourceNameConstants, created)
	return created
}
// findInteger returns the interned integer constant equal to v, or nil.
func (s *ConstantRepo) findInteger(v int32) *Constant {
	for _, candidate := range s.integerConstants {
		if candidate.v == v {
			return candidate
		}
	}
	return nil
}

// AddInteger interns an integer value, reusing an existing constant when possible.
func (s *ConstantRepo) AddInteger(v int32) *Constant {
	if existing := s.findInteger(v); existing != nil {
		return existing
	}
	created := NewIntegerConstant(v)
	s.integerConstants = append(s.integerConstants, created)
	return created
}
// findBoolean returns the interned boolean constant equal to b, or nil.
func (s *ConstantRepo) findBoolean(b bool) *Constant {
	for _, candidate := range s.booleanConstants {
		if candidate.boolean == b {
			return candidate
		}
	}
	return nil
}

// AddBoolean interns a boolean value (at most two constants ever exist).
func (s *ConstantRepo) AddBoolean(b bool) *Constant {
	if existing := s.findBoolean(b); existing != nil {
		return existing
	}
	created := NewBooleanConstant(b)
	s.booleanConstants = append(s.booleanConstants, created)
	return created
}
// AddFunctionReference resolves a reference to an already-declared function;
// it is an error to reference a name that has not been declared first.
func (s *ConstantRepo) AddFunctionReference(uniqueFullyQualifiedName string) (*Constant, error) {
	foundConstant := s.FindFunctionDeclaration(uniqueFullyQualifiedName)
	if foundConstant == nil {
		return nil, fmt.Errorf("couldn't find a previous declaration for '%v' and that is required", uniqueFullyQualifiedName)
	}
	return foundConstant, nil
}
// AddExternalFunctionReference resolves a reference to an already-registered
// external function; the registration must exist beforehand.
func (s *ConstantRepo) AddExternalFunctionReference(uniqueFullyQualifiedName string) (*Constant, error) {
	foundConstant := s.FindExternalFunction(uniqueFullyQualifiedName)
	if foundConstant == nil {
		return nil, fmt.Errorf("couldn't find a previous external function declaration for '%v' and that is required", uniqueFullyQualifiedName)
	}
	return foundConstant, nil
}
// FindExternalFunction returns the external-function constant registered under
// name, or nil when none exists.
func (s *ConstantRepo) FindExternalFunction(name string) *Constant {
	for _, candidate := range s.externalFuncConstants {
		if candidate.externalFunction.name == name {
			return candidate
		}
	}
	return nil
}

// FindFunctionDeclaration returns the declaration constant registered under
// name, or nil when none exists.
func (s *ConstantRepo) FindFunctionDeclaration(name string) *Constant {
	for _, candidate := range s.functionDeclarationConstants {
		if candidate.functionDeclaration.name == name {
			return candidate
		}
	}
	return nil
}

// FindFunctionDeclarationByIndex returns the declaration constant stored at
// index; the index must be in range.
func (s *ConstantRepo) FindFunctionDeclarationByIndex(index FunctionRefIndex) *Constant {
	return s.functionDeclarationConstants[index]
}
// AddFunction registers a fully compiled function body (no deduplication).
func (s *ConstantRepo) AddFunction(name string, signature TypeRef, parameterCount uint, variableCount uint,
	constants []*Constant, opcodes []byte) *Function {
	f := NewFunction(name, signature, parameterCount, variableCount, constants, opcodes)
	s.functions = append(s.functions, f)
	return f
}
// AddExternalFunction registers an external function by name, reusing an
// existing registration when present.
func (s *ConstantRepo) AddExternalFunction(name string, parameterCount uint) *Constant {
	foundExternalFuncConst := s.FindExternalFunction(name)
	if foundExternalFuncConst == nil {
		f := NewExternalFunction(name, parameterCount)
		foundExternalFuncConst = NewExternalFuncConstant(f)
		s.externalFuncConstants = append(s.externalFuncConstants, foundExternalFuncConst)
	}
	return foundExternalFuncConst
}
// AddFunctionDeclaration registers a function declaration by name, reusing an
// existing declaration when present.
func (s *ConstantRepo) AddFunctionDeclaration(name string, signature TypeRef, parameterCount uint) *Constant {
	foundFunctionDeclarationConst := s.FindFunctionDeclaration(name)
	if foundFunctionDeclarationConst == nil {
		f := NewFunctionDeclaration(name, signature, parameterCount)
		foundFunctionDeclarationConst = NewFunctionDeclarationConstant(f)
		s.functionDeclarationConstants = append(s.functionDeclarationConstants, foundFunctionDeclarationConst)
	}
	return foundFunctionDeclarationConst
}
// writeBools serializes the boolean constant pool: a chunk marker, a one-byte
// count, then one 0/1 octet per constant. It also records each constant's
// serialized index (indexOffset + position) and returns the number written.
// NOTE(review): byte(count) silently truncates pools larger than 255 entries,
// and error paths return -1 or 0 inconsistently — confirm callers only check err.
func writeBools(booleanConstants []*Constant, writer io.Writer, indexOffset int) (int, error) {
	count := len(booleanConstants)
	header := []byte{byte(count)}
	booleanIcon := raff.FourOctets{0xF0, 0x9F, 0x90, 0x9C}
	if err := raff.WriteInternalChunkMarker(writer, booleanIcon); err != nil {
		return -1, err
	}
	if _, writeErr := writer.Write(header); writeErr != nil {
		return 0, writeErr
	}
	octets := make([]byte, count)
	for index, b := range booleanConstants {
		if b.constantType != ConstantTypeBoolean {
			panic("wrong boolean type")
		}
		valueToWrite := uint8(0)
		if b.boolean {
			valueToWrite = uint8(1)
		}
		b.indexPositionInFile = IndexPositionInFile(indexOffset + index)
		octets[index] = valueToWrite
	}
	if _, err := writer.Write(octets); err != nil {
		return 0, err
	}
	return count, nil
}
// writeIntegers serializes the integer constant pool: a chunk marker, a
// one-byte count, then each value as a big-endian int32. Each constant's
// serialized index (indexOffset + position) is recorded. Returns the number
// of constants written.
func writeIntegers(integerConstants []*Constant, writer io.Writer, indexOffset int) (int, error) {
	count := len(integerConstants)
	header := []byte{byte(count)}
	integerIcon := raff.FourOctets{0xF0, 0x9F, 0x94, 0xA2}
	if err := raff.WriteInternalChunkMarker(writer, integerIcon); err != nil {
		return -1, err
	}
	if _, writeErr := writer.Write(header); writeErr != nil {
		// Bug fix: this previously panicked on an ordinary I/O failure;
		// return the error like the sibling write* functions do.
		return 0, writeErr
	}
	for index, constant := range integerConstants {
		if constant.constantType != ConstantTypeInteger {
			panic("wrong integer type")
		}
		constant.indexPositionInFile = IndexPositionInFile(indexOffset + index)
		if writeErr := binary.Write(writer, binary.BigEndian, constant.v); writeErr != nil {
			return 0, writeErr
		}
	}
	return count, nil
}
func writeString(str string, writer io.Writer) error {
stringOctets := []byte(str)
if _, writeErr := writer.Write([]byte{byte(len(str))}); writeErr != nil {
return writeErr
}
if _, writeErr := writer.Write(stringOctets); writeErr != nil {
return writeErr
}
return nil
}
// writeTypeRef writes a type reference as a big-endian uint16.
func writeTypeRef(typeRef TypeRef, writer io.Writer) error {
	var encoded [2]byte
	binary.BigEndian.PutUint16(encoded[:], uint16(typeRef))
	_, err := writer.Write(encoded[:])
	return err
}
// writeStrings serializes the string constant pool as an internal chunk:
// a marker, a one-octet count, then each string length-prefixed.
// Each constant is tagged with its position in the file, starting at
// indexOffset. Returns the number of constants written.
func writeStrings(stringConstants []*Constant, writer io.Writer, indexOffset int) (int, error) {
	count := len(stringConstants)
	if err := raff.WriteInternalChunkMarker(writer, raff.FourOctets{0xF0, 0x9F, 0x8E, 0xBB}); err != nil {
		return -1, err
	}
	if _, err := writer.Write([]byte{byte(count)}); err != nil {
		return 0, err
	}
	for i, constant := range stringConstants {
		if constant.constantType != ConstantTypeString {
			panic("wrong string type")
		}
		constant.indexPositionInFile = IndexPositionInFile(indexOffset + i)
		if err := writeString(constant.str, writer); err != nil {
			return 0, err
		}
	}
	return count, nil
}
// writeResourceNames serializes the resource-name constant pool as an
// internal chunk: a marker, a one-octet count, then each name
// length-prefixed. Each constant is tagged with its position in the file,
// starting at indexOffset. Returns the number of constants written.
func writeResourceNames(resourceNameConstants []*Constant, writer io.Writer, indexOffset int) (int, error) {
	count := len(resourceNameConstants)
	if err := raff.WriteInternalChunkMarker(writer, raff.FourOctets{0xF0, 0x9F, 0x8C, 0xB3}); err != nil {
		return -1, err
	}
	if _, err := writer.Write([]byte{byte(count)}); err != nil {
		return 0, err
	}
	for i, constant := range resourceNameConstants {
		if constant.constantType != ConstantTypeResourceName {
			panic("wrong resourceType type")
		}
		constant.indexPositionInFile = IndexPositionInFile(indexOffset + i)
		if err := writeString(constant.str, writer); err != nil {
			return 0, err
		}
	}
	return count, nil
}
// writeExternalFunctions serializes the external-function constant pool:
// a marker, a one-octet count, then per function a one-octet parameter
// count, its length-prefixed name and its 16-bit signature type ref.
// Each constant is tagged with its position in the file, starting at
// indexOffset. Returns the number of constants written.
func writeExternalFunctions(externalFuncConstants []*Constant, writer io.Writer, indexOffset int) (int, error) {
	count := len(externalFuncConstants)
	if err := raff.WriteInternalChunkMarker(writer, raff.FourOctets{0xF0, 0x9F, 0x91, 0xBE}); err != nil {
		return -1, err
	}
	if _, err := writer.Write([]byte{byte(count)}); err != nil {
		return 0, err
	}
	for i, constant := range externalFuncConstants {
		if constant.constantType != ConstantTypeExternalFunc {
			panic("external_func: wrong func type")
		}
		constant.indexPositionInFile = IndexPositionInFile(indexOffset + i)
		fn := constant.externalFunction
		if _, err := writer.Write([]byte{byte(fn.parameterCount)}); err != nil {
			return 0, err
		}
		if err := writeString(fn.name, writer); err != nil {
			return 0, err
		}
		if err := writeTypeRef(fn.signature, writer); err != nil {
			return 0, err
		}
	}
	return count, nil
}
// writeFunctionDeclarations serializes the function-declaration constant
// pool: a marker, a big-endian 32-bit count, then per declaration a
// one-octet parameter count, its length-prefixed name and its 16-bit
// signature type ref. Each constant is tagged with its position in the
// file, starting at indexOffset. Returns the number written.
func writeFunctionDeclarations(functionDeclarationConstants []*Constant, writer io.Writer, indexOffset int) (int, error) {
	count := len(functionDeclarationConstants)
	header := []byte{0, 0, 0, 0}
	functionDeclarationIcon := raff.FourOctets{0xF0, 0x9F, 0x9B, 0x82}
	if err := raff.WriteInternalChunkMarker(writer, functionDeclarationIcon); err != nil {
		return -1, err
	}
	binary.BigEndian.PutUint32(header[0:], uint32(count))
	if _, writeErr := writer.Write(header); writeErr != nil {
		return 0, writeErr
	}
	for index, constant := range functionDeclarationConstants {
		if constant.constantType != ConstantTypeFunctionDeclaration {
			// The previous message said "external_func", a copy-paste slip
			// from writeExternalFunctions.
			panic("function_declaration: wrong func type")
		}
		constant.indexPositionInFile = IndexPositionInFile(indexOffset + index)
		f := constant.functionDeclaration
		subHeader := []byte{byte(f.parameterCount)}
		if _, writeErr := writer.Write(subHeader); writeErr != nil {
			return 0, writeErr
		}
		if writeErr := writeString(f.name, writer); writeErr != nil {
			return 0, writeErr
		}
		if writeErr := writeTypeRef(f.signature, writer); writeErr != nil {
			return 0, writeErr
		}
	}
	return count, nil
}
// writeFunctions serializes every function: a marker, a big-endian 32-bit
// function count, then per function a four-octet header (parameter count,
// variable count, one reserved octet, constant count), the 16-bit file
// indices of its constants, and finally the opcode stream prefixed by a
// big-endian 16-bit length. Returns the number of functions written.
func writeFunctions(functions []*Function, writer io.Writer) (int, error) {
	count := len(functions)
	header := []byte{0, 0, 0, 0}
	binary.BigEndian.PutUint32(header[0:], uint32(count))
	functionIcon := raff.FourOctets{0xF0, 0x9F, 0x90, 0x8A}
	if err := raff.WriteInternalChunkMarker(writer, functionIcon); err != nil {
		return -1, err
	}
	if _, writeErr := writer.Write(header); writeErr != nil {
		return 0, writeErr
	}
	for _, f := range functions {
		header := []byte{
			byte(f.parameterCount), byte(f.variableCount), byte(0),
			byte(len(f.constants)),
		} // was temp count
		if _, writeErr := writer.Write(header); writeErr != nil {
			return 0, writeErr
		}
		for _, subConstant := range f.constants {
			if subConstant == nil {
				panic(fmt.Sprintf("how can subconstant be nil? %v", f.constants))
			}
			// Every referenced constant must already have been assigned a
			// file position by one of the pool serializers run earlier in
			// packCode; 0xffff marks an unassigned position.
			indexInFile := subConstant.indexPositionInFile
			const NotSetPosition IndexPositionInFile = 0xffff
			if indexInFile == NotSetPosition {
				panic(fmt.Errorf("wrong index for constant %v in function %v", subConstant, f))
			}
			constantIndexBigEndian := []byte{0, 0}
			binary.BigEndian.PutUint16(constantIndexBigEndian, indexInFile)
			if _, writeErr := writer.Write(constantIndexBigEndian); writeErr != nil {
				return 0, writeErr
			}
		}
		opcodeCountHeader := []byte{0, 0}
		binary.BigEndian.PutUint16(opcodeCountHeader, uint16(len(f.opcodes)))
		if _, writeErr := writer.Write(opcodeCountHeader); writeErr != nil {
			return 0, writeErr
		}
		if _, writeErr := writer.Write(f.opcodes); writeErr != nil {
			return 0, writeErr
		}
	}
	return count, nil
}
// Version is a semantic version triple (major.minor.patch), serialized
// as three consecutive octets by writeVersion.
type Version struct {
	Major uint8
	Minor uint8
	Patch uint8
}
// writeChunkHeader writes a RAFF chunk with the given icon, name and
// optional payload. It delegates directly to raff.WriteChunk; the
// previous if-err/return-err/return-nil sequence was redundant.
func writeChunkHeader(writer io.Writer, icon raff.FourOctets, name raff.FourOctets, payload []byte) error {
	return raff.WriteChunk(writer, icon, name, payload)
}
func writePackHeader(writer io.Writer) error {
name := raff.FourOctets{'s', 'p', 'k', '4'}
packetIcon := raff.FourOctets{0xF0, 0x9F, 0x93, 0xA6}
return writeChunkHeader(writer, packetIcon, name, nil)
}
// writeVersion writes the version as three octets: major, minor, patch.
func writeVersion(writer io.Writer, version Version) error {
	_, err := writer.Write([]byte{version.Major, version.Minor, version.Patch})
	return err
}
func writeTypeInfo(writer io.Writer, payload []byte) error {
name := raff.FourOctets{'s', 't', 'i', '0'}
packetIcon := raff.FourOctets{0xF0, 0x9F, 0x93, 0x9C}
return writeChunkHeader(writer, packetIcon, name, payload)
}
func writeCodeChunk(writer io.Writer, payload []byte) error {
name := raff.FourOctets{'s', 'c', 'd', '0'}
packetIcon := raff.FourOctets{0xF0, 0x9F, 0x92, 0xBB}
return writeChunkHeader(writer, packetIcon, name, payload)
}
// packCode serializes every constant pool followed by the function bodies
// into a single payload for the code chunk. The pools are written in a
// fixed order (external functions, function declarations, booleans,
// integers, strings, resource names); indexOffset accumulates the running
// count so each constant receives a unique position in the file. The
// counts returned by the last two writers are not needed further, so
// they are discarded.
func packCode(constants *ConstantRepo) ([]byte, error) {
	var err error
	var buf bytes.Buffer
	indexOffset := 0
	offset := 0
	offset, err = writeExternalFunctions(constants.externalFuncConstants, &buf, indexOffset)
	if err != nil {
		return nil, err
	}
	indexOffset += offset
	offset, err = writeFunctionDeclarations(constants.functionDeclarationConstants, &buf, indexOffset)
	if err != nil {
		return nil, err
	}
	indexOffset += offset
	offset, err = writeBools(constants.booleanConstants, &buf, indexOffset)
	if err != nil {
		return nil, err
	}
	indexOffset += offset
	offset, err = writeIntegers(constants.integerConstants, &buf, indexOffset)
	if err != nil {
		return nil, err
	}
	indexOffset += offset
	offset, err = writeStrings(constants.stringConstants, &buf, indexOffset)
	if err != nil {
		return nil, err
	}
	indexOffset += offset
	if _, err := writeResourceNames(constants.resourceNameConstants, &buf, indexOffset); err != nil {
		return nil, err
	}
	if _, err := writeFunctions(constants.functions, &buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// writeCode packs all constants and functions, then writes the result as
// the code chunk.
func writeCode(writer io.Writer, constants *ConstantRepo) error {
	payload, err := packCode(constants)
	if err != nil {
		return err
	}
	return writeCodeChunk(writer, payload)
}
// Pack writes all constants to a .swamp-pack file.
func Pack(constants *ConstantRepo, typeInfo []byte) ([]byte, error) {
var buf bytes.Buffer
if err := raff.WriteHeader(&buf); err != nil {
return nil, err
}
if writeErr := writePackHeader(&buf); writeErr != nil {
return nil, writeErr
}
if writeErr := writeTypeInfo(&buf, typeInfo); writeErr != nil {
return nil, writeErr
}
if writeErr := writeCode(&buf, constants); writeErr != nil {
return nil, writeErr
}
return buf.Bytes(), nil
} | lib/pack.go | 0.760384 | 0.403508 | pack.go | starcoder |
package entry
import (
"github.com/iancoleman/orderedmap"
)
// OrderedMap is an ordered map of entries. It wraps
// orderedmap.OrderedMap, restricting stored values to *Entry and
// preserving key insertion order.
type OrderedMap struct {
	orderedMap *orderedmap.OrderedMap
}
// NewOrderedMap creates a new, empty OrderedMap of entries.
func NewOrderedMap() *OrderedMap {
	return &OrderedMap{orderedMap: orderedmap.New()}
}
// NewOrderedMapFromEntries creates a new OrderedMap of entries from a
// slice. Nil entries are skipped; each entry is keyed by its hash string.
func NewOrderedMapFromEntries(entries []*Entry) *OrderedMap {
	result := NewOrderedMap()
	for _, entry := range entries {
		if entry != nil {
			result.Set(entry.Hash.String(), entry)
		}
	}
	return result
}
// Merge will fusion two OrderedMap of entries, returning a new map that
// holds o's entries followed by other's (other wins on duplicate keys).
func (o *OrderedMap) Merge(other *OrderedMap) *OrderedMap {
	merged := o.Copy()
	for _, key := range other.Keys() {
		entry, _ := other.Get(key)
		merged.Set(key, entry)
	}
	return merged
}
// Copy creates a shallow copy of an OrderedMap (the *Entry values are shared).
func (o *OrderedMap) Copy() *OrderedMap {
	duplicate := NewOrderedMap()
	for _, key := range o.Keys() {
		entry, _ := o.Get(key)
		duplicate.Set(key, entry)
	}
	return duplicate
}
// Get retrieves an Entry using its key. The boolean is false when the key
// is absent or when the stored value is not an *Entry.
func (o *OrderedMap) Get(key string) (*Entry, bool) {
	raw, found := o.orderedMap.Get(key)
	entry, isEntry := raw.(*Entry)
	return entry, found && isEntry
}
// UnsafeGet retrieves an Entry using its key, returning nil when not found.
func (o *OrderedMap) UnsafeGet(key string) *Entry {
	entry, _ := o.Get(key)
	return entry
}
// Set stores value in the map under the given key.
func (o *OrderedMap) Set(key string, value *Entry) {
	o.orderedMap.Set(key, value)
}
// Slice returns the map's values as a slice, in key order.
func (o *OrderedMap) Slice() []*Entry {
	keys := o.orderedMap.Keys()
	out := make([]*Entry, 0, len(keys))
	for _, key := range keys {
		out = append(out, o.UnsafeGet(key))
	}
	return out
}
// Delete removes the Entry stored under the given key, if any.
func (o *OrderedMap) Delete(key string) {
	o.orderedMap.Delete(key)
}
// Keys retrieves the ordered list of keys currently in the map.
func (o *OrderedMap) Keys() []string {
	return o.orderedMap.Keys()
}
// SortKeys reorders the map's keys in place using the given sort func.
func (o *OrderedMap) SortKeys(sortFunc func(keys []string)) {
	o.orderedMap.SortKeys(sortFunc)
}
// Sort reorders the map in place using the given pairwise less func.
func (o *OrderedMap) Sort(lessFunc func(a *orderedmap.Pair, b *orderedmap.Pair) bool) {
	o.orderedMap.Sort(lessFunc)
}
// Len gets the number of entries in the map.
func (o *OrderedMap) Len() int {
	return len(o.Keys())
}
// At gets an item at the given index in the map, returns nil if not found.
func (o *OrderedMap) At(index uint) *Entry {
keys := o.Keys()
if uint(len(keys)) < index {
return nil
}
return o.UnsafeGet(keys[index])
} | entry/entry_map.go | 0.844505 | 0.474509 | entry_map.go | starcoder |
package xidenticon
import (
"crypto/md5"
"encoding/hex"
"fmt"
"image"
"image/color"
"image/draw"
)
const (
	// tilesPerDimension is the identicon grid size: 5x5 tiles per image.
	tilesPerDimension = 5
)

var (
	// defaultBackgroundColor is used when New receives nil Options.
	defaultBackgroundColor = RGB(240, 240, 240)
	// defaultImageSize is the square output size in pixels, used when
	// Options.ImageSize is zero or negative.
	defaultImageSize = 100
)
// Options control some inner mechanics of identicon generation.
type Options struct {
	BackgroundColor color.NRGBA // fill color behind the tiles
	Debug           bool        // when true, tile decisions are printed to stdout
	ImageSize       int         // output width/height in pixels; defaults to defaultImageSize
}
// Identicon defines an identicon. It implements image.Image over its own
// pixel buffer; Pix, Stride and Rect mirror image.NRGBA's layout.
type Identicon struct {
	Color   color.Color // foreground tile color, derived from the hash tail
	Hash    []byte      // MD5 digest of ID
	ID      string      // input string the identicon was generated from
	Options *Options
	Tiles   [][]bool // Tiles[x][y]; true means tile (x, y) is drawn
	// Pix holds the image's pixels, in R, G, B, A order. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
}
// New returns a new identicon based on given ID string. When opts is nil,
// defaults are used; a non-positive ImageSize is replaced by the default
// size. The image is rendered immediately.
func New(ID string, opts *Options) (*Identicon, error) {
	if opts == nil {
		opts = &Options{BackgroundColor: defaultBackgroundColor}
	}
	if opts.ImageSize <= 0 {
		opts.ImageSize = defaultImageSize
	}
	bounds := image.Rect(0, 0, opts.ImageSize, opts.ImageSize)
	width, height := bounds.Dx(), bounds.Dy()
	ic := &Identicon{
		ID:      ID,
		Hash:    MD5(ID),
		Options: opts,
		Pix:     make([]uint8, 4*width*height),
		Rect:    bounds,
		Stride:  4 * width,
	}
	ic.generateImage()
	return ic, nil
}
// ColorModel returns the Image's color model, which is always NRGBA.
func (ic *Identicon) ColorModel() color.Model { return color.NRGBAModel }
// Bounds returns the domain for which At can return non-zero color.
func (ic *Identicon) Bounds() image.Rectangle { return ic.Rect }
// At returns the color of the pixel at (x, y).
// At(Bounds().Min.X, Bounds().Min.Y) returns the upper-left pixel of the
// grid; At(Bounds().Max.X-1, Bounds().Max.Y-1) the lower-right one.
func (ic *Identicon) At(x, y int) color.Color { return ic.NRGBAAt(x, y) }
// NRGBAAt returns the color of the pixel at (x, y) as color.NRGBA;
// points outside the bounds yield the zero color.
func (ic *Identicon) NRGBAAt(x, y int) color.NRGBA {
	if !(image.Point{X: x, Y: y}.In(ic.Rect)) {
		return color.NRGBA{}
	}
	off := ic.PixOffset(x, y)
	return color.NRGBA{R: ic.Pix[off], G: ic.Pix[off+1], B: ic.Pix[off+2], A: ic.Pix[off+3]}
}
// PixOffset returns the index of the first element of Pix that
// corresponds to the pixel at (x, y).
func (ic *Identicon) PixOffset(x, y int) int {
	dy := y - ic.Rect.Min.Y
	dx := x - ic.Rect.Min.X
	return dy*ic.Stride + dx*4
}
// Set stores given color at position (x, y); writes outside the bounds
// are silently ignored.
func (ic *Identicon) Set(x, y int, c color.Color) {
	if !(image.Point{X: x, Y: y}.In(ic.Rect)) {
		return
	}
	off := ic.PixOffset(x, y)
	nrgba := ic.ColorModel().Convert(c).(color.NRGBA)
	ic.Pix[off] = nrgba.R
	ic.Pix[off+1] = nrgba.G
	ic.Pix[off+2] = nrgba.B
	ic.Pix[off+3] = nrgba.A
}
// HashString returns the identicon's hash as a lowercase hex string.
func (ic *Identicon) HashString() string {
	return hex.EncodeToString(ic.Hash)
}
// generateImage renders the identicon into its pixel buffer: it derives
// the tile grid and color from the hash, fills the background, then
// draws every active tile.
func (ic *Identicon) generateImage() {
	ic.populateTiles()
	ic.defineColor()
	if ic.Options.Debug {
		ic.debugPrintTiles()
	}
	// Background fill
	draw.Draw(ic, ic.Bounds(), &image.Uniform{C: ic.Options.BackgroundColor}, image.ZP, draw.Src)
	// Draw each tile that the hash switched on
	for x := 0; x < tilesPerDimension; x++ {
		for y := 0; y < tilesPerDimension; y++ {
			if ic.Tiles[x][y] {
				ic.drawTile(x, y)
			}
		}
	}
}
// drawTile fills the square covered by tile (xTile, yTile) with the
// identicon's foreground color.
func (ic *Identicon) drawTile(xTile, yTile int) {
	side := ic.Options.ImageSize / tilesPerDimension
	x0, y0 := xTile*side, yTile*side
	rect := image.Rect(x0, y0, x0+side, y0+side)
	// @todo possibly faster to just iterate pixels and use ic.Set(), benchmark
	draw.Draw(ic, rect, &image.Uniform{C: ic.Color}, image.ZP, draw.Src)
}
// populateTiles derives the 5x5 tile grid from the hash.
//
// The first 15 bytes of the hash define tiles: bytes 0-9 fill the two
// leftmost columns (later mirrored onto the two rightmost), bytes 10-14
// fill the middle column. The last 3 hash bytes are used by defineColor
// for the RGB values.
func (ic *Identicon) populateTiles() {
	tiles := make([][]bool, tilesPerDimension)
	for i := range tiles {
		tiles[i] = make([]bool, tilesPerDimension)
	}
	ic.Tiles = tiles
	// Bytes 0-14 map one-to-one onto tile positions via posToXY; the two
	// original loops (0-9 "left" and 10-14 "middle") ran the exact same
	// statement, so they are merged into one.
	var i int8
	for i = 0; i < 15; i++ {
		ic.setTileValue(i, ic.Hash[i])
	}
	// Mirror the left columns to the right
	ic.mirror()
}
// setTileValue switches the tile at position pos on or off depending on
// bit 1 of the hash byte b.
func (ic *Identicon) setTileValue(pos int8, b byte) {
	x, y := posToXY(pos)
	ic.Tiles[x][y] = b&2 != 0
}
// mirror copies the two leftmost tile columns onto the two rightmost,
// producing the identicon's horizontal symmetry.
func (ic *Identicon) mirror() {
	for x := 0; x <= 1; x++ {
		target := tilesPerDimension - 1 - x // mirror offset for cols
		for y := 0; y < tilesPerDimension; y++ {
			ic.Tiles[target][y] = ic.Tiles[x][y]
			if ic.Options.Debug {
				fmt.Printf("Mirroring %d:%d to %d:%d (%v)\n", x, y, target, y, ic.Tiles[x][y])
			}
		}
	}
}
// defineColor derives the opaque foreground color from the last three
// hash bytes, used in reverse order as R, G, B.
// @todo too random? custom palette?
func (ic *Identicon) defineColor() {
	ic.Color = color.NRGBA{
		R: ic.Hash[15],
		G: ic.Hash[14],
		B: ic.Hash[13],
		A: 255,
	}
}
// debugPrintTiles prints every tile's on/off state to stdout.
func (ic *Identicon) debugPrintTiles() {
	for x := range ic.Tiles {
		for y, active := range ic.Tiles[x] {
			fmt.Printf("Tile %d:%d = %v\n", x, y, active)
		}
	}
}
// MD5 returns MD5 hash of given input string as byte slice
func MD5(text string) []byte {
hasher := md5.New()
hasher.Write([]byte(text))
hasher.Size()
return hasher.Sum(nil)
}
// RGB returns color.NRGBA struct for given red, green and blue values
func RGB(r, g, b uint8) color.NRGBA {
return color.NRGBA{R: r, G: g, B: b, A: 255}
}
// posToXY maps a hash-byte position (0-14) to a tile coordinate.
// Positions 0-9 alternate between columns 0 and 1, descending two rows
// per pair; positions 10-14 fill the middle column (x=2) top to bottom.
func posToXY(pos int8) (x, y int) {
	if pos < 10 {
		// The two leftmost cols: even positions -> col 0, odd -> col 1.
		if pos%2 != 0 {
			x = 1
		}
		y = int(pos) / 2
	} else {
		// Middle col. The previous formula int(float32(pos)/3.0) mapped
		// the five positions onto rows 3,3,4,4,4 only, leaving rows 0-2
		// of the middle column permanently empty and overwriting rows 3-4.
		x = 2
		y = int(pos) - 10
	}
	return
}
package encoding
import (
"bytes"
"encoding/binary"
"io"
"github.com/lindb/lindb/pkg/stream"
)
// FixedOffsetEncoder represents the offset encoder with fixed length:
// every value is serialized with the same octet width, derived from the
// largest value added so far.
type FixedOffsetEncoder struct {
	values []uint32             // offsets in insertion order
	buf    *bytes.Buffer        // internal buffer returned by MarshalBinary
	max    uint32               // largest value added; determines the encoded width
	bw     *stream.BufferWriter // buffered writer over buf; WriteTo emits the width octet through it
}
// NewFixedOffsetEncoder creates the fixed length offset encoder.
func NewFixedOffsetEncoder() *FixedOffsetEncoder {
	buf := &bytes.Buffer{}
	return &FixedOffsetEncoder{
		buf: buf,
		bw:  stream.NewBufferWriter(buf),
	}
}
// IsEmpty reports whether no offsets have been added yet.
func (e *FixedOffsetEncoder) IsEmpty() bool {
	return len(e.values) == 0
}
// Size returns the number of offsets added so far.
func (e *FixedOffsetEncoder) Size() int {
	return len(e.values)
}
// Reset resets the encoder context for reuse: it resets the buffered
// writer and the running maximum, and truncates the values slice while
// keeping its capacity.
// NOTE(review): e.buf itself is not explicitly reset here; this relies on
// bw.Reset clearing the underlying buffer — confirm against
// stream.BufferWriter before reusing an encoder after MarshalBinary.
func (e *FixedOffsetEncoder) Reset() {
	e.bw.Reset()
	e.max = 0
	e.values = e.values[:0]
}
// Add appends the offset value and tracks the running maximum.
func (e *FixedOffsetEncoder) Add(v uint32) {
	e.values = append(e.values, v)
	if v > e.max {
		e.max = v
	}
}
// FromValues resets the encoder, then initializes it with multiple values.
// The given slice is adopted directly, not copied.
func (e *FixedOffsetEncoder) FromValues(values []uint32) {
	e.Reset()
	e.values = values
	for _, v := range values {
		if v > e.max {
			e.max = v
		}
	}
}
// MarshalBinary marshals the values to binary and returns the contents of
// the encoder's internal buffer. The WriteTo error is deliberately
// discarded: the target is the internal bytes.Buffer, whose Write never
// returns an error.
func (e *FixedOffsetEncoder) MarshalBinary() []byte {
	_ = e.WriteTo(e.buf)
	return e.buf.Bytes()
}
// WriteTo writes the encoded offsets: a single octet giving the per-value
// width (the minimum width that holds the maximum value), followed by
// each value little-endian truncated to that width. Nothing is written
// for an empty encoder.
//
// NOTE(review): the width octet goes through e.bw (which wraps the
// encoder's internal buffer) while the values go to the writer argument;
// they only land in the same stream when writer is e.buf, as in
// MarshalBinary — confirm before passing any other writer.
func (e *FixedOffsetEncoder) WriteTo(writer io.Writer) error {
	if len(e.values) == 0 {
		return nil
	}
	width := Uint32MinWidth(e.max)
	// fixed value width
	e.bw.PutByte(byte(width))
	// put all values with fixed length
	buf := make([]byte, 4)
	for _, value := range e.values {
		binary.LittleEndian.PutUint32(buf, value)
		if _, err := writer.Write(buf[:width]); err != nil {
			return err
		}
	}
	return nil
}
// FixedOffsetDecoder represents the fixed offset decoder; it supports
// random reads of an offset by index.
type FixedOffsetDecoder struct {
	buf     []byte // encoded values (the leading width octet is stripped)
	width   int    // octets per value, taken from the input's first octet
	scratch []byte // 4-octet scratch used to widen values while decoding
}
// NewFixedOffsetDecoder creates the fixed offset decoder from an encoded
// block whose first octet holds the per-value width. Empty input yields a
// decoder that reports size 0 and fails every Get.
func NewFixedOffsetDecoder(buf []byte) *FixedOffsetDecoder {
	if len(buf) == 0 {
		return &FixedOffsetDecoder{}
	}
	return &FixedOffsetDecoder{
		buf:     buf[1:],
		width:   int(buf[0]),
		scratch: make([]byte, 4),
	}
}
// ValueWidth returns the octet width shared by all stored values.
func (d *FixedOffsetDecoder) ValueWidth() int {
	return d.width
}
// Size returns the number of encoded offset values.
func (d *FixedOffsetDecoder) Size() int {
	if d.width == 0 {
		return 0
	}
	return len(d.buf) / d.width
}
// Get returns the offset value at the given index and whether the read
// was valid. It fails for out-of-range indices, an empty buffer, or a
// value width larger than 4 octets.
func (d *FixedOffsetDecoder) Get(index int) (uint32, bool) {
	if d.width > 4 || len(d.buf) == 0 {
		return 0, false
	}
	start := index * d.width
	if start < 0 || start >= len(d.buf) {
		return 0, false
	}
	end := start + d.width
	if end > len(d.buf) {
		return 0, false
	}
	// scratch's upper octets stay zero, widening the value to 4 octets.
	copy(d.scratch, d.buf[start:end])
	return binary.LittleEndian.Uint32(d.scratch), true
}
// ByteSlice2Uint32 interprets up to the first four octets of slice as a
// little-endian uint32; missing octets are treated as zero.
func ByteSlice2Uint32(slice []byte) uint32 {
	// A stack array avoids the per-call heap allocation the previous
	// make([]byte, 4) incurred.
	var buf [4]byte
	copy(buf[:], slice)
	return binary.LittleEndian.Uint32(buf[:])
}
package engine
import (
"github.com/yuin/gopher-lua"
"github.com/Member1221/raylib-go/raylib"
"fmt"
)
// Vector2 aliases raylib's 2D vector so engine code and bindings share one type.
type Vector2 = raylib.Vector2

// Camera wraps a raylib Camera2D together with the mutable fields exposed
// to Lua; Update copies these fields into the internal camera.
type Camera struct {
	iCam     raylib.Camera2D // internal camera; refreshed from the fields below by Update
	Position Vector2         // copied to iCam.Offset
	Origin   Vector2         // copied to iCam.Target
	Rotation float32
	Zoom     float32
}
// RegisterCameraType registers the "camera2d" Lua type, its constructor
// (reachable both as camera2d.new and as the global Camera2D) and its
// method table.
func RegisterCameraType(state *lua.LState) {
	fmt.Println("[Polyplex:raylib]", "Register type: Camera2D...")
	meta := state.NewTypeMetatable("camera2d")
	state.SetGlobal("camera2d", meta)
	state.SetGlobal("Camera2D", state.NewFunction(NewCamera))
	state.SetField(meta, "new", state.NewFunction(NewCamera))
	state.SetField(meta, "__index", state.SetFuncs(state.NewTable(), cameraMembers))
}
// NewCamera is the Lua constructor for camera2d. It expects four
// arguments — a {x, y} position table, a {x, y} origin table, a rotation
// and a zoom — and pushes the new camera userdata (1 return value).
// NOTE(review): the RawGetInt(...).(lua.LNumber) assertions panic when a
// table slot is missing or not a number; confirm whether constructor
// misuse should instead raise a Lua argument error.
func NewCamera(state *lua.LState) int {
	cam := &Camera{}
	pos := state.CheckTable(1)
	ori := state.CheckTable(2)
	rot := state.CheckNumber(3)
	zom := state.CheckNumber(4)
	// Lua tables are 1-indexed: slot 1 is x, slot 2 is y.
	cam.Position = Vector2{float32(pos.RawGetInt(1).(lua.LNumber)), float32(pos.RawGetInt(2).(lua.LNumber))}
	cam.Origin = Vector2{float32(ori.RawGetInt(1).(lua.LNumber)), float32(ori.RawGetInt(2).(lua.LNumber))}
	cam.Rotation = float32(rot)
	cam.Zoom = float32(zom)
	ud := state.NewUserData()
	ud.Value = cam
	state.SetMetatable(ud, state.GetTypeMetatable("camera2d"))
	state.Push(ud)
	return 1
}
// Update copies the exposed Position, Origin, Rotation and Zoom fields
// into the internal raylib camera (Position maps to Offset, Origin to
// Target).
func (c *Camera) Update() {
	c.iCam.Offset = c.Position
	c.iCam.Target = c.Origin
	c.iCam.Rotation = c.Rotation
	c.iCam.Zoom = c.Zoom
}
// checkCamera extracts the *Camera from the userdata in Lua argument 1,
// raising a Lua argument error (and returning nil) otherwise.
func checkCamera(L *lua.LState) *Camera {
	ud := L.CheckUserData(1)
	if cam, ok := ud.Value.(*Camera); ok {
		return cam
	}
	L.ArgError(1, "Expected Camera!")
	return nil
}
// camUpdate is the Lua binding for camera:update().
// NOTE(review): it returns 1 without pushing a result, so Lua receives
// whatever sits on top of the stack (the camera userdata itself) —
// confirm whether this chaining behavior is intended or the return count
// should be 0.
func camUpdate(state *lua.LState) int {
	cam := checkCamera(state)
	if cam == nil {
		return 0
	}
	cam.Update()
	return 1
}
// cameraMembers is the Lua method table bound to camera2d's __index.
// Each property entry acts as a combined getter/setter: called with extra
// arguments it assigns, otherwise it pushes the current value(s).
var cameraMembers = map[string]lua.LGFunction {
	"update": camUpdate,
	// position(x, y) sets the camera position; position() returns x, y.
	// NOTE(review): the setter guard is GetTop() >= 2 yet arguments 2 AND
	// 3 are checked, so position(x) raises an argument error; the sibling
	// "origin" handler uses GetTop() == 3 — confirm which convention is
	// intended.
	"position": func(state *lua.LState) int {
		this := checkCamera(state)
		if this == nil {
			return 0
		}
		if state.GetTop() >= 2 {
			this.Position = Vector2{float32(state.CheckNumber(2)), float32(state.CheckNumber(3))}
			return 0
		}
		state.Push(lua.LNumber(this.Position.X))
		state.Push(lua.LNumber(this.Position.Y))
		return 2
	},
	// origin(x, y) sets the camera origin; origin() returns x, y.
	// NOTE(review): unlike "position", this handler does not nil-check the
	// camera returned by checkCamera.
	"origin": func(state *lua.LState) int {
		this := checkCamera(state)
		if state.GetTop() == 3 {
			x := state.CheckNumber(2)
			y := state.CheckNumber(3)
			this.Origin = Vector2{float32(x), float32(y)}
			return 0
		}
		state.Push(lua.LNumber(this.Origin.X))
		state.Push(lua.LNumber(this.Origin.Y))
		return 2
	},
	// rotation(deg) sets the rotation; rotation() returns it.
	"rotation": func(state *lua.LState) int {
		this := checkCamera(state)
		if state.GetTop() == 2 {
			x := state.CheckNumber(2)
			this.Rotation = float32(x)
			return 0
		}
		state.Push(lua.LNumber(this.Rotation))
		return 1
	},
	// zoom(factor) sets the zoom; zoom() returns it.
	"zoom": func(state *lua.LState) int {
		this := checkCamera(state)
		if state.GetTop() == 2 {
			x := state.CheckNumber(2)
			this.Zoom = float32(x)
			return 0
		}
		state.Push(lua.LNumber(this.Zoom))
		return 1
	},
}
Package xdr implements the data representation portion of the External Data
Representation (XDR) standard protocol as specified in RFC 4506 (obsoletes
RFC 1832 and RFC 1014).
The XDR RFC defines both a data specification language and a data
representation standard. This package implements methods to encode
and decode XDR data per the data representation standard with the exception
of 128-bit quadruple-precision floating points. It does not currently implement
parsing of the data specification language. In other words, the ability to
automatically generate Go code by parsing an XDR data specification file
(typically .x extension) is not supported. In practice, this limitation of the
package is fairly minor since it is largely unnecessary due to the reflection
capabilities of Go as described below.
This package provides two approaches for encoding and decoding XDR data:
1) Marshal/Unmarshal functions which automatically map between XDR and Go types
2) Individual Encoder/Decoder objects to manually work with XDR primitives
For the Marshal/Unmarshal functions, Go reflection capabilities are used to choose
the type of the underlying XDR data based upon the Go type to encode or the target
Go type to decode into. A description of how each type is mapped is provided
below, however one important type worth reviewing is Go structs. In the case of
structs, each exported field (first letter capitalized) is reflected and mapped
in order. As a result, this means a Go struct with exported fields of the
appropriate types listed in the expected order can be used to automatically
encode / decode the XDR data thereby eliminating the need to write a lot of
boilerplate code to encode/decode and error check each piece of XDR data as is
typically required with C based XDR libraries.
Go Type to XDR Type Mappings
The following chart shows an overview of how Go types are mapped to XDR types
for automatic marshalling and unmarshalling. The documentation for the Marshal
and Unmarshal functions has specific details of how the mapping proceeds.
Go Type <-> XDR Type
--------------------
int8, int16, int32, int <-> XDR Integer
uint8, uint16, uint32, uint <-> XDR Unsigned Integer
int64 <-> XDR Hyper Integer
uint64 <-> XDR Unsigned Hyper Integer
bool <-> XDR Boolean
float32 <-> XDR Floating-Point
float64 <-> XDR Double-Precision Floating-Point
string <-> XDR String
byte <-> XDR Integer
[]byte <-> XDR Variable-Length Opaque Data
[#]byte <-> XDR Fixed-Length Opaque Data
[]<type> <-> XDR Variable-Length Array
[#]<type> <-> XDR Fixed-Length Array
struct <-> XDR Structure
map <-> XDR Variable-Length Array of two-element XDR Structures
time.Time <-> XDR String encoded with RFC3339 nanosecond precision
Notes and Limitations:
* Automatic marshalling and unmarshalling of variable and fixed-length arrays
of uint8s require a special struct tag `xdropaque:"false"` since byte
slices and byte arrays are assumed to be opaque data and byte is a Go
alias for uint8 thus indistinguishable under reflection
* Channel, complex, and function types cannot be encoded
* Interfaces without a concrete value cannot be encoded
* Cyclic data structures are not supported and will result in infinite loops
* Strings are marshalled and unmarshalled with UTF-8 character encoding
which differs from the XDR specification of ASCII, however UTF-8 is
backwards compatible with ASCII so this should rarely cause issues
Encoding
To encode XDR data, use the Marshal function.
func Marshal(v interface{}) (rv []byte, err error)
For example, given the following code snippet:
type ImageHeader struct {
Signature [3]byte
Version uint32
IsGrayscale bool
NumSections uint32
}
h := ImageHeader{[3]byte{0xAB, 0xCD, 0xEF}, 2, true, 10}
encodedData, err := xdr.Marshal(&h)
// Error check elided
The result, encodedData, will then contain the following XDR encoded byte
sequence:
0xAB, 0xCD, 0xEF, 0x00,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x0A
In addition, while the automatic marshalling discussed above will work for the
vast majority of cases, an Encoder object is provided that can be used to
manually encode XDR primitives for complex scenarios where automatic
reflection-based encoding won't work. The included examples provide a sample of
manual usage via an Encoder.
Decoding
To decode XDR data, use the Unmarshal function.
func Unmarshal(data []byte, v interface{}) (rest []byte, err error)
For example, given the following code snippet:
type ImageHeader struct {
Signature [3]byte
Version uint32
IsGrayscale bool
NumSections uint32
}
// Using output from the Encoding section above.
encodedData := []byte{
0xAB, 0xCD, 0xEF, 0x00,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x0A}
var h ImageHeader
remainingBytes, err := xdr.Unmarshal(encodedData, &h)
// Error check elided
The struct instance, h, will then contain the following values:
h.Signature = [3]byte{0xAB, 0xCD, 0xEF}
h.Version = 2
h.IsGrayscale = true
h.NumSections = 10
In addition, while the automatic unmarshalling discussed above will work for the
vast majority of cases, a Decoder object is provided that can be used to
manually decode XDR primitives for complex scenarios where automatic
reflection-based decoding won't work. The included examples provide a sample of
manual usage via a Decoder.
Errors
All errors are either of type UnmarshalError or MarshalError. Both provide
human readable output as well as an ErrorCode field which can be inspected by
sophisticated callers if necessary
See the documentation of UnmarshalError, MarshalError, and ErrorCode for further
details.
*/
package xdr | xdr/doc.go | 0.893646 | 0.928311 | doc.go | starcoder |
package femto
// FromCharPos converts from a character position to an x, y position,
// counting each newline as one character.
func FromCharPos(loc int, buf *Buffer) Loc {
	y := 0
	consumed := 0
	for {
		lineLen := Count(buf.Line(y)) + 1 // + 1 for the newline
		if consumed+lineLen > loc {
			break
		}
		consumed += lineLen
		y++
	}
	return Loc{loc - consumed, y}
}
// ToCharPos converts from an x, y position to a character position,
// counting each newline as one character.
func ToCharPos(start Loc, buf *Buffer) int {
	total := start.X
	for line := 0; line < start.Y; line++ {
		total += Count(buf.Line(line)) + 1 // + 1 for the newline
	}
	return total
}
// InBounds returns whether the given location is a valid character
// position in the given buffer. X may equal the line length, i.e. the
// position just past the last character.
func InBounds(pos Loc, buf *Buffer) bool {
	// Direct boolean result instead of the previous
	// if-cond-return-false / return-true pattern. Short-circuiting keeps
	// buf.Line from being called with an out-of-range Y.
	return pos.Y >= 0 && pos.Y < buf.NumLines &&
		pos.X >= 0 && pos.X <= Count(buf.Line(pos.Y))
}
// ByteOffset is just like ToCharPos except it counts bytes instead of runes.
func ByteOffset(pos Loc, buf *Buffer) int {
	total := 0
	for line := 0; line < pos.Y; line++ {
		total += len(buf.Line(line)) + 1 // + 1 for the newline
	}
	return total + len(buf.Line(pos.Y)[:pos.X])
}
// Loc stores a location as a column (X) and line (Y) pair.
type Loc struct {
	X, Y int
}
// Diff returns the number of characters between two locations, counting
// each newline as one character. The order of a and b does not matter.
func (l Loc) placeholder() {}
// LessThan returns true if l comes strictly before b in the buffer.
// (The previous doc comment, "returns true if b is smaller", had the
// comparison backwards.)
func (l Loc) LessThan(b Loc) bool {
	// Single boolean expression instead of the cascaded if/return chain.
	return l.Y < b.Y || (l.Y == b.Y && l.X < b.X)
}
// GreaterThan returns true if l comes strictly after b in the buffer.
// (The previous doc comment, "returns true if b is bigger", had the
// comparison backwards.)
func (l Loc) GreaterThan(b Loc) bool {
	return l.Y > b.Y || (l.Y == b.Y && l.X > b.X)
}
// GreaterEqual returns true if l is at or after b in the buffer.
func (l Loc) GreaterEqual(b Loc) bool {
	// Single boolean expression instead of the cascaded if/return chain.
	return l.Y > b.Y || (l.Y == b.Y && l.X > b.X) || l == b
}
// LessEqual returns true if l is at or before b in the buffer.
func (l Loc) LessEqual(b Loc) bool {
	// Single boolean expression instead of the cascaded if/return chain.
	return l.Y < b.Y || (l.Y == b.Y && l.X < b.X) || l == b
}
// This moves the location one character to the right: within a line it
// advances X; at the end of a line it wraps to column 0 of the next line.
// At the buffer's very end it returns X+1, a position one past the end.
func (l Loc) right(buf *Buffer) Loc {
	if l == buf.End() {
		return Loc{l.X + 1, l.Y}
	}
	var res Loc
	if l.X < Count(buf.Line(l.Y)) {
		res = Loc{l.X + 1, l.Y}
	} else {
		res = Loc{0, l.Y + 1}
	}
	return res
}
// This moves the given location one character to the left: within a line
// it decrements X; at column 0 it wraps to the end of the previous line.
// At the buffer's very start it returns X-1, a position before the start.
func (l Loc) left(buf *Buffer) Loc {
	if l == buf.Start() {
		return Loc{l.X - 1, l.Y}
	}
	var res Loc
	if l.X > 0 {
		res = Loc{l.X - 1, l.Y}
	} else {
		res = Loc{Count(buf.Line(l.Y - 1)), l.Y - 1}
	}
	return res
}
// Move moves the cursor n characters to the left or right
// It moves the cursor left if n is negative
func (l Loc) Move(n int, buf *Buffer) Loc {
if n > 0 {
for i := 0; i < n; i++ {
l = l.right(buf)
}
return l
}
for i := 0; i < Abs(n); i++ {
l = l.left(buf)
}
return l
} | femto/loc.go | 0.739234 | 0.543651 | loc.go | starcoder |
package goquery
import (
"github.com/lynx-seu/goquery/cascadia"
"github.com/lynx-seu/goquery/exp/html"
"regexp"
"strings"
)
//var rxNeedsContext = `^[\x20\t\r\n\f]*[>+~]|:(nth|eq|gt|lt|first|last|even|odd)(-child)?(?:\((\d*)\)|)(?:[^-]|$)`
// Is() checks the current matched set of elements against a selector and
// returns true if at least one of these elements matches.
func (this *Selection) Is(selector string) bool {
	// Early return flattens the previous nested if; the old code also
	// carried a large commented-out branch for positional pseudo-selectors
	// (:nth, :eq, ...) that may need document-relative evaluation — see
	// the rxNeedsContext note above for that experiment.
	if len(this.Nodes) == 0 {
		return false
	}
	// Attempt a match with the selector
	cs := cascadia.MustCompile(selector)
	// Single-node fast path avoids building a filtered slice.
	if len(this.Nodes) == 1 {
		return cs.Match(this.Nodes[0])
	}
	return len(cs.Filter(this.Nodes)) > 0
}
// IsFunction checks the current matched set of elements against a predicate and
// returns true if at least one of these elements matches.
func (this *Selection) IsFunction(f func(int, *Selection) bool) bool {
	return this.FilterFunction(f).Length() > 0
}
// IsSelection checks the current matched set of elements against a Selection object
// and returns true if at least one of these elements matches.
func (this *Selection) IsSelection(s *Selection) bool {
	return this.FilterSelection(s).Length() > 0
}
// IsNodes checks the current matched set of elements against the specified nodes
// and returns true if at least one of these elements matches.
func (this *Selection) IsNodes(nodes ...*html.Node) bool {
	return this.FilterNodes(nodes...).Length() > 0
}
// HasClass() determines whether any of the matched elements are assigned the
// given class.
// rxClassWhitespace matches the whitespace characters that may separate
// class names inside a class attribute. Compiled once at package init
// rather than on every HasClass call.
var rxClassWhitespace = regexp.MustCompile("[\t\r\n]")

// HasClass determines whether any of the matched elements are assigned the
// given class.
func (this *Selection) HasClass(class string) bool {
	// Pad with spaces so a substring search only matches whole class names.
	class = " " + class + " "
	for _, n := range this.Nodes {
		// Applies only to element nodes
		if n.Type == html.ElementNode {
			if elClass, ok := getAttributeValue("class", n); ok {
				// Normalize tabs/newlines between class names to spaces.
				elClass = rxClassWhitespace.ReplaceAllString(" "+elClass+" ", " ")
				if strings.Contains(elClass, class) {
					return true
				}
			}
		}
	}
	return false
}
// Contains() returns true if the specified Node is within,
// at any depth, one of the nodes in the Selection object.
// It is NOT inclusive, to behave like jQuery's implementation, and
// unlike Javascript's .contains(), so if the contained
// node is itself in the selection, it returns false.
func (this *Selection) Contains(n *html.Node) bool {
return sliceContains(this.Nodes, n)
} | query.go | 0.517815 | 0.410756 | query.go | starcoder |
package data
// Message stores the rendering information about a message.
type Message struct {
	// Nested shows whether this message is a nested message and needs to be exported
	Nested bool
	// Name is the name of the Message
	Name string
	// FQType is the fully qualified type name for the message itself
	FQType string
	// Enums is a list of NestedEnums defined inside this message
	Enums []*NestedEnum
	// Fields is a list of fields to render
	Fields []*Field
	// NonOneOfFields contains a subset of fields that are not in the one-of groups
	NonOneOfFields []*Field
	// Messages is the list of nested messages defined inside this message
	Messages []*Message
	// OneOfFieldsGroups is the list of one-of fields grouped by index, so that the
	// renderer can render the clearing of the other fields in a group on set.
	OneOfFieldsGroups map[int32][]*Field
	// OneOfFieldsNames is the name of each one-of group keyed by index, so that the
	// renderer can render the clearing of the other fields in a group on set.
	OneOfFieldsNames map[int32]string
	// Comment is the comment of the message.
	Comment string
}
// HasOneOfFields returns true when the message has at least one one-of group.
func (m *Message) HasOneOfFields() bool {
	return len(m.OneOfFieldsGroups) > 0
}
// NewMessage initialises and returns a Message with every collection field
// ready for use.
func NewMessage() *Message {
	return &Message{
		Nested: false,
		Name:   "",
		Enums:  make([]*NestedEnum, 0),
		Fields: make([]*Field, 0),
		// NonOneOfFields was previously left nil while the other slice
		// fields were initialised; initialise it too for consistency.
		NonOneOfFields:    make([]*Field, 0),
		Messages:          make([]*Message, 0),
		OneOfFieldsGroups: make(map[int32][]*Field),
		OneOfFieldsNames:  make(map[int32]string),
		Comment:           "",
	}
}
// NestedEnum stores the information of enums defined inside a message.
type NestedEnum struct {
	// Name of the Enum inside the class, which will be identical to the name
	// defined inside the message
	Name string
	// Type will have two kinds of value, and the difference can be told by
	// the owning field's IsExternal attribute.
	// For an external one, because during the analysis stage there might not be
	// a full map of the types inside Registry, the actual translation of this
	// will be left to render time.
	// If it is a type inside the file, it will be filled with the unique type
	// name defined at the top level.
	Type string
}
// Field stores the information about a field inside a message.
type Field struct {
	Name string
	// Type will be similar to NestedEnum.Type: scalar types and types inside
	// the same file will use the short type name, while
	// external types will have a fully-qualified name translated during render time.
	Type string
	// IsExternal tells whether the type of this field is an external dependency
	IsExternal bool
	// IsOneOfField tells whether this field is part of a one-of group.
	// One-of fields will have an extra clearXXX method,
	// and the setter accessor will clear out other fields in the group on set.
	IsOneOfField bool
	// Message is the reference back to the parent message
	Message *Message
	// OneOfIndex is the index of the one-of group this field belongs to
	OneOfIndex int32
	// IsRepeated indicates whether the field is a repeated field
	IsRepeated bool
	// JSONName is the JSON name of the field.
	JSONName string
	// Comment is the comment of the field
	Comment string
}
// GetType returns the type information of the field to aid the rendering.
func (f *Field) GetType() *TypeInfo {
	return &TypeInfo{
		Type: f.Type,
		IsRepeated: f.IsRepeated,
		IsExternal: f.IsExternal,
	}
}
// SetExternal mutates the IsExternal attribute.
func (f *Field) SetExternal(external bool) {
	f.IsExternal = external
}
// MapEntryType is the generic entry type for both the key and the value of a map field.
type MapEntryType struct {
	// Type of the map entry
	Type string
	// IsExternal indicates the field type is external to its own package
	IsExternal bool
}
// GetType returns the type information for the type entry
func (m *MapEntryType) GetType() *TypeInfo {
return &TypeInfo{
Type: m.Type,
IsRepeated: false,
IsExternal: m.IsExternal,
}
}
// SetExternal mutate the IsExternal attribute inside
func (m *MapEntryType) SetExternal(external bool) {
m.IsExternal = external
} | data/message.go | 0.629547 | 0.457016 | message.go | starcoder |
package geom
import (
"math"
)
// Polygon is a closed Path: vertex indices wrap around, so the last
// vertex is treated as adjacent to the first.
type Polygon struct {
	Path
}
// wrapIndex maps index into the range [0, length) using modular
// (wrap-around) arithmetic, so negative indices count back from the end.
func wrapIndex(index, length int) (i int) {
	i = (index%length + length) % length
	return
}
// Clone returns a copy of the polygon backed by a clone of its path.
func (p *Polygon) Clone() (op *Polygon) {
	return &Polygon{*p.Path.Clone()}
}

// Equals reports whether oi is a *Polygon whose path equals this one's.
func (p *Polygon) Equals(oi interface{}) bool {
	other, ok := oi.(*Polygon)
	if !ok {
		return false
	}
	return (&p.Path).Equals(&other.Path)
}
// Register delegates to Path.Register on the underlying paths.
func (p *Polygon) Register(op *Polygon) (offset Coord, match bool) {
	return p.Path.Register(&op.Path)
}

// Vertex returns the vertex at index; out-of-range indices wrap around.
func (me *Polygon) Vertex(index int) (v Coord) {
	return me.vertices[wrapIndex(index, len(me.vertices))]
}

// Segment returns the edge joining Vertex(index) and Vertex(index+1).
func (me *Polygon) Segment(index int) (s *Segment) {
	return &Segment{me.Vertex(index), me.Vertex(index + 1)}
}

// VertexAngle returns the angle formed at Vertex(index) by its two
// neighbouring vertices.
func (me *Polygon) VertexAngle(index int) (r float64) {
	return VertexAngle(me.Vertex(index-1), me.Vertex(index), me.Vertex(index+1))
}
// WindingOrder sums the vertex angles around the polygon; the sign of
// the total indicates the winding direction.
func (me *Polygon) WindingOrder() (winding float64) {
	for i := range me.vertices {
		winding += me.VertexAngle(i)
	}
	return
}

// ContainsCoord reports whether p lies inside the polygon, by casting a
// ray straight up from p and counting edge crossings: odd means inside.
func (me *Polygon) ContainsCoord(p Coord) bool {
	ray := &Segment{p, Coord{p.X, p.Y + 1}}
	crossings := 0
	for i := 0; i < me.Length(); i++ {
		uh, uv := me.Segment(i).IntersectParameters(ray)
		// Skip edges the ray's vertical line misses (uh outside [0,1)).
		if uh < 0 || uh >= 1 {
			continue
		}
		// Count only crossings strictly above p.
		if uv > 0 {
			crossings++
		}
	}
	return crossings%2 == 1
}
// Bisect splits the polygon in two by joining vertices i and j (both
// wrapped into range): p1 walks from vertex i forward, with wrap-around,
// up to j; p2 walks from j forward up to i. Both halves include the two
// endpoints of the bisecting segment.
func (me *Polygon) Bisect(i, j int) (p1, p2 *Polygon) {
	i = wrapIndex(i, len(me.vertices))
	j = wrapIndex(j, len(me.vertices))
	//build the first one, starting at i and ending at j
	p1 = &Polygon{}
	for c := i; c != wrapIndex(j+1, len(me.vertices)); c = wrapIndex(c+1, len(me.vertices)) {
		p1.AddVertex(me.Vertex(c))
	}
	//build the second one, starting at j and ending at i
	p2 = &Polygon{}
	for c := j; c != wrapIndex(i+1, len(me.vertices)); c = wrapIndex(c+1, len(me.vertices)) {
		p2.AddVertex(me.Vertex(c))
	}
	return
}
// Error delegates to Path.Error, returning the registration offset and
// error between this polygon's path and other's path.
func (me *Polygon) Error(other *Polygon) (offset Coord, error float64) {
	return me.Path.Error(&other.Path)
}
func (me *Polygon) Triangles() (tris []Triangle, ok bool) {
dbg("%v.Triangles()", me)
if me.Length() == 3 {
dbg("already a triangle")
tris = []Triangle{Triangle{me.Vertex(0), me.Vertex(1), me.Vertex(2)}}
ok = true
return
}
for i := 0; i < me.Length(); i++ {
iv := me.Vertex(i)
v2:
for j := i + 2; j != wrapIndex(i-1, me.Length()); j = wrapIndex(j+1, me.Length()) {
jv := me.Vertex(j)
bisectingSegment := &Segment{iv, jv}
dbg("bisectingSegment(%d, %d) = %v", i, j, bisectingSegment)
//first check to see that it doesn't intersect any other segments
for si := 0; si < me.Length(); si++ {
s := me.Segment(si)
u1, u2 := s.IntersectParameters(bisectingSegment)
if math.IsNaN(u1) || math.IsNaN(u2) || (u1 > 0 && u1 < 1 && u2 > 0 && u2 < 1) {
dbg(" Segment(%d, %d) %v\n%f %f", si, si+1, s, u1, u2)
continue v2
} else {
dbg(" doesn't intersect %v: %f %f", s, u1, u2)
}
}
//second check to see that it is in the interior of the polygon
midCoord := bisectingSegment.Extrapolate(0.5)
if !me.ContainsCoord(midCoord) {
dbg(" poly contains %v", midCoord)
continue v2
}
dbg(" Segment %v is good", bisectingSegment)
p1, p2 := me.Bisect(i, j)
t1, ok1 := p1.Triangles()
t2, ok2 := p2.Triangles()
tris = append(t1, t2...)
ok = ok1 && ok2
return
}
}
dbg("failed with %v", me)
//panic("couldn't find any valid bisecting segment")
return
} | vendor/github.com/skelterjohn/geom/poly.go | 0.601477 | 0.444806 | poly.go | starcoder |
package labradar
import (
"strconv"
"time"
)
// Series is a structure that holds the data from a Labradar series, and some details of the load
// and firearm that was used.
// Series is a structure that holds the data from a Labradar series, and some details of the load
// and firearm that was used.
type Series struct {
	// TODO [TO20220404] Maybe this should be an interface?
	Number SeriesNumber
	// deviceId identifies the Labradar device that recorded this series.
	deviceId DeviceId
	Date string
	Time string
	Velocities *VelocityData
	Firearm *Firearm
	LoadData *LoadData
	Notes string
	UnitsOfMeasure *UnitsOfMeasure
}
// SeriesMutatorFn describes a function that can be used to manipulate the values of a Series.
type SeriesMutatorFn = func(s *Series)
// String returns the printable form of the series number.
func (s Series) String() string {
	return s.Number.String()
}
// DeviceId returns the id of the device that recorded this series.
func (s Series) DeviceId() DeviceId {
	return s.deviceId
}
// CountOfShots will retrieve the number of shots in the series.
func (s Series) CountOfShots() int {
	return s.Velocities.CountOfShots()
}
// Update will use the provided mutators to update values in the Series.
// Mutators are applied in the order they are given.
func (s *Series) Update(mutators ...SeriesMutatorFn) {
	for _, mutate := range mutators {
		mutate(s)
	}
}
// NewSeries builds a Series with empty/zero values for every field, then
// applies the given mutators to it in order.
func NewSeries(mutators ...SeriesMutatorFn) *Series {
	series := &Series{
		Number:         0,
		Velocities:     emptyVelocityData(),
		Firearm:        &Firearm{Name: "", Cartridge: ""},
		LoadData:       emptyLoadData(),
		UnitsOfMeasure: emptyUnitsOfMeasure(),
		Notes:          "",
		Date:           "",
		Time:           "",
	}
	series.Update(mutators...)
	return series
}
// TryParseSeriesNumber parses a Labradar series label of the exact form
// "SRnnnn" (six characters: the "SR" prefix followed by digits). The
// boolean result reports whether parsing succeeded.
func TryParseSeriesNumber(sr string) (SeriesNumber, bool) {
	if len(sr) != 6 || sr[:2] != "SR" {
		return 0, false
	}
	n, err := strconv.Atoi(sr[2:])
	if err != nil {
		return 0, false
	}
	return SeriesNumber(n), true
}
// UpdateDeviceForSeries will update the Series with the device id of the specified device.
func UpdateDeviceForSeries(device *DeviceDirectory) SeriesMutatorFn {
	// TODO [TO20220119] Needs unit tests
	return func(s *Series) {
		s.deviceId = device.DeviceId()
	}
}
// WithSeriesNumber will initialize the number assigned by a specific Labradar device.
func WithSeriesNumber(n int) SeriesMutatorFn {
	return func(s *Series) {
		s.Number = SeriesNumber(n)
	}
}
// WithFirearm will set the name of the firearm. It does not touch the
// cartridge on either the Firearm or the LoadData; use WithCartridge for that.
func WithFirearm(name string) SeriesMutatorFn {
	return func(s *Series) {
		s.Firearm.Name = name
	}
}
// WithCartridge will set the cartridge on both the LoadData and the Firearm.
func WithCartridge(cartridge string) SeriesMutatorFn {
	// TODO [TO20220123] What should we do if the given cartridge doesn't match the cartridge on the firearm?
	return func(s *Series) {
		s.Firearm.Cartridge = cartridge
		s.LoadData.Cartridge = cartridge
	}
}
// WithNotes will update the notes field.
func WithNotes(notes string) SeriesMutatorFn {
	return func(s *Series) {
		s.Notes = notes
	}
}
// WithPowder will set the name of the gunpowder used and the charge weight on the PowderCharge.
func WithPowder(name string, weight float32) SeriesMutatorFn {
	return func(s *Series) {
		s.LoadData.Powder.Name = name
		s.LoadData.Powder.Amount = weight
	}
}
// WithProjecticle will set the name and weight of the projectile on the Projectile.
// NOTE(review): the function name misspells "Projectile"; it is kept as-is for
// backward compatibility with existing callers.
func WithProjecticle(name string, weight int) SeriesMutatorFn {
	return func(s *Series) {
		s.LoadData.Projectile.Name = name
		s.LoadData.Projectile.Weight = weight
	}
}
// UsingGrainsForWeight sets the weight units of measure to grains ("gr").
func UsingGrainsForWeight() SeriesMutatorFn {
	return func(s *Series) {
		s.UnitsOfMeasure.Weight = "gr"
	}
}
// UsingCurrentDateAndTime will use the time from this process as the default
// Date ("2006-01-02" layout) and Time ("15:04" layout).
func UsingCurrentDateAndTime() SeriesMutatorFn {
	return func(s *Series) {
		now := time.Now()
		s.Date = now.Format("2006-01-02")
		s.Time = now.Format("15:04")
	}
}
// UsingFeetPerSecondForMuzzleVelocity will set the default velocity units to "fps".
func UsingFeetPerSecondForMuzzleVelocity() SeriesMutatorFn {
	return func(s *Series) {
		s.UnitsOfMeasure.Velocity = "fps"
	}
}
// UsingMetresPerSecondForMuzzleVelocity will set the default velocity units to "m/s".
func UsingMetresPerSecondForMuzzleVelocity() SeriesMutatorFn {
	return func(s *Series) {
		s.UnitsOfMeasure.Velocity = "m/s"
	}
}
// UsingYardsForDistance will set the default distance units to yards ("y").
func UsingYardsForDistance() SeriesMutatorFn {
	return func(s *Series) {
		s.UnitsOfMeasure.Distance = "y"
	}
}
// UsingMetresForDistance will set the default distance units to metres ("m").
func UsingMetresForDistance() SeriesMutatorFn {
	return func(s *Series) {
		s.UnitsOfMeasure.Distance = "m"
	}
}
// UsingFeetForDistance will set the default distance units to feet ("ft").
func UsingFeetForDistance() SeriesMutatorFn {
	return func(s *Series) {
		s.UnitsOfMeasure.Distance = "ft"
	}
}
// LabradarSeriesDefaults returns the mutators that apply the default values
// to a Series: grains for weight, metres for distance, feet-per-second for
// muzzle velocity, the current date/time, and Celsius for temperature.
func LabradarSeriesDefaults() []SeriesMutatorFn {
	return []SeriesMutatorFn{
		UsingGrainsForWeight(),
		UsingMetresForDistance(),
		UsingFeetPerSecondForMuzzleVelocity(),
		UsingCurrentDateAndTime(),
		UsingCelsiusForTemperature(),
	}
}
// UsingCelsiusForTemperature will return a mutator to set the temperature Units of Measure to Celsius.
func UsingCelsiusForTemperature() SeriesMutatorFn {
	return func(s *Series) {
		s.UnitsOfMeasure.Temperature = "Celsius"
	}
}
// UsingFarenheitForTemperature will return a mutator to set the temperature Units of Measure to Fahrenheit.
// NOTE(review): the function name misspells "Fahrenheit"; kept for backward compatibility.
func UsingFarenheitForTemperature() SeriesMutatorFn {
	return func(s *Series) {
		s.UnitsOfMeasure.Temperature = "Fahrenheit"
	}
}
// combineMutators will combine two separate arrays of SeriesMutatorFn into one. The items in the first
// array will appear first.
func combineMutators(first []SeriesMutatorFn, second []SeriesMutatorFn) []SeriesMutatorFn {
mutators := make([]SeriesMutatorFn, len(first)+len(second))
index := 0
for i := 0; i < len(first); i++ {
mutators[index] = first[i]
index++
}
for i := 0; i < len(second); i++ {
mutators[index] = second[i]
index++
}
return mutators
} | code/cli/pkg/labradar/series.go | 0.538012 | 0.592519 | series.go | starcoder |
package swessn
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
// Divider represents the divider between birth date and control digits.
type Divider string
const (
DividerPlus Divider = "+"
DividerMinus Divider = "-"
DividerNone Divider = ""
)
// Parsed represents a parsed string. The fields are named as date parts but may
// be of other types in case of an organisation number or coordination number.
type Parsed struct {
Century int
Year int
Month int
Day int
Serial int
ControlDigit *int
Divider Divider
}
// nolint: gochecknoglobal
var validFormatRe = regexp.MustCompile(`^(\d{2})?(\d{2})(\d{2})(\d{2})([-+])?(\d{3})(\d)?$`)
// Parse will parse a string and returned a pointer to a Parsed type. If the
// string passed isn't in a valid format an error will be returned.
func Parse(input string) (*Parsed, error) {
matches := validFormatRe.FindStringSubmatch(input)
if len(matches) != 8 {
return nil, errors.New("invalid format")
}
var (
century, _ = strconv.Atoi(matches[1])
year, _ = strconv.Atoi(matches[2])
month, _ = strconv.Atoi(matches[3])
day, _ = strconv.Atoi(matches[4])
serial, _ = strconv.Atoi(matches[6])
divider = Divider(strings.ToUpper(matches[5]))
)
p := &Parsed{
Year: year,
Month: month,
Day: day,
Serial: serial,
Divider: divider,
}
if century > 0 {
p.Century = century * 100
}
if p.Divider == DividerNone {
p.Divider = DividerMinus
}
if cd, err := strconv.Atoi(matches[7]); err == nil {
p.ControlDigit = &cd
}
if p.ControlDigit == nil {
cd := p.LuhnControlDigit(p.LuhnChecksum())
p.ControlDigit = &cd
}
return p, nil
}
// LuhnCHecksum calculates the sum of the parsed digits with the Luhn algorithm.
func (p *Parsed) LuhnChecksum() int {
var (
sum = 0
digits = fmt.Sprintf("%02d%02d%02d%03d", p.Year, p.Month, p.Day, p.Serial)
)
for i := range digits {
digit, err := strconv.Atoi(string(digits[i]))
if err != nil {
panic("invalid luhn iteration value")
}
if i%2 == 0 {
digit *= 2
}
if digit > 9 {
digit -= 9
}
sum += digit
}
return sum
}
// LuhnControlDigit calculates the control digit based on a checksum.
func (p *Parsed) LuhnControlDigit(cs int) int {
checksum := 10 - (cs % 10)
if checksum == 10 {
return 0
}
return checksum
}
// Valid returns if a parsed string is valid, that is if the given control digit
// matches the checksum of the digits.
func (p *Parsed) Valid() bool {
var (
controlDigit = p.LuhnControlDigit(p.LuhnChecksum())
cd = controlDigit
)
if p.ControlDigit != nil {
cd = *p.ControlDigit
}
return controlDigit == cd
}
// ValidPerson returns if a parsed string is valid if validated as a private
// person.
func (p *Parsed) ValidPerson() bool {
person, err := NewPersonFromParsed(p)
if err != nil {
return false
}
return person.Valid()
}
// ValidOrganization returns if a parsed string is valid if validated as an
// organization.
func (p *Parsed) ValidOrganization() bool {
org, err := NewOrganizationFromParsed(p)
if err != nil {
return false
}
return org.Valid()
}
// stringFromInterface returns the string value from an interface.
func stringFromInterface(input interface{}) string {
var nr string
switch v := input.(type) {
case string:
nr = v
case []byte:
nr = string(v)
case int:
nr = strconv.Itoa(v)
case int32:
nr = strconv.Itoa(int(v))
case int64:
nr = strconv.Itoa(int(v))
case float32:
nr = strconv.Itoa(int(v))
case float64:
nr = strconv.Itoa(int(v))
default:
nr = ""
}
return nr
} | luhn.go | 0.681833 | 0.402099 | luhn.go | starcoder |
// Package string provides the implementation of the python's 'string' module.
package string
import (
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/go-python/gpython/py"
)
// init registers the "string" module with the gpython runtime, exposing
// the capwords method and the character-class constants.
func init() {
	py.RegisterModule(&py.ModuleImpl{
		Info: py.ModuleInfo{
			Name: "string",
			Doc: module_doc,
		},
		Methods: []*py.Method{
			py.MustNewMethod("capwords", capwords, 0, capwords_doc),
		},
		Globals: py.StringDict{
			"whitespace": whitespace,
			"ascii_lowercase": ascii_lowercase,
			"ascii_uppercase": ascii_uppercase,
			"ascii_letters": ascii_letters,
			"digits": digits,
			"hexdigits": hexdigits,
			"octdigits": octdigits,
			"punctuation": punctuation,
			"printable": printable,
		},
	})
}
const module_doc = `A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
`
// Character-class constants mirroring CPython's string module globals.
var (
	whitespace = py.String(" \t\n\r\x0b\x0c")
	ascii_lowercase = py.String("abcdefghijklmnopqrstuvwxyz")
	ascii_uppercase = py.String("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
	ascii_letters = ascii_lowercase + ascii_uppercase
	digits = py.String("0123456789")
	hexdigits = py.String("0123456789abcdefABCDEF")
	octdigits = py.String("01234567")
	punctuation = py.String("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~")
	printable = py.String("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c")
)
// capwords_doc is the Python docstring attached to the capwords method.
const capwords_doc = `capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.`
func capwords(self py.Object, args py.Tuple, kwargs py.StringDict) (py.Object, error) {
var (
pystr py.Object
pysep py.Object = py.None
)
err := py.ParseTupleAndKeywords(args, kwargs, "s|z", []string{"s", "sep"}, &pystr, &pysep)
if err != nil {
return nil, err
}
pystr = py.String(strings.ToLower(string(pystr.(py.String))))
pyvs, err := pystr.(py.String).Split(py.Tuple{pysep}, nil)
if err != nil {
return nil, err
}
var (
lst = pyvs.(*py.List).Items
vs = make([]string, len(lst))
sep = ""
title = func(s string) string {
if s == "" {
return s
}
return strings.ToUpper(s[:1]) + s[1:]
}
)
switch pysep {
case py.None:
for i := range vs {
v := string(lst[i].(py.String))
vs[i] = title(strings.Trim(v, string(whitespace)))
}
sep = " "
default:
sep = string(pysep.(py.String))
for i := range vs {
v := string(lst[i].(py.String))
vs[i] = title(v)
}
}
return py.String(strings.Join(vs, sep)), nil
} | stdlib/string/string.go | 0.66628 | 0.422922 | string.go | starcoder |
package challenges
import (
"errors"
"github.com/offchainlabs/arbitrum/packages/arb-util/inbox"
"github.com/offchainlabs/arbitrum/packages/arb-util/protocol"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/arbbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator/structures"
"log"
"github.com/offchainlabs/arbitrum/packages/arb-util/machine"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/valprotocol"
)
type AssertionDefender struct {
numSteps uint64
initState machine.Machine
inbox *structures.MessageStack
assertion *valprotocol.ExecutionAssertionStub
}
func NewAssertionDefender(numSteps uint64, initState machine.Machine, inbox *structures.MessageStack, assertion *valprotocol.ExecutionAssertionStub) AssertionDefender {
return AssertionDefender{
numSteps: numSteps,
initState: initState.Clone(),
inbox: inbox,
assertion: assertion,
}
}
func (ad AssertionDefender) NumSteps() uint64 {
return ad.numSteps
}
func (ad AssertionDefender) AssertionStub() *valprotocol.ExecutionAssertionStub {
return ad.assertion
}
func (ad AssertionDefender) MoveDefender(
bisectionEvent arbbridge.ExecutionBisectionEvent,
continueEvent arbbridge.ContinueChallengeEvent,
) AssertionDefender {
segmentCount := uint64(len(bisectionEvent.AssertionHashes))
stepsToSkip := computeStepsUpTo(continueEvent.SegmentIndex.Uint64(), segmentCount, ad.numSteps)
steps := valprotocol.CalculateBisectionStepCount(
continueEvent.SegmentIndex.Uint64(),
segmentCount,
ad.numSteps,
)
// Update mach, precondition, deadline
messages, err := ad.inbox.GetAssertionMessages(ad.assertion.BeforeInboxHash, ad.assertion.AfterInboxHash)
if err != nil {
log.Fatal("assertion defender must have valid messages", ad.assertion.BeforeInboxHash, ad.assertion.AfterInboxHash)
}
skippedAssertion, _ := ad.initState.ExecuteAssertion(
stepsToSkip,
messages,
0,
)
skippedAssertionStub := structures.NewExecutionAssertionStubFromAssertion(
skippedAssertion,
ad.assertion.BeforeInboxHash,
ad.assertion.FirstLogHash,
ad.assertion.FirstMessageHash,
ad.inbox,
)
assertion, _ := ad.initState.Clone().ExecuteAssertion(steps, messages[skippedAssertion.InboxMessagesConsumed:], 0)
assertionStub := structures.NewExecutionAssertionStubFromAssertion(
assertion,
skippedAssertionStub.AfterInboxHash,
skippedAssertionStub.LastLogHash,
skippedAssertionStub.LastMessageHash,
ad.inbox,
)
return NewAssertionDefender(steps, ad.initState, ad.inbox, assertionStub)
}
func (ad AssertionDefender) NBisect(slices uint64) []AssertionDefender {
nsteps := ad.NumSteps()
if nsteps < slices {
slices = nsteps
}
defenders := make([]AssertionDefender, 0, slices)
m := ad.initState.Clone()
beforeInboxHash := ad.assertion.BeforeInboxHash
firstLogHash := ad.assertion.FirstLogHash
firstMessageHash := ad.assertion.FirstMessageHash
for i := uint64(0); i < slices; i++ {
steps := valprotocol.CalculateBisectionStepCount(i, slices, nsteps)
initState := m.Clone()
inboxMessages, err := ad.inbox.GetAssertionMessages(beforeInboxHash, ad.assertion.AfterInboxHash)
if err != nil {
log.Fatal("inbox messages must exist for assertion that you're defending ", beforeInboxHash, ad.assertion.AfterInboxHash)
}
assertion, numSteps := m.ExecuteAssertion(
steps,
inboxMessages,
0,
)
stub := structures.NewExecutionAssertionStubFromAssertion(assertion, beforeInboxHash, firstLogHash, firstMessageHash, ad.inbox)
defenders = append(defenders, NewAssertionDefender(
numSteps,
initState,
ad.inbox,
stub,
))
beforeInboxHash = stub.AfterInboxHash
firstLogHash = stub.LastLogHash
firstMessageHash = stub.LastMessageHash
}
return defenders
}
func (ad AssertionDefender) SolidityOneStepProof() ([]byte, *inbox.InboxMessage, error) {
proofData, err := ad.initState.MarshalForProof()
if err != nil {
return nil, nil, err
}
messages, err := ad.inbox.GetAssertionMessages(ad.assertion.BeforeInboxHash, ad.assertion.AfterInboxHash)
if err != nil {
return nil, nil, err
}
if len(messages) > 1 {
return nil, nil, errors.New("can't prove assertion with more than one message")
}
if len(messages) == 1 {
return proofData, &messages[0], nil
}
return proofData, nil, nil
}
func assertionMatches(
stub *valprotocol.ExecutionAssertionStub,
assertion *protocol.ExecutionAssertion,
inboxMessages []inbox.InboxMessage,
) bool {
return assertion.InboxMessagesConsumed != uint64(len(inboxMessages)) &&
assertion.NumGas == stub.NumGas &&
assertion.BeforeMachineHash.Unmarshal() == stub.BeforeMachineHash &&
assertion.AfterMachineHash.Unmarshal() == stub.AfterMachineHash &&
valprotocol.BytesArrayAccumHash(stub.FirstMessageHash, assertion.OutMsgsData, assertion.OutMsgsCount) == stub.LastMessageHash &&
assertion.OutMsgsCount == stub.MessageCount &&
valprotocol.BytesArrayAccumHash(stub.FirstLogHash, assertion.LogsData, assertion.LogsCount) == stub.LastLogHash &&
assertion.LogsCount == stub.LogCount
}
func ChooseAssertionToChallenge(
m machine.Machine,
inbox *structures.MessageStack,
assertions []*valprotocol.ExecutionAssertionStub,
totalSteps uint64,
) (uint16, machine.Machine, error) {
assertionCount := uint64(len(assertions))
for i := range assertions {
steps := valprotocol.CalculateBisectionStepCount(uint64(i), assertionCount, totalSteps)
inboxMessages, err := inbox.GetAssertionMessages(assertions[i].BeforeInboxHash, assertions[i].AfterInboxHash)
if err != nil {
// AfterInboxHash must have been invalid
return uint16(i), m, nil
}
initState := m.Clone()
generatedAssertion, numSteps := m.ExecuteAssertion(
steps,
inboxMessages,
0,
)
if numSteps != steps || !assertionMatches(assertions[i], generatedAssertion, inboxMessages) {
return uint16(i), initState, nil
}
inboxMessages = inboxMessages[generatedAssertion.InboxMessagesConsumed:]
}
return 0, nil, errors.New("all segments in false ExecutionAssertion are valid")
} | packages/arb-validator/challenges/defender.go | 0.513668 | 0.475605 | defender.go | starcoder |
package list
const orderedFunctions = `
{{if .Type.Ordered}}
//-------------------------------------------------------------------------------------------------
// These methods are provided because {{.TName}} is ordered.
// Min returns the element with the minimum value. In the case of multiple items being equally minimal,
// the first such element is returned. Panics if the collection is empty.
func (list {{.TName}}List) Min() (result {{.PName}}) {
if len(list) == 0 {
panic("Cannot determine the Min of an empty list.")
}
result = list[0]
for _, v := range list {
if v < result {
result = v
}
}
return
}
// Max returns the element with the maximum value. In the case of multiple items being equally maximal,
// the first such element is returned. Panics if the collection is empty.
func (list {{.TName}}List) Max() (result {{.PName}}) {
if len(list) == 0 {
panic("Cannot determine the Max of an empty list.")
}
result = list[0]
for _, v := range list {
if v > result {
result = v
}
}
return
}
{{else}}
//-------------------------------------------------------------------------------------------------
// These methods are included when {{.TName}} is not ordered.
// Min returns the first element containing the minimum value, when compared to other elements
// using a specified comparator function defining ‘less’.
// Panics if the collection is empty.
func (list {{.TName}}List) Min(less func({{.PName}}, {{.PName}}) bool) (result {{.PName}}) {
l := len(list)
if l == 0 {
panic("Cannot determine the minimum of an empty list.")
}
m := 0
for i := 1; i < l; i++ {
if less(list[i], list[m]) {
m = i
}
}
result = list[m]
return
}
// Max returns the first element containing the maximum value, when compared to other elements
// using a specified comparator function defining ‘less’.
// Panics if the collection is empty.
func (list {{.TName}}List) Max(less func({{.PName}}, {{.PName}}) bool) (result {{.PName}}) {
l := len(list)
if l == 0 {
panic("Cannot determine the maximum of an empty list.")
}
m := 0
for i := 1; i < l; i++ {
if less(list[m], list[i]) {
m = i
}
}
result = list[m]
return
}
{{end}}
` | internal/list/ordered.go | 0.815269 | 0.615117 | ordered.go | starcoder |
package types
import (
"reflect"
"github.com/open2b/scriggo/internal/runtime"
)
// definedType represents a type defined in the Scriggo compiled code with a
// type definition, where the underlying type can be both a type compiled in
// the Scriggo code or in gc.
type definedType struct {
// The embedded reflect.Type can be both a reflect.Type implemented by the
// package "reflect" or a ScriggoType. In the other implementations of
// ScriggoType the embedded reflect.Type is always a gc compiled type.
reflect.Type
name string
// sign ensures that a definedType returned by DefinedOf is always
// different from every other instance of definedType.
// By doing so, two reflect.Types are equal if and only if the type they
// represents are identical (every defined type, in Go, is different from
// every other type).
sign *byte
}
// DefinedOf returns the defined type with the given name and underlying type.
// For example, if n is "Int" and k represents int, DefinedOf(n, k) represents
// the type Int declared with 'type Int int'.
func (types *Types) DefinedOf(name string, underlyingType reflect.Type) reflect.Type {
if name == "" {
panic(internalError("name cannot be empty"))
}
return definedType{Type: underlyingType, name: name, sign: new(byte)}
}
// Name returns the name the type was declared with.
func (x definedType) Name() string {
	return x.name
}

// AssignableTo reports whether a value of type x is assignable to type y,
// delegating to the package-level Scriggo-aware implementation.
func (x definedType) AssignableTo(y reflect.Type) bool {
	return AssignableTo(x, y)
}

// ConvertibleTo reports whether type x is convertible to type y.
func (x definedType) ConvertibleTo(y reflect.Type) bool {
	return ConvertibleTo(x, y)
}

// Implements reports whether type x implements the interface type y.
func (x definedType) Implements(y reflect.Type) bool {
	return Implements(x, y)
}

// MethodByName returns the method with the given name. Currently it always
// reports that no method exists.
func (x definedType) MethodByName(string) (reflect.Method, bool) {
	// TODO.
	return reflect.Method{}, false
}

// String returns the textual representation of the type.
func (x definedType) String() string {
	// For defined types the string representation is exactly the name of the
	// type; the internal structure of the type is hidden.
	// TODO: verify that this is correct.
	return x.name
}

// GoType implements the interface runtime.ScriggoType.
// It unwraps nested Scriggo types until a gc compiled type is reached.
func (x definedType) GoType() reflect.Type {
	if st, ok := x.Type.(runtime.ScriggoType); ok {
		return st.GoType()
	}
	assertNotScriggoType(x.Type)
	return x.Type
}

// Unwrap implements the interface runtime.ScriggoType.
func (x definedType) Unwrap(v reflect.Value) (reflect.Value, bool) { return unwrap(x, v) }

// Wrap implements the interface runtime.ScriggoType.
func (x definedType) Wrap(v reflect.Value) reflect.Value { return wrap(x, v) }
package monitoring
import "strings"
// FlatSnapshot represents a flatten snapshot of all metrics.
// Names in the tree will be joined with `.` .
type FlatSnapshot struct {
	Bools   map[string]bool
	Ints    map[string]int64
	Floats  map[string]float64
	Strings map[string]string
}

// flatSnapshotVisitor walks a metrics tree, collecting leaf values into a
// FlatSnapshot keyed by the dot-joined path of registry names.
type flatSnapshotVisitor struct {
	snapshot FlatSnapshot // accumulated result
	level    []string     // path of names from the root, innermost last
}

// structSnapshotVisitor walks a metrics tree, rebuilding it as nested
// map[string]interface{} values.
type structSnapshotVisitor struct {
	key   keyStack   // registry/metric names on the current descent path
	event eventStack // partially built event maps, one per nesting level
	depth int        // current registry nesting depth
}

// keyStack tracks the current key while descending the tree.
type keyStack struct {
	current string
	stack   []string
	stack0  [32]string // preallocated backing array to avoid heap allocations
}

// eventStack tracks the event map being built at each nesting level.
type eventStack struct {
	current map[string]interface{}
	stack   []map[string]interface{}
	stack0  [32]map[string]interface{} // preallocated backing array
}
// CollectFlatSnapshot walks the metrics tree rooted at r — or the Default
// registry when r is nil — and returns a flattened snapshot of it. When
// expvar is true, exported expvars are folded into the snapshot as well.
func CollectFlatSnapshot(r *Registry, mode Mode, expvar bool) FlatSnapshot {
	reg := r
	if reg == nil {
		reg = Default
	}
	visitor := newFlatSnapshotVisitor()
	reg.Visit(mode, visitor)
	if expvar {
		VisitExpvars(visitor)
	}
	return visitor.snapshot
}
// MakeFlatSnapshot returns an empty FlatSnapshot with every map initialized,
// ready to accept values.
func MakeFlatSnapshot() FlatSnapshot {
	var snap FlatSnapshot
	snap.Bools = map[string]bool{}
	snap.Ints = map[string]int64{}
	snap.Floats = map[string]float64{}
	snap.Strings = map[string]string{}
	return snap
}
// CollectStructSnapshot collects a structured metrics snaphot of
// a metrics tree starting with the given registry (Default when r is nil).
// Empty namespaces will be omitted. When expvar is true, exported expvars
// are merged into the result.
func CollectStructSnapshot(r *Registry, mode Mode, expvar bool) map[string]interface{} {
	if r == nil {
		r = Default
	}
	vs := newStructSnapshotVisitor()
	r.Visit(mode, vs)
	snapshot := vs.event.current
	if expvar {
		// Use a distinct name for the expvar visitor: the original shadowed
		// vs, which was correct but easy to misread.
		ev := newStructSnapshotVisitor()
		VisitExpvars(ev)
		if snapshot == nil && len(ev.event.current) > 0 {
			// The registry produced no values; writing into a nil map would
			// panic, so allocate one before merging the expvars.
			snapshot = map[string]interface{}{}
		}
		for k, v := range ev.event.current {
			snapshot[k] = v
		}
	}
	return snapshot
}
// newFlatSnapshotVisitor returns a visitor accumulating into a fresh snapshot.
func newFlatSnapshotVisitor() *flatSnapshotVisitor {
	return &flatSnapshotVisitor{snapshot: MakeFlatSnapshot()}
}

func (vs *flatSnapshotVisitor) OnRegistryStart() {}

// OnRegistryFinished pops the sub-registry's name pushed by OnKey. The root
// registry has no name on the stack, hence the length guard.
func (vs *flatSnapshotVisitor) OnRegistryFinished() {
	if len(vs.level) > 0 {
		vs.dropName()
	}
}

// OnKey records the name of the registry or metric about to be visited.
func (vs *flatSnapshotVisitor) OnKey(name string) {
	vs.level = append(vs.level, name)
}

// getName returns the dot-joined path for the current metric and pops its
// name from the stack (via the deferred dropName), since each OnKey for a
// leaf value is matched by exactly one On<Type> call.
func (vs *flatSnapshotVisitor) getName() string {
	defer vs.dropName()
	if len(vs.level) == 1 {
		// Fast path: no join needed for top-level names.
		return vs.level[0]
	}
	return strings.Join(vs.level, ".")
}

// dropName removes the innermost name from the current path.
func (vs *flatSnapshotVisitor) dropName() {
	vs.level = vs.level[:len(vs.level)-1]
}

func (vs *flatSnapshotVisitor) OnString(s string) {
	vs.snapshot.Strings[vs.getName()] = s
}

func (vs *flatSnapshotVisitor) OnBool(b bool) {
	vs.snapshot.Bools[vs.getName()] = b
}

func (vs *flatSnapshotVisitor) OnInt(i int64) {
	vs.snapshot.Ints[vs.getName()] = i
}

func (vs *flatSnapshotVisitor) OnFloat(f float64) {
	vs.snapshot.Floats[vs.getName()] = f
}
// newStructSnapshotVisitor returns a visitor whose stacks use the
// preallocated backing arrays, avoiding allocations for trees up to 32 deep.
func newStructSnapshotVisitor() *structSnapshotVisitor {
	vs := &structSnapshotVisitor{}
	vs.key.stack = vs.key.stack0[:0]
	vs.event.stack = vs.event.stack0[:0]
	return vs
}

// OnRegistryStart begins a nested event map for every registry except the
// root (the root's values accumulate directly in event.current).
func (s *structSnapshotVisitor) OnRegistryStart() {
	if s.depth > 0 {
		s.event.push()
	}
	s.depth++
}

// OnRegistryFinished closes the current registry: a non-empty sub-registry's
// event map is stored under its key in the parent map; an empty one is
// dropped entirely (its key is popped without writing anything).
func (s *structSnapshotVisitor) OnRegistryFinished() {
	s.depth--
	if s.depth == 0 {
		// Root registry: event.current now holds the final result.
		return
	}
	event := s.event.pop()
	if event == nil {
		// Empty namespace: omit it from the parent.
		s.key.pop()
		return
	}
	s.setValue(event)
}

// OnKey records the name under which the next value (or sub-registry) is stored.
func (s *structSnapshotVisitor) OnKey(key string) {
	s.key.push(key)
}

func (s *structSnapshotVisitor) OnString(str string) { s.setValue(str) }
func (s *structSnapshotVisitor) OnBool(b bool)       { s.setValue(b) }
func (s *structSnapshotVisitor) OnInt(i int64)       { s.setValue(i) }
func (s *structSnapshotVisitor) OnFloat(f float64)   { s.setValue(f) }

// setValue stores v in the current event map under the current key, lazily
// allocating the map, and consumes the key.
func (s *structSnapshotVisitor) setValue(v interface{}) {
	if s.event.current == nil {
		s.event.current = map[string]interface{}{}
	}
	s.event.current[s.key.current] = v
	s.key.pop()
}
func (s *keyStack) push(key string) {
s.stack = append(s.stack, s.current)
s.current = key
}
func (s *keyStack) pop() {
last := len(s.stack) - 1
s.current = s.stack[last]
s.stack = s.stack[:last]
}
func (s *eventStack) push() {
s.stack = append(s.stack, s.current)
s.current = nil
}
func (s *eventStack) pop() map[string]interface{} {
event := s.current
last := len(s.stack) - 1
s.current = s.stack[last]
s.stack = s.stack[:last]
return event
} | libbeat/monitoring/snapshot.go | 0.753829 | 0.407333 | snapshot.go | starcoder |
package rf
import (
"fmt"
"log"
"math"
)
// Frequency type (Hz) to assist with unit coherence
type Frequency float64
// Wavelength type (m) to assist with unit coherence
type Wavelength float64
// Distance type (m) to assist with unit coherence
type Distance float64
// Attenuation type (dB) to assist with unit coherence
type Attenuation float64
const (
//C is the speed of light in air in meters per second
C = 2.998e+8
// FresnelObstructionOK is the largest acceptable proportion of fresnel zone impingement
FresnelObstructionOK = 0.4
// FresnelObstructionIdeal is the largest ideal proportion of fresnel zone impingement
FresnelObstructionIdeal = 0.2
// R is the (average) radius of the earth
R = 6.371e6
// Pi for use in formulae
π = math.Pi
)
// Frequency helper types
const (
Hz Frequency = 1.0
KHz = Hz * 1000
MHz = KHz * 1000
GHz = MHz * 1000
)
// Distance helper types
const (
M Distance = 1.0
Km = M * 1000
)
// Attenuation helpers
const (
DB Attenuation = 0
)
// Free Space Path Loss (FSPL) calculations
// https://en.wikipedia.org/wiki/Free-space_path_loss#Free-space_path_loss_formula
// CalculateFreeSpacePathLoss calculates the Free Space Path Loss in Decibels for a given frequency and distance
func CalculateFreeSpacePathLoss(freq Frequency, distance Distance) Attenuation {
fading := 20 * math.Log10((4 * math.Pi * float64(distance) * float64(freq) / C))
return Attenuation(fading)
}
// Freznel zone calculations
// Note that distances must be much greater than wavelengths
// https://en.wikipedia.org/wiki/Fresnel_zone#Fresnel_zone_clearance

// FresnelMinDistanceWavelengthRadio is the minimum ratio of distance:wavelength for viable calculations
// This is used as a programattic sanity check for distance >> wavelength
// NOTE(review): the name looks like a typo for "...Ratio"; renaming would
// break the exported API, so it is kept as-is.
const FresnelMinDistanceWavelengthRadio = 0.1

// FresnelPoint calculates the fresnel zone radius d for a given wavelength
// and order at a point P between endpoints.
// d1 and d2 are the distances (m) from each endpoint to P. It returns an
// error when either distance is not much greater than the wavelength, where
// the formula is not valid.
func FresnelPoint(d1, d2 Distance, freq Frequency, order int64) (float64, error) {
	wavelength := FrequencyToWavelength(freq)
	if ((float64(d1) * FresnelMinDistanceWavelengthRadio) < float64(wavelength)) || ((float64(d2) * FresnelMinDistanceWavelengthRadio) < float64(wavelength)) {
		return 0, fmt.Errorf("Fresnel calculation valid only for distances >> wavelength (d1: %.2fm d2: %.2fm wavelength %.2fm)", d1, d2, wavelength)
	}
	// r_n = sqrt(n · λ · d1 · d2 / (d1 + d2))
	return math.Sqrt((float64(order) * float64(wavelength) * float64(d1) * float64(d2)) / (float64(d1) + float64(d2))), nil
}

// FresnelFirstZoneMax calculates the maximum fresnel zone radius for a given frequency,
// i.e. the first-zone radius at the midpoint of the path: r = 0.5·sqrt(c·d/f).
func FresnelFirstZoneMax(freq Frequency, dist Distance) (float64, error) {
	wavelength := FrequencyToWavelength(freq)
	if (float64(dist) * FresnelMinDistanceWavelengthRadio) < float64(wavelength) {
		return 0, fmt.Errorf("Fresnel calculation valid only for distance >> wavelength (distance: %.2fm wavelength %.2fm)", dist, wavelength)
	}
	return 0.5 * math.Sqrt((C * float64(dist) / float64(freq))), nil
}
// CalculateFresnelKirckoffDiffractionParam Calculates the Fresnel-Kirchoff Diffraction parameter
// d1 and d2 are the distances between the "knife edge" impingement and the transmitter/receiver
// h is the impingement, where -ve is below Line of Sight (LoS) and +ve is above LoS
// https://en.wikipedia.org/wiki/Kirchhoff%27s_diffraction_formula
// https://s.campbellsci.com/documents/au/technical-papers/line-of-sight-obstruction.pdf
func CalculateFresnelKirckoffDiffractionParam(freq Frequency, d1, d2, h Distance) (v float64, err error) {
	wavelength := FrequencyToWavelength(freq)
	// v = h · sqrt(2·(d1+d2) / (λ·d1·d2))
	v = float64(h) * math.Sqrt((2*float64(d1+d2))/(float64(wavelength)*float64(d1*d2)))
	// err is currently always nil; the signature leaves room for validation.
	return v, err
}

// CalculateFresnelKirchoffLossApprox Calculates approximate loss due to diffraction using
// the Fresnel-Kirchoff Diffraction parameter. This approximate is valid for values >= -0.7
// https://s.campbellsci.com/documents/au/technical-papers/line-of-sight-obstruction.pdf
func CalculateFresnelKirchoffLossApprox(v float64) (Attenuation, error) {
	if !(v >= -0.7) {
		// Written as !(v >= -0.7) rather than v < -0.7 so that NaN also
		// takes the error path.
		return 0.0, fmt.Errorf("Fresnel-Kirchoff loss approximation only valid for v >= -0.7 (v: %.6f)", v)
	}
	// L(v) = 6.9 + 20·log10( sqrt((v-0.1)² + 1) + v - 0.1 )
	loss := 6.9 + 20*math.Log10(math.Sqrt(math.Pow(v-0.1, 2)+1)+v-0.1)
	return Attenuation(loss), nil
}
// Validity limits of the Weissberger modified exponential decay model.
const (
	WeissbergerMinFreq = 230 * MHz
	WeissbergerMaxFreq = 95 * GHz
	WeissbergerMinDist = 0 * M
	WeissbergerMaxDist = 400 * M
)

// CalculateFoliageLoss calculates path loss in dB due to foliage based on the Weissberger model.
// Valid for frequencies of 230 MHz – 95 GHz and foliage depths of 0 – 400 m;
// out-of-range inputs return an error.
// https://en.wikipedia.org/wiki/Weissberger%27s_model
func CalculateFoliageLoss(freq Frequency, depth Distance) (Attenuation, error) {
	if freq < WeissbergerMinFreq || freq > WeissbergerMaxFreq {
		return 0, fmt.Errorf("Frequency %.2f is not between 230MHz and 95GHz as required by the Weissberger model", freq)
	}
	if depth < WeissbergerMinDist || depth > WeissbergerMaxDist {
		return 0, fmt.Errorf("Depth %.2f is not between 0 and 400m as required by the Weissberger model", depth)
	}
	// No foliage means no foliage loss. (Previously depth == 0 passed the
	// range check above but then fell into an unreachable-else error branch.)
	if depth == 0 {
		return 0, nil
	}
	fGHz := float64(freq / GHz)
	var fading float64
	if depth <= 14.0 {
		// Shallow foliage: loss grows linearly with depth.
		fading = 0.45 * math.Pow(fGHz, 0.284) * float64(depth)
	} else {
		// Deep foliage (14–400 m): sub-linear growth with depth^0.588.
		fading = 1.33 * math.Pow(fGHz, 0.284) * math.Pow(float64(depth), 0.588)
	}
	return Attenuation(fading), nil
}
// CalculateRaleighFading calculates Raleigh fading
// https://en.wikipedia.org/wiki/Rayleigh_fading
// NOTE: not implemented — calling this panics.
func CalculateRaleighFading(freq Frequency) (Attenuation, error) {
	log.Panicf("Raleigh fading not yet implemented")
	return 0.0, nil // unreachable
}

// CalculateRicanFading calculates Rican fading
// https://en.wikipedia.org/wiki/Rician_fading
// NOTE: not implemented — calling this panics.
func CalculateRicanFading(freq Frequency) (Attenuation, error) {
	log.Panicf("Rican fading not yet implemented")
	return 0.0, nil // unreachable
}

// CalculateWeibullFading calculates Weibull fading
// https://en.wikipedia.org/wiki/Weibull_fading
// NOTE: not implemented — calling this panics.
func CalculateWeibullFading(freq Frequency) (Attenuation, error) {
	log.Panicf("Weibull fading not yet implemented")
	return 0.0, nil // unreachable
}
// BullingtonFigure12Method implements the Bullington Figure 12 (intersecting horizons) method to approximate
// height and distance for use in the Fresnell-Kirchoff path loss approximation.
// Note that this implementation is not accurate for most negative (below LOS) impingements
// See: https://hams.soe.ucsc.edu/sites/default/files/Bullington%20VTS%201977.pdf
// x, y are the terrain profile samples (distance along the path, height
// relative to the line of sight); d is the total path length.
func BullingtonFigure12Method(x, y []float64, d Distance) (d1, d2 Distance, height float64) {
	θ1, θ2 := findBullingtonFigure12Angles(x, y, d)
	d1, height = solveBullingtonFigureTwelveDist(θ1, θ2, d)
	d2 = d - d1
	return d1, d2, height
}

// findBullingtonFigure12Angles finds the maximum elevation angle of the
// terrain as seen from each end of the path (the two "horizons").
// The first and last samples (the endpoints themselves) are excluded.
func findBullingtonFigure12Angles(x, y []float64, d Distance) (θ1, θ2 float64) {
	// Track the maximum angle from each endpoint, starting at -π/2 (straight down).
	maxθ1, maxθ2 := -math.Pi/2, -math.Pi/2
	for i := 1; i < len(x)-1; i++ {
		θ1 := math.Atan2(y[i], x[i])
		θ2 := math.Atan2(y[i], float64(d)-x[i])
		if θ1 > maxθ1 {
			maxθ1 = θ1
		}
		if θ2 > maxθ2 {
			maxθ2 = θ2
		}
	}
	return maxθ1, maxθ2
}

// solveBullingtonFigureTwelveDist intersects the two horizon rays to locate
// the equivalent single knife edge, using the law of sines on the triangle
// they form with the direct path: returns its distance from the first
// endpoint and its height above the line of sight.
func solveBullingtonFigureTwelveDist(θb, θc float64, d Distance) (dist Distance, height float64) {
	θa := math.Pi - θb - θc
	r := float64(d) / math.Sin(θa)
	C := r * math.Sin(θc)
	height = math.Sin(θb) * C
	dist = Distance(math.Cos(θb) * C)
	return dist, height
}
// FresnelImpingementMax computes the maximum first fresnel zone impingement due to terrain between two points
// x, y are terrain samples (distance along the path, height relative to the
// line of sight); d is the total path length and f the carrier frequency.
// It returns the largest blocked fraction of the first Fresnel zone
// (0 = clear, 1 = fully blocked) and the distance of the worst point.
// NOTE(review): the thresholds use fresnelZone/2, which treats FresnelPoint's
// result as a zone diameter even though it computes a radius — confirm which
// is intended.
func FresnelImpingementMax(x, y []float64, d Distance, f Frequency) (maxImpingement float64, point Distance) {
	maxImpingement, point = 0.0, d/2
	for i := 1; i < len(x)-1; i++ {
		d1 := Distance(x[i])
		d2 := Distance(d) - d1
		// Calculate size of fresnel zone
		fresnelZone, err := FresnelPoint(d1, d2, f, 1)
		if err != nil {
			// Skip invalid points (where wavelength is not << d1 or d2)
			continue
		}
		// Calculate impingement
		impingement := 0.0
		if y[i] > fresnelZone/2 {
			// Terrain above the zone: fully blocked.
			impingement = 1.0
		} else if y[i] < -fresnelZone/2 {
			// Terrain below the zone: clear.
			impingement = 0.0
		} else {
			// Terrain cuts through the zone: linear blocked fraction.
			impingement = (y[i] + fresnelZone/2) / fresnelZone
		}
		// Record max
		if impingement > maxImpingement {
			maxImpingement = impingement
			point = d1
		}
	}
	return maxImpingement, point
}
package render
import (
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/mathgl/mgl32"
"github.com/samuelyuan/openbiohazard2/fileio"
"github.com/samuelyuan/openbiohazard2/geometry"
"github.com/samuelyuan/openbiohazard2/world"
)
const (
	// RENDER_TYPE_DEBUG selects the debug-overlay path in the shader.
	RENDER_TYPE_DEBUG = -1
)

// DebugEntity is a colored, semi-transparent triangle mesh used to visualize
// otherwise invisible game data (collision shapes, triggers, camera switches).
type DebugEntity struct {
	Color              [4]float32 // RGBA; alpha < 1 so the world stays visible
	VertexBuffer       []float32  // flat x/y/z triples forming triangles
	VertexArrayObject  uint32
	VertexBufferObject uint32
}

// RenderCameraSwitches draws the camera-switch regions as debug geometry
// using the debug render path of the given shader program.
func RenderCameraSwitches(programShader uint32, cameraSwitchDebugEntity *DebugEntity) {
	renderTypeUniform := gl.GetUniformLocation(programShader, gl.Str("renderType\x00"))
	gl.Uniform1i(renderTypeUniform, RENDER_TYPE_DEBUG)
	RenderDebugEntities(programShader, []*DebugEntity{cameraSwitchDebugEntity})
}
// RenderDebugEntities draws each debug entity as flat-colored triangles.
// It uploads each entity's vertex buffer every call (STATIC_DRAW hint
// notwithstanding) and draws it with the entity's debug color.
func RenderDebugEntities(programShader uint32, debugEntities []*DebugEntity) {
	renderTypeUniform := gl.GetUniformLocation(programShader, gl.Str("renderType\x00"))
	gl.Uniform1i(renderTypeUniform, RENDER_TYPE_DEBUG)
	floatSize := 4 // size of a float32 in bytes
	for _, debugEntity := range debugEntities {
		entityVertexBuffer := debugEntity.VertexBuffer
		if len(entityVertexBuffer) == 0 {
			// Nothing to draw for this entity.
			continue
		}
		// 3 floats for vertex
		stride := int32(3 * floatSize)
		vao := debugEntity.VertexArrayObject
		gl.BindVertexArray(vao)
		vbo := debugEntity.VertexBufferObject
		gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
		gl.BufferData(gl.ARRAY_BUFFER, len(entityVertexBuffer)*floatSize, gl.Ptr(entityVertexBuffer), gl.STATIC_DRAW)

		// Position attribute
		gl.VertexAttribPointer(0, 3, gl.FLOAT, false, stride, gl.PtrOffset(0))
		gl.EnableVertexAttribArray(0)

		diffuseUniform := gl.GetUniformLocation(programShader, gl.Str("diffuse\x00"))
		gl.Uniform1i(diffuseUniform, 0)

		debugColorLoc := gl.GetUniformLocation(programShader, gl.Str("debugColor\x00"))
		color := debugEntity.Color
		gl.Uniform4f(debugColorLoc, color[0], color[1], color[2], color[3])

		// Draw triangles
		gl.DrawArrays(gl.TRIANGLES, 0, int32(len(entityVertexBuffer)/3))

		// Cleanup
		gl.DisableVertexAttribArray(0)
	}
}
// BuildAllDebugEntities constructs every debug overlay for the given world:
// door triggers, collision shapes, sloped surfaces, item triggers and
// generic AOT triggers, in that order.
func BuildAllDebugEntities(gameWorld *world.GameWorld) []*DebugEntity {
	room := gameWorld.GameRoom
	aot := gameWorld.AotManager
	return []*DebugEntity{
		NewDoorTriggerDebugEntity(aot.Doors),
		NewCollisionDebugEntity(room.CollisionEntities),
		NewSlopedSurfacesDebugEntity(room.CollisionEntities),
		NewItemTriggerDebugEntity(aot.Items),
		NewAotTriggerDebugEntity(aot.AotTriggers),
	}
}
// NewCollisionDebugEntity builds a red overlay mesh for the room's collision
// entities. Each entity's Shape field selects its footprint geometry; the
// mesh is flat (y = 0).
func NewCollisionDebugEntity(collisionEntities []fileio.CollisionEntity) *DebugEntity {
	vertexBuffer := make([]float32, 0)
	for _, entity := range collisionEntities {
		switch entity.Shape {
		case 0:
			// Rectangle spanning Width (x) by Density (z).
			vertex1 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z)}
			vertex2 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z + entity.Density)}
			vertex3 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z + entity.Density)}
			vertex4 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z)}
			rect := buildDebugRectangle(vertex1, vertex2, vertex3, vertex4)
			vertexBuffer = append(vertexBuffer, rect...)
		case 1:
			// Triangle \\|
			vertex1 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z + entity.Density)}
			vertex2 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z + entity.Density)}
			vertex3 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z)}
			tri := buildDebugTriangle(vertex1, vertex2, vertex3)
			vertexBuffer = append(vertexBuffer, tri...)
		case 2:
			// Triangle |/
			vertex1 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z)}
			vertex2 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z + entity.Density)}
			vertex3 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z + entity.Density)}
			tri := buildDebugTriangle(vertex1, vertex2, vertex3)
			vertexBuffer = append(vertexBuffer, tri...)
		case 3:
			// Triangle /|
			vertex1 := mgl32.Vec3{float32(entity.X), 0, float32(entity.Z)}
			vertex2 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z + entity.Density)}
			vertex3 := mgl32.Vec3{float32(entity.X + entity.Width), 0, float32(entity.Z)}
			tri := buildDebugTriangle(vertex1, vertex2, vertex3)
			vertexBuffer = append(vertexBuffer, tri...)
		case 6:
			// Circle inscribed in the Width x Width square.
			radius := float32(entity.Width) / 2.0
			center := mgl32.Vec3{float32(entity.X) + radius, 0, float32(entity.Z) + radius}
			circle := geometry.NewCircle(center, radius)
			vertexBuffer = append(vertexBuffer, circle.VertexBuffer...)
		case 7:
			// Ellipse, rectangle with rounded corners on the x-axis
			majorAxis := float32(entity.Width) / 2.0
			minorAxis := float32(entity.Density) / 2.0
			center := mgl32.Vec3{float32(entity.X) + majorAxis, 0, float32(entity.Z) + minorAxis}
			ellipse := geometry.NewEllipse(center, majorAxis, minorAxis, true)
			vertexBuffer = append(vertexBuffer, ellipse.VertexBuffer...)
		case 8:
			// Ellipse, rectangle with rounded corners on the z-axis
			majorAxis := float32(entity.Density) / 2.0
			minorAxis := float32(entity.Width) / 2.0
			center := mgl32.Vec3{float32(entity.X) + minorAxis, 0, float32(entity.Z) + majorAxis}
			ellipse := geometry.NewEllipse(center, majorAxis, minorAxis, false)
			vertexBuffer = append(vertexBuffer, ellipse.VertexBuffer...)
		}
	}
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	return &DebugEntity{
		Color:              [4]float32{1.0, 0.0, 0.0, 0.3}, // translucent red
		VertexBuffer:       vertexBuffer,
		VertexArrayObject:  vao,
		VertexBufferObject: vbo,
	}
}
// NewCameraSwitchDebugEntity builds a green overlay mesh covering the camera
// switch regions reachable from the current camera, looked up via the
// cameraSwitchTransitions map (camera id -> indices into cameraSwitches).
func NewCameraSwitchDebugEntity(curCameraId int,
	cameraSwitches []fileio.RVDHeader,
	cameraSwitchTransitions map[int][]int) *DebugEntity {
	vertexBuffer := make([]float32, 0)
	for _, regionIndex := range cameraSwitchTransitions[curCameraId] {
		cameraSwitch := cameraSwitches[regionIndex]
		// Each switch region is an arbitrary quad given by its four corners.
		corners := [4][]float32{
			[]float32{float32(cameraSwitch.X1), float32(cameraSwitch.Z1)},
			[]float32{float32(cameraSwitch.X2), float32(cameraSwitch.Z2)},
			[]float32{float32(cameraSwitch.X3), float32(cameraSwitch.Z3)},
			[]float32{float32(cameraSwitch.X4), float32(cameraSwitch.Z4)},
		}
		rect := geometry.NewQuadFourPoints(corners)
		vertexBuffer = append(vertexBuffer, rect.VertexBuffer...)
	}
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	return &DebugEntity{
		Color:              [4]float32{0.0, 1.0, 0.0, 0.3}, // translucent green
		VertexBuffer:       vertexBuffer,
		VertexArrayObject:  vao,
		VertexBufferObject: vbo,
	}
}
// NewDoorTriggerDebugEntity builds a translucent blue overlay from the
// bounds of every door trigger.
func NewDoorTriggerDebugEntity(doors []world.AotDoor) *DebugEntity {
	vertexBuffer := make([]float32, 0)
	for _, aot := range doors {
		vertexBuffer = append(vertexBuffer, aot.Bounds.VertexBuffer...)
	}
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	return &DebugEntity{
		Color:              [4]float32{0.0, 0.0, 1.0, 0.3}, // translucent blue
		VertexBuffer:       vertexBuffer,
		VertexArrayObject:  vao,
		VertexBufferObject: vbo,
	}
}

// NewItemTriggerDebugEntity builds a translucent cyan overlay from the
// bounds of every item pickup trigger.
func NewItemTriggerDebugEntity(items []world.AotItem) *DebugEntity {
	vertexBuffer := make([]float32, 0)
	for _, aot := range items {
		vertexBuffer = append(vertexBuffer, aot.Bounds.VertexBuffer...)
	}
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	return &DebugEntity{
		Color:              [4]float32{0.0, 1.0, 1.0, 0.3}, // translucent cyan
		VertexBuffer:       vertexBuffer,
		VertexArrayObject:  vao,
		VertexBufferObject: vbo,
	}
}

// NewAotTriggerDebugEntity builds an overlay from the bounds of generic AOT
// triggers. Note it uses the same cyan color as item triggers, so the two
// overlays are indistinguishable on screen.
func NewAotTriggerDebugEntity(aotTriggers []world.AotObject) *DebugEntity {
	vertexBuffer := make([]float32, 0)
	for _, aot := range aotTriggers {
		vertexBuffer = append(vertexBuffer, aot.Bounds.VertexBuffer...)
	}
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	return &DebugEntity{
		Color:              [4]float32{0.0, 1.0, 1.0, 0.3}, // translucent cyan
		VertexBuffer:       vertexBuffer,
		VertexArrayObject:  vao,
		VertexBufferObject: vbo,
	}
}
// NewSlopedSurfacesDebugEntity builds a translucent magenta overlay for every
// sloped collision surface (ramps and stairs). Both shape codes render the
// same sloped rectangle, so the previously duplicated cases are merged.
func NewSlopedSurfacesDebugEntity(collisionEntities []fileio.CollisionEntity) *DebugEntity {
	vertexBuffer := make([]float32, 0)
	for _, entity := range collisionEntities {
		switch entity.Shape {
		case 11, 12: // 11 = ramp, 12 = stairs
			rect := geometry.NewSlopedRectangle(entity)
			vertexBuffer = append(vertexBuffer, rect.VertexBuffer...)
		}
	}
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	var vbo uint32
	gl.GenBuffers(1, &vbo)
	return &DebugEntity{
		Color:              [4]float32{1.0, 0.0, 1.0, 0.3}, // translucent magenta
		VertexBuffer:       vertexBuffer,
		VertexArrayObject:  vao,
		VertexBufferObject: vbo,
	}
}
func buildDebugRectangle(corner1 mgl32.Vec3, corner2 mgl32.Vec3, corner3 mgl32.Vec3, corner4 mgl32.Vec3) []float32 {
quad := geometry.NewQuad([4]mgl32.Vec3{corner1, corner2, corner3, corner4})
return quad.VertexBuffer
}
func buildDebugTriangle(corner1 mgl32.Vec3, corner2 mgl32.Vec3, corner3 mgl32.Vec3) []float32 {
triBuffer := make([]float32, 0)
vertex1 := []float32{corner1.X(), corner1.Y(), corner1.Z()}
vertex2 := []float32{corner2.X(), corner2.Y(), corner2.Z()}
vertex3 := []float32{corner3.X(), corner3.Y(), corner3.Z()}
triBuffer = append(triBuffer, vertex1...)
triBuffer = append(triBuffer, vertex2...)
triBuffer = append(triBuffer, vertex3...)
return triBuffer
} | render/debugentity.go | 0.635109 | 0.411525 | debugentity.go | starcoder |
package graphics2d
import (
"fmt"
"image"
"math"
)
// Shape is a fillable collection of paths. For a path to be fillable,
// it must be closed, so paths added to the shape are forced closed on rendering.
type Shape struct {
	paths  []*Path
	bounds image.Rectangle // cached union of path bounds; zero value = not computed
	mask   *image.Alpha    // cached render of the shape; nil = not rendered yet
	parent *Shape          // shape this one was derived from, if any
}
// Bounds calculates the union of the bounds of the paths the shape contains.
// The result is cached; adding paths invalidates the cache.
func (s *Shape) Bounds() image.Rectangle {
	// len() of a nil slice is 0, so the previous explicit nil check was redundant.
	if s.bounds.Empty() && len(s.paths) > 0 {
		rect := s.paths[0].Bounds()
		for _, p := range s.paths[1:] {
			rect = rect.Union(p.Bounds())
		}
		s.bounds = rect
	}
	return s.bounds
}
// Mask returns an Alpha image, the size of the shape bounds, containing the
// result of rendering the shape, located at {0, 0}. The render happens at
// most once; subsequent calls return the cached image.
func (s *Shape) Mask() *image.Alpha {
	if s.mask == nil {
		s.mask = RenderShapeAlpha(s)
	}
	return s.mask
}
// Contains returns true if every given point lies within the shape, false
// otherwise. Points are rounded to the nearest pixel and tested against the
// rendered alpha mask (threshold 128).
func (s *Shape) Contains(pts ...[]float64) bool {
	bounds := s.Bounds()
	alpha := s.Mask()
	for _, pt := range pts {
		x := int(math.Floor(pt[0] + 0.5))
		y := int(math.Floor(pt[1] + 0.5))
		// Reject anything outside the bounding box first.
		if x < bounds.Min.X || x >= bounds.Max.X || y < bounds.Min.Y || y >= bounds.Max.Y {
			return false
		}
		// Then consult the rendered mask.
		if alpha.AlphaAt(x, y).A < 128 {
			return false
		}
	}
	return true
}
// NewShape constructs a shape from the supplied paths.
func NewShape(paths ...*Path) *Shape {
	shape := &Shape{}
	shape.AddPaths(paths...)
	return shape
}
// AddPaths adds copies of the given paths to the shape. Paths are forced
// closed when the shape is rendered (see the Shape type comment), not here.
// Any cached bounds/mask are invalidated.
func (s *Shape) AddPaths(paths ...*Path) {
	for _, p := range paths {
		// Copy so later mutation of the caller's path doesn't affect the shape.
		// append handles a nil slice, so no separate first-path branch is needed.
		s.paths = append(s.paths, p.Copy())
	}
	// Invalidate the cached bounds and mask.
	s.bounds = image.Rectangle{}
	s.mask = nil
}
// AddShapes adds the paths from the supplied shapes to this shape.
func (s *Shape) AddShapes(shapes ...*Shape) {
	for _, other := range shapes {
		s.AddPaths(other.Paths()...)
	}
}
// Paths returns a shallow copy of the paths contained by this shape.
// The returned slice has its own backing array, so callers may append to it
// without corrupting the shape's own list; the *Path elements are shared.
// (The previous s.paths[:] shared the backing array and was not a copy.)
func (s *Shape) Paths() []*Path {
	paths := make([]*Path, len(s.paths))
	copy(paths, s.paths)
	return paths
}
// Copy creates a new instance of this shape with a shallow copy of its paths.
// Cached bounds/mask and the parent pointer are carried over.
func (s *Shape) Copy() *Shape {
	paths := append([]*Path(nil), s.paths...)
	return &Shape{paths, s.bounds, s.mask, s.parent}
}
// Transform applies an affine transform to all the paths in the shape and
// returns the result as a new shape; the receiver is left untouched and
// becomes the new shape's parent.
func (s *Shape) Transform(xfm *Aff3) *Shape {
	transformed := make([]*Path, len(s.paths))
	for i := range s.paths {
		transformed[i] = s.paths[i].Transform(xfm)
	}
	return &Shape{transformed, image.Rectangle{}, nil, s}
}
// Process applies a shape processor to the shape and returns the resulting
// collection of new shapes, each re-parented to this shape.
func (s *Shape) Process(proc ShapeProcessor) []*Shape {
	result := proc.Process(s)
	for _, ns := range result {
		ns.parent = s
	}
	return result
}
// ProcessPaths applies a path processor to every path of the shape and
// returns a new shape containing the processed paths (parented to this one).
func (s *Shape) ProcessPaths(proc PathProcessor) *Shape {
	// Each input path yields at least one output path, so pre-size for that.
	np := make([]*Path, 0, len(s.paths))
	for _, p := range s.paths {
		// Variadic append replaces the previous element-by-element inner loop.
		np = append(np, p.Process(proc)...)
	}
	return &Shape{np, image.Rectangle{}, nil, s}
}
// String converts a shape into a string.
func (s *Shape) String() string {
str := fmt.Sprintf("SH %d ", len(s.paths))
for _, path := range s.paths {
str += path.String() + " "
}
return str
} | shape.go | 0.844505 | 0.547283 | shape.go | starcoder |
package geo
import (
"github.com/golang/geo/s2"
"github.com/paulmach/go.geojson"
)
const (
	//EarthRadius the radius of earth in kilometers
	EarthRadius = 6371.01
	// maxCells caps the number of cells a region covering may use.
	maxCells = 100
)

// Point struct contains the lat/lng of a point.
// Coordinates are in degrees.
type Point struct {
	Lat float64
	Lng float64
}
// DecodeGeoJSON decodes a GeoJSON feature collection and returns its features.
func DecodeGeoJSON(json []byte) ([]*geojson.Feature, error) {
	collection, err := geojson.UnmarshalFeatureCollection(json)
	if err != nil {
		return nil, err
	}
	return collection.Features, nil
}
// PointsToPolygon converts points to s2 polygon.
// Each point is expected as [lng, lat] (GeoJSON coordinate order).
// NOTE(review): the loop is built from the points exactly as given — no
// normalization or orientation fix is applied; confirm callers supply rings
// wound as s2 expects.
func PointsToPolygon(points [][]float64) *s2.Polygon {
	var pts []s2.Point
	for _, pt := range points {
		pts = append(pts, s2.PointFromLatLng(s2.LatLngFromDegrees(pt[1], pt[0])))
	}
	loop := s2.LoopFromPoints(pts)
	return s2.PolygonFromLoops([]*s2.Loop{loop})
}
// CoverPolygon converts an s2 polygon to a cell union covering it and returns
// the covering, the cells' tokens, and each cell's corner coordinates
// (lat/lng degrees) for display.
func CoverPolygon(p *s2.Polygon, maxLevel, minLevel int) (s2.CellUnion, []string, [][][]float64) {
	var tokens []string
	var s2cells [][][]float64
	rc := &s2.RegionCoverer{MaxLevel: maxLevel, MinLevel: minLevel, MaxCells: maxCells}
	covering := rc.Covering(s2.Region(p))
	for _, c := range covering {
		// c is already a CellID; the previous code round-tripped it through
		// its token (CellIDFromToken(c.ToToken())) for no effect.
		s2cells = append(s2cells, edgesOfCell(s2.CellFromCellID(c)))
		tokens = append(tokens, c.ToToken())
	}
	return covering, tokens, s2cells
}
// CoverPoint converts a point to the S2 cell containing it at the given
// level, returning the cell, its token, and its corner coordinates.
func CoverPoint(p Point, maxLevel int) (s2.Cell, string, [][][]float64) {
	cid := s2.CellFromLatLng(s2.LatLngFromDegrees(p.Lat, p.Lng)).ID().Parent(maxLevel)
	cell := s2.CellFromCellID(cid)
	s2cells := [][][]float64{edgesOfCell(cell)}
	return cell, cid.ToToken(), s2cells
}
func edgesOfCell(c s2.Cell) [][]float64 {
var edges [][]float64
for i := 0; i < 4; i++ {
latLng := s2.LatLngFromPoint(c.Vertex(i))
edges = append(edges, []float64{latLng.Lat.Degrees(), latLng.Lng.Degrees()})
}
return edges
} | pkg/geo/geo.go | 0.784567 | 0.49939 | geo.go | starcoder |
package mcc
import (
"math"
"sync"
)
const (
	// maxUpdateQueueLength caps the number of pending block updates; when it
	// is exceeded the whole queue is discarded (see blockUpdateQueue.add).
	maxUpdateQueueLength = math.MaxUint32 / 4
)

// blockUpdate schedules the block at index to be processed after ticks ticks.
type blockUpdate struct {
	index, ticks int
}

// blockUpdateQueue is a mutex-guarded queue of delayed block updates.
type blockUpdateQueue struct {
	lock    sync.Mutex
	updates []blockUpdate
}
// add schedules the block at index to be processed after delay ticks.
// When the queue has grown past maxUpdateQueueLength, all pending updates are
// discarded as an overflow safety valve.
// NOTE(review): the overflow path drops the new update as well as the whole
// backlog — confirm this is intended rather than dropping only oldest entries.
func (queue *blockUpdateQueue) add(index int, delay int) {
	queue.lock.Lock()
	defer queue.lock.Unlock()
	if len(queue.updates) < maxUpdateQueueLength {
		queue.updates = append(queue.updates, blockUpdate{index, delay})
	} else {
		queue.updates = nil
	}
}

// tick advances every pending update by one tick and returns the indices of
// those now due. Updates that are not yet due are compacted in place.
func (queue *blockUpdateQueue) tick() (updates []int) {
	i := 0
	queue.lock.Lock()
	for _, update := range queue.updates {
		// update is a copy; the decremented copy is written back below if kept.
		update.ticks--
		if update.ticks == 0 {
			updates = append(updates, update.index)
		} else {
			queue.updates[i] = update
			i++
		}
	}
	queue.updates = queue.updates[:i]
	queue.lock.Unlock()
	return
}
// WaterSimulator is an implementation of the Simulator interface that handles
// water and sponge physics.
type WaterSimulator struct {
	Level *Level
	queue blockUpdateQueue // water blocks scheduled to spread
}

// Update implements Simulator. It is invoked when the block at index changes
// from old to block (or is re-notified with block == old).
func (simulator *WaterSimulator) Update(block, old byte, index int) {
	if block == BlockActiveWater || (block == BlockWater && block == old) {
		// Schedule this water block to spread in 5 ticks.
		simulator.queue.add(index, 5)
	} else {
		level := simulator.Level
		x, y, z := level.Position(index)
		if block == BlockAir && simulator.checkEdge(x, y, z) {
			// Air exposed at the map edge below the water line floods,
			// unless a sponge is within range.
			if !simulator.checkSponge(x, y, z) {
				level.SetBlock(x, y, z, BlockActiveWater)
			}
		} else if block != old {
			// Sponge placement/removal changes which water survives nearby.
			if block == BlockSponge {
				simulator.placeSponge(x, y, z)
			} else if old == BlockSponge {
				simulator.breakSponge(x, y, z)
			}
		}
	}
}
// Tick implements Simulator. It processes every due water update, spreading
// water to the four horizontal neighbors and straight down.
func (simulator *WaterSimulator) Tick() {
	level := simulator.Level
	for _, index := range simulator.queue.tick() {
		block := level.Blocks[index]
		if block != BlockActiveWater && block != BlockWater {
			// The block changed since it was queued; skip it but keep
			// processing the remaining due updates. (Previously this was a
			// return, which silently dropped every update after a stale one.)
			continue
		}
		x, y, z := level.Position(index)
		if x < level.Width-1 {
			simulator.spread(x+1, y, z)
		}
		if x > 0 {
			simulator.spread(x-1, y, z)
		}
		if z < level.Length-1 {
			simulator.spread(x, y, z+1)
		}
		if z > 0 {
			simulator.spread(x, y, z-1)
		}
		if y > 0 {
			simulator.spread(x, y-1, z)
		}
	}
}
// checkEdge reports whether (x, y, z) lies on the map border within the band
// that the environment's edge liquid should flood (between the side offset
// and the configured edge height), when the edge block is water.
func (simulator *WaterSimulator) checkEdge(x, y, z int) bool {
	level := simulator.Level
	env := level.EnvConfig
	return (env.EdgeBlock == BlockActiveWater || env.EdgeBlock == BlockWater) &&
		y >= (env.EdgeHeight+env.SideOffset) && y < env.EdgeHeight &&
		(x == 0 || z == 0 || x == level.Width-1 || z == level.Length-1)
}

// checkSponge reports whether any sponge block lies within a 5x5x5 cube
// (2 blocks in every direction) around (x, y, z), clamped to level bounds.
func (simulator *WaterSimulator) checkSponge(x, y, z int) bool {
	level := simulator.Level
	for yy := max(y-2, 0); yy <= min(y+2, level.Height-1); yy++ {
		for zz := max(z-2, 0); zz <= min(z+2, level.Length-1); zz++ {
			for xx := max(x-2, 0); xx <= min(x+2, level.Width-1); xx++ {
				if level.GetBlock(xx, yy, zz) == BlockSponge {
					return true
				}
			}
		}
	}
	return false
}

// spread floods air at (x, y, z) with active water (unless sponged) and
// turns lava it touches into stone.
func (simulator *WaterSimulator) spread(x, y, z int) {
	level := simulator.Level
	switch level.GetBlock(x, y, z) {
	case BlockAir:
		if !simulator.checkSponge(x, y, z) {
			level.SetBlock(x, y, z, BlockActiveWater)
		}
	case BlockActiveLava, BlockLava:
		level.SetBlock(x, y, z, BlockStone)
	}
}

// placeSponge removes all water within the sponge's 5x5x5 area of effect.
func (simulator *WaterSimulator) placeSponge(x, y, z int) {
	level := simulator.Level
	for yy := max(y-2, 0); yy <= min(y+2, level.Height-1); yy++ {
		for zz := max(z-2, 0); zz <= min(z+2, level.Length-1); zz++ {
			for xx := max(x-2, 0); xx <= min(x+2, level.Width-1); xx++ {
				switch level.GetBlock(xx, yy, zz) {
				case BlockActiveWater, BlockWater:
					level.SetBlock(xx, yy, zz, BlockAir)
				}
			}
		}
	}
}

// breakSponge re-triggers updates one block beyond the removed sponge's area
// of effect (a 7x7x7 cube) so surrounding water can flow back in.
func (simulator *WaterSimulator) breakSponge(x, y, z int) {
	level := simulator.Level
	for yy := max(y-3, 0); yy <= min(y+3, level.Height-1); yy++ {
		for zz := max(z-3, 0); zz <= min(z+3, level.Length-1); zz++ {
			for xx := max(x-3, 0); xx <= min(x+3, level.Width-1); xx++ {
				index := level.Index(xx, yy, zz)
				block := level.Blocks[index]
				// Re-notify with block == old so water re-queues itself.
				simulator.Update(block, block, index)
			}
		}
	}
}
// LavaSimulator is an implementation of the Simulator interface that handles
// lava physics.
type LavaSimulator struct {
	Level *Level
	queue blockUpdateQueue // lava blocks scheduled to spread
}

// Update implements Simulator. Lava spreads more slowly than water: updates
// are scheduled 30 ticks out (water uses 5).
func (simulator *LavaSimulator) Update(block, old byte, index int) {
	if block == BlockActiveLava || (block == BlockLava && block == old) {
		simulator.queue.add(index, 30)
	}
}
// Tick implements Simulator. It processes every due lava update, spreading
// lava to the four horizontal neighbors and straight down.
func (simulator *LavaSimulator) Tick() {
	level := simulator.Level
	for _, index := range simulator.queue.tick() {
		block := level.Blocks[index]
		if block != BlockActiveLava && block != BlockLava {
			// The block changed since it was queued; skip it but keep
			// processing the remaining due updates. (Previously this was a
			// return, which silently dropped every update after a stale one.)
			continue
		}
		x, y, z := level.Position(index)
		if x < level.Width-1 {
			simulator.spread(x+1, y, z)
		}
		if x > 0 {
			simulator.spread(x-1, y, z)
		}
		if z < level.Length-1 {
			simulator.spread(x, y, z+1)
		}
		if z > 0 {
			simulator.spread(x, y, z-1)
		}
		if y > 0 {
			simulator.spread(x, y-1, z)
		}
	}
}
// spread floods air at (x, y, z) with active lava and turns water it touches
// into stone.
func (simulator *LavaSimulator) spread(x, y, z int) {
	switch simulator.Level.GetBlock(x, y, z) {
	case BlockAir:
		simulator.Level.SetBlock(x, y, z, BlockActiveLava)
	case BlockActiveWater, BlockWater:
		simulator.Level.SetBlock(x, y, z, BlockStone)
	}
}
// SandSimulator is an implementation of the Simulator interface that handles
// falling block physics.
type SandSimulator struct {
Level *Level
}
// Update implements Simulator.
func (simulator *SandSimulator) Update(block, old byte, index int) {
if block != BlockSand && block != BlockGravel {
return
}
level := simulator.Level
x, y0, z := level.Position(index)
y1 := y0
for y1 >= 0 && simulator.check(x, y1-1, z) {
y1--
}
if y0 != y1 {
level.SetBlock(x, y0, z, BlockAir)
level.SetBlock(x, y1, z, block)
}
}
// Tick implements Simulator. Sand movement is resolved eagerly in Update, so
// there is no per-tick work.
func (simulator *SandSimulator) Tick() {}
func (simulator *SandSimulator) check(x, y, z int) bool {
switch simulator.Level.GetBlock(x, y, z) {
case BlockAir, BlockActiveWater, BlockWater,
BlockActiveLava, BlockLava:
return true
default:
return false
}
} | mcc/physics.go | 0.529263 | 0.414069 | physics.go | starcoder |
package proto
import (
"github.com/ysmood/gson"
)
/*
LayerTree
*/
// Types for the DevTools Protocol LayerTree domain. The repetitive comment
// style suggests this file is generated from the protocol definition — edit
// with care.

// LayerTreeLayerID Unique Layer identifier.
type LayerTreeLayerID string
// LayerTreeSnapshotID Unique snapshot identifier.
type LayerTreeSnapshotID string
// LayerTreeScrollRectType enumerates the reasons a rectangle forces
// scrolling onto the main thread.
type LayerTreeScrollRectType string
const (
	// LayerTreeScrollRectTypeRepaintsOnScroll enum const
	LayerTreeScrollRectTypeRepaintsOnScroll LayerTreeScrollRectType = "RepaintsOnScroll"
	// LayerTreeScrollRectTypeTouchEventHandler enum const
	LayerTreeScrollRectTypeTouchEventHandler LayerTreeScrollRectType = "TouchEventHandler"
	// LayerTreeScrollRectTypeWheelEventHandler enum const
	LayerTreeScrollRectTypeWheelEventHandler LayerTreeScrollRectType = "WheelEventHandler"
)
// LayerTreeScrollRect Rectangle where scrolling happens on the main thread.
type LayerTreeScrollRect struct {
	// Rect Rectangle itself.
	Rect *DOMRect `json:"rect"`
	// Type Reason for rectangle to force scrolling on the main thread
	Type LayerTreeScrollRectType `json:"type"`
}
// LayerTreeStickyPositionConstraint Sticky position constraints.
type LayerTreeStickyPositionConstraint struct {
	// StickyBoxRect Layout rectangle of the sticky element before being shifted
	StickyBoxRect *DOMRect `json:"stickyBoxRect"`
	// ContainingBlockRect Layout rectangle of the containing block of the sticky element
	ContainingBlockRect *DOMRect `json:"containingBlockRect"`
	// NearestLayerShiftingStickyBox (optional) The nearest sticky layer that shifts the sticky box
	NearestLayerShiftingStickyBox LayerTreeLayerID `json:"nearestLayerShiftingStickyBox,omitempty"`
	// NearestLayerShiftingContainingBlock (optional) The nearest sticky layer that shifts the containing block
	NearestLayerShiftingContainingBlock LayerTreeLayerID `json:"nearestLayerShiftingContainingBlock,omitempty"`
}
// LayerTreePictureTile Serialized fragment of layer picture along with its offset within the layer.
type LayerTreePictureTile struct {
	// X Offset from owning layer left boundary
	X float64 `json:"x"`
	// Y Offset from owning layer top boundary
	Y float64 `json:"y"`
	// Picture Base64-encoded snapshot data.
	Picture []byte `json:"picture"`
}
// LayerTreeLayer Information about a compositing layer.
type LayerTreeLayer struct {
	// LayerID The unique id for this layer.
	LayerID LayerTreeLayerID `json:"layerId"`
	// ParentLayerID (optional) The id of parent (not present for root).
	ParentLayerID LayerTreeLayerID `json:"parentLayerId,omitempty"`
	// BackendNodeID (optional) The backend id for the node associated with this layer.
	BackendNodeID DOMBackendNodeID `json:"backendNodeId,omitempty"`
	// OffsetX Offset from parent layer, X coordinate.
	OffsetX float64 `json:"offsetX"`
	// OffsetY Offset from parent layer, Y coordinate.
	OffsetY float64 `json:"offsetY"`
	// Width Layer width.
	Width float64 `json:"width"`
	// Height Layer height.
	Height float64 `json:"height"`
	// Transform (optional) Transformation matrix for layer, default is identity matrix
	Transform []float64 `json:"transform,omitempty"`
	// AnchorX (optional) Transform anchor point X, absent if no transform specified
	AnchorX float64 `json:"anchorX,omitempty"`
	// AnchorY (optional) Transform anchor point Y, absent if no transform specified
	AnchorY float64 `json:"anchorY,omitempty"`
	// AnchorZ (optional) Transform anchor point Z, absent if no transform specified
	AnchorZ float64 `json:"anchorZ,omitempty"`
	// PaintCount Indicates how many time this layer has painted.
	PaintCount int `json:"paintCount"`
	// DrawsContent Indicates whether this layer hosts any content, rather than being used for
	// transform/scrolling purposes only.
	DrawsContent bool `json:"drawsContent"`
	// Invisible (optional) Set if layer is not visible.
	Invisible bool `json:"invisible,omitempty"`
	// ScrollRects (optional) Rectangles scrolling on main thread only.
	ScrollRects []*LayerTreeScrollRect `json:"scrollRects,omitempty"`
	// StickyPositionConstraint (optional) Sticky position constraint information
	StickyPositionConstraint *LayerTreeStickyPositionConstraint `json:"stickyPositionConstraint,omitempty"`
}
// LayerTreePaintProfile Array of timings, one per paint step.
type LayerTreePaintProfile []float64
// LayerTreeCompositingReasons Provides the reasons why the given layer was composited.
type LayerTreeCompositingReasons struct {
	// LayerID The id of the layer for which we want to get the reasons it was composited.
	LayerID LayerTreeLayerID `json:"layerId"`
}
// ProtoReq returns the protocol method name.
func (m LayerTreeCompositingReasons) ProtoReq() string { return "LayerTree.compositingReasons" }
// Call sends the request and decodes the result.
func (m LayerTreeCompositingReasons) Call(c Client) (*LayerTreeCompositingReasonsResult, error) {
	var res LayerTreeCompositingReasonsResult
	return &res, call(m.ProtoReq(), m, &res, c)
}
// LayerTreeCompositingReasonsResult Provides the reasons why the given layer was composited.
type LayerTreeCompositingReasonsResult struct {
	// CompositingReasons (deprecated) A list of strings specifying reasons for the given layer to become composited.
	CompositingReasons []string `json:"compositingReasons"`
	// CompositingReasonIds A list of strings specifying reason IDs for the given layer to become composited.
	CompositingReasonIds []string `json:"compositingReasonIds"`
}
// LayerTreeDisable Disables compositing tree inspection.
type LayerTreeDisable struct {
}
// ProtoReq returns the protocol method name.
func (m LayerTreeDisable) ProtoReq() string { return "LayerTree.disable" }
// Call sends the request.
func (m LayerTreeDisable) Call(c Client) error {
	return call(m.ProtoReq(), m, nil, c)
}
// LayerTreeEnable Enables compositing tree inspection.
type LayerTreeEnable struct {
}
// ProtoReq returns the protocol method name.
func (m LayerTreeEnable) ProtoReq() string { return "LayerTree.enable" }
// Call sends the request.
func (m LayerTreeEnable) Call(c Client) error {
	return call(m.ProtoReq(), m, nil, c)
}
// LayerTreeLoadSnapshot Returns the snapshot identifier.
type LayerTreeLoadSnapshot struct {
	// Tiles An array of tiles composing the snapshot.
	Tiles []*LayerTreePictureTile `json:"tiles"`
}
// ProtoReq returns the protocol method name.
func (m LayerTreeLoadSnapshot) ProtoReq() string { return "LayerTree.loadSnapshot" }
// Call sends the request and decodes the result.
func (m LayerTreeLoadSnapshot) Call(c Client) (*LayerTreeLoadSnapshotResult, error) {
	var res LayerTreeLoadSnapshotResult
	return &res, call(m.ProtoReq(), m, &res, c)
}
// LayerTreeLoadSnapshotResult Returns the snapshot identifier.
type LayerTreeLoadSnapshotResult struct {
	// SnapshotID The id of the snapshot.
	SnapshotID LayerTreeSnapshotID `json:"snapshotId"`
}
// LayerTreeMakeSnapshot Returns the layer snapshot identifier.
type LayerTreeMakeSnapshot struct {
	// LayerID The id of the layer.
	LayerID LayerTreeLayerID `json:"layerId"`
}
// ProtoReq returns the protocol method name.
func (m LayerTreeMakeSnapshot) ProtoReq() string { return "LayerTree.makeSnapshot" }
// Call sends the request and decodes the result.
func (m LayerTreeMakeSnapshot) Call(c Client) (*LayerTreeMakeSnapshotResult, error) {
	var res LayerTreeMakeSnapshotResult
	return &res, call(m.ProtoReq(), m, &res, c)
}
// LayerTreeMakeSnapshotResult Returns the layer snapshot identifier.
type LayerTreeMakeSnapshotResult struct {
	// SnapshotID The id of the layer snapshot.
	SnapshotID LayerTreeSnapshotID `json:"snapshotId"`
}
// LayerTreeProfileSnapshot replays a layer snapshot, collecting paint timings.
type LayerTreeProfileSnapshot struct {
	// SnapshotID The id of the layer snapshot.
	SnapshotID LayerTreeSnapshotID `json:"snapshotId"`
	// MinRepeatCount (optional) The maximum number of times to replay the snapshot (1, if not specified).
	MinRepeatCount int `json:"minRepeatCount,omitempty"`
	// MinDuration (optional) The minimum duration (in seconds) to replay the snapshot.
	MinDuration float64 `json:"minDuration,omitempty"`
	// ClipRect (optional) The clip rectangle to apply when replaying the snapshot.
	ClipRect *DOMRect `json:"clipRect,omitempty"`
}
// ProtoReq returns the protocol method name.
func (m LayerTreeProfileSnapshot) ProtoReq() string { return "LayerTree.profileSnapshot" }
// Call sends the request and decodes the result.
func (m LayerTreeProfileSnapshot) Call(c Client) (*LayerTreeProfileSnapshotResult, error) {
	var res LayerTreeProfileSnapshotResult
	return &res, call(m.ProtoReq(), m, &res, c)
}
// LayerTreeProfileSnapshotResult holds the timings collected by LayerTreeProfileSnapshot.
type LayerTreeProfileSnapshotResult struct {
	// Timings The array of paint profiles, one per run.
	Timings []LayerTreePaintProfile `json:"timings"`
}
// LayerTreeReleaseSnapshot Releases layer snapshot captured by the back-end.
type LayerTreeReleaseSnapshot struct {
	// SnapshotID The id of the layer snapshot.
	SnapshotID LayerTreeSnapshotID `json:"snapshotId"`
}
// ProtoReq returns the protocol method name.
func (m LayerTreeReleaseSnapshot) ProtoReq() string { return "LayerTree.releaseSnapshot" }
// Call sends the request.
func (m LayerTreeReleaseSnapshot) Call(c Client) error {
	return call(m.ProtoReq(), m, nil, c)
}
// LayerTreeReplaySnapshot Replays the layer snapshot and returns the resulting bitmap.
type LayerTreeReplaySnapshot struct {
	// SnapshotID The id of the layer snapshot.
	SnapshotID LayerTreeSnapshotID `json:"snapshotId"`
	// FromStep (optional) The first step to replay from (replay from the very start if not specified).
	FromStep int `json:"fromStep,omitempty"`
	// ToStep (optional) The last step to replay to (replay till the end if not specified).
	ToStep int `json:"toStep,omitempty"`
	// Scale (optional) The scale to apply while replaying (defaults to 1).
	Scale float64 `json:"scale,omitempty"`
}
// ProtoReq returns the protocol method name.
func (m LayerTreeReplaySnapshot) ProtoReq() string { return "LayerTree.replaySnapshot" }
// Call sends the request and decodes the result.
func (m LayerTreeReplaySnapshot) Call(c Client) (*LayerTreeReplaySnapshotResult, error) {
	var res LayerTreeReplaySnapshotResult
	return &res, call(m.ProtoReq(), m, &res, c)
}
// LayerTreeReplaySnapshotResult Replays the layer snapshot and returns the resulting bitmap.
type LayerTreeReplaySnapshotResult struct {
	// DataURL A data: URL for resulting image.
	DataURL string `json:"dataURL"`
}
// LayerTreeSnapshotCommandLog Replays the layer snapshot and returns canvas log.
type LayerTreeSnapshotCommandLog struct {
	// SnapshotID The id of the layer snapshot.
	SnapshotID LayerTreeSnapshotID `json:"snapshotId"`
}
// ProtoReq returns the protocol method name.
func (m LayerTreeSnapshotCommandLog) ProtoReq() string { return "LayerTree.snapshotCommandLog" }
// Call sends the request and decodes the result.
func (m LayerTreeSnapshotCommandLog) Call(c Client) (*LayerTreeSnapshotCommandLogResult, error) {
	var res LayerTreeSnapshotCommandLogResult
	return &res, call(m.ProtoReq(), m, &res, c)
}
// LayerTreeSnapshotCommandLogResult Replays the layer snapshot and returns canvas log.
type LayerTreeSnapshotCommandLogResult struct {
	// CommandLog The array of canvas function calls.
	CommandLog []map[string]gson.JSON `json:"commandLog"`
}
// LayerTreeLayerPainted is the event fired when a layer has been painted.
type LayerTreeLayerPainted struct {
	// LayerID The id of the painted layer.
	LayerID LayerTreeLayerID `json:"layerId"`
	// Clip Clip rectangle.
	Clip *DOMRect `json:"clip"`
}
// ProtoEvent returns the protocol event name.
func (evt LayerTreeLayerPainted) ProtoEvent() string {
	return "LayerTree.layerPainted"
}
// LayerTreeLayerTreeDidChange is the event fired when the compositing layer
// tree changes.
type LayerTreeLayerTreeDidChange struct {
	// Layers (optional) Layer tree, absent if not in the compositing mode.
	Layers []*LayerTreeLayer `json:"layers,omitempty"`
}
// ProtoEvent returns the protocol event name.
func (evt LayerTreeLayerTreeDidChange) ProtoEvent() string {
	return "LayerTree.layerTreeDidChange"
}
package taskmaster
import (
"github.com/thompsonlabs/taskmaster/pool"
)
// poolBuilderInstance backs the Builder() singleton; created lazily on first use.
var poolBuilderInstance *PoolBuilder
//PoolBuilder - Builds a new TaskMaster TaskPool
type PoolBuilder struct {
	maxWorkerCount         int               // upper bound on concurrent workers (default 10)
	maxQueueCount          int               // upper bound on queued tasks (default 100)
	poolType               PoolType          // FIXED, CACHED or ELASTIC
	customErrorFunction    func(interface{}) // called on unrecoverable worker errors; may be nil
	maxCachePeriodInMillis int64             // idle-worker eviction period (CACHED/ELASTIC only)
	minWorkerCount         int               // lower bound on workers (ELASTIC only)
}
//NewFixedTaskPool - Configures the builder for a fixed pool: the pool starts
//with a fixed Worker count (set at build time) and keeps that count constant
//until it is explicitly shut down or the period given to Wait() elapses,
//whichever occurs first. Resets all builder settings to their defaults first.
func (tmpb *PoolBuilder) NewFixedTaskPool() *PoolBuilder {
	tmpb.resetValues()
	tmpb.poolType = FIXED
	return tmpb
}
//NewCachedTaskPool - Configures the builder for a cached pool: the pool starts
//with zero Workers and scales up on demand, preferring cached Workers for new
//tasks. Workers idle for longer than maxCachePeriodInMillis are evicted, so
//the pool may shrink back to zero after prolonged inactivity. Resets all
//builder settings to their defaults first.
func (tmpb *PoolBuilder) NewCachedTaskPool(maxCachePeriodInMillis int64) *PoolBuilder {
	tmpb.resetValues()
	tmpb.poolType = CACHED
	tmpb.maxCachePeriodInMillis = maxCachePeriodInMillis
	return tmpb
}
//NewElasticTaskPool - Configures the builder for an elastic pool: the pool
//expands and contracts between minWorkerCount and the configured maximum in
//line with load; idle Workers above the minimum are evicted after
//maxCachePeriodInMillis. Resets all builder settings to their defaults first.
func (tmpb *PoolBuilder) NewElasticTaskPool(maxCachePeriodInMillis int64, minWorkerCount int) *PoolBuilder {
	tmpb.resetValues()
	tmpb.poolType = ELASTIC
	tmpb.maxCachePeriodInMillis = maxCachePeriodInMillis
	tmpb.minWorkerCount = minWorkerCount
	return tmpb
}
//SetMaxWorkerCount - Sets the resulting TaskPool's maximum Worker count
//(default 10); each Worker occupies its own goroutine.
func (tmpb *PoolBuilder) SetMaxWorkerCount(maxWorkerCount int) *PoolBuilder {
	tmpb.maxWorkerCount = maxWorkerCount
	return tmpb
}
//SetMaxQueueCount - Sets the resulting TaskPool's maximum queue count: the
//largest number of tasks that may be queued for execution (default 100).
func (tmpb *PoolBuilder) SetMaxQueueCount(maxQueueCount int) *PoolBuilder {
	tmpb.maxQueueCount = maxQueueCount
	return tmpb
}
//SetCustomErrorFunction - Associates a custom, developer-defined error
//function with the pool. When set, it is called each time a pool Worker (more
//specifically, the goroutine it is associated with) encounters an
//unrecoverable error (i.e. a panic).
func (tmpb *PoolBuilder) SetCustomErrorFunction(errorFunction func(interface{})) *PoolBuilder {
	tmpb.customErrorFunction = errorFunction
	return tmpb
}
//Build - Assembles a TaskPool of the configured type from the settings
//supplied to the builder and returns it.
func (tmpb *PoolBuilder) Build() pool.TaskPool {
	var builtPool pool.TaskPool
	switch tmpb.poolType {
	case FIXED:
		builtPool = pool.NewFixedTaskPool()
	case CACHED:
		builtPool = pool.NewCachedTaskPool(tmpb.maxCachePeriodInMillis)
	default:
		builtPool = pool.NewElasticTaskPool(tmpb.maxCachePeriodInMillis, tmpb.minWorkerCount)
	}
	builtPool.SetMaxQueueCount(tmpb.maxQueueCount)
	builtPool.SetMaxWorkerCount(tmpb.maxWorkerCount)
	builtPool.SetCustomErrorFunction(tmpb.customErrorFunction)
	return builtPool
}
// resetValues restores the builder's defaults (queue 100, workers 10) and
// clears any pool-specific settings left over from a previous configuration.
func (tmpb *PoolBuilder) resetValues() {
	tmpb.maxQueueCount = 100
	tmpb.maxWorkerCount = 10
	tmpb.maxCachePeriodInMillis = 0
	tmpb.minWorkerCount = 0
	tmpb.customErrorFunction = nil
}
//Builder - Returns a singular reference to the TaskMasterBuilder, creating it
//lazily on first use.
//
//NOTE(review): this lazy initialisation is not goroutine-safe; concurrent
//first calls can race on poolBuilderInstance. Confirm single-goroutine use,
//or guard the initialisation with sync.Once.
func Builder() *PoolBuilder {
	if poolBuilderInstance == nil {
		poolBuilderInstance = new(PoolBuilder)
	}
	return poolBuilderInstance
}
//PoolType - Identifies the kind of TaskPool the builder will create.
type PoolType int
const (
	//FIXED - A FIXED POOL
	FIXED PoolType = iota
	//CACHED - A CACHE POOL
	CACHED
	//ELASTIC - AN ELASTIC POOL
	ELASTIC
)
func (poolType PoolType) String() string {
return [...]string{"FIXED", "CACHED", "ELASTIC"}[poolType]
} | TaskMaster.go | 0.574753 | 0.429489 | TaskMaster.go | starcoder |
package timeutil
import (
"errors"
"fmt"
"math"
"time"
)
// TimeUnit identifies the unit of time used by FloorTime.
type TimeUnit int

const (
	zero = 0
	one  = 1
)

// Time units accepted by FloorTime. These constants historically shared a
// single const block with zero and one above, which silently offset the iota
// sequence by 2; the explicit "+ 2" makes that offset visible while
// preserving the original values (Second == 2 ... Year == 7).
const (
	Second = iota + 2
	Minute
	Hour
	Day
	Month
	Year
)
func TruncateThirtyMinutes(currentTime time.Time) time.Time {
minutes := currentTime.Minute()
if minutes >= 30 {
minutes = 30
} else {
minutes = 0
}
return time.Date(currentTime.Year(), currentTime.Month(), currentTime.Day(), currentTime.Hour(), minutes, zero, zero, currentTime.Location())
}
func TruncateHour(currentTime time.Time) time.Time {
return time.Date(currentTime.Year(), currentTime.Month(), currentTime.Day(), currentTime.Hour(), zero, zero, zero, currentTime.Location())
}
func TruncateDay(currentTime time.Time) time.Time {
year, month, day := currentTime.Date()
return time.Date(year, month, day, zero, zero, zero, zero, currentTime.Location())
}
func TruncateMonth(currentTime time.Time) time.Time {
year, month, _ := currentTime.Date()
return time.Date(year, month, one, zero, zero, zero, zero, currentTime.Location())
}
func FloorTimeByMinutes(currentTime time.Time, interval int) (time.Time, error) {
switch {
case interval%60 == 0 && interval/60 == 1:
return currentTime.Truncate(time.Hour), nil
case interval%1440 == 0 && interval/1440 == 1:
return TruncateDay(currentTime), nil
case interval%43200 == 0 && interval/43200 == 1:
return TruncateMonth(currentTime), nil
default:
return currentTime, fmt.Errorf("不支持的值: %v", interval)
}
}
// FloorTime floors currentTime down to the nearest multiple of interval
// expressed in the given unit, zeroing all finer-grained fields. Day, Month
// and Year are 1-based, so they are shifted to 0-based before flooring and
// shifted back afterwards. interval must be positive and must evenly divide
// the unit's cycle (60 seconds/minutes, 24 hours, 30 days, 12 months).
func FloorTime(currentTime time.Time, interval int, unit TimeUnit) (time.Time, error) {
	if interval <= 0 {
		return currentTime, errors.New("value应该大于0")
	}
	err := errors.New("value值超出unit范围")
	year, month, day := currentTime.Date()
	location := currentTime.Location()
	switch unit {
	case Second:
		if interval > 59 {
			return currentTime, err
		}
		if 60%interval != 0 {
			return currentTime, fmt.Errorf("秒钟不能按%v整分", interval)
		}
		startSecond := floorInt(float64(currentTime.Second()), float64(interval))
		return time.Date(year, month, day, currentTime.Hour(), currentTime.Minute(), startSecond, 0, location), nil
	case Minute:
		if interval > 59 {
			return currentTime, err
		}
		if 60%interval != 0 {
			return currentTime, fmt.Errorf("分钟不能按%v整分", interval)
		}
		startMinute := floorInt(float64(currentTime.Minute()), float64(interval))
		return time.Date(year, month, day, currentTime.Hour(), startMinute, 0, 0, location), nil
	case Hour:
		if interval > 23 {
			return currentTime, err
		}
		if 24%interval != 0 {
			return currentTime, fmt.Errorf("时钟不能按%v整分", interval)
		}
		startHour := floorInt(float64(currentTime.Hour()), float64(interval))
		return time.Date(year, month, day, startHour, 0, 0, 0, location), nil
	case Day:
		if interval > 15 {
			return currentTime, err
		}
		if 30%interval != 0 {
			return currentTime, fmt.Errorf("日期不能按%v整分", interval)
		}
		// Days are 1-based: floor on day-1, then shift back with +1.
		startDay := floorInt(float64(day-1), float64(interval))
		return time.Date(year, month, startDay+1, 0, 0, 0, 0, location), nil
	case Month:
		if interval > 6 {
			return currentTime, err
		}
		if 12%interval != 0 {
			return currentTime, fmt.Errorf("月份不能按%v整分", interval)
		}
		// Months are 1-based: floor on month-1, then shift back with +1.
		startMonth := floorInt(float64(month-1), float64(interval))
		return time.Date(year, time.Month(startMonth+1), 1, 0, 0, 0, 0, location), nil
	case Year:
		if interval > 3 {
			return currentTime, err
		}
		// Years follow the same 1-based pattern as Day and Month. The "+ 1"
		// was previously missing, so e.g. flooring any date in 2023 with
		// interval 1 incorrectly produced 2022-01-01.
		startYear := floorInt(float64(year-1), float64(interval)) + 1
		return time.Date(startYear, time.January, 1, 0, 0, 0, 0, location), nil
	default:
		return currentTime, errors.New("不支持的时间单位")
	}
}
// floorInt returns, as an int, the largest multiple of interval that does not
// exceed src.
func floorInt(src float64, interval float64) int {
	multiples := math.Floor(src / interval)
	return int(multiples * interval)
}
package mapbox
import (
"bytes"
"context"
"fmt"
"github.com/soider/elevations/internal/geo"
"image"
"image/color"
"image/png"
)
// ElevationDecoder decodes elevation data from Mapbox "pngraw" terrain tiles.
// It is stateless and safe to share.
type ElevationDecoder struct{}
// Decode decodes elevation data from pngraw format.
// Every rawpng file is a 256x256 png tile. To avoid projecting the real
// lat/long pair onto an exact pixel in the tile, four sample points (the
// middle of each quadrant) are read and averaged into a single elevation per
// tile.
// Not production ready.
// To be production ready:
// - metrics for cache hit
// - metrics for decode duration
// - cache for tiles elevation
func (m ElevationDecoder) Decode(ctx context.Context, data EncodedElevationData) (geo.RouteElevation, error) {
	// Preallocate: one elevation per tile.
	result := make(geo.RouteElevation, 0, len(data.png))
	for tileCoord, rawPng := range data.png {
		// `img` rather than `image`: the previous name shadowed the imported
		// image package.
		img, err := png.Decode(bytes.NewReader(rawPng))
		if err != nil {
			// Wrap the decode error instead of discarding it, and return a
			// nil slice rather than a partially built result.
			return nil, fmt.Errorf("broken png file from the mapbox: %w", err)
		}
		result = append(result,
			geo.Elevation{
				Location: tileCoord.From,
				Elevation: getAverageElevation(
					getColorAtTheMiddleOfTopLeftQuadrant(img),
					getColorAtTheMiddleOfTopRightQuadrant(img),
					getColorAtTheMiddleOfBottomLeftQuadrant(img),
					getColorAtTheMiddleOfBottomRightQuadrant(img),
				),
			},
		)
	}
	return result, nil
}
func getColorAtTheMiddleOfTopLeftQuadrant(i image.Image) color.Color {
return i.At(64, 64)
}
func getColorAtTheMiddleOfTopRightQuadrant(i image.Image) color.Color {
return i.At(64+128, 64)
}
func getColorAtTheMiddleOfBottomLeftQuadrant(i image.Image) color.Color {
return i.At(64, 64+128)
}
func getColorAtTheMiddleOfBottomRightQuadrant(i image.Image) color.Color {
return i.At(64+128, 64+128)
}
func getAverageElevation(a, b, c, d color.Color) float64 {
return (getElevationAtThePoint(a) + getElevationAtThePoint(a) + getElevationAtThePoint(a) + getElevationAtThePoint(a)) / 4
}
func getElevationAtThePoint(a color.Color) float64 {
r, g, b, _ := a.RGBA()
return -10000 + ((float64(r)*256 + float64(g)*256 + float64(b)) * 0.1)
}
// NewMapboxElevationDecoder constructor
func NewMapboxElevationDecoder() *ElevationDecoder {
return &ElevationDecoder{}
} | internal/mapbox/decoder.go | 0.738952 | 0.485539 | decoder.go | starcoder |
package ring
import (
"github.com/tuneinsight/lattigo/v3/utils"
)
// UnfoldConjugateInvariantToStandard maps the compressed representation (N/2 coefficients)
// of Z_Q[X+X^-1]/(X^2N + 1) to full representation in Z_Q[X]/(X^2N+1).
// Requires degree(polyStd) = 2*degree(polyConjugateInvariant); the previous
// comment and panic message stated this relation backwards relative to the
// check actually enforced below.
// Requires that polyStd and polyConjugateInvariant share the same moduli.
func (r *Ring) UnfoldConjugateInvariantToStandard(level int, polyConjugateInvariant, polyStd *Poly) {
	if 2*len(polyConjugateInvariant.Coeffs[0]) != len(polyStd.Coeffs[0]) {
		panic("Ring degree of polyStd must be twice the ring degree of polyConjugateInvariant")
	}
	N := len(polyConjugateInvariant.Coeffs[0])
	for i := 0; i < level+1; i++ {
		tmp2, tmp1 := polyStd.Coeffs[i], polyConjugateInvariant.Coeffs[i]
		// First half: straight copy of the compressed coefficients.
		copy(tmp2, tmp1)
		// Second half: mirror image of the compressed coefficients.
		for idx, jdx := N-1, N; jdx < 2*N; idx, jdx = idx-1, jdx+1 {
			tmp2[jdx] = tmp1[idx]
		}
	}
}
// FoldStandardToConjugateInvariant folds [X]/(X^N+1) to [X+X^-1]/(X^N+1) in compressed form (N/2 coefficients).
// Requires degree(polyStandard) = 2*degree(polyConjugateInvariant); the
// previous comment stated this relation backwards relative to the enforced
// check, and the panic message referred to stale parameter names p1/p2.
// Requires that polyStandard and polyConjugateInvariant share the same moduli.
func (r *Ring) FoldStandardToConjugateInvariant(level int, polyStandard *Poly, permuteNTTIndexInv []uint64, polyConjugateInvariant *Poly) {
	if len(polyStandard.Coeffs[0]) != 2*len(polyConjugateInvariant.Coeffs[0]) {
		panic("Ring degree of polyStandard must be twice the ring degree of polyConjugateInvariant")
	}
	// Apply the precomputed NTT permutation (presumably the X -> X^-1 Galois
	// map — the parameter name suggests so), then add the original first N
	// coefficients to fold the two halves together.
	r.PermuteNTTWithIndexLvl(level, polyStandard, permuteNTTIndexInv, polyConjugateInvariant)
	for i := 0; i < level+1; i++ {
		AddVec(polyConjugateInvariant.Coeffs[i][:r.N], polyStandard.Coeffs[i][:r.N], polyConjugateInvariant.Coeffs[i][:r.N], r.Modulus[i])
	}
}
// PadDefaultRingToConjuateInvariant converts a polynomial in Z[X]/(X^N +1) to a polynomial in Z[X+X^-1]/(X^2N+1).
// Conversion will check the .IsNTT flag of the polynomial p1.
//
// NOTE(review): the exported name misspells "Conjugate" ("Conjuate");
// renaming would break callers, so it is left as is.
func PadDefaultRingToConjuateInvariant(p1 *Poly, ringQ *Ring, p2 *Poly) {
	if p1 == p2 {
		panic("p1 == p2 but method cannot be used in place")
	}
	// Operate on the number of moduli levels common to both polynomials.
	level := utils.MinInt(p1.Level(), p2.Level())
	n := len(p1.Coeffs[0])
	for i := 0; i < level+1; i++ {
		qi := ringQ.Modulus[i]
		if len(p2.Coeffs[i]) != 2*len(p1.Coeffs[i]) {
			panic("p2 degree must be twice the one of p1")
		}
		// Copy p1's n coefficients into the 2n-slot target, then fill in the
		// conjugate-invariant structure below.
		copy(p2.Coeffs[i], p1.Coeffs[i])
		tmp := p2.Coeffs[i]
		if p1.IsNTT {
			// NTT domain: mirror the coefficients.
			// NOTE(review): this writes tmp[n-j-1] for j in [0, n), i.e. only
			// within the first n of the 2n slots, and overwrites values it
			// later reads — confirm whether tmp[2*n-j-1] was intended.
			for j := 0; j < n; j++ {
				tmp[n-j-1] = tmp[j]
			}
		} else {
			// Coefficient domain: zero slot 0 and store negated coefficients
			// at mirrored positions.
			// NOTE(review): this also only touches slots [0, n), leaving the
			// upper half of the 2n-slot target untouched, and tmp[0] = 0
			// discards the constant term just copied — confirm intent.
			tmp[0] = 0
			for j := 1; j < n; j++ {
				tmp[n-j] = qi - tmp[j]
			}
		}
	}
	p2.IsNTT = p1.IsNTT
}
package object
import "github.com/butlermatt/glpc/lexer"
// Expr is an AST expression node; evaluating it via Accept yields a value of
// type Object or an error.
type Expr interface {
	Accept(ExprVisitor) (Object, error)
}
// Stmt is an AST statement node; executing it via Accept returns no value but
// may produce an error.
type Stmt interface {
	Accept(StmtVisitor) error
}
// AssignExpr is an assignment expression: Name = Value.
type AssignExpr struct {
	Name *lexer.Token
	Value Expr
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (a *AssignExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitAssignExpr(a) }
// BinaryExpr is a binary infix expression: Left Operator Right.
type BinaryExpr struct {
	Left Expr
	Operator *lexer.Token
	Right Expr
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (b *BinaryExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitBinaryExpr(b) }
// BooleanExpr is a boolean literal; Token is the source token and Value its
// parsed truth value.
type BooleanExpr struct {
	Token *lexer.Token
	Value bool
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (b *BooleanExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitBooleanExpr(b) }
// CallExpr is a call expression: Callee(Args...). Paren is a parenthesis
// token — presumably kept for error positions; confirm against the parser.
type CallExpr struct {
	Callee Expr
	Paren *lexer.Token
	Args []Expr
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (c *CallExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitCallExpr(c) }
// GetExpr is a property access expression: Object.Name.
type GetExpr struct {
	Object Expr
	Name *lexer.Token
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (g *GetExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitGetExpr(g) }
// GroupingExpr is a parenthesized expression: (Expression).
type GroupingExpr struct {
	Expression Expr
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (g *GroupingExpr) Accept(visitor ExprVisitor) (Object, error) {
	return visitor.VisitGroupingExpr(g)
}
// IndexExpr is an indexing expression, e.g. Left[Right]; Operator is the
// associated token.
type IndexExpr struct {
	Left Expr
	Operator *lexer.Token
	Right Expr
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (i *IndexExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitIndexExpr(i) }
// ListExpr is a list literal holding its element expressions.
type ListExpr struct {
	Values []Expr
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (l *ListExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitListExpr(l) }
// LogicalExpr is a logical expression: Left Operator Right.
type LogicalExpr struct {
	Left Expr
	Operator *lexer.Token
	Right Expr
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (l *LogicalExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitLogicalExpr(l) }
// NumberExpr is a numeric literal; it carries the source token plus Float and
// Int fields (which representation applies is decided elsewhere — confirm).
type NumberExpr struct {
	Token *lexer.Token
	Float float64
	Int int
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (n *NumberExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitNumberExpr(n) }
// NullExpr is a null literal.
type NullExpr struct {
	Token *lexer.Token
	Value interface{}
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (n *NullExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitNullExpr(n) }
// SetExpr is a property or index assignment on Object; IsIndex presumably
// distinguishes Object[index] = Value from Object.Name = Value — confirm.
type SetExpr struct {
	Object Expr
	Name *lexer.Token
	Value Expr
	IsIndex bool
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (s *SetExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitSetExpr(s) }
// StringExpr is a string literal; Value holds the parsed text.
type StringExpr struct {
	Token *lexer.Token
	Value string
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (s *StringExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitStringExpr(s) }
// SuperExpr is a superclass method reference: super.Method.
type SuperExpr struct {
	Keyword *lexer.Token
	Method *lexer.Token
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (s *SuperExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitSuperExpr(s) }
// ThisExpr is a reference to the current instance via the `this` keyword token.
type ThisExpr struct {
	Keyword *lexer.Token
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (t *ThisExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitThisExpr(t) }
// UnaryExpr is a unary prefix expression: Operator Right.
type UnaryExpr struct {
	Operator *lexer.Token
	Right Expr
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (u *UnaryExpr) Accept(visitor ExprVisitor) (Object, error) { return visitor.VisitUnaryExpr(u) }
// VariableExpr is a reference to a variable by its name token.
type VariableExpr struct {
	Name *lexer.Token
}
// Accept calls the correct visit method on ExprVisitor, passing a reference to itself as a value
func (v *VariableExpr) Accept(visitor ExprVisitor) (Object, error) {
	return visitor.VisitVariableExpr(v)
}
// ExprVisitor visits every concrete Expr node; each Expr's Accept dispatches
// to the matching VisitXxxExpr method here.
type ExprVisitor interface {
	VisitAssignExpr(expr *AssignExpr) (Object, error)
	VisitBinaryExpr(expr *BinaryExpr) (Object, error)
	VisitBooleanExpr(expr *BooleanExpr) (Object, error)
	VisitCallExpr(expr *CallExpr) (Object, error)
	VisitGetExpr(expr *GetExpr) (Object, error)
	VisitGroupingExpr(expr *GroupingExpr) (Object, error)
	VisitIndexExpr(expr *IndexExpr) (Object, error)
	VisitListExpr(expr *ListExpr) (Object, error)
	VisitLogicalExpr(expr *LogicalExpr) (Object, error)
	VisitNumberExpr(expr *NumberExpr) (Object, error)
	VisitNullExpr(expr *NullExpr) (Object, error)
	VisitSetExpr(expr *SetExpr) (Object, error)
	VisitStringExpr(expr *StringExpr) (Object, error)
	VisitSuperExpr(expr *SuperExpr) (Object, error)
	VisitThisExpr(expr *ThisExpr) (Object, error)
	VisitUnaryExpr(expr *UnaryExpr) (Object, error)
	VisitVariableExpr(expr *VariableExpr) (Object, error)
}
// BlockStmt is a Stmt of a Block
type BlockStmt struct {
Statements []Stmt
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (b *BlockStmt) Accept(visitor StmtVisitor) error { return visitor.VisitBlockStmt(b) }
// BreakStmt is a Stmt of a Break
type BreakStmt struct {
Keyword *lexer.Token
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (b *BreakStmt) Accept(visitor StmtVisitor) error { return visitor.VisitBreakStmt(b) }
// ClassStmt is a Stmt of a Class
type ClassStmt struct {
Name *lexer.Token
Super *VariableExpr
Methods []*FunctionStmt
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (c *ClassStmt) Accept(visitor StmtVisitor) error { return visitor.VisitClassStmt(c) }
// ContinueStmt is a Stmt of a Continue
type ContinueStmt struct {
Keyword *lexer.Token
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (c *ContinueStmt) Accept(visitor StmtVisitor) error { return visitor.VisitContinueStmt(c) }
// ExpressionStmt is a Stmt of a Expression
type ExpressionStmt struct {
	Expression Expr // the wrapped expression
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (e *ExpressionStmt) Accept(visitor StmtVisitor) error { return visitor.VisitExpressionStmt(e) }
// FunctionStmt is a Stmt of a Function
type FunctionStmt struct {
	Name *lexer.Token // the function's name token
	Parameters []*lexer.Token // parameter name tokens, in declaration order
	Body []Stmt // statements making up the function body
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (f *FunctionStmt) Accept(visitor StmtVisitor) error { return visitor.VisitFunctionStmt(f) }
// IfStmt is a Stmt of a If
type IfStmt struct {
	Condition Expr // the branch condition
	Then Stmt // branch taken when the condition holds
	Else Stmt // optional alternative branch; may be nil
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (i *IfStmt) Accept(visitor StmtVisitor) error { return visitor.VisitIfStmt(i) }
// ImportStmt is a Stmt of a Import
type ImportStmt struct {
	Keyword *lexer.Token // the "import" keyword token
	Other Expr // the import operand (presumably the module path/name expression — TODO confirm against the interpreter)
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (i *ImportStmt) Accept(visitor StmtVisitor) error { return visitor.VisitImportStmt(i) }
// ForStmt is a Stmt of a For
type ForStmt struct {
	Keyword *lexer.Token // the "for" keyword token
	Initializer Stmt // optional initializer statement; may be nil
	Condition Expr // optional loop condition; may be nil
	Body Stmt // the loop body
	Increment Expr // optional increment expression; may be nil
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (f *ForStmt) Accept(visitor StmtVisitor) error { return visitor.VisitForStmt(f) }
// ReturnStmt is a Stmt of a Return
type ReturnStmt struct {
	Keyword *lexer.Token // the "return" keyword token
	Value Expr // optional returned value expression; may be nil
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (r *ReturnStmt) Accept(visitor StmtVisitor) error { return visitor.VisitReturnStmt(r) }
// VarStmt is a Stmt of a Var
type VarStmt struct {
	Name *lexer.Token // the declared variable's name token
	Value Expr // optional initializer expression; may be nil
}
// Accept calls the correct visit method on StmtVisitor, passing a reference to itself as a value
func (v *VarStmt) Accept(visitor StmtVisitor) error { return visitor.VisitVarStmt(v) }
// StmtVisitor will visit Stmt objects and must receive calls to their applicable methods.
type StmtVisitor interface {
VisitBlockStmt(stmt *BlockStmt) error
VisitBreakStmt(stmt *BreakStmt) error
VisitClassStmt(stmt *ClassStmt) error
VisitContinueStmt(stmt *ContinueStmt) error
VisitExpressionStmt(stmt *ExpressionStmt) error
VisitFunctionStmt(stmt *FunctionStmt) error
VisitIfStmt(stmt *IfStmt) error
VisitImportStmt(stmt *ImportStmt) error
VisitForStmt(stmt *ForStmt) error
VisitReturnStmt(stmt *ReturnStmt) error
VisitVarStmt(stmt *VarStmt) error
} | object/ast.go | 0.743727 | 0.561816 | ast.go | starcoder |
package prnm
import (
"math/big"
"github.com/pkg/errors"
)
// BigInt wraps a golang math/big.Int.
// All functions on BigInt have their equivalent in the documentation below.
// See https://golang.org/pkg/math/big/#Int
// If we do this as embedding, the functions are skipped because of the wrong return type.
type BigInt struct {
	i *big.Int
}

// NewBigIntFromBytes creates a BigInt from a big-endian byte slice.
func NewBigIntFromBytes(data []byte) *BigInt {
	return &BigInt{new(big.Int).SetBytes(data)}
}

// NewBigIntFromInt64 creates a BigInt from an int64.
func NewBigIntFromInt64(v int64) *BigInt {
	return &BigInt{new(big.Int).SetInt64(v)}
}

// NewBigIntFromString creates a BigInt by parsing a string.
// A prefix of "0b" or "0B" selects base 2, "0", "0o" or "0O" selects base 8,
// and "0x" or "0X" selects base 16. Otherwise, the selected base is 10 and no prefix is accepted.
// Read documentation of https://pkg.go.dev/math/big?tab=doc#Int.SetString for more details.
func NewBigIntFromString(data string) (*BigInt, error) {
	b, success := new(big.Int).SetString(data, 0)
	if !success {
		return nil, errors.New("invalid number string")
	}
	return &BigInt{b}, nil
}

// NewBigIntFromStringBase creates a BigInt by parsing a string containing a number of given base.
func NewBigIntFromStringBase(data string, base int) (*BigInt, error) {
	b, success := new(big.Int).SetString(data, base)
	if !success {
		return nil, errors.New("invalid number string")
	}
	return &BigInt{b}, nil
}

// Add returns the result of the receiver + x. Does not change the receiver.
func (b *BigInt) Add(x *BigInt) *BigInt {
	return &BigInt{new(big.Int).Add(b.i, x.i)}
}

// Sub returns the result of the receiver - x. Does not change the receiver.
func (b *BigInt) Sub(x *BigInt) *BigInt {
	return &BigInt{new(big.Int).Sub(b.i, x.i)}
}

// IsWithin returns whether the receiver deviates at most `delta` from `x`
// in either direction, i.e. |b - x| <= |delta|.
func (b *BigInt) IsWithin(x *BigInt, delta *BigInt) bool {
	return new(big.Int).Sub(b.i, x.i).CmpAbs(delta.i) <= 0
}

// ToInt64 wraps math/big.Int.Int64
func (b *BigInt) ToInt64() int64 {
	return b.i.Int64()
}

// Cmp wraps math/big.Int.Cmp
func (b *BigInt) Cmp(x *BigInt) int {
	return b.i.Cmp(x.i)
}

// String wraps math/big.Int.String
func (b *BigInt) String() string {
	return b.i.String()
}

// StringBase wraps math/big.Int.Text
func (b *BigInt) StringBase(base int) string {
	return b.i.Text(base)
}

// ToBytesArray wraps math/big.Int.Bytes
func (b *BigInt) ToBytesArray() []byte {
	return b.i.Bytes()
}

// BigInt cannot be called from Java; it only exists here to improve reusability.
func (b *BigInt) BigInt() *big.Int {
	return b.i
}

// BigInts is a slice of BigInt's
type BigInts struct {
	values []*big.Int
}

// NewBigInts creates a new BigInts with the given length.
func NewBigInts(length int) *BigInts {
	return &BigInts{values: make([]*big.Int, length)}
}

// NewBalances creates a new BigInts of length two with the given values.
func NewBalances(first, second *BigInt) *BigInts {
	return &BigInts{values: []*big.Int{first.i, second.i}}
}

// Length returns the length of the BigInts slice.
func (bs *BigInts) Length() int {
	return len(bs.values)
}

// Get returns the element at the given index, or an error when the index
// is out of range.
func (bs *BigInts) Get(index int) (*BigInt, error) {
	if index < 0 || index >= len(bs.values) {
		return nil, errors.New("get: index out of range")
	}
	return &BigInt{bs.values[index]}, nil
}

// Set sets the element at the given index, returning an error when the
// index is out of range.
func (bs *BigInts) Set(index int, value *BigInt) error {
	if index < 0 || index >= len(bs.values) {
		return errors.New("set: index out of range")
	}
	bs.values[index] = value.i
	return nil
}

// Data cannot be called from Java; it only exists here to improve reusability.
func (bs *BigInts) Data() []*big.Int {
	return bs.values
}
package main
import (
"log"
"math"
"github.com/unixpickle/model3d/render3d"
"github.com/unixpickle/model3d/toolbox3d"
"github.com/unixpickle/model3d/model3d"
)
const (
	BrickZSpace = 0.4 // vertical period of the brick grid (Z axis)
	BrickThetaSpace = 0.4 // angular period (radians) of bricks around a cylinder
	BrickDivot = 0.03 // depth of the groove carved between bricks
	TopBlockCount = 10 // number of crenellation blocks around a tower top
	TopBlockThickness = 0.2 // radial thickness of the crenellation ring
	WallHeight = 3.0 // height of the connecting wall
	WallThickness = 0.4 // full thickness (Y extent) of the connecting wall
	WallBrickXSpace = 0.6 // horizontal period of the wall's brick grid
)
// main builds a castle model (two brick towers joined by a wall on a base
// slab), marches it into a mesh, and writes castle.stl plus a rendering.
func main() {
	// A tower, stacked bottom-to-top: a brick cylinder, a flared (ramped)
	// transition, a wider brick cylinder, then crenellations on the rim.
	tower := model3d.StackedSolid{
		model3d.JoinedSolid{
			&BrickCylinder{Height: 3, Radius: 1},
			&toolbox3d.Ramp{
				P1: model3d.Z(2),
				P2: model3d.Z(3.2),
				Solid: &model3d.Cylinder{
					P1: model3d.Z(2),
					P2: model3d.Z(3.2),
					Radius: 1.2,
				},
			},
		},
		&BrickCylinder{Height: 1.2, Radius: 1.2},
		&TopBlocks{Height: 0.3, Radius: 1.2},
	}
	// Full scene: a base slab, the tower duplicated at x = ±2.5, and a
	// wall spanning the gap between the towers.
	solid := model3d.JoinedSolid{
		&model3d.Rect{
			MinVal: model3d.XYZ(-3.7, -1.2, -0.2),
			MaxVal: model3d.XYZ(3.7, 1.2, 0),
		},
		&XBlock{X: -2.5, Solid: tower},
		&XBlock{X: 2.5, Solid: tower},
		&Wall{X1: -2, X2: 2},
	}
	log.Println("Creating mesh...")
	mesh := model3d.MarchingCubesSearch(solid, 0.02, 8)
	log.Println("Saving STL...")
	mesh.SaveGroupedSTL("castle.stl")
	log.Println("Saving rendering...")
	render3d.SaveRendering("rendering.png", mesh, model3d.Coord3D{Y: 7, Z: 5}, 400, 400, nil)
}
// XBlock wraps a Solid and presents it translated along the X axis by X.
type XBlock struct {
	Solid model3d.Solid // the solid being translated
	X float64 // offset applied along the X axis
}
// Min returns the wrapped solid's minimum bound shifted by X.
func (x *XBlock) Min() model3d.Coord3D {
	res := x.Solid.Min()
	res.X += x.X
	return res
}
// Max returns the wrapped solid's maximum bound shifted by X.
func (x *XBlock) Max() model3d.Coord3D {
	res := x.Solid.Max()
	res.X += x.X
	return res
}
// Contains reports whether c lies inside the translated solid, by testing
// the un-shifted point against the wrapped solid.
func (x *XBlock) Contains(c model3d.Coord3D) bool {
	return x.Solid.Contains(c.Sub(model3d.X(x.X)))
}
// BrickCylinder is a vertical cylinder (base at Z=0, axis along Z) whose
// surface is carved with a brick pattern.
type BrickCylinder struct {
	Height float64 // extent along Z, starting at 0
	Radius float64 // nominal outer radius
}
// Min returns the minimum corner of the bounding box (Z starts at 0).
func (b *BrickCylinder) Min() model3d.Coord3D {
	return model3d.Coord3D{X: -b.Radius, Y: -b.Radius}
}
// Max returns the maximum corner of the bounding box.
func (b *BrickCylinder) Max() model3d.Coord3D {
	return model3d.XYZ(b.Radius, b.Radius, b.Height)
}
// Contains reports whether c is inside the cylinder. The effective radius
// shrinks by up to BrickDivot near the brick-grid lines (both the Z grid
// and the angular grid), which carves shallow grooves into the surface.
func (b *BrickCylinder) Contains(c model3d.Coord3D) bool {
	if !model3d.InBounds(b, c) {
		return false
	}
	effectiveRadius := b.Radius
	// Angle shifted into [0, 2π); dist1/dist2 measure the signed offset
	// from the center of the current brick cell in Z and arc length.
	thetaDist := math.Atan2(c.Y, c.X) + math.Pi
	dist1 := math.Mod(c.Z, BrickZSpace) - BrickZSpace/2
	dist2 := b.Radius * (math.Mod(thetaDist, BrickThetaSpace) - BrickThetaSpace/2)
	for _, dist := range []float64{dist1, dist2} {
		effectiveRadius = math.Min(effectiveRadius, b.Radius-BrickDivot+math.Abs(dist))
	}
	return c.XY().Norm() < effectiveRadius
}
// TopBlocks is a ring of crenellations: TopBlockCount evenly spaced blocks
// around the rim at the top of a tower.
type TopBlocks struct {
	Height float64 // extent along Z, starting at 0
	Radius float64 // outer radius of the ring
}
// Min returns the minimum corner of the bounding box (Z starts at 0).
func (t *TopBlocks) Min() model3d.Coord3D {
	return model3d.Coord3D{X: -t.Radius, Y: -t.Radius}
}
// Max returns the maximum corner of the bounding box.
func (t *TopBlocks) Max() model3d.Coord3D {
	return model3d.XYZ(t.Radius, t.Radius, t.Height)
}
// Contains reports whether c lies inside one of the blocks: the point must
// fall in a thin ring (within TopBlockThickness of Radius) and inside the
// first half of one of TopBlockCount equal angular segments, producing the
// alternating block/gap pattern.
func (t *TopBlocks) Contains(c model3d.Coord3D) bool {
	if !model3d.InBounds(t, c) {
		return false
	}
	r := c.XY().Norm()
	if r > t.Radius || r < t.Radius-TopBlockThickness {
		return false
	}
	thetaDist := math.Atan2(c.Y, c.X) + math.Pi // angle in [0, 2π)
	spaceTheta := math.Pi * 2 / TopBlockCount
	return math.Mod(thetaDist, spaceTheta) < spaceTheta/2
}
type Wall struct {
X1 float64
X2 float64
}
func (w *Wall) Min() model3d.Coord3D {
return model3d.Coord3D{X: w.X1, Y: -WallThickness / 2}
}
func (w *Wall) Max() model3d.Coord3D {
return model3d.XYZ(w.X2, WallThickness/2, WallHeight)
}
func (w *Wall) Contains(c model3d.Coord3D) bool {
if !model3d.InBounds(w, c) {
return false
}
dist1 := math.Mod(c.X-w.X1, WallBrickXSpace) - WallBrickXSpace/2
dist2 := math.Mod(c.Z, BrickZSpace) - BrickZSpace/2
thickness := WallThickness / 2
for _, dist := range []float64{dist1, dist2} {
thickness = math.Min(thickness, thickness-BrickDivot+math.Abs(dist))
}
return math.Abs(c.Y) < thickness
} | examples/_deprecated/castle_tower/main.go | 0.626696 | 0.457258 | main.go | starcoder |
package darksky
// DataPoint contains weather data for a specific location and time.
type DataPoint struct {
Time *float64 `json:"time"`
Summary *string `json:"summary"`
Icon Icon `json:"icon"`
SunriseTime *float64 `json:"sunriseTime"`
SunsetTime *float64 `json:"sunsetTime"`
MoonPhase *float64 `json:"moonPhase"`
MoonPhaseError *float64 `json:"moonPhaseErorr"`
NearestStormDistance *float64 `json:"nearestStormDistance"`
NearestStormDistanceError *float64 `json:"NearestStormDistanceError"`
NearestStormBearing *float64 `json:"nearestStormBearing"`
NearestStormBearingError *float64 `json:"nearestStormBearingError"`
PrecipIntensity *float64 `json:"precipIntensity"`
PrecipIntensityError *float64 `json:"precipIntensityError"`
PrecipIntensityMax *float64 `json:"precipIntensityMax"`
PrecipIntensityMaxError *float64 `json:"precipIntensityMaxError"`
PrecipIntensityMaxTime *float64 `json:"precipIntensityMaxTime"`
PrecipProbability *float64 `json:"precipProbability"`
PrecipProbabilityError *float64 `json:"precipProbabilityError"`
PrecipType Precipitation `json:"precipType"`
PrecipAccumulation *float64 `json:"precipAccumulation"`
PrecipAccumulationError *float64 `json:"precipAccumulationError"`
Temperature *float64 `json:"temperature"`
TemperatureError *float64 `json:"temperatureError"`
TemperatureMin *float64 `json:"temperatureMin"`
TemperatureMinError *float64 `json:"temperatureMinError"`
TemperatureMinTime *float64 `json:"temperatureMinTime"`
TemperatureMax *float64 `json:"temperatureMax"`
TemperatureMaxError *float64 `json:"temperatureMaxError"`
TemperatureMaxTime *float64 `json:"temperatureMaxTime"`
ApparentTemperature *float64 `json:"apparentTemperature"`
ApparentTemperatureError *float64 `json:"apparentTemperatureError"`
ApparentTemperatureMin *float64 `json:"apparentTemperatureMin"`
ApparentTemperatureMinError *float64 `json:"apparentTemperatureMinError"`
ApparentTemperatureMinTime *float64 `json:"apparentTemperatureMinTime"`
ApparentTemperatureMax *float64 `json:"apparentTemperatureMax"`
ApparentTemperatureMaxError *float64 `json:"apparentTemperatureMaxError"`
ApparentTemperatureMaxTime *float64 `json:"apparentTemperatureMaxTime"`
DewPoint *float64 `json:"dewPoint"`
DewPointError *float64 `json:"dewPointError"`
WindSpeed *float64 `json:"windSpeed"`
WindSpeedError *float64 `json:"windSpeedError"`
WindBearing *float64 `json:"windBearing"`
WindBearingError *float64 `json:"windBearingError"`
CloudCover *float64 `json:"cloudCover"`
CloudCoverError *float64 `json:"cloudCoverError"`
Humidity *float64 `json:"humidity"`
HumidityError *float64 `json:"humidityError"`
Pressure *float64 `json:"pressure"`
PressureError *float64 `json:"pressureError"`
Visibility *float64 `json:"visibility"`
VisibilityError *float64 `json:"visibilityError"`
Ozone *float64 `json:"ozone"`
OzoneError *float64 `json:"ozoneError"`
} | datapoint.go | 0.716913 | 0.42316 | datapoint.go | starcoder |
package engine
// Copy returns a deep copy of a Phrase, duplicating every Note and
// preserving Length. The receiver is read-locked while copying.
func (p *Phrase) Copy() *Phrase {
	p.RLock()
	defer p.RUnlock()
	r := NewPhrase()
	for n := p.firstnote; n != nil; n = n.next {
		nn := n.Copy()
		r.InsertNote(nn)
	}
	r.Length = p.Length
	return r
}
// CopyAndAppend makes a copy of a Note and appends it to the end of the
// Phrase's note list, returning the copy. No lock is taken and Length is
// not updated here; callers in this file use it only on freshly created
// Phrases and are expected to recompute Length themselves.
func (p *Phrase) CopyAndAppend(n *Note) *Note {
	newn := n.Copy()
	if p.firstnote == nil {
		p.firstnote = newn
	} else {
		p.lastnote.next = newn
	}
	p.lastnote = newn
	return newn
}
// CutTime creates a new Phrase holding copies of the notes whose start
// time lies in the half-open range [fromclick, toclick). The receiver is
// read-locked and the new Phrase's Length is recomputed.
func (p *Phrase) CutTime(fromclick, toclick Clicks) *Phrase {
	p.RLock()
	defer p.RUnlock()
	newp := NewPhrase()
	for n := p.firstnote; n != nil; n = n.next {
		if n.Clicks >= fromclick && n.Clicks < toclick {
			newp.CopyAndAppend(n)
		}
	}
	newp.ResetLengthNoLock()
	return newp
}
// CutSound creates a new Phrase holding copies of just those notes whose
// Sound matches the given name. The receiver is read-locked while copying
// and the new Phrase's Length is recomputed.
func (p *Phrase) CutSound(sound string) *Phrase {
	p.RLock()
	defer p.RUnlock()
	out := NewPhrase()
	for nt := p.firstnote; nt != nil; nt = nt.next {
		if nt.Sound != sound {
			continue
		}
		out.CopyAndAppend(nt)
	}
	out.ResetLengthNoLock()
	return out
}
// AdjustTimes returns a new Phrase with every note's start time shifted
// by shift Clicks. The receiver is read-locked while copying and the new
// Phrase's Length is recomputed.
func (p *Phrase) AdjustTimes(shift Clicks) *Phrase {
	p.RLock()
	defer p.RUnlock()
	ret := NewPhrase()
	for n := p.firstnote; n != nil; n = n.next {
		newn := ret.CopyAndAppend(n)
		newn.Clicks += shift
	}
	ret.ResetLengthNoLock()
	return ret
}
// Merge merges a second Phrase into a Phrase
// NOTE: we get a Write lock on the Phrase,
// since we're actually changing it.
// NOTE(review): fromPhrase is traversed without taking its read lock —
// confirm callers guarantee it isn't mutated concurrently (and that
// fromPhrase is never the receiver itself, which would deadlock).
func (p *Phrase) Merge(fromPhrase *Phrase) *Phrase {
	p.Lock() // write lock, we're changing p
	defer p.Unlock()
	for nt := fromPhrase.firstnote; nt != nil; nt = nt.next {
		nn := nt.Copy()
		p.InsertNote(nn)
	}
	p.ResetLengthNoLock()
	return p
}
// Arpeggio returns an arpeggiated version of the phrase.
// One way of describing is that all the notes have been
// separated and then put back together, back-to-back.
// The receiver is read-locked while copying; the result's Length is the
// sum of the note durations.
func (p *Phrase) Arpeggio() *Phrase {
	p.RLock()
	defer p.RUnlock()
	lastend := Clicks(0)
	r := NewPhrase()
	for nt := p.firstnote; nt != nil; nt = nt.next {
		nn := nt.Copy()
		nn.Clicks = lastend
		r.InsertNote(nn)
		d := nt.Duration
		if d == 0 {
			d = 1 // zero-duration notes still advance by one click
		}
		lastend += d
	}
	r.Length = lastend
	return r
}
// Step returns a stepped version of the Phrase: each distinct start time
// is quantized onto a uniform grid of stepsize Clicks, and every note's
// Duration becomes stepsize. Notes sharing a start time (chords) stay
// together on the same step. The receiver is read-locked while copying.
func (p *Phrase) Step(stepsize Clicks) *Phrase {
	p.RLock()
	defer p.RUnlock()
	first := true
	lasttime := Clicks(0)
	steptime := Clicks(0)
	r := NewPhrase()
	for nt := p.firstnote; nt != nil; nt = nt.next {
		// Notes that are at the same time (like chords)
		// are still at the same time.
		if !first && nt.Clicks != lasttime {
			steptime += stepsize
			lasttime = nt.Clicks
		}
		first = false
		newnt := nt.Copy()
		newnt.Clicks = steptime
		newnt.Duration = stepsize
		r.InsertNote(newnt)
	}
	r.Length = steptime + stepsize
	return (r)
}
// Transpose returns a copy of the Phrase with every note's pitch shifted
// by delta. The receiver is read-locked while copying. Note that the
// uint8 conversion wraps on over/underflow, matching the prior behavior.
func (p *Phrase) Transpose(delta int) *Phrase {
	p.RLock()
	defer p.RUnlock()
	out := NewPhrase()
	for nt := p.firstnote; nt != nil; nt = nt.next {
		copied := out.CopyAndAppend(nt)
		copied.Pitch = uint8(int(copied.Pitch) + delta)
	}
	return out
}
// LowestPitch returns the lowest pitch in a Phrase (127 when the Phrase
// is empty). The receiver is read-locked.
// NOTE(review): the delta parameter is never used — confirm whether it
// can be removed (callers would need updating) or was meant to offset
// the returned pitch.
func (p *Phrase) LowestPitch(delta int) uint8 {
	p.RLock()
	defer p.RUnlock()
	lowest := uint8(127)
	for nt := p.firstnote; nt != nil; nt = nt.next {
		if nt.Pitch < lowest {
			lowest = nt.Pitch
		}
	}
	return lowest
}
// Legato extends the duration of each note to abutt the start of the next note.
// Doesn't modify the duration of the last note.
// It operates on a Copy of the receiver (Copy takes the read lock), so no
// explicit locking is needed here.
func (p *Phrase) Legato() *Phrase {
	r := p.Copy()
	for nt := r.firstnote; nt != nil; nt = nt.next {
		if nt.IsNote() {
			nextt := r.NextTime(nt.Clicks)
			// notes at the end of the phrase aren't touched
			if nextt >= 0 {
				nt.Duration = nextt - nt.Clicks
			}
		}
	}
	return r
}
// AtTime returns those notes in the specified phrase that are
// sounding at the specified time. If a note ends exactly
// at the specified time, it is not included.
// The receiver is read-locked; the new Phrase's Length is recomputed.
func (p *Phrase) AtTime(tm Clicks) *Phrase {
	p.RLock()
	defer p.RUnlock()
	newp := NewPhrase()
	for n := p.firstnote; n != nil; n = n.next {
		if n.Clicks <= tm && n.EndOf() > tm {
			// Assumes Phrase is already sorted, so always append to end of new phrase
			newp.Append(n.Copy())
		}
	}
	newp.ResetLengthNoLock()
	return newp
}
// NextTime returns the start time of the first note strictly after st,
// or -1 if no note starts later. The receiver is read-locked.
func (p *Phrase) NextTime(st Clicks) Clicks {
	p.RLock()
	defer p.RUnlock()
	for nt := p.firstnote; nt != nil; nt = nt.next {
		if nt.Clicks > st {
			return nt.Clicks
		}
	}
	return Clicks(-1)
}
/*
// Scadjust returns a Phrase where notes have been adjusted
// to be on a particular Scale
func (p *Phhrase) Scadjust(mel,scale) {
r := NewPhrase()
scarr = []
for ( nt in scale )
scarr[canonic(nt)] = 1
for ( nt in mel ) {
if ( nt.type & (NOTE|NOTEOFF|NOTEON) ) {
inc = sign = 1
cnt = 0
# Don't do computation with nt.pitch directly,
# because negative pitches are invalid
# and get adjusted automatically
ptch = nt.pitch
while ( ! (canonic(ptch) in scarr) && cnt++ < 100 ) {
ptch += (sign*inc)
inc = inc + 1
sign = -sign
}
nt.pitch = ptch
if ( cnt >= 100 ) {
print("Something's amiss in scadjust, for nt=",nt)
continue
}
}
r |= nt
}
return(r)
}
*/
package util
import (
"fmt"
"reflect"
"testing"
"time"
"unsafe"
"github.com/stretchr/testify/assert"
)
// AssertDeepCopyEqual checks to see if two variables have the same values but DO NOT share any memory
// There is currently a special case for `time.loc` (as this code traverses into unexported fields)
// Both arguments must have exactly the same dynamic type; failures are
// reported through testify's assert on t, and traversal stops at the
// first mismatch.
func AssertDeepCopyEqual(t *testing.T, a interface{}, b interface{}) {
	v1 := reflect.ValueOf(a)
	v2 := reflect.ValueOf(b)
	if !assert.Equal(t, v1.Type(), v2.Type()) {
		return
	}
	traverseDeepCopy(t, v1, v2, v1.Type().String())
}
func traverseDeepCopy(t *testing.T, v1 reflect.Value, v2 reflect.Value, name string) bool {
switch v1.Kind() {
case reflect.Array:
for i := 0; i < v1.Len(); i++ {
if !traverseDeepCopy(t, v1.Index(i), v2.Index(i), fmt.Sprintf("%s[%v]", name, i)) {
return false
}
}
return true
case reflect.Slice:
if v1.IsNil() || v2.IsNil() {
return assert.Equal(t, v1.IsNil(), v2.IsNil(), "%s are not both nil %+v, %+v", name, v1, v2)
}
if !assert.Equal(t, v1.Len(), v2.Len(), "%s did not have the same length", name) {
return false
}
// A slice with cap 0
if v1.Cap() != 0 && !assert.NotEqual(t, v1.Pointer(), v2.Pointer(), "%s point to the same slice %v == %v", name, v1.Pointer(), v2.Pointer()) {
return false
}
v1c := v1.Cap()
v2c := v2.Cap()
if v1c > 0 && v2c > 0 && v1.Slice(0, v1c).Slice(v1c-1, v1c-1).Pointer() == v2.Slice(0, v2c).Slice(v2c-1, v2c-1).Pointer() {
return assert.Fail(t, "", "%s share some underlying memory", name)
}
for i := 0; i < v1.Len(); i++ {
if !traverseDeepCopy(t, v1.Index(i), v2.Index(i), fmt.Sprintf("%s[%v]", name, i)) {
return false
}
}
return true
case reflect.Interface:
if v1.IsNil() || v2.IsNil() {
return assert.Equal(t, v1.IsNil(), v2.IsNil(), "%s are not both nil", name)
}
return traverseDeepCopy(t, v1.Elem(), v2.Elem(), name)
case reflect.Ptr:
local := reflect.ValueOf(time.Local).Pointer()
if local == v1.Pointer() && local == v2.Pointer() {
return true
}
if !assert.NotEqual(t, v1.Pointer(), v2.Pointer(), "%s points to the same memory", name) {
return false
}
return traverseDeepCopy(t, v1.Elem(), v2.Elem(), name)
case reflect.Struct:
for i, n := 0, v1.NumField(); i < n; i++ {
if !traverseDeepCopy(t, v1.Field(i), v2.Field(i), name+"."+v1.Type().Field(i).Name) {
return false
}
}
return true
case reflect.Map:
if v1.IsNil() || v2.IsNil() {
return assert.Equal(t, v1.IsNil(), v2.IsNil(), "%s are not both nil", name)
}
if !assert.Equal(t, v1.Len(), v2.Len(), "%s are not the same length", name) {
return false
}
if !assert.NotEqual(t, v1.Pointer(), v2.Pointer(), "%s point to the same memory", name) {
return false
}
for _, k := range v1.MapKeys() {
val1 := v1.MapIndex(k)
val2 := v2.MapIndex(k)
if !assert.True(t, val1.IsValid(), "%s is an invalid key in %s", k, name) {
return false
}
if !assert.True(t, val2.IsValid(), "%s is an invalid key in %s", k, name) {
return false
}
if !traverseDeepCopy(t, val1, val2, name+fmt.Sprintf("%s[%s]", name, k)) {
return false
}
}
return true
default:
if v1.CanInterface() && v2.CanInterface() {
return assert.Equal(t, v1.Interface(), v2.Interface(), "%s was not equal", name)
}
e1 := reflect.NewAt(v1.Type(), unsafe.Pointer(v1.UnsafeAddr())).Elem().Interface()
e2 := reflect.NewAt(v2.Type(), unsafe.Pointer(v2.UnsafeAddr())).Elem().Interface()
return assert.Equal(t, e1, e2, "%s (unexported) was not equal", name)
}
} | util/assert.go | 0.69035 | 0.5564 | assert.go | starcoder |
package sc
// OpCode is a single NeoVM instruction opcode, encoded as one byte.
// The values and their comments mirror the C# reference implementation:
// https://github.com/neo-project/neo-vm/blob/master/src/neo-vm/OpCode.cs
type OpCode byte
const (
// Constants
PUSHINT8 OpCode = 0x00 // Operand Size = 1. Pushes a 1-byte signed integer onto the stack.
PUSHINT16 OpCode = 0x01 // Operand Size = 2. Pushes a 2-bytes signed integer onto the stack.
PUSHINT32 OpCode = 0x02 // Operand Size = 4. Pushes a 4-bytes signed integer onto the stack.
PUSHINT64 OpCode = 0x03 // Operand Size = 8. Pushes a 8-bytes signed integer onto the stack.
PUSHINT128 OpCode = 0x04 // Operand Size = 16. Pushes a 16-bytes signed integer onto the stack.
PUSHINT256 OpCode = 0x05 // Operand Size = 32. Pushes a 32-bytes signed integer onto the stack.
PUSHA OpCode = 0x0A // Converts the 4-bytes offset to a "Pointer", and pushes it onto the stack.
PUSHNULL OpCode = 0x0B // "null" is pushed onto the stack.
PUSHDATA1 OpCode = 0x0C // Operand SizePrefix = 1. The next byte contains the number of bytes to be pushed onto the stack.
PUSHDATA2 OpCode = 0x0D // Operand SizePrefix = 2. The next two bytes contains the number of bytes to be pushed onto the stack.
PUSHDATA4 OpCode = 0x0E // Operand SizePrefix = 4. The next four bytes contains the number of bytes to be pushed onto the stack.
PUSHM1 OpCode = 0x0F // The number -1 is pushed onto the stack.
PUSH0 OpCode = 0x10 // The number 0 is pushed onto the stack.
PUSH1 OpCode = 0x11 // The number 1 is pushed onto the stack.
PUSH2 OpCode = 0x12 // The number 2 is pushed onto the stack.
PUSH3 OpCode = 0x13 // The number 3 is pushed onto the stack.
PUSH4 OpCode = 0x14 // The number 4 is pushed onto the stack.
PUSH5 OpCode = 0x15 // The number 5 is pushed onto the stack.
PUSH6 OpCode = 0x16 // The number 6 is pushed onto the stack.
PUSH7 OpCode = 0x17 // The number 7 is pushed onto the stack.
PUSH8 OpCode = 0x18 // The number 8 is pushed onto the stack.
PUSH9 OpCode = 0x19 // The number 9 is pushed onto the stack.
PUSH10 OpCode = 0x1A // The number 10 is pushed onto the stack.
PUSH11 OpCode = 0x1B // The number 11 is pushed onto the stack.
PUSH12 OpCode = 0x1C // The number 12 is pushed onto the stack.
PUSH13 OpCode = 0x1D // The number 13 is pushed onto the stack.
PUSH14 OpCode = 0x1E // The number 14 is pushed onto the stack.
PUSH15 OpCode = 0x1F // The number 15 is pushed onto the stack.
PUSH16 OpCode = 0x20 // The number 16 is pushed onto the stack.
// Flow control
NOP OpCode = 0x21 // The "NOP" operation does nothing. It is intended to fill in space if opcodes are patched.
JMP OpCode = 0x22 // Operand Size = 1. Unconditionally transfers control to a target instruction. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMP_L OpCode = 0x23 // Operand Size = 4. Unconditionally transfers control to a target instruction. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
JMPIF OpCode = 0x24 // Operand Size = 1. Transfers control to a target instruction if the value is "true", not "null", or non-zero. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMPIF_L OpCode = 0x25 // Operand Size = 4. Transfers control to a target instruction if the value is "true", not "null", or non-zero. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
JMPIFNOT OpCode = 0x26 // Operand Size = 1. Transfers control to a target instruction if the value is "false", a "null" reference, or zero. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMPIFNOT_L OpCode = 0x27 // Operand Size = 4. Transfers control to a target instruction if the value is "false", a "null" reference, or zero. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
JMPEQ OpCode = 0x28 // Operand Size = 1. Transfers control to a target instruction if two values are equal. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMPEQ_L OpCode = 0x29 // Operand Size = 4. Transfers control to a target instruction if two values are equal. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
JMPNE OpCode = 0x2A // Operand Size = 1. Transfers control to a target instruction when two values are not equal. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMPNE_L OpCode = 0x2B // Operand Size = 4. Transfers control to a target instruction when two values are not equal. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
JMPGT OpCode = 0x2C // Operand Size = 1. Transfers control to a target instruction if the first value is greater than the second value. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMPGT_L OpCode = 0x2D // Operand Size = 4. Transfers control to a target instruction if the first value is greater than the second value. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
JMPGE OpCode = 0x2E // Operand Size = 1. Transfers control to a target instruction if the first value is greater than or equal to the second value. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMPGE_L OpCode = 0x2F // Operand Size = 4. Transfers control to a target instruction if the first value is greater than or equal to the second value. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
JMPLT OpCode = 0x30 // Operand Size = 1. Transfers control to a target instruction if the first value is less than the second value. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMPLT_L OpCode = 0x31 // Operand Size = 4. Transfers control to a target instruction if the first value is less than the second value. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
JMPLE OpCode = 0x32 // Operand Size = 1. Transfers control to a target instruction if the first value is less than or equal to the second value. The target instruction is represented as a 1-byte signed offset from the beginning of the current instruction.
JMPLE_L OpCode = 0x33 // Operand Size = 4. Transfers control to a target instruction if the first value is less than or equal to the second value. The target instruction is represented as a 4-bytes signed offset from the beginning of the current instruction.
CALL OpCode = 0x34 // Operand Size = 1. Calls the function at the target address which is represented as a 1-byte signed offset from the beginning of the current instruction.
CALL_L OpCode = 0x35 // Operand Size = 4. Calls the function at the target address which is represented as a 4-bytes signed offset from the beginning of the current instruction.
CALLA OpCode = 0x36 // Pop the address of a function from the stack, and call the function.
CALLT OpCode = 0x37 // Operand Size = 2. Calls the function which is described by the token.
ABORT OpCode = 0x38 // It turns the vm state to FAULT immediately, and cannot be caught.
ASSERT OpCode = 0x39 // Pop the top value of the stack, if it false, then exit vm execution and set vm state to FAULT.
THROW OpCode = 0x3A // Pop the top value of the stack, and throw it.
TRY OpCode = 0x3B // Operand Size = 2. TRY CatchOffset(sbyte) FinallyOffset(sbyte). If there's no catch body, set CatchOffset 0. If there's no finally body, set FinallyOffset 0.
TRY_L OpCode = 0x3C // Operand Size = 8. TRY_L CatchOffset(int) FinallyOffset(int). If there's no catch body, set CatchOffset 0. If there's no finally body, set FinallyOffset 0.
ENDTRY OpCode = 0x3D // Operand Size = 1. Ensures that the appropriate surrounding finally blocks are executed. And then unconditionally transfers control to the specific target instruction, represented as a 1-byte signed offset from the beginning of the current instruction.
ENDTRY_L OpCode = 0x3E // Operand Size = 4. Ensures that the appropriate surrounding finally blocks are executed. And then unconditionally transfers control to the specific target instruction, represented as a 4-byte signed offset from the beginning of the current instruction.
ENDFINALLY OpCode = 0x3F // End finally, If no exception happen or be catched, vm will jump to the target instruction of ENDTRY/ENDTRY_L. Otherwise vm will rethrow the exception to upper layer.
RET OpCode = 0x40 // Returns from the current method.
SYSCALL OpCode = 0x41 // Operand Size = 4. Calls to an interop service.
// Stack
DEPTH OpCode = 0x43 // Puts the number of stack items onto the stack.
DROP OpCode = 0x45 // Removes the top stack item.
NIP OpCode = 0x46 // Removes the second-to-top stack item.
XDROP OpCode = 0x48 // The item n back in the main stack is removed.
CLEAR OpCode = 0x49 // Clear the stack.
DUP OpCode = 0x4A // Duplicates the top stack item.
OVER OpCode = 0x4B // Copies the second-to-top stack item to the top.
PICK OpCode = 0x4D // The item n back in the stack is copied to the top.
TUCK OpCode = 0x4E // The item at the top of the stack is copied and inserted before the second-to-top item.
SWAP OpCode = 0x50 // The top two items on the stack are swapped.
ROT OpCode = 0x51 // The top three items on the stack are rotated to the left.
ROLL OpCode = 0x52 // The item n back in the stack is moved to the top.
REVERSE3 OpCode = 0x53 // Reverse the order of the top 3 items on the stack.
REVERSE4 OpCode = 0x54 // Reverse the order of the top 4 items on the stack.
REVERSEN OpCode = 0x55 // Pop the number N on the stack, and reverse the order of the top N items on the stack.
// Slot
INITSSLOT OpCode = 0x56 // Operand Size = 1. Initialize the static field list for the current execution context.
INITSLOT OpCode = 0x57 // Operand Size = 2. Initialize the argument slot and the local variable list for the current execution context.
LDSFLD0 OpCode = 0x58 // Loads the static field at index 0 onto the evaluation stack.
LDSFLD1 OpCode = 0x59 // Loads the static field at index 1 onto the evaluation stack.
LDSFLD2 OpCode = 0x5A // Loads the static field at index 2 onto the evaluation stack.
LDSFLD3 OpCode = 0x5B // Loads the static field at index 3 onto the evaluation stack.
LDSFLD4 OpCode = 0x5C // Loads the static field at index 4 onto the evaluation stack.
LDSFLD5 OpCode = 0x5D // Loads the static field at index 5 onto the evaluation stack.
LDSFLD6 OpCode = 0x5E // Loads the static field at index 6 onto the evaluation stack.
LDSFLD OpCode = 0x5F // Operand Size = 1. Loads the static field at a specified index onto the evaluation stack. The index is represented as a 1-byte unsigned integer.
STSFLD0 OpCode = 0x60 // Stores the value on top of the evaluation stack in the static field list at index 0.
STSFLD1 OpCode = 0x61 // Stores the value on top of the evaluation stack in the static field list at index 1.
STSFLD2 OpCode = 0x62 // Stores the value on top of the evaluation stack in the static field list at index 2.
STSFLD3 OpCode = 0x63 // Stores the value on top of the evaluation stack in the static field list at index 3.
STSFLD4 OpCode = 0x64 // Stores the value on top of the evaluation stack in the static field list at index 4.
STSFLD5 OpCode = 0x65 // Stores the value on top of the evaluation stack in the static field list at index 5.
STSFLD6 OpCode = 0x66 // Stores the value on top of the evaluation stack in the static field list at index 6.
STSFLD OpCode = 0x67 // Operand Size = 1. Stores the value on top of the evaluation stack in the static field list at a specified index. The index is represented as a 1-byte unsigned integer.
LDLOC0 OpCode = 0x68 // Loads the local variable at index 0 onto the evaluation stack.
LDLOC1 OpCode = 0x69 // Loads the local variable at index 1 onto the evaluation stack.
LDLOC2 OpCode = 0x6A // Loads the local variable at index 2 onto the evaluation stack.
LDLOC3 OpCode = 0x6B // Loads the local variable at index 3 onto the evaluation stack.
LDLOC4 OpCode = 0x6C // Loads the local variable at index 4 onto the evaluation stack.
LDLOC5 OpCode = 0x6D // Loads the local variable at index 5 onto the evaluation stack.
LDLOC6 OpCode = 0x6E // Loads the local variable at index 6 onto the evaluation stack.
LDLOC OpCode = 0x6F // Operand Size = 1. Loads the local variable at a specified index onto the evaluation stack. The index is represented as a 1-byte unsigned integer.
STLOC0 OpCode = 0x70 // Stores the value on top of the evaluation stack in the local variable list at index 0.
STLOC1 OpCode = 0x71 // Stores the value on top of the evaluation stack in the local variable list at index 1.
STLOC2 OpCode = 0x72 // Stores the value on top of the evaluation stack in the local variable list at index 2.
STLOC3 OpCode = 0x73 // Stores the value on top of the evaluation stack in the local variable list at index 3.
STLOC4 OpCode = 0x74 // Stores the value on top of the evaluation stack in the local variable list at index 4.
STLOC5 OpCode = 0x75 // Stores the value on top of the evaluation stack in the local variable list at index 5.
STLOC6 OpCode = 0x76 // Stores the value on top of the evaluation stack in the local variable list at index 6.
STLOC OpCode = 0x77 // Operand Size = 1. Stores the value on top of the evaluation stack in the local variable list at a specified index. The index is represented as a 1-byte unsigned integer.
LDARG0 OpCode = 0x78 // Loads the argument at index 0 onto the evaluation stack.
LDARG1 OpCode = 0x79 // Loads the argument at index 1 onto the evaluation stack.
LDARG2 OpCode = 0x7A // Loads the argument at index 2 onto the evaluation stack.
LDARG3 OpCode = 0x7B // Loads the argument at index 3 onto the evaluation stack.
LDARG4 OpCode = 0x7C // Loads the argument at index 4 onto the evaluation stack.
LDARG5 OpCode = 0x7D // Loads the argument at index 5 onto the evaluation stack.
LDARG6 OpCode = 0x7E // Loads the argument at index 6 onto the evaluation stack.
LDARG OpCode = 0x7F // Operand Size = 1. Loads the argument at a specified index onto the evaluation stack. The index is represented as a 1-byte unsigned integer.
STARG0 OpCode = 0x80 // Stores the value on top of the evaluation stack in the argument slot at index 0.
STARG1 OpCode = 0x81 // Stores the value on top of the evaluation stack in the argument slot at index 1.
STARG2 OpCode = 0x82 // Stores the value on top of the evaluation stack in the argument slot at index 2.
STARG3 OpCode = 0x83 // Stores the value on top of the evaluation stack in the argument slot at index 3.
STARG4 OpCode = 0x84 // Stores the value on top of the evaluation stack in the argument slot at index 4.
STARG5 OpCode = 0x85 // Stores the value on top of the evaluation stack in the argument slot at index 5.
STARG6 OpCode = 0x86 // Stores the value on top of the evaluation stack in the argument slot at index 6.
STARG OpCode = 0x87 // Operand Size = 1. Stores the value on top of the evaluation stack in the argument slot at a specified index. The index is represented as a 1-byte unsigned integer.
// Splice
NEWBUFFER OpCode = 0x88 // Creates a new "Buffer" and pushes it onto the stack.
MEMCPY OpCode = 0x89 // Copies a range of bytes from one "Buffer" to another.
CAT OpCode = 0x8B // Concatenates two strings.
SUBSTR OpCode = 0x8C // Returns a section of a string.
LEFT OpCode = 0x8D // Keeps only characters left of the specified point in a string.
RIGHT OpCode = 0x8E // Keeps only characters right of the specified point in a string.
// Bitwise logic
INVERT OpCode = 0x90 // Flips all of the bits in the input.
AND OpCode = 0x91 // Boolean and between each bit in the inputs.
OR OpCode = 0x92 // Boolean or between each bit in the inputs.
XOR OpCode = 0x93 // Boolean exclusive or between each bit in the inputs.
EQUAL OpCode = 0x97 // Returns 1 if the inputs are exactly equal, 0 otherwise.
NOTEQUAL OpCode = 0x98 // Returns 1 if the inputs are not equal, 0 otherwise.
// Arithmetic
SIGN OpCode = 0x99 // Puts the sign of top stack item on top of the main stack. If value is negative, put -1; if positive, put 1; if value is zero, put 0.
ABS OpCode = 0x9A // The input is made positive.
NEGATE OpCode = 0x9B // The sign of the input is flipped.
INC OpCode = 0x9C // 1 is added to the input.
DEC OpCode = 0x9D // 1 is subtracted from the input.
ADD OpCode = 0x9E // a is added to b.
SUB OpCode = 0x9F // b is subtracted from a.
MUL OpCode = 0xA0 // a is multiplied by b.
DIV OpCode = 0xA1 // a is divided by b.
MOD OpCode = 0xA2 // Returns the remainder after dividing a by b.
POW OpCode = 0xA3 // The result of raising value to the exponent power.
SQRT OpCode = 0xA4 // Returns the square root of a specified number.
SHL OpCode = 0xA8 // Shifts a left b bits preserving sign.
SHR OpCode = 0xA9 // Shifts a right b bits preserving sign.
NOT OpCode = 0xAA // If the input is 0 or 1 it is flipped. Otherwise the output will be 0.
BOOLAND OpCode = 0xAB // If both a and b are not 0 the output is 1. Otherwise 0.
BOOLOR OpCode = 0xAC // If a or b is not 0 the output is 1. Otherwise 0.
NZ OpCode = 0xB1 // Returns 0 if the input is 0. 1 otherwise.
NUMEQUAL OpCode = 0xB3 // Returns 1 if the numbers are equal 0 otherwise.
NUMNOTEQUAL OpCode = 0xB4 // Returns 1 if the numbers are not equal 0 otherwise.
LT OpCode = 0xB5 // Returns 1 if a is less than b, 0 otherwise.
LE OpCode = 0xB6 // Returns 1 if a is less than or equal to b, 0 otherwise.
GT OpCode = 0xB7 // Returns 1 if a is greater than b, 0 otherwise.
GE OpCode = 0xB8 // Returns 1 if a is greater than or equal to b, 0 otherwise.
MIN OpCode = 0xB9 // Returns the smaller of a and b.
MAX OpCode = 0xBA // Returns the larger of a and b.
WITHIN OpCode = 0xBB // Returns 1 if x is within the specified range (left-inclusive), 0 otherwise.
// Compound-type
PACK OpCode = 0xC0 // A value n is taken from top of main stack. The next n items on main stack are removed, put inside n-sized array and this array is put on top of the main stack.
UNPACK OpCode = 0xC1 // An array is removed from top of the main stack. Its elements are put on top of the main stack (in reverse order) and the array size is also put on main stack.
NEWARRAY0 OpCode = 0xC2 // An empty array (with size 0) is put on top of the main stack.
NEWARRAY OpCode = 0xC3 // A value n is taken from top of main stack. A null-filled array with size n is put on top of the main stack.
NEWARRAY_T OpCode = 0xC4 // Operand Size = 1. A value n is taken from top of main stack. An array of type T with size n is put on top of the main stack.
NEWSTRUCT0 OpCode = 0xC5 // An empty struct (with size 0) is put on top of the main stack.
NEWSTRUCT OpCode = 0xC6 // A value n is taken from top of main stack. A zero-filled struct type with size n is put on top of the main stack.
NEWMAP OpCode = 0xC8 // A Map is created and put on top of the main stack.
SIZE OpCode = 0xCA // An array is removed from top of the main stack. Its size is put on top of the main stack.
HASKEY OpCode = 0xCB // An input index n (or key) and an array (or map) are removed from the top of the main stack. Puts True on top of main stack if array[n] (or map[n]) exist, and False otherwise.
KEYS OpCode = 0xCC // A map is taken from top of the main stack. The keys of this map are put on top of the main stack.
VALUES OpCode = 0xCD // A map is taken from top of the main stack. The values of this map are put on top of the main stack.
PICKITEM OpCode = 0xCE // An input index n (or key) and an array (or map) are taken from main stack. Element array[n] (or map[n]) is put on top of the main stack.
APPEND OpCode = 0xCF // The item on top of main stack is removed and appended to the second item on top of the main stack.
SETITEM OpCode = 0xD0 // A value v, index n (or key) and an array (or map) are taken from main stack. Attribution array[n]=v (or map[n]=v) is performed.
REVERSEITEMS OpCode = 0xD1 // An array is removed from the top of the main stack and its elements are reversed.
REMOVE OpCode = 0xD2 // An input index n (or key) and an array (or map) are removed from the top of the main stack. Element array[n] (or map[n]) is removed.
CLEARITEMS OpCode = 0xD3 // Remove all the items from the compound-type.
POPITEM OpCode = 0xD4 // Remove the last element from an array, and push it onto the stack.
// Types
ISNULL OpCode = 0xD8 // Returns "true" if the input is "null"; "false" otherwise.
ISTYPE OpCode = 0xD9 // Operand Size = 1. Returns "true" if the top item of the stack is of the specified type; "false" otherwise.
CONVERT OpCode = 0xDB // Operand Size = 1. Converts the top item of the stack to the specified type.
)
// OpCodePrices maps every VM opcode to its base execution price. Prices are
// expressed as powers of two, mirroring the reference NEO VM price table
// (the unit is presumed to match the C# reference implementation — confirm).
// Opcodes with price 0 (ABORT, RET, SYSCALL) are charged elsewhere or free.
var OpCodePrices = map[OpCode]int64{
	// Constant pushes
	PUSHINT8: 1 << 0,
	PUSHINT16: 1 << 0,
	PUSHINT32: 1 << 0,
	PUSHINT64: 1 << 0,
	PUSHINT128: 1 << 2,
	PUSHINT256: 1 << 2,
	PUSHA: 1 << 2,
	PUSHNULL: 1 << 0,
	// PUSHDATA prices scale with the maximum payload each variant can carry
	PUSHDATA1: 1 << 3,
	PUSHDATA2: 1 << 9,
	PUSHDATA4: 1 << 12,
	PUSHM1: 1 << 0,
	PUSH0: 1 << 0,
	PUSH1: 1 << 0,
	PUSH2: 1 << 0,
	PUSH3: 1 << 0,
	PUSH4: 1 << 0,
	PUSH5: 1 << 0,
	PUSH6: 1 << 0,
	PUSH7: 1 << 0,
	PUSH8: 1 << 0,
	PUSH9: 1 << 0,
	PUSH10: 1 << 0,
	PUSH11: 1 << 0,
	PUSH12: 1 << 0,
	PUSH13: 1 << 0,
	PUSH14: 1 << 0,
	PUSH15: 1 << 0,
	PUSH16: 1 << 0,
	// Flow control
	NOP: 1 << 0,
	JMP: 1 << 1,
	JMP_L: 1 << 1,
	JMPIF: 1 << 1,
	JMPIF_L: 1 << 1,
	JMPIFNOT: 1 << 1,
	JMPIFNOT_L: 1 << 1,
	JMPEQ: 1 << 1,
	JMPEQ_L: 1 << 1,
	JMPNE: 1 << 1,
	JMPNE_L: 1 << 1,
	JMPGT: 1 << 1,
	JMPGT_L: 1 << 1,
	JMPGE: 1 << 1,
	JMPGE_L: 1 << 1,
	JMPLT: 1 << 1,
	JMPLT_L: 1 << 1,
	JMPLE: 1 << 1,
	JMPLE_L: 1 << 1,
	CALL: 1 << 9,
	CALL_L: 1 << 9,
	CALLA: 1 << 9,
	CALLT: 1 << 15,
	ABORT: 0,
	ASSERT: 1 << 0,
	THROW: 1 << 9,
	TRY: 1 << 2,
	TRY_L: 1 << 2,
	ENDTRY: 1 << 2,
	ENDTRY_L: 1 << 2,
	ENDFINALLY: 1 << 2,
	RET: 0,
	SYSCALL: 0,
	// Stack manipulation
	DEPTH: 1 << 1,
	DROP: 1 << 1,
	NIP: 1 << 1,
	XDROP: 1 << 4,
	CLEAR: 1 << 4,
	DUP: 1 << 1,
	OVER: 1 << 1,
	PICK: 1 << 1,
	TUCK: 1 << 1,
	SWAP: 1 << 1,
	ROT: 1 << 1,
	ROLL: 1 << 4,
	REVERSE3: 1 << 1,
	REVERSE4: 1 << 1,
	REVERSEN: 1 << 4,
	// Slot access (static fields, locals, arguments)
	INITSSLOT: 1 << 4,
	INITSLOT: 1 << 6,
	LDSFLD0: 1 << 1,
	LDSFLD1: 1 << 1,
	LDSFLD2: 1 << 1,
	LDSFLD3: 1 << 1,
	LDSFLD4: 1 << 1,
	LDSFLD5: 1 << 1,
	LDSFLD6: 1 << 1,
	LDSFLD: 1 << 1,
	STSFLD0: 1 << 1,
	STSFLD1: 1 << 1,
	STSFLD2: 1 << 1,
	STSFLD3: 1 << 1,
	STSFLD4: 1 << 1,
	STSFLD5: 1 << 1,
	STSFLD6: 1 << 1,
	STSFLD: 1 << 1,
	LDLOC0: 1 << 1,
	LDLOC1: 1 << 1,
	LDLOC2: 1 << 1,
	LDLOC3: 1 << 1,
	LDLOC4: 1 << 1,
	LDLOC5: 1 << 1,
	LDLOC6: 1 << 1,
	LDLOC: 1 << 1,
	STLOC0: 1 << 1,
	STLOC1: 1 << 1,
	STLOC2: 1 << 1,
	STLOC3: 1 << 1,
	STLOC4: 1 << 1,
	STLOC5: 1 << 1,
	STLOC6: 1 << 1,
	STLOC: 1 << 1,
	LDARG0: 1 << 1,
	LDARG1: 1 << 1,
	LDARG2: 1 << 1,
	LDARG3: 1 << 1,
	LDARG4: 1 << 1,
	LDARG5: 1 << 1,
	LDARG6: 1 << 1,
	LDARG: 1 << 1,
	STARG0: 1 << 1,
	STARG1: 1 << 1,
	STARG2: 1 << 1,
	STARG3: 1 << 1,
	STARG4: 1 << 1,
	STARG5: 1 << 1,
	STARG6: 1 << 1,
	STARG: 1 << 1,
	// Splice (buffer/string operations)
	NEWBUFFER: 1 << 8,
	MEMCPY: 1 << 11,
	CAT: 1 << 11,
	SUBSTR: 1 << 11,
	LEFT: 1 << 11,
	RIGHT: 1 << 11,
	// Bitwise logic
	INVERT: 1 << 2,
	AND: 1 << 3,
	OR: 1 << 3,
	XOR: 1 << 3,
	EQUAL: 1 << 5,
	NOTEQUAL: 1 << 5,
	// Arithmetic
	SIGN: 1 << 2,
	ABS: 1 << 2,
	NEGATE: 1 << 2,
	INC: 1 << 2,
	DEC: 1 << 2,
	ADD: 1 << 3,
	SUB: 1 << 3,
	MUL: 1 << 3,
	DIV: 1 << 3,
	MOD: 1 << 3,
	POW: 1 << 6,
	SQRT: 1 << 11,
	SHL: 1 << 3,
	SHR: 1 << 3,
	NOT: 1 << 2,
	BOOLAND: 1 << 3,
	BOOLOR: 1 << 3,
	NZ: 1 << 2,
	NUMEQUAL: 1 << 3,
	NUMNOTEQUAL: 1 << 3,
	LT: 1 << 3,
	LE: 1 << 3,
	GT: 1 << 3,
	GE: 1 << 3,
	MIN: 1 << 3,
	MAX: 1 << 3,
	WITHIN: 1 << 3,
	// Compound types (arrays, structs, maps)
	PACK: 1 << 11,
	UNPACK: 1 << 11,
	NEWARRAY0: 1 << 4,
	NEWARRAY: 1 << 9,
	NEWARRAY_T: 1 << 9,
	NEWSTRUCT0: 1 << 4,
	NEWSTRUCT: 1 << 9,
	NEWMAP: 1 << 3,
	SIZE: 1 << 2,
	HASKEY: 1 << 6,
	KEYS: 1 << 4,
	VALUES: 1 << 13,
	PICKITEM: 1 << 6,
	APPEND: 1 << 13,
	SETITEM: 1 << 13,
	REVERSEITEMS: 1 << 13,
	REMOVE: 1 << 4,
	CLEARITEMS: 1 << 4,
	POPITEM: 1 << 4,
	// Type checks and conversion
	ISNULL: 1 << 1,
	ISTYPE: 1 << 1,
	CONVERT: 1 << 13,
}
package ledger
import "time"
// TransactionsInDateRange returns a new slice of transactions that are in the
// date range [start, end): transactions dated exactly at or after start are
// included, transactions at or after end are excluded. For day-granularity
// dates this keeps transactions on the day of start and drops those on the
// day of end.
func TransactionsInDateRange(trans []*Transaction, start, end time.Time) []*Transaction {
	var newlist []*Transaction
	for _, tran := range trans {
		// Inclusive start, exclusive end. The previous implementation shifted
		// start back by one second to fake inclusivity, which wrongly included
		// transactions with sub-second timestamps just before start.
		if !tran.Date.Before(start) && tran.Date.Before(end) {
			newlist = append(newlist, tran)
		}
	}
	return newlist
}
// Period is used to specify the length of a date range or frequency
type Period string
// Periods suppored by ledger
const (
PeriodWeek Period = "Weekly"
Period2Week Period = "BiWeekly"
PeriodMonth Period = "Monthly"
Period2Month Period = "BiMonthly"
PeriodQuarter Period = "Quarterly"
PeriodSemiYear Period = "SemiYearly"
PeriodYear Period = "Yearly"
)
func getDateBoundaries(per Period, start, end time.Time) []time.Time {
var incDays, incMonth, incYear int
var periodStart time.Time
switch per {
case PeriodWeek:
incDays = 7
for periodStart = time.Date(start.Year(), start.Month(), start.Day(), 0, 0, 0, 0, time.UTC); periodStart.Weekday() != time.Sunday; {
periodStart = periodStart.AddDate(0, 0, -1)
}
case Period2Week:
incDays = 14
for periodStart = time.Date(start.Year(), start.Month(), start.Day(), 0, 0, 0, 0, time.UTC); periodStart.Weekday() != time.Sunday; {
periodStart = periodStart.AddDate(0, 0, -1)
}
case PeriodMonth:
incMonth = 1
periodStart = time.Date(start.Year(), start.Month(), 1, 0, 0, 0, 0, time.UTC)
case Period2Month:
incMonth = 2
periodStart = time.Date(start.Year(), start.Month(), 1, 0, 0, 0, 0, time.UTC)
case PeriodQuarter:
incMonth = 3
switch start.Month() {
case time.January, time.February, time.March:
periodStart = time.Date(start.Year(), time.January, 1, 0, 0, 0, 0, time.UTC)
case time.April, time.May, time.June:
periodStart = time.Date(start.Year(), time.April, 1, 0, 0, 0, 0, time.UTC)
case time.July, time.August, time.September:
periodStart = time.Date(start.Year(), time.July, 1, 0, 0, 0, 0, time.UTC)
default:
periodStart = time.Date(start.Year(), time.October, 1, 0, 0, 0, 0, time.UTC)
}
case PeriodSemiYear:
incMonth = 6
switch start.Month() {
case time.January, time.February, time.March, time.April, time.May, time.June:
periodStart = time.Date(start.Year(), time.January, 1, 0, 0, 0, 0, time.UTC)
default:
periodStart = time.Date(start.Year(), time.July, 1, 0, 0, 0, 0, time.UTC)
}
case PeriodYear:
incYear = 1
periodStart = time.Date(start.Year(), time.January, 1, 0, 0, 0, 0, time.UTC)
}
boundaries := []time.Time{periodStart}
for periodStart.Before(end) || periodStart.Equal(end) {
periodStart = periodStart.AddDate(incYear, incMonth, incDays)
boundaries = append(boundaries, periodStart)
}
return boundaries
}
// RangeType is used to specify how the data is "split" into sections when
// reporting balances over consecutive date ranges (see BalancesByPeriod).
type RangeType string

const (
	// RangeSnapshot will have each section be the running total at the time of the snapshot
	RangeSnapshot RangeType = "Snapshot"
	// RangePartition will have each section be the accumulated value of the transactions within that partition's date range
	RangePartition RangeType = "Partition"
)
// RangeTransactions contains the transactions and the start and end time of the date range.
// Start and End are both inclusive day bounds as produced by TransactionsByPeriod.
type RangeTransactions struct {
	Start, End time.Time
	Transactions []*Transaction
}
// startEndTime scans trans and reports the earliest and latest transaction
// dates found. Both results are the zero time when trans is empty.
func startEndTime(trans []*Transaction) (start, end time.Time) {
	if len(trans) == 0 {
		return
	}
	start, end = trans[0].Date, trans[0].Date
	for _, t := range trans[1:] {
		// A date cannot be both before start and after end, so the two
		// updates are mutually exclusive.
		switch {
		case t.Date.Before(start):
			start = t.Date
		case t.Date.After(end):
			end = t.Date
		}
	}
	return
}
// TransactionsByPeriod partitions trans into consecutive date ranges of the
// given period and returns, for each range, the transactions that fall inside it.
func TransactionsByPeriod(trans []*Transaction, per Period) []*RangeTransactions {
	var results []*RangeTransactions
	if len(trans) == 0 {
		return results
	}
	first, last := startEndTime(trans)
	bounds := getDateBoundaries(per, first, last)
	for i := 1; i < len(bounds); i++ {
		lo, hi := bounds[i-1], bounds[i]
		results = append(results, &RangeTransactions{
			Start: lo,
			// Report an inclusive end date: one day before the next boundary.
			End:          hi.AddDate(0, 0, -1),
			Transactions: TransactionsInDateRange(trans, lo, hi),
		})
	}
	return results
}
// RangeBalance contains the account balances and the start and end time of the date range.
// Start and End are both inclusive day bounds as produced by BalancesByPeriod.
type RangeBalance struct {
	Start, End time.Time
	Balances []*Account
}
// BalancesByPeriod will return the account balances for each period.
func BalancesByPeriod(trans []*Transaction, per Period, rType RangeType) []*RangeBalance {
var results []*RangeBalance
if len(trans) < 1 {
return results
}
tStart, tEnd := startEndTime(trans)
boundaries := getDateBoundaries(per, tStart, tEnd)
bStart := boundaries[0]
for _, boundary := range boundaries[1:] {
bEnd := boundary
bTrans := TransactionsInDateRange(trans, bStart, bEnd)
// End date should be the last day (inclusive, so subtract 1 day)
results = append(results, &RangeBalance{Start: bStart, End: bEnd.AddDate(0, 0, -1), Balances: GetBalances(bTrans, []string{})})
if rType == RangePartition {
bStart = bEnd
}
}
return results
} | date.go | 0.754463 | 0.610076 | date.go | starcoder |
package impl
import (
. "github.com/gabz57/goledmatrix/canvas"
. "github.com/gabz57/goledmatrix/components"
"github.com/gabz57/goledmatrix/components/shapes"
"image"
"time"
)
// BouncingDot is a dot component whose position is integrated by a Movement
// and reflected off the edges of a bounding rectangle so it bounces.
type BouncingDot struct {
	move *Movement // position/velocity/acceleration integrator driving the dot
	dot *shapes.Dot // drawable dot, kept in sync with move's position on Update
	//dotAcceleration *ConstantAcceleration
	bounds image.Rectangle // rectangle the dot bounces within
	elapsedSinceSceneStart time.Duration // total time accumulated by Update; not read elsewhere in this file
}
// Predefined accelerations: groundReaction opposes gravity (9.81 towards TOP),
// noReaction is a zero acceleration. NOTE(review): neither is referenced in
// this file — confirm they are used elsewhere in the package before removing.
var groundReaction Acceleration = NewConstantAcceleration(9.81, TOP)
var noReaction Acceleration = NewConstantAcceleration(0, 0)
// NewBouncingDot builds a BouncingDot that starts at initialPosition with the
// given initial velocity, a constant acceleration of initialBottomAcceleration
// directed towards the bottom, and the rectangle it bounces within.
func NewBouncingDot(c Canvas, initialPosition Point, initialVelocity FloatingPoint, initialBottomAcceleration float64, bounds image.Rectangle) *BouncingDot {
	accelerations := []Acceleration{NewConstantAcceleration(initialBottomAcceleration, BOTTOM)}
	return &BouncingDot{
		move: NewMovement(
			initialPosition.Floating(),
			FloatingPoint{X: initialVelocity.X, Y: initialVelocity.Y},
			&accelerations,
		),
		dot:    shapes.NewDot(NewGraphic(nil, nil), initialPosition),
		bounds: bounds,
	}
}
// Update advances the dot by one simulation step of elapsedBetweenUpdate,
// bouncing it off the bounds, and returns true so the component is redrawn.
func (m *BouncingDot) Update(elapsedBetweenUpdate time.Duration) bool {
	// Accumulate total scene time (currently only tracked, not read here).
	m.elapsedSinceSceneStart += elapsedBetweenUpdate
	// advance the position by one step, make it bounce on bounds with exact values
	// NOTE(review): NextPosition is presumably multi-valued (position, velocity),
	// feeding both parameters of applyNextPosition — confirm its signature.
	m.dot.SetPosition(
		m.applyNextPosition(
			m.move.NextPosition(elapsedBetweenUpdate)).Int())
	return true
}
// FIXME: losing some POWER when bouncing, while we expect to bounce infinitely (float approximation after time integration)
// applyNextPosition reflects nextPosition back inside m.bounds when the step
// crossed an edge, flipping the corresponding velocity component so the dot
// bounces. It returns the corrected position and, as a side effect, writes the
// (possibly reflected) velocity back into m.move.
func (m *BouncingDot) applyNextPosition(nextPosition FloatingPoint, velocity FloatingPoint) FloatingPoint {
	var velocityCoefX, velocityCoefY float64 = 1, 1
	//var accelCoefX, accelCoefY float64 = 1, 1
	if int(nextPosition.X) < m.bounds.Min.X || int(nextPosition.X) >= m.bounds.Max.X {
		// moving to far to the LEFT or to the RIGHT, correcting overlaps
		if int(nextPosition.X) < m.bounds.Min.X {
			// Mirror around X = 0. NOTE(review): this assumes bounds.Min.X == 0;
			// the general reflection would be 2*Min.X - X — confirm Min is zero.
			nextPosition = FloatingPoint{
				X: -nextPosition.X,
				Y: nextPosition.Y,
			}
		} else {
			// Mirror around the right edge: X' = 2*Max.X - X.
			nextPosition = FloatingPoint{
				X: 2*float64(m.bounds.Max.X) - nextPosition.X,
				Y: nextPosition.Y,
			}
		}
		// reverse X velocity
		velocityCoefX = -1
	}
	if int(nextPosition.Y) < m.bounds.Min.Y || int(nextPosition.Y) >= m.bounds.Max.Y {
		// moving to far to the TOP or to the BOTTOM, correcting overlaps
		if int(nextPosition.Y) < m.bounds.Min.Y {
			// Mirror around Y = 0 (same Min == 0 assumption as above).
			nextPosition = FloatingPoint{
				X: nextPosition.X,
				Y: -nextPosition.Y,
			}
		} else {
			// Mirror around the bottom edge: Y' = 2*Max.Y - Y.
			nextPosition = FloatingPoint{
				X: nextPosition.X,
				Y: 2*float64(m.bounds.Max.Y) - nextPosition.Y,
			}
		}
		// reverse Y velocity
		velocityCoefY = -1
	}
	// Persist the possibly-flipped velocity for the next integration step.
	m.move.SetVelocity(FloatingPoint{
		X: velocity.X * velocityCoefX,
		Y: velocity.Y * velocityCoefY,
	})
	//var direction = DirectionToFloatingPoint(m.dotAcceleration.Direction())
	//m.dotAcceleration.SetDirection(FloatingPointToDirection(FloatingPoint{
	//	X: direction.X * accelCoefX,
	//	Y: direction.Y * accelCoefY,
	//}))
	return nextPosition
}
// Draw renders the dot onto the given canvas at its current position.
func (m *BouncingDot) Draw(c Canvas) error {
	return m.dot.Draw(c)
}
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// PlannerUser is a kiota-generated Graph model exposing the planner plans,
// tasks and plan references associated with a user.
type PlannerUser struct {
    PlannerDelta
    // The all property
    all []PlannerDeltaable
    // A collection containing the references to the plans that the user has marked as favorites.
    favoritePlanReferences PlannerFavoritePlanReferenceCollectionable
    // Read-only. Nullable. Returns the plannerPlans that the user marked as favorites.
    favoritePlans []PlannerPlanable
    // Read-only. Nullable. Returns the plannerPlans the user has access to.
    // (Original comment said plannerTasks — copy-paste artifact; confirm wording
    // against the Microsoft Graph plannerUser reference.)
    plans []PlannerPlanable
    // A collection containing references to the plans that were viewed recently by the user in apps that support recent plans.
    recentPlanReferences PlannerRecentPlanReferenceCollectionable
    // Read-only. Nullable. Returns the plannerPlans that have been recently viewed by the user in apps that support recent plans.
    recentPlans []PlannerPlanable
    // Read-only. Nullable. Returns the plannerPlans contained by the plannerRosters the user is a member.
    rosterPlans []PlannerPlanable
    // Read-only. Nullable. Returns the plannerTasks assigned to the user.
    tasks []PlannerTaskable
}
// NewPlannerUser instantiates a new plannerUser and sets the default values.
func NewPlannerUser()(*PlannerUser) {
    return &PlannerUser{
        PlannerDelta: *NewPlannerDelta(),
    }
}
// CreatePlannerUserFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// The parse node is unused here because plannerUser has no derived types to discriminate between.
func CreatePlannerUserFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewPlannerUser(), nil
}
// GetAll gets the all property value. The all property.
// Safe to call on a nil receiver (returns nil), per kiota convention.
func (m *PlannerUser) GetAll()([]PlannerDeltaable) {
    // Early return replaces the redundant else branch (Go "indent error flow").
    if m == nil {
        return nil
    }
    return m.all
}
// GetFavoritePlanReferences gets the favoritePlanReferences property value. A collection containing the references to the plans that the user has marked as favorites.
// Safe to call on a nil receiver (returns nil), per kiota convention.
func (m *PlannerUser) GetFavoritePlanReferences()(PlannerFavoritePlanReferenceCollectionable) {
    // Early return replaces the redundant else branch.
    if m == nil {
        return nil
    }
    return m.favoritePlanReferences
}
// GetFavoritePlans gets the favoritePlans property value. Read-only. Nullable. Returns the plannerPlans that the user marked as favorites.
// Safe to call on a nil receiver (returns nil), per kiota convention.
func (m *PlannerUser) GetFavoritePlans()([]PlannerPlanable) {
    // Early return replaces the redundant else branch.
    if m == nil {
        return nil
    }
    return m.favoritePlans
}
// GetFieldDeserializers returns a map from JSON field name to a callback that
// deserializes that field from a parse node into this model. Entries declared
// here are layered on top of the embedded PlannerDelta's deserializer map.
func (m *PlannerUser) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := m.PlannerDelta.GetFieldDeserializers()
    res["all"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreatePlannerDeltaFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            // Element-wise assertion converts the []Parsable into the typed slice.
            res := make([]PlannerDeltaable, len(val))
            for i, v := range val {
                res[i] = v.(PlannerDeltaable)
            }
            m.SetAll(res)
        }
        return nil
    }
    res["favoritePlanReferences"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreatePlannerFavoritePlanReferenceCollectionFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetFavoritePlanReferences(val.(PlannerFavoritePlanReferenceCollectionable))
        }
        return nil
    }
    res["favoritePlans"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]PlannerPlanable, len(val))
            for i, v := range val {
                res[i] = v.(PlannerPlanable)
            }
            m.SetFavoritePlans(res)
        }
        return nil
    }
    res["plans"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]PlannerPlanable, len(val))
            for i, v := range val {
                res[i] = v.(PlannerPlanable)
            }
            m.SetPlans(res)
        }
        return nil
    }
    res["recentPlanReferences"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreatePlannerRecentPlanReferenceCollectionFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetRecentPlanReferences(val.(PlannerRecentPlanReferenceCollectionable))
        }
        return nil
    }
    res["recentPlans"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]PlannerPlanable, len(val))
            for i, v := range val {
                res[i] = v.(PlannerPlanable)
            }
            m.SetRecentPlans(res)
        }
        return nil
    }
    res["rosterPlans"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]PlannerPlanable, len(val))
            for i, v := range val {
                res[i] = v.(PlannerPlanable)
            }
            m.SetRosterPlans(res)
        }
        return nil
    }
    res["tasks"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreatePlannerTaskFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]PlannerTaskable, len(val))
            for i, v := range val {
                res[i] = v.(PlannerTaskable)
            }
            m.SetTasks(res)
        }
        return nil
    }
    return res
}
// GetPlans gets the plans property value. Read-only. Nullable. Returns the
// plannerPlans the user has access to. (The generated comment said
// plannerTasks — copy-paste artifact; confirm against the Graph reference.)
// Safe to call on a nil receiver (returns nil), per kiota convention.
func (m *PlannerUser) GetPlans()([]PlannerPlanable) {
    // Early return replaces the redundant else branch.
    if m == nil {
        return nil
    }
    return m.plans
}
// GetRecentPlanReferences gets the recentPlanReferences property value. A collection containing references to the plans that were viewed recently by the user in apps that support recent plans.
// Safe to call on a nil receiver (returns nil), per kiota convention.
func (m *PlannerUser) GetRecentPlanReferences()(PlannerRecentPlanReferenceCollectionable) {
    // Early return replaces the redundant else branch.
    if m == nil {
        return nil
    }
    return m.recentPlanReferences
}
// GetRecentPlans gets the recentPlans property value. Read-only. Nullable. Returns the plannerPlans that have been recently viewed by the user in apps that support recent plans.
// Safe to call on a nil receiver (returns nil), per kiota convention.
func (m *PlannerUser) GetRecentPlans()([]PlannerPlanable) {
    // Early return replaces the redundant else branch.
    if m == nil {
        return nil
    }
    return m.recentPlans
}
// GetRosterPlans gets the rosterPlans property value. Read-only. Nullable. Returns the plannerPlans contained by the plannerRosters the user is a member.
// Safe to call on a nil receiver (returns nil), per kiota convention.
func (m *PlannerUser) GetRosterPlans()([]PlannerPlanable) {
    // Early return replaces the redundant else branch.
    if m == nil {
        return nil
    }
    return m.rosterPlans
}
// GetTasks gets the tasks property value. Read-only. Nullable. Returns the plannerTasks assigned to the user.
// Safe to call on a nil receiver (returns nil), per kiota convention.
func (m *PlannerUser) GetTasks()([]PlannerTaskable) {
    // Early return replaces the redundant else branch.
    if m == nil {
        return nil
    }
    return m.tasks
}
// Serialize writes this model's properties to the supplied serialization
// writer, after first serializing the embedded PlannerDelta. Nil collection
// slices are skipped; the two reference-collection properties are passed to
// the writer unconditionally.
func (m *PlannerUser) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    err := m.PlannerDelta.Serialize(writer)
    if err != nil {
        return err
    }
    if m.GetAll() != nil {
        // Collections must be up-cast element-wise to []Parsable for the writer.
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetAll()))
        for i, v := range m.GetAll() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("all", cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("favoritePlanReferences", m.GetFavoritePlanReferences())
        if err != nil {
            return err
        }
    }
    if m.GetFavoritePlans() != nil {
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetFavoritePlans()))
        for i, v := range m.GetFavoritePlans() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("favoritePlans", cast)
        if err != nil {
            return err
        }
    }
    if m.GetPlans() != nil {
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetPlans()))
        for i, v := range m.GetPlans() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("plans", cast)
        if err != nil {
            return err
        }
    }
    {
        err = writer.WriteObjectValue("recentPlanReferences", m.GetRecentPlanReferences())
        if err != nil {
            return err
        }
    }
    if m.GetRecentPlans() != nil {
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetRecentPlans()))
        for i, v := range m.GetRecentPlans() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("recentPlans", cast)
        if err != nil {
            return err
        }
    }
    if m.GetRosterPlans() != nil {
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetRosterPlans()))
        for i, v := range m.GetRosterPlans() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("rosterPlans", cast)
        if err != nil {
            return err
        }
    }
    if m.GetTasks() != nil {
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetTasks()))
        for i, v := range m.GetTasks() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("tasks", cast)
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAll sets the all property value. The all property
func (m *PlannerUser) SetAll(value []PlannerDeltaable)() {
if m != nil {
m.all = value
}
}
// SetFavoritePlanReferences sets the favoritePlanReferences property value. A collection containing the references to the plans that the user has marked as favorites.
func (m *PlannerUser) SetFavoritePlanReferences(value PlannerFavoritePlanReferenceCollectionable)() {
if m != nil {
m.favoritePlanReferences = value
}
}
// SetFavoritePlans sets the favoritePlans property value. Read-only. Nullable. Returns the plannerPlans that the user marked as favorites.
func (m *PlannerUser) SetFavoritePlans(value []PlannerPlanable)() {
if m != nil {
m.favoritePlans = value
}
}
// SetPlans sets the plans property value. Read-only. Nullable. Returns the plannerTasks assigned to the user.
func (m *PlannerUser) SetPlans(value []PlannerPlanable)() {
if m != nil {
m.plans = value
}
}
// SetRecentPlanReferences sets the recentPlanReferences property value. A collection containing references to the plans that were viewed recently by the user in apps that support recent plans.
func (m *PlannerUser) SetRecentPlanReferences(value PlannerRecentPlanReferenceCollectionable)() {
if m != nil {
m.recentPlanReferences = value
}
}
// SetRecentPlans sets the recentPlans property value. Read-only. Nullable. Returns the plannerPlans that have been recently viewed by the user in apps that support recent plans.
func (m *PlannerUser) SetRecentPlans(value []PlannerPlanable)() {
if m != nil {
m.recentPlans = value
}
}
// SetRosterPlans sets the rosterPlans property value. Read-only. Nullable. Returns the plannerPlans contained by the plannerRosters the user is a member.
func (m *PlannerUser) SetRosterPlans(value []PlannerPlanable)() {
if m != nil {
m.rosterPlans = value
}
}
// SetTasks sets the tasks property value. Read-only. Nullable. Returns the plannerTasks assigned to the user.
func (m *PlannerUser) SetTasks(value []PlannerTaskable)() {
if m != nil {
m.tasks = value
}
} | models/planner_user.go | 0.740737 | 0.444324 | planner_user.go | starcoder |
package nock
import (
"strconv"
"strings"
)
// A Noun is an atom or a cell. An atom is any natural number. A cell is any
// ordered pair of nouns.
type Noun struct {
atom *int
cell *[2]Noun
}
// IsAtom returns true if n is an atom.
func (n Noun) IsAtom() bool { return n.atom != nil }
// IsCell returns true if n is a cell.
func (n Noun) IsCell() bool { return n.cell != nil }
// Num returns the integer value of n, which must be an atom.
func (n Noun) Num() int { return *n.atom }
// Head returns the head of n, which must be a cell.
func (n Noun) Head() Noun { return n.cell[0] }
// Tail returns the tail of n, which must be a cell.
func (n Noun) Tail() Noun { return n.cell[1] }
// String implements the fmt.Stringer interface.
func (n Noun) String() string {
if n.IsAtom() {
return strconv.Itoa(n.Num())
}
return "[" + n.Head().String() + " " + n.Tail().String() + "]"
}
// Atom returns an atom with value i.
func Atom(i int) Noun { return Noun{atom: &i} }
// Cell returns a cell that pairs head with tail.
func Cell(head, tail Noun) Noun { return Noun{cell: &[2]Noun{head, tail}} }
// Loobean returns the atom 0 if b is true, and the atom 1 if b is false.
func Loobean(b bool) Noun { return Atom(map[bool]int{true: 0, false: 1}[b]) }
func wut(n Noun) Noun { return Loobean(n.IsCell()) }
func lus(n Noun) Noun { return Atom(1 + n.Num()) }
func tis(n Noun) Noun { return Loobean(n.Head().String() == n.Tail().String()) }
func fas(i int, n Noun) Noun {
switch i {
case 1:
return n
case 2:
return n.Head()
case 3:
return n.Tail()
default:
return fas(2+i%2, fas(i/2, n))
}
}
func tar(sub, form Noun) Noun {
if form.Head().IsCell() {
return Cell(tar(sub, form.Head()), tar(sub, form.Tail()))
}
inst, arg := form.Head(), form.Tail()
return map[int]func() Noun{
// *[a 0 b] /[b a]
0: func() Noun { return fas(arg.Num(), sub) },
// *[a 1 b] b
1: func() Noun { return arg },
// *[a 2 b c] *[*[a b] *[a c]]
2: func() Noun { return tar(tar(sub, arg.Head()), tar(sub, arg.Tail())) },
// *[a 3 b] ?*[a b]
3: func() Noun { return wut(tar(sub, arg)) },
// *[a 4 b] +*[a b]
4: func() Noun { return lus(tar(sub, arg)) },
// *[a 5 b] =*[a b]
5: func() Noun { return tis(tar(sub, arg)) },
// *[a 6 b c d] *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b]
6: func() Noun {
if tar(sub, arg.Head()).Num() == 0 {
return tar(sub, fas(6, arg))
}
return tar(sub, fas(7, arg))
},
// *[a 7 b c] *[a 2 b 1 c]
7: func() Noun { return tar(tar(sub, arg.Head()), arg.Tail()) },
// *[a 8 b c] *[a 7 [[7 [0 1] b] 0 1] c]
8: func() Noun { return tar(Cell(tar(sub, arg.Head()), sub), arg.Tail()) },
// *[a 9 b c] *[a 7 c [2 [0 1] [0 b]]]
9: func() Noun {
d := tar(sub, arg.Tail())
return tar(d, fas(arg.Head().Num(), d))
},
// *[a 10 b c] *[a c]
10: func() Noun {
if b := arg.Head(); b.IsCell() {
_ = tar(sub, b.Tail())
}
return tar(sub, arg.Tail())
},
}[inst.Num()]()
}
// Nock evaluates the nock function on n.
func Nock(n Noun) Noun {
if n.IsAtom() || n.Tail().IsAtom() {
return n
}
return tar(n.Head(), n.Tail())
}
// Parse parses a Nock program.
func Parse(s string) Noun {
s = strings.Replace(s, "[", " [ ", -1)
s = strings.Replace(s, "]", " ] ", -1)
n, _ := parseNoun(strings.Fields(strings.TrimSpace(s)))
return n
}
func parseNoun(s []string) (Noun, []string) {
if s[0] == "[" {
return parseCell(s)
}
return parseAtom(s)
}
func parseCell(s []string) (Noun, []string) {
s = s[1:]
var elems []Noun
for s[0] != "]" {
var e Noun
e, s = parseNoun(s)
elems = append(elems, e)
}
for len(elems) > 1 {
elems = append(elems[:len(elems)-2], Cell(elems[len(elems)-2], elems[len(elems)-1]))
}
return elems[0], s[1:]
}
func parseAtom(s []string) (Noun, []string) {
i, _ := strconv.Atoi(s[0])
return Atom(i), s[1:]
} | nock.go | 0.773302 | 0.642461 | nock.go | starcoder |
package dstructs
import "fmt"
//LNode : Creates a *Node* Object for a ***Singly*** Linked List
type LNode struct {
Data interface{}
Next *LNode
}
//DNode : Creates a *Node* Object for a ***Doubly*** Linked List
type DNode struct {
Data interface{}
Next *DNode
Prev *DNode
}
//LinkedList : Creates a ***Singly*** linked list
type LinkedList struct {
Head *LNode
}
//DLinkedList : Creates a ***Doubly*** linked list
type DLinkedList struct {
Head *DNode
}
//CLinkedList : Creates a ***Circular*** linked list
type CLinkedList struct {
Head *LNode
}
//DCLinkedList : Creates a ***Circular*** ***Doubly*** linked list
type DCLinkedList struct {
Head *DNode
}
//Len : Reports the length of a Linked-List
func (l *LinkedList) Len() int {
tempNode := l.Head
counter := 0
for tempNode.Next != nil {
counter++
tempNode = tempNode.Next
}
return counter
}
func (dl *DLinkedList) Len() int {
tempNode := dl.Head
counter := 0
for tempNode.Next != nil {
counter++
tempNode = tempNode.Next
}
return counter
}
func (cl *CLinkedList) Len() int {
tempNode := cl.Head.Next
start := cl.Head
counter := 1
for tempNode != start {
counter++
tempNode = tempNode.Next
}
return counter
}
func (dcl *DCLinkedList) Len() int {
tempNode := dcl.Head.Next
counter := 1
for tempNode != dcl.Head {
tempNode = tempNode.Next
counter++
}
return counter
}
/*
MakeList : Takes Multiple nodes as args and creates a ***Singly*** Linked list out of them.
* Takes the first Node passed as the *Head Node*.
*/
func MakeList(nodes ...LNode) LinkedList {
for i := 0; i < len(nodes)-1; i++ {
nodes[i].Next = &nodes[i+1]
}
return LinkedList{Head: &nodes[0]}
}
/*
MakeDList : Takes **Multiple** nodes as args and creates a ***Doubly*** Linked list out of them.
* Takes the first *Node* passed as the *Head Node*.
*/
func MakeDList(nodes ...DNode) DLinkedList {
for i := 0; i < len(nodes)-1; i++ {
nodes[i].Next = &nodes[i+1]
nodes[i+1].Prev = &nodes[i]
}
return DLinkedList{Head: &nodes[0]}
}
/*
MakeCList : Takes **Multiple** nodes as args and creates a ***Circular*** Linked list out of them.
* Takes the first *Node* passed as the *Head Node*.
NOTE: If only one node is passed then returns a circular list that has only one node, adviced to just use struct instead
*/
func MakeCList(nodes ...LNode) CLinkedList {
if len(nodes) == 1 {
nodes[0].Next = &nodes[0]
nodes[0].Next.Next = &nodes[0]
return CLinkedList{Head: &nodes[0]}
}
for i := 0; i < len(nodes)-1; i++ {
nodes[i].Next = &nodes[i+1]
}
nodes[len(nodes)-1].Next = &nodes[0]
return CLinkedList{Head: &nodes[0]}
}
/*
MakeDCList : Takes **Multiple** nodes as args and creates a ***Circular*** ***Doubly*** Linked list out of them.
* Takes the first *Node* passed as the *Head Node*.
NOTE: If only one node is passed then returns a circular list that has only one node, adviced to just use struct instead
*/
func MakeDCList(nodes ...DNode) DCLinkedList {
if len(nodes) == 1 {
nodes[0].Next = &nodes[0]
nodes[0].Prev = &nodes[0]
return DCLinkedList{Head: &nodes[0]}
}
for i := 0; i < len(nodes)-1; i++ {
nodes[i].Next = &nodes[i+1]
nodes[i+1].Prev = &nodes[i]
}
nodes[len(nodes)-1].Next = &nodes[0]
nodes[0].Prev = &nodes[len(nodes)-1]
return DCLinkedList{Head: &nodes[0]}
}
//PrintList : **Prints** the List
func (l *LinkedList) PrintList() {
nodeList := []LNode{}
tempNode := l.Head
for tempNode != nil {
nodeList = append(nodeList, *tempNode)
tempNode = tempNode.Next
}
fmt.Println(nodeList)
}
func (dl *DLinkedList) PrintList() {
nodeList := []DNode{}
tempNode := dl.Head
for tempNode != nil {
nodeList = append(nodeList, *tempNode)
tempNode = tempNode.Next
}
fmt.Println(nodeList)
}
func (cl *CLinkedList) PrintList() {
tempNode := cl.Head.Next
nodeList := []LNode{*cl.Head}
for tempNode != cl.Head {
nodeList = append(nodeList, *tempNode)
tempNode = tempNode.Next
}
nodeList = append(nodeList, *tempNode)
fmt.Println(nodeList)
}
func (dcl *DCLinkedList) PrintList() {
nodeList := []DNode{*dcl.Head}
tempNode := dcl.Head.Next
for tempNode != dcl.Head {
nodeList = append(nodeList, *tempNode)
tempNode = tempNode.Next
}
nodeList = append(nodeList, *tempNode)
fmt.Println(nodeList)
}
//Append : appends to the linked list
func (l *LinkedList) Append(node LNode) {
tempNode := l.Head.Next
prevNode := l.Head
for tempNode != nil {
tempNode = tempNode.Next
prevNode = prevNode.Next
}
prevNode.Next = &node
}
func (dl *DLinkedList) Append(node DNode) {
tempNode := dl.Head
for tempNode.Next != nil {
tempNode = tempNode.Next
}
tempNode.Next = &node
tempNode.Next.Prev = tempNode
}
func (cl *CLinkedList) Append(node LNode) {
tempNode := cl.Head.Next
start := cl.Head
for tempNode.Next != start {
tempNode = tempNode.Next
}
node.Next = start
tempNode.Next = &node
}
func (dcl *DCLinkedList) Append(node DNode) {
node.Prev = dcl.Head.Prev // fix tis method
node.Prev.Next = &node
dcl.Head.Prev = &node
node.Next = dcl.Head
}
//Search : Runs a *linear search* on the linked list to find node with Data == [Data]
func (l *LinkedList) Search(Data interface{}) *LNode {
tempNode := l.Head
for tempNode != nil {
if tempNode.Data == Data {
return tempNode
}
tempNode = tempNode.Next
}
return nil
}
func (dl *DLinkedList) Search(Data interface{}) *DNode {
tempNode := dl.Head
for tempNode != nil {
if tempNode.Data == Data {
return tempNode
}
tempNode = tempNode.Next
}
return nil
}
func (cl *CLinkedList) Search(Data interface{}) *LNode {
tempNode := cl.Head.Next
start := cl.Head
for tempNode.Next != start {
if tempNode.Data == Data {
return tempNode
}
tempNode = tempNode.Next
}
return nil
}
func (dcl *DCLinkedList) Search(Data interface{}) *DNode {
tempNode := dcl.Head.Next
for tempNode != dcl.Head {
if tempNode.Data == Data {
return tempNode
}
tempNode = tempNode.Next
}
return nil
}
//Delete : Deletes the first node from the linked list which has [data]
func (l *LinkedList) Delete(Data interface{}) {
if l.Head.Data == Data {
l.Head = l.Head.Next
} else {
tempNode := l.Head.Next
prevNode := l.Head
for tempNode != nil {
if tempNode.Data == Data {
prevNode.Next = tempNode.Next
break
}
tempNode = tempNode.Next
prevNode = prevNode.Next
}
}
}
func (dl *DLinkedList) Delete(Data interface{}) {
node := dl.Search(Data)
tempNode := node
node.Next.Prev = tempNode.Prev
node.Prev.Next = tempNode.Next
node.Next = nil
node.Prev = nil
}
func (cl *CLinkedList) Delete(data interface{}) {
if cl.Head.Data != data {
cl._delNode(data)
} else {
cl._deleteHead()
}
}
func (cl *CLinkedList) _deleteHead() {
if cl.Len() > 1 {
tempNode := cl.Head
ref := cl.Head.Next
for tempNode.Next != cl.Head {
tempNode = tempNode.Next
}
fmt.Println(tempNode)
tempNode.Next = ref
cl.Head = tempNode.Next
}
}
func (cl *CLinkedList) _delNode(data interface{}) {
tempNode := cl.Head.Next
prevNode := cl.Head
for tempNode.Next != cl.Head {
if tempNode.Data == data {
prevNode.Next = tempNode.Next
tempNode.Next = nil
break
}
tempNode = tempNode.Next
prevNode = prevNode.Next
}
}
func (dcl *DCLinkedList) Delete(data interface{}) {
if dcl.Search(data) != nil {
if data == dcl.Head.Data {
dcl.Head.Next.Prev = dcl.Head.Prev
dcl.Head.Prev.Next = dcl.Head.Next
dcl.Head = dcl.Head.Next
} else {
tempNode := dcl.Head
for tempNode.Data != data {
tempNode = tempNode.Next
}
tempNode.Prev.Next = tempNode.Next
tempNode.Next.Prev = tempNode.Prev
tempNode.Next = nil
tempNode.Prev = nil
}
}
} | Go/dstruct/LinkedList.go | 0.507568 | 0.433981 | LinkedList.go | starcoder |
package bencode
import (
"errors"
"fmt"
"strconv"
)
// Decode decodes a bencoded string to string, int, list or map.
func Decode(data []byte) (r interface{}, err error) {
r, _, err = decodeItem(data, 0)
return r, err
}
// DecodeString decodes a string from a given offset.
// It returns the string, the number of bytes successfully read.
func DecodeString(data []byte, start int) (r string, n int, err error) {
if start >= len(data) || data[start] < '0' || data[start] > '9' {
err = errors.New("bencode: invalid string length")
return r, 1, err
}
prefix, i, err := readUntil(data, start, ':')
end := start + i
if err != nil {
return r, end - start, err
}
length, err := strconv.ParseInt(string(prefix), 10, 0)
if err != nil {
return r, end - start, err
}
end = end + int(length)
if end > len(data) || end < i {
err = errors.New("bencode: string length out of range")
return r, end - start, err
}
return string(data[start+i : end]), end - start, nil
}
// DecodeInt decodes an integer value.
// It returns the integer and the number of bytes successfully read.
func DecodeInt(data []byte, start int) (r int64, end int, err error) {
if start >= len(data) || data[start] != 'i' {
err = errors.New("bencode: invalid integer")
return r, end, err
}
prefix, n, err := readUntil(data, start, 'e')
if err != nil {
return r, n, err
}
r, err = strconv.ParseInt(string(prefix[1:]), 10, 64)
return r, n, err
}
// DecodeList decodes a list value.
// It returns the array and the number of bytes successfully read.
func DecodeList(data []byte, start int) (r []interface{}, end int, err error) {
if start >= len(data) {
return r, end, errors.New("bencode: list range error")
}
if data[start] != 'l' {
return r, end, errors.New("bencode: invalid list")
}
end = start + 1
// Empty list
if data[end] == 'e' {
return r, 2, nil
}
var item interface{}
var n int
for end < len(data) {
item, n, err = decodeItem(data, end)
end = end + n
if err != nil {
return r, end - start, err
}
r = append(r, item)
if data[end] == 'e' {
return r, end - start + 1, nil
}
}
return r, end, errors.New("bencode: invalid list termination")
}
// DecodeDict decodes a dict as a map.
// It returns the map and the number of bytes successfully read.
func DecodeDict(data []byte, start int) (map[string]interface{}, int, error) {
r := make(map[string]interface{})
if start >= len(data) {
return r, 0, errors.New("bencode: dict range error")
}
if data[start] != 'd' {
return r, 1, errors.New("bencode: invalid dict")
}
end := start + 1
// Empty dict
if data[end] == 'e' {
return r, 2, nil
}
for end < len(data) {
key, n, err := DecodeString(data, end)
end = end + n
if err != nil {
return r, end, errors.New("bencode: invalid dict key")
}
if end >= len(data) {
return r, end, errors.New("bencode: dict range error")
}
item, n, err := decodeItem(data, end)
end = end + n
if err != nil {
return r, end, err
}
r[key] = item
if data[end] == 'e' {
return r, end - start + 1, nil
}
}
return r, end, errors.New("bencode: invalid dict termination")
}
// decodeItem decodes the next type at the given index.
func decodeItem(data []byte, start int) (r interface{}, n int, err error) {
switch data[start] {
case 'l':
return DecodeList(data, start)
case 'd':
return DecodeDict(data, start)
case 'i':
return DecodeInt(data, start)
default:
return DecodeString(data, start)
}
}
// Read until the given character.
// Returns the slice before the character and the number of bytes successfully read.
func readUntil(data []byte, start int, c byte) ([]byte, int, error) {
i := start
for ; i < len(data); i++ {
if data[i] == c {
return data[start:i], i - start + 1, nil
}
}
return data, i - start, fmt.Errorf("bencode: '%b' not found", c)
} | decode.go | 0.736874 | 0.429908 | decode.go | starcoder |
package pgsql
import (
"database/sql"
"database/sql/driver"
"strconv"
)
// VarBitFromInt64 returns a driver.Valuer that produces a PostgreSQL varbit from the given Go int64.
func VarBitFromInt64(val int64) driver.Valuer {
return varBitFromInt64{val: val}
}
// VarBitToInt64 returns an sql.Scanner that converts a PostgreSQL varbit into a Go int64 and sets it to val.
func VarBitToInt64(val *int64) sql.Scanner {
return varBitToInt64{val: val}
}
// VarBitFromBoolSlice returns a driver.Valuer that produces a PostgreSQL varbit from the given Go []bool.
func VarBitFromBoolSlice(val []bool) driver.Valuer {
return varBitFromBoolSlice{val: val}
}
// VarBitToBoolSlice returns an sql.Scanner that converts a PostgreSQL varbit into a Go []bool and sets it to val.
func VarBitToBoolSlice(val *[]bool) sql.Scanner {
return varBitToBoolSlice{val: val}
}
// VarBitFromUint8Slice returns a driver.Valuer that produces a PostgreSQL varbit from the given Go []uint8.
func VarBitFromUint8Slice(val []uint8) driver.Valuer {
return varBitFromUint8Slice{val: val}
}
// VarBitToUint8Slice returns an sql.Scanner that converts a PostgreSQL varbit into a Go []uint8 and sets it to val.
func VarBitToUint8Slice(val *[]uint8) sql.Scanner {
return varBitToUint8Slice{val: val}
}
type varBitFromInt64 struct {
val int64
}
func (v varBitFromInt64) Value() (driver.Value, error) {
out := strconv.AppendInt([]byte(nil), v.val, 2)
return out, nil
}
type varBitToInt64 struct {
val *int64
}
func (v varBitToInt64) Scan(src interface{}) error {
data, err := srcbytes(src)
if err != nil {
return err
} else if data == nil {
return nil
}
i64, err := strconv.ParseInt(string(data), 2, 64)
if err != nil {
return err
}
*v.val = i64
return nil
}
type varBitFromBoolSlice struct {
val []bool
}
func (v varBitFromBoolSlice) Value() (driver.Value, error) {
if v.val == nil {
return nil, nil
} else if len(v.val) == 0 {
return []byte(""), nil
}
out := make([]byte, len(v.val))
for i := 0; i < len(v.val); i++ {
if v.val[i] {
out[i] = '1'
} else {
out[i] = '0'
}
}
return out, nil
}
type varBitToBoolSlice struct {
val *[]bool
}
func (v varBitToBoolSlice) Scan(src interface{}) error {
data, err := srcbytes(src)
if err != nil {
return err
} else if data == nil {
*v.val = nil
return nil
}
bools := make([]bool, len(data))
for i := 0; i < len(data); i++ {
if data[i] == '1' {
bools[i] = true
}
}
*v.val = bools
return nil
}
type varBitFromUint8Slice struct {
val []uint8
}
func (v varBitFromUint8Slice) Value() (driver.Value, error) {
if v.val == nil {
return nil, nil
} else if len(v.val) == 0 {
return []byte(""), nil
}
out := make([]byte, len(v.val))
for i := 0; i < len(v.val); i++ {
if v.val[i] == 1 {
out[i] = '1'
} else {
out[i] = '0'
}
}
return out, nil
}
type varBitToUint8Slice struct {
val *[]uint8
}
func (v varBitToUint8Slice) Scan(src interface{}) error {
data, err := srcbytes(src)
if err != nil {
return err
} else if data == nil {
*v.val = nil
return nil
}
uint8s := make([]uint8, len(data))
for i := 0; i < len(data); i++ {
if data[i] == '1' {
uint8s[i] = 1
}
}
*v.val = uint8s
return nil
} | pgsql/varbit.go | 0.702326 | 0.431524 | varbit.go | starcoder |
package model
import (
"github.com/peterhoward42/skilldrill/util/sets"
"strings"
)
/*
The skillTreeOps type is a place for algorithmic functions to live that depend
on traversing parent child relationships in the skills taxonomy tree. The aim
is to prevent any other parts of the model software from having to engage with
this topic.
*/
type skillTreeOps struct {
api *Api
}
/*
The skillWording() method is capable of assembling a description for a
skillNode that is based on its child-parent ancestry. In other words it can
synthesise a description that describes the skill completely and autonomuously,
by contatenating the skill node descriptions, working up the tree. It provides
for convenience also the skill Node title and the aggregate description broken
into pieces. The <desc> return value is from the leaf node alone. The
<descInContext> return value is the aggregated description. While the
<contextAlone> is the description drawn from the skill node's parent
(recursively).
*/
func (treeOps *skillTreeOps) skillWording(skill *skillNode) (title string,
desc string, descInContext string, contextAlone string) {
nodes := []*skillNode{}
treeOps.lineageOf(skill, &nodes)
descriptions := []string{}
for _, node := range nodes {
descriptions = append(descriptions, node.Desc)
}
title = skill.Title
desc = skill.Desc
descInContext = strings.Join(descriptions, ">>>")
contextAlone = strings.Join(descriptions[:len(descriptions)-1], ">>>")
return
}
/*
The lineageOf() method provides the list of skillNodes that comprise the
parent chain from the given skill up to the root of the tree. Root first.
*/
func (treeOps *skillTreeOps) lineageOf(skill *skillNode,
lineage *[]*skillNode) {
// recurse to add parent lineage first
if skill.Parent != -1 {
treeOps.lineageOf(treeOps.api.skillFromId[skill.Parent], lineage)
}
// now add me
*lineage = append(*lineage, skill)
}
/*
The method enumerateTree() provides a list of skill Uids in the order they
should appear when displaying the tree. It is person-specific, and omits the
nodes that have been collapsed (using CollapseSkill()) - including their
children.
*/
func (treeOps *skillTreeOps) enumerateTree(collapsedNodes *sets.SetOfInt) (
skills []int, depths []int) {
curNode := treeOps.api.skillFromId[treeOps.api.SkillRoot]
skills = []int{}
depths = []int{}
curDepth := 0
// Recursive
treeOps.enumerateNode(curNode, collapsedNodes, curDepth, &skills, &depths)
return
}
// Recursive helper for EnumerateTree() method.
func (treeOps *skillTreeOps) enumerateNode(curNode *skillNode,
collapsedNodes *sets.SetOfInt, curDepth int, skills *[]int, depths *[]int) {
// Me first
*skills = append(*skills, curNode.Uid)
*depths = append(*depths, curDepth)
// If I am collapsed, do not continue to recurse into my children
if collapsedNodes.Contains(curNode.Uid) {
return
}
// Otherwise, do
childDepth := curDepth + 1
for _, child := range curNode.Children {
treeOps.enumerateNode(treeOps.api.skillFromId[child], collapsedNodes,
childDepth, skills, depths)
}
return
} | model-hidden/skilltreeops.go | 0.657758 | 0.466663 | skilltreeops.go | starcoder |
package display
import (
"fmt"
"unsafe"
"github.com/go-gl/gl/v4.2-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
// Constant Cube Vertices. Same for all cubes.
func getVertices() []mgl32.Vec3 {
return []mgl32.Vec3{
{0.5, 0.5, 0.5},
{0.5, 0.5, -0.5},
{0.5, -0.5, 0.5},
{0.5, -0.5, -0.5},
{-0.5, 0.5, 0.5},
{-0.5, 0.5, -0.5},
{-0.5, -0.5, 0.5},
{-0.5, -0.5, -0.5},
}
}
// Constant Drawing Indices, defines a number of triangles that are used to render cubes
func getDrawingIndices() []uint8 {
return []uint8{
5, 3, 1,
5, 7, 3,
5, 7, 4,
7, 4, 6,
6, 4, 0,
6, 0, 2,
0, 3, 2,
0, 1, 3,
4, 5, 0,
5, 1, 0,
6, 7, 2,
7, 3, 2,
}
}
type Cube struct {
Transform Transform
info renderInfo
}
type CubeConstructor = func(x, y, z, xSize, ySize, zSize float32) Cube
type renderInfo struct {
vao uint32
vbo uint32
vboIndices uint32
mvpUniformID int32
viewMatUniformID int32
modelMatUniformID int32
lightPositionUniformID int32
materialColorUniformID int32
color mgl32.Vec4
shaderProgram uint32
}
func GetCubeConstructor(shaderProgram uint32) CubeConstructor {
vao, vbo, indicesVbo := generateAndInitializeBuffers()
info := renderInfo{
vao: vao,
vbo: vbo,
vboIndices: indicesVbo,
mvpUniformID: gl.GetUniformLocation(shaderProgram, gl.Str("MVP\x00")),
viewMatUniformID: gl.GetUniformLocation(shaderProgram, gl.Str("V\x00")),
modelMatUniformID: gl.GetUniformLocation(shaderProgram, gl.Str("M\x00")),
lightPositionUniformID: gl.GetUniformLocation(shaderProgram, gl.Str("lightPosition_worldSpace\x00")),
materialColorUniformID: gl.GetUniformLocation(shaderProgram, gl.Str("materialDiffuseColor\x00")),
shaderProgram: shaderProgram,
color: mgl32.Vec4{},
}
positionAttrib := uint32(gl.GetAttribLocation(shaderProgram, gl.Str("position_modelSpace\x00")))
gl.EnableVertexAttribArray(positionAttrib)
stride := int32(unsafe.Sizeof(mgl32.Vec3{}))
gl.VertexAttribPointer(positionAttrib, 3, gl.FLOAT, false, stride, gl.PtrOffset(0))
gl.BindVertexArray(0)
return func(x, y, z, xSize, ySize, zSize float32) Cube {
return newCube(x, y, z, xSize, ySize, zSize, &info)
}
}
// Create a new cube.
func newCube(x, y, z, xSize, ySize, zSize float32, info *renderInfo) Cube {
if xSize < 0 || ySize < 0 || zSize < 0 {
panic("Negative Size given")
}
if info == nil {
panic("render info may not be nil")
}
cube := Cube{
Transform: Transform{
translation: mgl32.Translate3D(x, y, z),
scale: mgl32.Scale3D(xSize, ySize, zSize),
rotation: mgl32.QuatIdent(),
},
info: *info,
}
return cube
}
// Creates a VAO (Vertex Array Object)
// Creates two VBO (Vertex Buffer Objects) and binds them into ARRAY and ELEMENT_ARRAY Buffers of the created vao
// loads vertex data into ARRAY_BUFFER and index data into ELEMENT_ARRAY_BUFFER
// this could be optimized because every cube has the same vertex and index data, we don't have to keep it in
// memory for every cube.
func generateAndInitializeBuffers() (uint32, uint32, uint32) {
var vao, vbo, indicesVbo uint32
gl.GenVertexArrays(1, &vao)
gl.BindVertexArray(vao)
gl.GenBuffers(1, &vbo)
gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
gl.GenBuffers(1, &indicesVbo)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, indicesVbo)
numVertexBufferBytes := len(getVertices()) * int(unsafe.Sizeof(mgl32.Vec3{}))
gl.BufferData(gl.ARRAY_BUFFER, numVertexBufferBytes, gl.Ptr(getVertices()), gl.STATIC_READ)
gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(getDrawingIndices()), gl.Ptr(getDrawingIndices()), gl.STATIC_READ)
return vao, vbo, indicesVbo
}
// Draws the cube into the default framebuffer with the specified view, projection matrices and an arbitrary transform
// lightPosition is a shader parameter
func (cube Cube) draw(view, projection, transform *mgl32.Mat4, lightPosition mgl32.Vec3) {
gl.BindVertexArray(cube.info.vao)
checkForGLError(fmt.Sprintf("glGetError not zero after BindVertexArray(%v)", cube.info.vao))
gl.UseProgram(cube.info.shaderProgram)
model := transform.Mul4(cube.Transform.AsMatrix())
mvp := projection.Mul4(view.Mul4(model))
gl.UniformMatrix4fv(cube.info.mvpUniformID, 1, false, &mvp[0])
gl.UniformMatrix4fv(cube.info.modelMatUniformID, 1, false, &model[0])
gl.UniformMatrix4fv(cube.info.viewMatUniformID, 1, false, &view[0])
gl.Uniform4fv(cube.info.materialColorUniformID, 1, &cube.info.color[0])
gl.Uniform3fv(cube.info.lightPositionUniformID, 1, &lightPosition[0])
gl.DrawElements(gl.TRIANGLES, int32(len(getDrawingIndices())), gl.UNSIGNED_BYTE, gl.PtrOffset(0))
gl.BindVertexArray(0)
}
// Pretty prints location and vao validness
func (cube Cube) String() string {
position := cube.Transform.translation.Mul4x1(mgl32.Vec4{0, 0, 0, 1}).Vec3()
return fmt.Sprintf("cube at %v", position)
} | display/cube.go | 0.696165 | 0.430626 | cube.go | starcoder |
package structsync
import (
"fmt"
"reflect"
"time"
"github.com/deelawn/convert"
)
// timeType is the string form of reflect.Type for time.Time; it is compared
// against Value.Type().String() to detect time.Time fields during conversion.
const timeType = "time.Time"
// assignAndConvertValue copies srcValue into dstValue, converting between
// kinds (bool, ints, uints, floats, string, time.Time) when they differ.
func assignAndConvertValue(srcValue, dstValue reflect.Value) error {
srcKind := srcValue.Kind()
dstKind := dstValue.Kind()
// The simple case
if srcKind == dstKind {
dstValue.Set(srcValue)
return nil
}
srcTypeStr := srcValue.Type().String()
dstTypeStr := dstValue.Type().String()
// Otherwise convert the type
switch srcKind {
case reflect.Bool:
boolValue := srcValue.Bool()
switch dstKind {
case reflect.Float32:
result, err := convert.BoolToFloat32(boolValue)
dstValue.SetFloat(float64(result))
return err
case reflect.Float64:
result, err := convert.BoolToFloat64(boolValue)
dstValue.SetFloat(result)
return err
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
result, err := convert.BoolToInt(boolValue)
dstValue.SetInt(result)
return err
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
result, err := convert.BoolToUint(boolValue)
dstValue.SetUint(result)
return err
case reflect.String:
result, err := convert.BoolToString(boolValue)
dstValue.SetString(result)
return err
default:
if dstTypeStr == timeType {
result, err := convert.BoolToInt(boolValue)
timeVal := reflect.ValueOf(time.Unix(result, 0))
dstValue.Set(timeVal)
return err
}
}
case reflect.Float32, reflect.Float64:
floatValue := srcValue.Float()
switch dstKind {
case reflect.Bool:
result, err := convert.FloatToBool(floatValue)
dstValue.SetBool(result)
return err
case reflect.Float32:
result, err := convert.FloatToFloat32(floatValue)
dstValue.SetFloat(float64(result))
return err
case reflect.Int:
result, err := convert.FloatToInt(floatValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int8:
result, err := convert.FloatToInt8(floatValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int16:
result, err := convert.FloatToInt16(floatValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int32:
result, err := convert.FloatToInt32(floatValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int64:
result, err := convert.FloatToInt64(floatValue)
dstValue.SetInt(result)
return err
case reflect.Uint:
result, err := convert.FloatToUint(floatValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint8:
result, err := convert.FloatToUint8(floatValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint16:
result, err := convert.FloatToUint16(floatValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint32:
result, err := convert.FloatToUint32(floatValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint64:
result, err := convert.FloatToUint64(floatValue)
dstValue.SetUint(uint64(result))
return err
case reflect.String:
result, err := convert.FloatToString(floatValue)
dstValue.SetString(result)
return err
default:
if dstTypeStr == timeType {
result, err := convert.FloatToInt64(floatValue)
timeVal := reflect.ValueOf(time.Unix(result, 0))
dstValue.Set(timeVal)
return err
}
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Struct:
var intValue int64
// Time can be represented as an integer, so it can be handled using the integer logic
if srcKind == reflect.Struct && srcTypeStr != timeType {
break
} else if srcKind == reflect.Struct {
intValue = srcValue.Interface().(time.Time).Unix()
} else {
intValue = srcValue.Int()
}
switch dstKind {
case reflect.Bool:
result, err := convert.IntToBool(intValue)
dstValue.SetBool(result)
return err
case reflect.Float32:
result, err := convert.IntToFloat32(intValue)
dstValue.SetFloat(float64(result))
return err
case reflect.Float64:
result, err := convert.IntToFloat64(intValue)
dstValue.SetFloat(result)
return err
case reflect.Int:
result, err := convert.IntToDefaultInt(intValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int8:
result, err := convert.IntToInt8(intValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int16:
result, err := convert.IntToInt16(intValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int32:
result, err := convert.IntToInt32(intValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int64:
dstValue.SetInt(intValue)
return nil
case reflect.Uint:
result, err := convert.IntToUint(intValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint8:
result, err := convert.IntToUint8(intValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint16:
result, err := convert.IntToUint16(intValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint32:
result, err := convert.IntToUint32(intValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint64:
result, err := convert.IntToUint64(intValue)
dstValue.SetUint(result)
return err
case reflect.String:
result, err := convert.IntToString(intValue)
dstValue.SetString(result)
return err
default:
if dstValue.Type().String() == timeType {
timeVal := reflect.ValueOf(time.Unix(intValue, 0))
dstValue.Set(timeVal)
return nil
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
uintValue := srcValue.Uint()
switch dstKind {
case reflect.Bool:
result, err := convert.UintToBool(uintValue)
dstValue.SetBool(result)
return err
case reflect.Float32:
result, err := convert.UintToFloat32(uintValue)
dstValue.SetFloat(float64(result))
return err
case reflect.Float64:
result, err := convert.UintToFloat64(uintValue)
dstValue.SetFloat(result)
return err
case reflect.Int:
result, err := convert.UintToInt(uintValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int8:
result, err := convert.UintToInt8(uintValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int16:
result, err := convert.UintToInt16(uintValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int32:
result, err := convert.UintToInt32(uintValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int64:
result, err := convert.UintToInt64(uintValue)
dstValue.SetInt(result)
return err
case reflect.Uint:
result, err := convert.UintToDefaultUint(uintValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint8:
result, err := convert.UintToUint8(uintValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint16:
result, err := convert.UintToUint16(uintValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint32:
result, err := convert.UintToUint32(uintValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint64:
dstValue.SetUint(uintValue)
return nil
case reflect.String:
result, err := convert.UintToString(uintValue)
dstValue.SetString(result)
return err
default:
if dstValue.Type().String() == timeType {
result, err := convert.UintToInt64(uintValue)
timeVal := reflect.ValueOf(time.Unix(result, 0))
dstValue.Set(timeVal)
return err
}
}
case reflect.String:
stringValue := srcValue.String()
switch dstKind {
case reflect.Bool:
result, err := convert.StringToBool(stringValue)
dstValue.SetBool(result)
return err
case reflect.Float32:
result, err := convert.StringToFloat32(stringValue)
dstValue.SetFloat(float64(result))
return err
case reflect.Float64:
result, err := convert.StringToFloat64(stringValue)
dstValue.SetFloat(result)
return err
case reflect.Int:
result, err := convert.StringToInt(stringValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int8:
result, err := convert.StringToInt8(stringValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int16:
result, err := convert.StringToInt16(stringValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int32:
result, err := convert.StringToInt32(stringValue)
dstValue.SetInt(int64(result))
return err
case reflect.Int64:
result, err := convert.StringToInt64(stringValue)
dstValue.SetInt(result)
return err
case reflect.Uint:
result, err := convert.StringToUint(stringValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint8:
result, err := convert.StringToUint8(stringValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint16:
result, err := convert.StringToUint16(stringValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint32:
result, err := convert.StringToUint32(stringValue)
dstValue.SetUint(uint64(result))
return err
case reflect.Uint64:
result, err := convert.StringToUint64(stringValue)
dstValue.SetUint(result)
return err
default:
if dstValue.Type().String() == timeType {
result, err := convert.StringToInt64(stringValue)
timeVal := reflect.ValueOf(time.Unix(result, 0))
dstValue.Set(timeVal)
return err
}
}
}
// If we got here, then something isn't right; return an error
return fmt.Errorf("%s cannot be converted to %s", srcKind.String(), dstKind.String())
} | assignment.go | 0.576065 | 0.564519 | assignment.go | starcoder |
package graph
import (
"container/heap"
)
//IsProperColouring checks if the vertex colouring is a proper colouring of the graph g. It assumes that all colours are \geq 0 and that a colour <0 is a mistake.
//This is because the colour -1 is often used to indicate no colour.
func IsProperColouring(g Graph, colouring []int) bool {
	n := g.N()
	if colouring == nil || len(colouring) != n {
		return false
	}
	for v := 0; v < n; v++ {
		if colouring[v] < 0 {
			return false
		}
		// Neighbours are returned in increasing order, so stopping at the
		// first neighbour above v checks each edge exactly once.
		for _, u := range g.Neighbours(v) {
			if u > v {
				break
			}
			if colouring[u] == colouring[v] {
				return false
			}
		}
	}
	return true
}
//GreedyColor greedily colours the graph G colouring the vertices in the given order. That is, this reads the vertices in the given order and assigns to each vertex the minimum colour such that none of its neighbour have this colour.
//It returns the largest colour index used (so the number of colours is the returned value + 1) and the colouring itself.
func GreedyColor(g Graph, order []int) (int, []int) {
	n := g.N()
	if n != len(order) {
		panic("order does not have length equal to g.N()")
	}
	c := make([]int, n)
	for i := range c {
		c[i] = -1
	}
	// seenColours is reused across iterations; it is reset lazily below
	// rather than reallocated, so it must end each iteration all-false.
	seenColours := make([]bool, n)
	maxColour := -1
	max := 0
	for _, v := range order {
		// max tracks the largest colour seen among v's neighbours this
		// iteration, bounding how much of seenColours needs clearing.
		max = 0
		for _, u := range g.Neighbours(v) {
			if c[u] > -1 {
				seenColours[c[u]] = true
				if c[u] > max {
					max = c[u]
				}
			}
		}
		i := 0
		for i = 0; i < n; i++ {
			if !seenColours[i] {
				c[v] = i
				if i > maxColour {
					maxColour = i
				}
				break
			}
			// Clear entries below the chosen colour as we scan past them.
			seenColours[i] = false
		}
		// Clear the remaining entries from the chosen colour up to max.
		for ; i <= max; i++ {
			seenColours[i] = false
		}
	}
	return maxColour, c
}
//ChromaticNumber returns the minimum number of colours needed in a proper vertex colouring of g (known as the Chromatic Number χ) and a colouring that uses this many colours ([0, 1, ..., χ -1]).
//Note that a colouring with the minimum number of colours is not necessarily unique and the colouring returned here is arbitrary.
func ChromaticNumber(g Graph) (chromaticNumber int, colouring []int) {
	// Start from an entirely blank colouring (-1 everywhere).
	blank := make([]int, g.N())
	for i := range blank {
		blank[i] = -1
	}
	// The clique number is a lower bound on χ, letting the search stop early.
	return dfsDsatur(g, CliqueNumber(g), g.N()+1, blank)
}
//IsKColorable returns true if there is a proper colouring with k colours and an example colouring, else it returns false, nil.
func IsKColorable(g Graph, k int) (ok bool, colouring []int) {
	// Start from an entirely blank colouring (-1 everywhere).
	blank := make([]int, g.N())
	for i := range blank {
		blank[i] = -1
	}
	if num, c := dfsDsatur(g, k, k, blank); num != -1 {
		return true, c
	}
	return false, nil
}
//uncolouredVertex is a type used in the dfsDsatur. It stores information about currently uncoloured vertices.
type uncolouredVertex struct {
	v                   int   // the vertex itself
	numberOfSeenColours int   // number of distinct colours among already-coloured neighbours (the DSATUR saturation degree)
	seenColours         []int // seenColours[c] counts the coloured neighbours currently using colour c
	degree              int   // degree of v in the graph; used to break saturation ties
}
//uncolouredHeap stores the information about uncoloured vertices and a heap of which vertices are uncoloured. Information on vertices isn't modified when they are removed from the heap so it can be used again when the vertices are added back to the heap.
type uncolouredHeap struct {
	uv      []uncolouredVertex // indexed by vertex id; entries persist across heap removal/re-insertion
	intHeap []int              // heap of uncoloured vertex ids, ordered by Less below
}
// uncolouredHeap implements heap.Interface over vertex ids. Only intHeap is
// reordered by the heap operations; the uv slice stays indexed by vertex id.
func (uh uncolouredHeap) Len() int { return len(uh.intHeap) }
func (uh uncolouredHeap) Less(i, j int) bool {
	//Order the vertices in descending numberOfSeenColours and descending degree if they have the same numberOfSeenColours.
	if uh.uv[uh.intHeap[i]].numberOfSeenColours != uh.uv[uh.intHeap[j]].numberOfSeenColours {
		return uh.uv[uh.intHeap[i]].numberOfSeenColours > uh.uv[uh.intHeap[j]].numberOfSeenColours
	}
	return uh.uv[uh.intHeap[i]].degree > uh.uv[uh.intHeap[j]].degree
}
func (uh uncolouredHeap) Swap(i, j int) {
	uh.intHeap[i], uh.intHeap[j] = uh.intHeap[j], uh.intHeap[i]
}
// Push appends a vertex id to the heap storage (called via heap.Push).
func (uh *uncolouredHeap) Push(x interface{}) {
	v := x.(int)
	uh.intHeap = append(uh.intHeap, v)
}
// Pop removes and returns the last vertex id (called via heap.Pop).
func (uh *uncolouredHeap) Pop() interface{} {
	old := uh.intHeap
	n := len(old)
	v := old[n-1]
	uh.intHeap = old[0 : n-1]
	return v
}
//dfsDsatur runs a DFS search over valid colourings where the order is given by DSATUR until a colouring using at most lowerBound colours is found or all options have been exhausted.
//The search will stop when a colouring is found using lowerBound colours.
//The search will not consider any colourings which use more than upperBound colours (but they are allowed to use upperBound colours).
//The search will start using the colouring partialColouring. -1 should be used for colours which aren't yet fixed.
//If no valid colouring can be found, the returned values are -1, nil.
func dfsDsatur(g Graph, lowerBound int, upperBound int, partialColouring []int) (chromaticNumber int, colouring []int) {
	//The code is going to assume that we already have a colouring with upperBound colours and will not look for another one.
	//We want to find a colouring with upperBound colours if it exists or we will return -1, nil.
	//We'll just increment upperBound by 1.
	upperBound++
	n := g.N()
	if n == 0 {
		return 0, []int{}
	}
	degrees := g.Degrees()
	bestColouring := make([]int, n)
	for i := range bestColouring {
		bestColouring[i] = -1
	}
	colouring = make([]int, n)
	copy(colouring, partialColouring)
	precolouredVertices := make([]int, 0)
	uncolouredV := make([]uncolouredVertex, n)
	intHeap := make([]int, 0)
	var tmp []int
	maxColourUsed := -1
	// Split the vertices into uncoloured (go on the heap) and precoloured.
	for v, c := range partialColouring {
		if c == -1 {
			tmp = make([]int, upperBound)
			uncolouredV[v] = uncolouredVertex{v, 0, tmp, degrees[v]}
			intHeap = append(intHeap, v)
		} else {
			if c > maxColourUsed {
				maxColourUsed = c
			}
			colouring[v] = c
			precolouredVertices = append(precolouredVertices, v)
		}
	}
	if maxColourUsed+1 >= upperBound {
		return -1, nil
	}
	// Seed each uncoloured vertex's seenColours from its precoloured neighbours.
	// NOTE(review): entries of uncolouredV at precoloured indices are zero-valued
	// (v == 0, nil seenColours); this loop would index a nil slice if vertex 0
	// has precoloured neighbours. Confirm — current callers always pass a fully
	// blank partialColouring, so precolouredVertices is empty in practice.
	for i := range uncolouredV {
		seenColours := uncolouredV[i].seenColours
		v := uncolouredV[i].v
		for _, u := range precolouredVertices {
			if g.IsEdge(u, v) {
				seenColours[colouring[u]]++
				if seenColours[colouring[u]] == 1 {
					uncolouredV[i].numberOfSeenColours++
				}
			}
		}
		uncolouredV[i].seenColours = seenColours
	}
	uh := uncolouredHeap{uncolouredV, intHeap}
	heap.Init(&uh)
	// DFS state: chosenVertices[i] was coloured with choices[i][currentChoice[i]].
	chosenVertices := []int{}
	currentChoice := []int{}
	choices := [][]int{}
	c := make([]int, n)
dfsLoop:
	for {
		v := -1
		// Reuse the outer c's backing array for this iteration's option list.
		c := c[:0]
		if len(uh.intHeap) > 0 {
			// Pick the most saturated uncoloured vertex and enumerate its
			// feasible colours. New colours are capped at maxColourUsed+1 to
			// avoid exploring symmetric colourings.
			v = uh.intHeap[0]
			vertex := uh.uv[v]
			maxOption := upperBound - 2
			if maxColourUsed+1 < maxOption {
				maxOption = maxColourUsed + 1
			}
			for j := 0; j <= maxOption; j++ {
				b := vertex.seenColours[j]
				if b == 0 {
					c = append(c, j)
				}
			}
			if len(c) > 0 {
				heap.Remove(&uh, 0)
			}
		} else {
			// Every vertex is coloured: record the new best and tighten the bound.
			copy(bestColouring, colouring)
			upperBound = maxColourUsed + 1
			if upperBound <= lowerBound {
				return upperBound, bestColouring
			}
		}
		if len(c) == 0 {
			//Backtrack
			mustChange := len(currentChoice) - 1
			for i := range chosenVertices {
				//If the colour is at least upperBound - 1, then the colouring must use at least upperBound colours and we can't improve. We must change the colour here but the only changes left for this colour are higher so we need to change something before this one.
				if colouring[chosenVertices[i]] >= upperBound-1 {
					mustChange = i - 1
					break
				}
			}
			for i := mustChange; i >= 0; i-- {
				if currentChoice[i] < len(choices[i])-1 && choices[i][currentChoice[i]+1]+1 < upperBound {
					toColour := choices[i][currentChoice[i]+1]
					//Backtrack: uncolour every vertex chosen after position i,
					//undoing its effect on the seenColours of heap members.
					for j := len(chosenVertices) - 1; j > i; j-- {
						for _, u := range uh.intHeap {
							if g.IsEdge(u, chosenVertices[j]) {
								uh.uv[u].seenColours[colouring[chosenVertices[j]]]--
								if uh.uv[u].seenColours[colouring[chosenVertices[j]]] == 0 {
									uh.uv[u].numberOfSeenColours--
								}
							}
						}
						colouring[chosenVertices[j]] = -1
						heap.Push(&uh, chosenVertices[j])
					}
					currentChoice = currentChoice[:i+1]
					choices = choices[:i+1]
					chosenVertices = chosenVertices[:i+1]
					//Change the choice at position i
					for _, u := range uh.intHeap {
						if g.IsEdge(u, chosenVertices[i]) {
							uh.uv[u].seenColours[colouring[chosenVertices[i]]]--
							if uh.uv[u].seenColours[colouring[chosenVertices[i]]] == 0 {
								uh.uv[u].numberOfSeenColours--
							}
							uh.uv[u].seenColours[toColour]++
							if uh.uv[u].seenColours[toColour] == 1 {
								uh.uv[u].numberOfSeenColours++
							}
						}
					}
					heap.Init(&uh)
					currentChoice[i]++
					colouring[chosenVertices[i]] = toColour
					// Recompute maxColourUsed from the remaining chosen vertices.
					// NOTE(review): this ignores precoloured vertices, which may
					// carry a higher colour — confirm; harmless for the current
					// callers, which never precolour.
					maxColourUsed = 0
					for _, u := range chosenVertices {
						if colouring[u] > maxColourUsed {
							maxColourUsed = colouring[u]
						}
					}
					continue dfsLoop
				}
			}
			// Search space exhausted: return the best colouring found, if any.
			if bestColouring[0] == -1 {
				return -1, nil
			}
			return upperBound, bestColouring
		}
		// Commit the first feasible colour for v and record the alternatives.
		tmp := make([]int, len(c))
		copy(tmp, c)
		choices = append(choices, tmp)
		currentChoice = append(currentChoice, 0)
		chosenVertices = append(chosenVertices, v)
		toColour := c[0]
		colouring[v] = toColour
		if toColour > maxColourUsed {
			maxColourUsed = toColour
		}
		//Update the seen colours.
		for k, u := range uh.intHeap {
			if g.IsEdge(u, v) {
				uh.uv[u].seenColours[toColour]++
				if uh.uv[u].seenColours[toColour] == 1 {
					uh.uv[u].numberOfSeenColours++
				}
			}
			heap.Fix(&uh, k)
		}
	}
}
//ChromaticIndex returns the minimum number of colours needed in a proper edge colouring of g (known as the Chromatic Index χ') and a colouring that uses this many colours ([1, ..., χ']).
//The colouring is returned in the form of an edge array with 0 for non-edges and a colour in [1, 2,..., χ'] for the edges.
//Note that a colouring with the minimum number of colours is not necessarily unique and the colouring returned here is arbitrary.
func ChromaticIndex(g Graph) (chromaticIndex int, colouredEdges []byte) {
	// An edge colouring of g is a vertex colouring of its line graph.
	h := LineGraphDense(g)
	ci, colouring := ChromaticNumber(h)
	if ci == -1 {
		return -1, nil
	}
	// Write the colours back into an edge array over all (i, j) pairs with
	// i < j, in the same order the edges were enumerated for the line graph.
	n := g.N() // was `n := 0`, which left the edge array empty and the loop body unreachable
	colouringIndex := 0
	colouredEdges = make([]byte, n*(n-1)/2)
	index := 0
	for j := 1; j < n; j++ {
		for i := 0; i < j; i++ {
			if g.IsEdge(i, j) {
				// Index by the running edge position, not the vertex index i.
				colouredEdges[index] = byte(colouring[colouringIndex] + 1)
				colouringIndex++
			}
			index++
		}
	}
	return ci, colouredEdges
}
//ChromaticPolynomial returns the coefficients of the chromatic polynomial.
//This is a very basic implementation, using the deletion–contraction recurrence:
//P(G, k) = P(G - e, k) - P(G / e, k). The returned slice holds the coefficient
//of k^i at index i.
func ChromaticPolynomial(g EditableGraph) []int {
	n := g.N()
	poly := make([]int, n+1)
	// holder pairs a graph in the recursion with the sign of its contribution.
	type holder struct {
		g    EditableGraph
		sign int
	}
	// Explicit stack instead of recursion.
	toCheck := make([]holder, 1)
	toCheck[0] = holder{g, 1}
	var hold holder
	for len(toCheck) > 0 {
		hold, toCheck = toCheck[len(toCheck)-1], toCheck[:len(toCheck)-1]
		h := hold.g
		//Check if we know the chromatic polynomial of this graph.
		// An edgeless graph on m vertices has polynomial k^m.
		if h.M() == 0 {
			poly[h.N()] += hold.sign
			continue
		}
		//Choose an edge.
		var i int
		var j int
	edgeLoop:
		for i = 0; i < h.N(); i++ {
			for j = 0; j < i; j++ {
				if h.IsEdge(i, j) {
					break edgeLoop
				}
			}
		}
		//Contract and delete the edge.
		// Deletion keeps the sign; contraction (merge j into i) flips it.
		tmp := h.Copy()
		tmp.RemoveEdge(i, j)
		toCheck = append(toCheck, holder{tmp, hold.sign})
		tmp = h.Copy()
		neighbours := h.Neighbours(j)
		for _, v := range neighbours {
			tmp.AddEdge(i, v)
		}
		tmp.RemoveVertex(j)
		toCheck = append(toCheck, holder{tmp, -hold.sign})
	}
	return poly
}
package consensus
import (
"math/big"
"time"
"go.sia.tech/sunyata"
)
// BlockInterval is the expected wall clock time between consecutive blocks.
const BlockInterval = 10 * time.Minute
// DifficultyAdjustmentInterval is the number of blocks between adjustments to
// the block mining target.
const DifficultyAdjustmentInterval = 2016
// adjustDifficulty scales the difficulty target w by the ratio between the
// expected and the observed duration of the last adjustment interval. The
// observed interval is clamped to within a factor of 4 of the expected one,
// limiting how fast difficulty can change per retarget.
func adjustDifficulty(w sunyata.Work, interval time.Duration) sunyata.Work {
	if interval.Round(time.Second) != interval {
		// developer error; interval should be the difference between two Unix
		// timestamps
		panic("interval not rounded to nearest second")
	}
	const maxInterval = BlockInterval * DifficultyAdjustmentInterval * 4
	const minInterval = BlockInterval * DifficultyAdjustmentInterval / 4
	if interval > maxInterval {
		interval = maxInterval
	} else if interval < minInterval {
		interval = minInterval
	}
	// newWork = oldWork * expectedInterval / observedInterval, in 256-bit math.
	workInt := new(big.Int).SetBytes(w.NumHashes[:])
	workInt.Mul(workInt, big.NewInt(int64(BlockInterval*DifficultyAdjustmentInterval)))
	workInt.Div(workInt, big.NewInt(int64(interval)))
	quo := workInt.Bytes()
	if len(quo) > len(w.NumHashes) {
		// The quotient overflowed 256 bits (interval clamping keeps the growth
		// factor <= 4, so this requires an already near-maximal difficulty);
		// clamp to the maximum representable work instead of slicing with a
		// negative index below.
		for i := range w.NumHashes {
			w.NumHashes[i] = 0xFF
		}
		return w
	}
	// Zero NumHashes before copying in the quotient: when difficulty
	// decreases, quo is shorter than the previous value and stale high-order
	// bytes of the old work would otherwise corrupt the result.
	w.NumHashes = [32]byte{}
	copy(w.NumHashes[len(w.NumHashes)-len(quo):], quo)
	return w
}
// applyHeader advances the validation context past the given block header:
// it retargets difficulty at interval boundaries, accumulates total work,
// records the header's timestamp, and appends the block to the history tree.
func applyHeader(vc ValidationContext, h sunyata.BlockHeader) ValidationContext {
	if h.Height == 0 {
		// special handling for GenesisUpdate
		vc.LastAdjust = h.Timestamp
		vc.PrevTimestamps[0] = h.Timestamp
		vc.History.AppendLeaf(h.Index())
		vc.Index = h.Index()
		return vc
	}
	blockWork := sunyata.WorkRequiredForHash(h.ID())
	// Retarget once per DifficultyAdjustmentInterval blocks. (h.Height > 0 is
	// always true here because of the genesis early-return above.)
	if h.Height > 0 && h.Height%DifficultyAdjustmentInterval == 0 {
		vc.Difficulty = adjustDifficulty(vc.Difficulty, h.Timestamp.Sub(vc.LastAdjust))
		vc.LastAdjust = h.Timestamp
	}
	vc.TotalWork = vc.TotalWork.Add(blockWork)
	// Record the timestamp: fill the window until full, then shift left.
	if vc.numTimestamps() < len(vc.PrevTimestamps) {
		vc.PrevTimestamps[vc.numTimestamps()] = h.Timestamp
	} else {
		copy(vc.PrevTimestamps[:], vc.PrevTimestamps[1:])
		vc.PrevTimestamps[len(vc.PrevTimestamps)-1] = h.Timestamp
	}
	vc.Index = h.Index()
	vc.History.AppendLeaf(vc.Index)
	return vc
}
// updatedInBlock collects the outputs spent by b's transactions, along with
// the corresponding state objects (marked spent) for every non-ephemeral
// parent. Ephemeral parents have no leaf in the state tree, so they produce
// no state object.
func updatedInBlock(vc ValidationContext, b sunyata.Block) (outputs []sunyata.Output, objects []stateObject) {
	for _, txn := range b.Transactions {
		for _, in := range txn.Inputs {
			outputs = append(outputs, in.Parent)
			if in.Parent.LeafIndex == sunyata.EphemeralLeafIndex {
				continue
			}
			// copy the proof so we never mutate transaction data
			so := outputStateObject(in.Parent, flagSpent)
			so.proof = append([]sunyata.Hash256(nil), so.proof...)
			objects = append(objects, so)
		}
	}
	return
}
// createdInBlock collects the outputs created by b — the block reward first,
// then every transaction output in order — along with their state objects.
// Outputs that are created and spent within the same block (ephemeral
// outputs) are flagged as spent immediately.
func createdInBlock(vc ValidationContext, b sunyata.Block) (outputs []sunyata.Output, objects []stateObject) {
	// Record which new outputs are consumed by inputs within this same block.
	flags := make(map[sunyata.OutputID]uint64)
	for _, txn := range b.Transactions {
		for _, in := range txn.Inputs {
			if in.Parent.LeafIndex == sunyata.EphemeralLeafIndex {
				flags[in.Parent.ID] = flagSpent
			}
		}
	}
	addOutput := func(o sunyata.Output) {
		outputs = append(outputs, o)
		objects = append(objects, outputStateObject(o, flags[o.ID]))
	}
	// The block reward is the first created output; its ID is derived from
	// the block ID and it carries the reward timelock.
	addOutput(sunyata.Output{
		ID: sunyata.OutputID{
			TransactionID: sunyata.TransactionID(b.ID()),
			Index:         0,
		},
		Value:    vc.BlockReward(),
		Address:  b.Header.MinerAddress,
		Timelock: vc.BlockRewardTimelock(),
	})
	for _, txn := range b.Transactions {
		txid := txn.ID()
		for i, out := range txn.Outputs {
			addOutput(sunyata.Output{
				ID: sunyata.OutputID{
					TransactionID: txid,
					Index:         uint64(i),
				},
				Value:    out.Value,
				Address:  out.Address,
				Timelock: 0,
			})
		}
	}
	return
}
// A StateApplyUpdate reflects the changes to consensus state resulting from the
// application of a block.
type StateApplyUpdate struct {
	Context      ValidationContext // validation context after applying the block
	SpentOutputs []sunyata.Output  // outputs consumed by the block's transactions
	NewOutputs   []sunyata.Output  // outputs created by the block, block reward first
	// updatedObjects and treeGrowth record, per level, the state-tree changes
	// needed to update external Merkle proofs (see UpdateOutputProof).
	updatedObjects [64][]stateObject
	treeGrowth     [64][]sunyata.Hash256
}
// OutputWasSpent returns true if the given Output was spent.
func (sau *StateApplyUpdate) OutputWasSpent(o sunyata.Output) bool {
	// Outputs are identified here by their leaf index in the state tree.
	for _, spent := range sau.SpentOutputs {
		if spent.LeafIndex == o.LeafIndex {
			return true
		}
	}
	return false
}
// UpdateOutputProof updates the Merkle proof of the supplied output to
// incorporate the changes made to the state tree. The output's proof must be
// up-to-date; if it is not, UpdateOutputProof may panic.
func (sau *StateApplyUpdate) UpdateOutputProof(o *sunyata.Output) {
	// First rewrite the existing proof hashes that changed, then append the
	// extra hashes required because the tree grew.
	updateProof(o.MerkleProof, o.LeafIndex, &sau.updatedObjects)
	o.MerkleProof = append(o.MerkleProof, sau.treeGrowth[len(o.MerkleProof)]...)
}
// ApplyBlock integrates a block into the current consensus state, producing
// a StateApplyUpdate detailing the resulting changes. The block is assumed to
// be fully validated.
func ApplyBlock(vc ValidationContext, b sunyata.Block) (sau StateApplyUpdate) {
	sau.Context = applyHeader(vc, b.Header)
	var updated, created []stateObject
	sau.SpentOutputs, updated = updatedInBlock(vc, b)
	sau.NewOutputs, created = createdInBlock(vc, b)
	sau.updatedObjects = sau.Context.State.updateExistingObjects(updated)
	sau.treeGrowth = sau.Context.State.addNewObjects(created)
	// createdInBlock emits one state object per new output, in the same
	// order; consume created one element at a time to copy the assigned
	// leaf index and proof onto each new output.
	for i := range sau.NewOutputs {
		sau.NewOutputs[i].LeafIndex = created[0].leafIndex
		sau.NewOutputs[i].MerkleProof = created[0].proof
		created = created[1:]
	}
	return
}
// GenesisUpdate returns the StateApplyUpdate for the genesis block b.
// The genesis block is applied to an empty context carrying only the
// initial difficulty.
func GenesisUpdate(b sunyata.Block, initialDifficulty sunyata.Work) StateApplyUpdate {
	return ApplyBlock(ValidationContext{
		Difficulty: initialDifficulty,
	}, b)
}
// A StateRevertUpdate reflects the changes to consensus state resulting from the
// removal of a block.
type StateRevertUpdate struct {
	Context      ValidationContext // validation context as it was before the reverted block
	SpentOutputs []sunyata.Output  // outputs the reverted block had spent (now unspent again)
	NewOutputs   []sunyata.Output  // outputs the reverted block had created (now removed)
	// updatedObjects records, per tree level, the objects whose hashes must be
	// restored in external Merkle proofs (see UpdateOutputProof).
	updatedObjects [64][]stateObject
}
// OutputWasRemoved returns true if the specified Output was reverted.
func (sru *StateRevertUpdate) OutputWasRemoved(o sunyata.Output) bool {
	// Leaves at or beyond the reverted tree's size were added by the removed
	// block and therefore no longer exist.
	return o.LeafIndex >= sru.Context.State.NumLeaves
}
// UpdateOutputProof updates the Merkle proof of the supplied output to
// incorporate the changes made to the state tree. The output's proof must be
// up-to-date; if it is not, UpdateOutputProof may panic.
func (sru *StateRevertUpdate) UpdateOutputProof(o *sunyata.Output) {
	// If the tree shrank past part of the proof, truncate the proof at the
	// height where the output's path merges with the reverted tree.
	if mh := mergeHeight(sru.Context.State.NumLeaves, o.LeafIndex); mh <= len(o.MerkleProof) {
		o.MerkleProof = o.MerkleProof[:mh-1]
	}
	updateProof(o.MerkleProof, o.LeafIndex, &sru.updatedObjects)
}
// RevertBlock produces a StateRevertUpdate from a block and the
// ValidationContext prior to that block.
func RevertBlock(vc ValidationContext, b sunyata.Block) (sru StateRevertUpdate) {
sru.Context = vc
sru.SpentOutputs, _ = updatedInBlock(vc, b)
sru.NewOutputs, _ = createdInBlock(vc, b)
sru.updatedObjects = objectsByTree(b.Transactions)
return
} | consensus/update.go | 0.756537 | 0.444263 | update.go | starcoder |
package expressions
import (
"base/docs"
"datavalues"
)
// LT builds a binary "<" expression over the two operands.
func LT(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		cmp, err := l.Compare(r)
		if err != nil {
			return nil, err
		}
		return datavalues.MakeBool(cmp == datavalues.LessThan), nil
	}
	return &BinaryExpression{
		name:          "<",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("Less than."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
// LTE builds a binary "<=" expression over the two operands.
func LTE(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		cmp, err := l.Compare(r)
		if err != nil {
			return nil, err
		}
		// Anything strictly below GreaterThan is LessThan or Equal.
		return datavalues.MakeBool(cmp < datavalues.GreaterThan), nil
	}
	return &BinaryExpression{
		name:          "<=",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("Less than or equal to."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
// EQ builds a binary "=" expression over the two operands.
func EQ(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		cmp, err := l.Compare(r)
		if err != nil {
			return nil, err
		}
		return datavalues.MakeBool(cmp == datavalues.Equal), nil
	}
	return &BinaryExpression{
		name:          "=",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("Equal."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
// NEQ builds a binary "!=" expression over the two operands.
func NEQ(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		cmp, err := l.Compare(r)
		if err != nil {
			return nil, err
		}
		return datavalues.MakeBool(cmp != datavalues.Equal), nil
	}
	return &BinaryExpression{
		name:          "!=",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("Not equal."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
// GT builds a binary ">" expression over the two operands.
func GT(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		cmp, err := l.Compare(r)
		if err != nil {
			return nil, err
		}
		return datavalues.MakeBool(cmp == datavalues.GreaterThan), nil
	}
	return &BinaryExpression{
		name:          ">",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("Greater than."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
// GTE builds a binary ">=" expression over the two operands.
func GTE(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		cmp, err := l.Compare(r)
		if err != nil {
			return nil, err
		}
		// Anything strictly above LessThan is GreaterThan or Equal.
		return datavalues.MakeBool(cmp > datavalues.LessThan), nil
	}
	return &BinaryExpression{
		name:          ">=",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("Greater than or equal to."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
// AND builds a logical conjunction expression over the two operands.
func AND(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		return datavalues.ToValue(datavalues.AsBool(l) && datavalues.AsBool(r)), nil
	}
	return &BinaryExpression{
		name:          "AND",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("Logic AND."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
// OR builds a logical disjunction expression over the two operands.
func OR(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		return datavalues.ToValue(datavalues.AsBool(l) || datavalues.AsBool(r)), nil
	}
	return &BinaryExpression{
		name:          "OR",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("Logic OR."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
// LIKE builds a pattern-match expression; the right operand is the pattern.
func LIKE(left interface{}, right interface{}) IExpression {
	args := expressionsFor(left, right)
	update := func(l datavalues.IDataValue, r datavalues.IDataValue) (datavalues.IDataValue, error) {
		pattern := datavalues.AsString(r)
		return datavalues.ToValue(datavalues.Like(pattern, l)), nil
	}
	return &BinaryExpression{
		name:          "LIKE",
		argumentNames: [][]string{{"left", "right"}},
		description:   docs.Text("LIKE."),
		validate:      All(),
		left:          args[0],
		right:         args[1],
		updateFn:      update,
	}
}
func NOT_LIKE(left interface{}, right interface{}) IExpression {
exprs := expressionsFor(left, right)
return &BinaryExpression{
name: "NOT LIKE",
argumentNames: [][]string{
{"left", "right"},
},
description: docs.Text("NOT LIKE."),
validate: All(),
left: exprs[0],
right: exprs[1],
updateFn: func(left datavalues.IDataValue, right datavalues.IDataValue) (datavalues.IDataValue, error) {
r := datavalues.AsString(right)
return datavalues.ToValue(!datavalues.Like(r, left)), nil
},
}
} | src/expressions/expression_condition.go | 0.594551 | 0.474205 | expression_condition.go | starcoder |
package gorm
import (
"fmt"
"strings"
"github.com/infobloxopen/atlas-app-toolkit/query"
)
// FilterStringToGorm is a shortcut to parse a filter string using default FilteringParser implementation
// and call FilteringToGorm on the returned filtering expression.
func FilterStringToGorm(filter string) (string, []interface{}, error) {
	parsed, err := query.ParseFiltering(filter)
	if err != nil {
		return "", nil, err
	}
	return FilteringToGorm(parsed)
}
// FilteringToGorm returns GORM Plain SQL representation of the filtering expression.
// A nil filtering produces an empty condition with no error.
func FilteringToGorm(m *query.Filtering) (string, []interface{}, error) {
	if m == nil {
		return "", nil, nil
	}
	// Dispatch on the concrete root node of the expression tree.
	switch root := m.Root.(type) {
	case *query.Filtering_Operator:
		return LogicalOperatorToGorm(root.Operator)
	case *query.Filtering_StringCondition:
		return StringConditionToGorm(root.StringCondition)
	case *query.Filtering_NumberCondition:
		return NumberConditionToGorm(root.NumberCondition)
	case *query.Filtering_NullCondition:
		return NullConditionToGorm(root.NullCondition)
	}
	return "", nil, fmt.Errorf("%T type is not supported in Filtering", m.Root)
}
// LogicalOperatorToGorm returns GORM Plain SQL representation of the logical operator.
// Both operands are rendered recursively, combined with AND/OR, and the whole
// expression is prefixed with NOT when IsNegative is set. Placeholder
// arguments from the left operand precede those from the right.
func LogicalOperatorToGorm(lop *query.LogicalOperator) (string, []interface{}, error) {
	var lres string
	var largs []interface{}
	var err error
	// Render the left operand, which may itself be a nested operator.
	switch l := lop.Left.(type) {
	case *query.LogicalOperator_LeftOperator:
		lres, largs, err = LogicalOperatorToGorm(l.LeftOperator)
	case *query.LogicalOperator_LeftStringCondition:
		lres, largs, err = StringConditionToGorm(l.LeftStringCondition)
	case *query.LogicalOperator_LeftNumberCondition:
		lres, largs, err = NumberConditionToGorm(l.LeftNumberCondition)
	case *query.LogicalOperator_LeftNullCondition:
		lres, largs, err = NullConditionToGorm(l.LeftNullCondition)
	default:
		return "", nil, fmt.Errorf("%T type is not supported in Filtering", l)
	}
	if err != nil {
		return "", nil, err
	}
	var rres string
	var rargs []interface{}
	// Render the right operand the same way.
	switch r := lop.Right.(type) {
	case *query.LogicalOperator_RightOperator:
		rres, rargs, err = LogicalOperatorToGorm(r.RightOperator)
	case *query.LogicalOperator_RightStringCondition:
		rres, rargs, err = StringConditionToGorm(r.RightStringCondition)
	case *query.LogicalOperator_RightNumberCondition:
		rres, rargs, err = NumberConditionToGorm(r.RightNumberCondition)
	case *query.LogicalOperator_RightNullCondition:
		rres, rargs, err = NullConditionToGorm(r.RightNullCondition)
	default:
		return "", nil, fmt.Errorf("%T type is not supported in Filtering", r)
	}
	if err != nil {
		return "", nil, err
	}
	var o string
	switch lop.Type {
	case query.LogicalOperator_AND:
		o = "AND"
	case query.LogicalOperator_OR:
		o = "OR"
	}
	var neg string
	if lop.IsNegative {
		neg = "NOT"
	}
	return fmt.Sprintf("%s(%s %s %s)", neg, lres, o, rres), append(largs, rargs...), nil
}
// StringConditionToGorm returns GORM Plain SQL representation of the string condition.
// The condition value is passed as a bound placeholder; note that the field
// path itself is interpolated into the SQL and is expected to come from a
// validated filtering expression.
func StringConditionToGorm(c *query.StringCondition) (string, []interface{}, error) {
	var op string
	switch c.Type {
	case query.StringCondition_EQ:
		op = "="
	case query.StringCondition_MATCH:
		op = "~"
	}
	prefix := ""
	if c.IsNegative {
		prefix = "NOT"
	}
	column := strings.Join(c.FieldPath, ".")
	return fmt.Sprintf("%s(%s %s ?)", prefix, column, op), []interface{}{c.Value}, nil
}
// NumberConditionToGorm returns GORM Plain SQL representation of the number condition.
// The condition value is passed as a bound placeholder.
func NumberConditionToGorm(c *query.NumberCondition) (string, []interface{}, error) {
	// Map the condition type onto its SQL comparison operator.
	var op string
	switch c.Type {
	case query.NumberCondition_EQ:
		op = "="
	case query.NumberCondition_GT:
		op = ">"
	case query.NumberCondition_GE:
		op = ">="
	case query.NumberCondition_LT:
		op = "<"
	case query.NumberCondition_LE:
		op = "<="
	}
	prefix := ""
	if c.IsNegative {
		prefix = "NOT"
	}
	column := strings.Join(c.FieldPath, ".")
	return fmt.Sprintf("%s(%s %s ?)", prefix, column, op), []interface{}{c.Value}, nil
}
// NullConditionToGorm returns GORM Plain SQL representation of the null condition.
func NullConditionToGorm(c *query.NullCondition) (string, []interface{}, error) {
o := "IS NULL"
var neg string
if c.IsNegative {
neg = "NOT"
}
return fmt.Sprintf("%s(%s %s)", neg, strings.Join(c.FieldPath, "."), o), nil, nil
} | gorm/filtering.go | 0.684686 | 0.512449 | filtering.go | starcoder |
package memory
import (
"fmt"
"reflect"
"github.com/google/gapid/core/math/u64"
"github.com/google/gapid/core/os/device"
)
// AlignOf returns the byte alignment of the type t.
// Alignments come from the target device's memory layout m, not from the
// host's Go layout, since t models a type in the target's address space.
func AlignOf(t reflect.Type, m *device.MemoryLayout) uint64 {
	// handlePointer reports the target pointer alignment when t implements
	// the pointer interface; used by branches that may wrap a pointer type.
	handlePointer := func() (uint64, bool) {
		if t.Implements(tyPointer) {
			return uint64(m.GetPointer().GetAlignment()), true
		}
		return 0, false
	}
	switch t.Kind() {
	case reflect.Uint8:
		// A uint8 may model the target's 'char' type, which can have its own
		// alignment in the layout.
		if t.Implements(tyCharTy) {
			return uint64(m.GetChar().GetAlignment())
		}
		return uint64(m.GetI8().GetAlignment())
	case reflect.Bool, reflect.Int8:
		return uint64(m.GetI8().GetAlignment())
	case reflect.Int16, reflect.Uint16:
		return uint64(m.GetI16().GetAlignment())
	case reflect.Int32, reflect.Uint32:
		return uint64(m.GetI32().GetAlignment())
	case reflect.Float32:
		return uint64(m.GetF32().GetAlignment())
	case reflect.Float64:
		return uint64(m.GetF64().GetAlignment())
	case reflect.Int64, reflect.Uint64:
		// 64-bit Go values may model the target's 'int'/'uint' or 'size_t',
		// each with their own layout entries.
		if t.Implements(tyIntTy) || t.Implements(tyUintTy) {
			return uint64(m.GetInteger().GetAlignment())
		}
		if t.Implements(tySizeTy) {
			return uint64(m.GetSize().GetAlignment())
		}
		return uint64(m.GetI64().GetAlignment())
	case reflect.Int, reflect.Uint:
		return uint64(m.GetInteger().GetAlignment())
	case reflect.Array, reflect.Slice:
		// Arrays and slices align to their element type.
		return AlignOf(t.Elem(), m)
	case reflect.String:
		return 1
	case reflect.Struct:
		if size, ok := handlePointer(); ok {
			return size
		}
		// A struct aligns to the strictest alignment among its fields.
		alignment := uint64(1)
		for i, c := 0, t.NumField(); i < c; i++ {
			if a := AlignOf(t.Field(i).Type, m); alignment < a {
				alignment = a
			}
		}
		return alignment
	default:
		if size, ok := handlePointer(); ok {
			return size
		}
		panic(fmt.Errorf("MemoryLayout.AlignOf not implemented for type %v (%v)", t, t.Kind()))
	}
}
// SizeOf returns the byte size of the type t under the memory layout m, using
// the same marker-interface overrides as AlignOf. Panics for kinds that have
// no mapping and do not implement tyPointer.
func SizeOf(t reflect.Type, m *device.MemoryLayout) uint64 {
	// handlePointer covers types implementing the Pointer interface, which use
	// the target's pointer size regardless of their Go kind.
	handlePointer := func() (uint64, bool) {
		if t.Implements(tyPointer) {
			return uint64(m.GetPointer().GetSize()), true
		}
		return 0, false
	}
	switch t.Kind() {
	case reflect.Uint8:
		if t.Implements(tyCharTy) {
			return uint64(m.GetChar().GetSize())
		}
		return uint64(m.GetI8().GetSize())
	case reflect.Bool, reflect.Int8:
		return uint64(m.GetI8().GetSize())
	case reflect.Int16, reflect.Uint16:
		return uint64(m.GetI16().GetSize())
	case reflect.Int32, reflect.Uint32:
		return uint64(m.GetI32().GetSize())
	case reflect.Float32:
		return uint64(m.GetF32().GetSize())
	case reflect.Float64:
		return uint64(m.GetF64().GetSize())
	case reflect.Int64, reflect.Uint64:
		if t.Implements(tyIntTy) || t.Implements(tyUintTy) {
			return uint64(m.GetInteger().GetSize())
		}
		if t.Implements(tySizeTy) {
			return uint64(m.GetSize().GetSize())
		}
		return uint64(m.GetI64().GetSize())
	case reflect.Int, reflect.Uint:
		return uint64(m.GetInteger().GetSize())
	case reflect.Array:
		// NOTE(review): unlike AlignOf, reflect.Slice is not handled here and
		// falls through to the default panic — confirm slices are sized
		// elsewhere.
		return SizeOf(t.Elem(), m) * uint64(t.Len())
	case reflect.String:
		// NOTE(review): strings report size 1 (matching AlignOf) — confirm
		// this placeholder is intentional.
		return 1
	case reflect.Struct:
		if size, ok := handlePointer(); ok {
			return size
		}
		// Accumulate field sizes with per-field alignment padding, then pad
		// the total to the struct's own alignment (C-style struct layout).
		var size, align uint64
		for i, c := 0, t.NumField(); i < c; i++ {
			f := t.Field(i)
			a := AlignOf(f.Type, m)
			size = u64.AlignUp(size, a)
			size += SizeOf(f.Type, m)
			align = u64.Max(align, a)
		}
		size = u64.AlignUp(size, align)
		return size
	default:
		if size, ok := handlePointer(); ok {
			return size
		}
		panic(fmt.Errorf("MemoryLayout.SizeOf not implemented for type %v (%v)", t, t.Kind()))
	}
} | gapis/memory/alignof_sizeof.go | 0.645455 | 0.432183 | alignof_sizeof.go | starcoder |
package main
import (
"fmt"
)
//Heap is a struct for deciding the priority of the decision.
// It is a binary heap over Vars keyed by activity: Less(i, j) means
// activity[i] > activity[j], so the root holds the highest-activity variable.
type Heap struct {
	data []Var // The content of data (heap slots, root at index 0)
	indices []int // The heap slot of each Var (-1 when the Var is not in the heap)
	activity []float64 // The priority of each variable, indexed by Var.
}

//NewHeap returns a pointer to an empty Heap.
func NewHeap() *Heap {
	return &Heap{}
}

//Less reports whether variable i has priority over variable j.
// i and j index into activity (i.e. they are Var values, not heap slots).
func (h *Heap) Less(i, j int) bool {
	return h.activity[i] > h.activity[j]
}

//Size returns the number of variables currently in the heap.
func (h *Heap) Size() int {
	return len(h.data)
}

//Empty reports whether the heap contains no variables.
func (h *Heap) Empty() bool {
	return len(h.data) == 0
}

// InHeap reports whether Var x is currently stored in the heap.
func (h *Heap) InHeap(x Var) bool {
	return int(x) < len(h.indices) && h.indices[x] >= 0
}
// Decrease restores the heap invariant for x after its heap order decreased
// (its activity increased), moving it toward the root. Panics if x is not in
// the heap.
func (h *Heap) Decrease(x Var) {
	if !h.InHeap(x) {
		panic(fmt.Errorf("The var is not in heap: %d", x))
	}
	h.percolateUp(h.indices[x])
}

// Increase restores the heap invariant for x after its heap order increased
// (its activity decreased), moving it toward the leaves. Panics if x is not
// in the heap.
func (h *Heap) Increase(x Var) {
	if !h.InHeap(x) {
		panic(fmt.Errorf("The var is not in heap: %d", x))
	}
	h.percolateDown(h.indices[x])
}

// Activity returns the priority value associated with Var x.
func (h *Heap) Activity(x Var) float64 {
	return h.activity[x]
}

// Update inserts x if it is absent; otherwise it re-establishes the heap
// invariant for x's current activity by percolating in both directions.
func (h *Heap) Update(x Var) {
	if !h.InHeap(x) {
		h.PushBack(x)
	} else {
		h.percolateUp(h.indices[x])
		h.percolateDown(h.indices[x])
	}
}
// RemoveMin pops and returns the root of the heap (the highest-activity Var,
// since Less orders by descending activity). The caller must ensure the heap
// is non-empty.
func (h *Heap) RemoveMin() Var {
	x := h.data[0]
	// Move the last element to the root, shrink, then restore the invariant.
	h.data[0] = h.data[h.Size()-1]
	h.indices[h.data[0]] = 0
	h.indices[x] = -1 // mark x as no longer in the heap
	h.data = h.data[:h.Size()-1]
	if h.Size() > 1 {
		h.percolateDown(0)
	}
	return x
}

// PushBack appends Var x to the end of the heap, growing the bookkeeping
// slices as needed (new activities start at 0). Panics if x is already
// present.
// NOTE(review): the new element is not percolated up here — confirm callers
// rely on Update/Decrease to restore the invariant after insertion.
func (h *Heap) PushBack(x Var) {
	if h.InHeap(x) {
		panic(fmt.Errorf("This var is already inserted: %v", x))
	}
	for int(x) >= len(h.indices) {
		h.indices = append(h.indices, -1)
		h.activity = append(h.activity, 0.0)
	}
	h.data = append(h.data, x)
	h.indices[x] = len(h.data) - 1
}
// percolateUp moves the element at heap slot i toward the root until its
// parent has higher (or equal) heap priority, updating indices on the way.
func (h *Heap) percolateUp(i int) {
	x := h.data[i]
	p := parentIndex(i)
	// Fix: Less indexes activity by Var (see Activity and percolateDown,
	// which pass int(h.data[...])). The previous code passed the raw parent
	// slot index p, so it compared x against an unrelated variable's
	// activity. Compare against the Var stored in the parent slot instead.
	for i != 0 && h.Less(int(x), int(h.data[p])) {
		h.indices[h.data[p]] = i
		h.data[i] = h.data[p]
		i = p
		p = parentIndex(i)
	}
	h.data[i] = x
	h.indices[x] = i
}
// percolateDown moves the element at heap slot i toward the leaves, always
// descending into the higher-priority child, until the invariant holds.
func (h *Heap) percolateDown(i int) {
	x := h.data[i]
	for leftIndex(i) < len(h.data) {
		var childIndex int
		// Pick the child with higher heap priority (larger activity).
		if rightIndex(i) < len(h.data) && h.Less(int(h.data[rightIndex(i)]), int(h.data[leftIndex(i)])) {
			childIndex = rightIndex(i)
		} else {
			childIndex = leftIndex(i)
		}
		// Stop once neither child outranks x.
		if !h.Less(int(h.data[childIndex]), int(x)) {
			break
		}
		h.data[i] = h.data[childIndex]
		h.indices[h.data[childIndex]] = i
		i = childIndex
	}
	h.data[i] = x
	h.indices[x] = i
}

// leftIndex returns the heap slot of i's left child.
func leftIndex(i int) int {
	return 2*i + 1
}

// rightIndex returns the heap slot of i's right child.
func rightIndex(i int) int {
	return 2*i + 2
}

// parentIndex returns the heap slot of i's parent.
func parentIndex(i int) int {
	return (i - 1) >> 1
}
// InsertVarOrder puts Var x back into the solver's decision heap if it is a
// decision variable and not already queued.
func (s *Solver) InsertVarOrder(x Var) {
	if !s.VarOrder.InHeap(x) && s.Decision[x] {
		s.VarOrder.PushBack(x)
	}
} | heap.go | 0.699973 | 0.452475 | heap.go | starcoder |
package graph
import "GoCausal/utils"
// NodePoint pairs a node with one of its incident edges; it represents a
// directed traversal step "from node along edge".
type NodePoint struct {
	node *Node
	edge *Edge
}
// MapKeyInNodeSlice reports whether needle (compared by pointer identity)
// occurs in haystack.
//
// The previous implementation built a throwaway map on every call; a plain
// linear scan returns the same result without the per-call allocation.
func MapKeyInNodeSlice(haystack []*Node, needle *Node) bool {
	for _, n := range haystack {
		if n == needle {
			return true
		}
	}
	return false
}
// GetDistalNode returns the node at the far end of the step's edge from n.node.
func (n *NodePoint) GetDistalNode() *Node {
	return n.edge.GetDistalNode(n.node)
}
// ExistsDirectedPathFromToBreadthFirst reports whether a directed path exists
// from nodeFrom to nodeTo in g, using breadth-first search that only follows
// edges traversable in the directed sense (TraverseDirected).
func ExistsDirectedPathFromToBreadthFirst(nodeFrom, nodeTo *Node, g *Graph) bool {
	v := []*Node{nodeFrom} // visited set (linear-scan membership)
	q := utils.LinkedQueue{}
	q.Append(nodeFrom)
	for q.Size() > 0 {
		t := q.Pop().(*Node)
		for _, u := range g.GetAdjacentNodes(t) {
			// NOTE(review): a mutual parent relationship (2-cycle) returns
			// true immediately, regardless of whether nodeTo was reached —
			// confirm this short-circuit is intended.
			if g.IsParentOf(t, u) && g.IsParentOf(u, t) {
				return true
			}
			edge := g.GetEdge(t, u)
			c := TraverseDirected(t, edge)
			// Skip edges not traversable in this direction, and nodes
			// already visited.
			if c == nil || MapKeyInNodeSlice(v, c) {
				continue
			}
			if c == nodeTo {
				return true
			}
			v = append(v, c)
			q.Append(c)
		}
	}
	return false
}
/*
IsDConnectedTo
Returns true if node1 is d-connected to node2 given the conditioning set z.
The search walks (node, edge) steps: from each reached step it tries to
extend through the far node b along every edge whose junction at b is not
blocked (see Reachable).
NOTE(review): visited (node, edge) steps are not tracked; confirm the search
terminates for the (presumably acyclic) graphs this is used on.
*/
func IsDConnectedTo(node1, node2 *Node, z []*Node, g *Graph) bool {
	// TODO
	if node1 == node2 {
		return true
	}
	q := utils.LinkedQueue{}
	// Seed the queue with every edge out of node1; a direct edge to node2 is
	// an immediate connection.
	for _, e := range g.GetNodeEdges(node1) {
		if e.GetDistalNode(node1) == node2 {
			return true
		}
		q.Append(NodePoint{node: node1, edge: e})
	}
	for q.Size() > 0 {
		nodePoint := q.Pop().(NodePoint)
		a := nodePoint.node
		b := nodePoint.GetDistalNode()
		for _, e := range g.GetNodeEdges(b) {
			c := e.GetDistalNode(b)
			if c == a {
				continue
			}
			if Reachable(nodePoint.edge, e, a, z, g) {
				if c == node2 {
					return true
				} else {
					q.Append(NodePoint{node: b, edge: e})
				}
			}
		}
	}
	return false
}
/*
Reachable
Determines whether a walk may continue from edge1 into edge2 through their
shared node b (the node of edge1 opposite a), given conditioning set z:
a non-collider junction passes when b is not conditioned on; a collider
junction passes only when b is an ancestor of z.
*/
func Reachable(edge1, edge2 *Edge, a *Node, z []*Node, g *Graph) bool {
	b := edge1.GetDistalNode(a)
	// b is a collider on this path when both edge marks at b are arrowheads.
	collider := edge1.GetProximalEndpoint(b) == ARROW && edge2.GetProximalEndpoint(b) == ARROW
	if !collider && !MapKeyInNodeSlice(z, b) {
		return true
	}
	ancestor := IsAncestor(b, z, g)
	return collider && ancestor
}
/*
IsAncestor
Determines if a given node is an ancestor of any node in the set z
(membership in z itself counts). The search walks upward from z through
parents until node is found or no parents remain.
*/
func IsAncestor(node *Node, z []*Node, g *Graph) bool {
	if MapKeyInNodeSlice(z, node) {
		return true
	}
	q := utils.LinkedQueue{}
	for _, n := range z {
		q.Append(n)
	}
	for q.Size() > 0 {
		t := q.Pop().(*Node)
		if t == node {
			return true
		}
		// NOTE(review): only current queue membership is checked, not nodes
		// already popped; a cyclic parent structure could re-enqueue nodes
		// indefinitely — confirm the parent graph is acyclic.
		for _, c := range g.GetParents(t) {
			if !q.Contains(c) {
				q.Append(c)
			}
		}
	}
	return false
} | graph/GraphUtils.go | 0.505859 | 0.434221 | GraphUtils.go | starcoder |
package indicators
import "fmt"
// SMA calculates the simple moving average over each window of length
// period. The result has len(slice)-period+1 entries and is nil when
// period exceeds len(slice).
func (slice mfloat) SMA(period int) []float64 {
	var smaSlice []float64
	for end := period; end <= len(slice); end++ {
		window := slice[end-period : end]
		smaSlice = append(smaSlice, Sum(window)/float64(period))
	}
	return smaSlice
}
// RollingMax returns the maximum of each sliding window of length period.
// NOTE(review): the loop stops before the final window (the result has
// len(slice)-period entries, one fewer than SMA's convention) — confirm
// this off-by-one is intentional before changing it.
func (slice mfloat) RollingMax(period int) []float64 {
	var maxSlice []float64
	for start := 0; start < len(slice)-period; start++ {
		maxSlice = append(maxSlice, sliceMax(slice[start:start+period]))
	}
	return maxSlice
}
// RollingMin returns the minimum of each sliding window of length period.
// NOTE(review): as in RollingMax, the final window is not emitted — confirm
// the off-by-one is intentional.
func (slice mfloat) RollingMin(period int) []float64 {
	var minSlice []float64
	for start := 0; start < len(slice)-period; start++ {
		minSlice = append(minSlice, sliceMin(slice[start:start+period]))
	}
	return minSlice
}
// EMA calculates the exponential moving average with smoothing factor
// k = 2/(period+1). The first output equals the first input; each following
// value is slice[i]*k + prev*(1-k). The result has the same length as slice.
// Panics on an empty slice (as before).
func (slice mfloat) EMA(period int) []float64 {
	k := 2.0 / float64(period+1)
	emaSlice := []float64{slice[0]}
	for i := 1; i < len(slice); i++ {
		prev := emaSlice[i-1]
		emaSlice = append(emaSlice, slice[i]*k+prev*(1-k))
	}
	return emaSlice
}
// BollingerBands returns the middle band (SMA), upper band and lower band of
// a slice — in that order.
// NOTE(review): the standard deviation is computed once over the whole SMA
// series and applied as a constant offset, not as a rolling per-window std
// as in the textbook Bollinger Band definition — confirm this is intended.
func BollingerBands(slice mfloat, period int, nStd float64) ([]float64, []float64, []float64) {
	var upperBand, lowerBand, middleBand mfloat
	middleBand = slice.SMA(period)
	std := Std(middleBand)
	upperBand = middleBand.AddToAll(std * nStd)
	lowerBand = middleBand.AddToAll(-1.0 * std * nStd)
	return middleBand, upperBand, lowerBand
}
// MACD (moving average convergence divergence) returns the MACD line
// (EMA(fast) - EMA(slow)) and its signal line (EMA of the MACD line).
// ema optionally supplies [fast, slow, signal] periods; fewer than three
// values falls back to the conventional {12, 26, 9}.
func MACD(data mfloat, ema ...int) ([]float64, []float64) {
	if len(ema) < 3 {
		ema = []int{12, 26, 9}
	}
	fast := data.EMA(ema[0])
	slow := data.EMA(ema[1])
	var macdLine mfloat = SubSlices(fast, slow)
	signal := macdLine.EMA(ema[2])
	return macdLine, signal
}
// OBV computes On-Balance Volume: starting from the first volume, each step
// adds the current volume when price rose since the previous bar, subtracts
// it when price fell, and carries the total forward otherwise.
// priceData and volumeData must have the same, non-zero length.
//
// Fix: the previous loop ranged over volumeData[1:] with a zero-based index,
// so its first iteration read priceData[-1] and obv[-1] and panicked with an
// index-out-of-range; the indices now start at the second bar.
func OBV(priceData, volumeData mfloat) []float64 {
	obv := []float64{volumeData[0]}
	for i := 1; i < len(volumeData); i++ {
		switch {
		case priceData[i] > priceData[i-1]:
			obv = append(obv, obv[i-1]+volumeData[i])
		case priceData[i] < priceData[i-1]:
			obv = append(obv, obv[i-1]-volumeData[i])
		default:
			obv = append(obv, obv[i-1])
		}
	}
	return obv
}
// IchimokuCloud computes the five Ichimoku Cloud lines from price, low and
// high series. configs supplies the periods: configs[0] for the conversion
// line, configs[1] for the base line (and 2*configs[1] for leading span B),
// and configs[3] as the lagging-span offset.
func IchimokuCloud(priceData, lowData, highData mfloat, configs []int) ([]float64, []float64, []float64, []float64, []float64) {
	// Conversion line (Tenkan-sen): midpoint of the rolling high/low over
	// the conversion period.
	conversionLine := DivSlice(AddSlices(highData.RollingMax(configs[0]), lowData.RollingMin(configs[0])), 2)

	// Base line (Kijun-sen): same midpoint over the base period.
	baseLine := DivSlice(AddSlices(highData.RollingMax(configs[1]), lowData.RollingMin(configs[1])), 2)

	// Leading span A (Senkou A): midpoint of conversion and base lines.
	leadSpanA := DivSlice(AddSlicesFromReverse(conversionLine, baseLine), 2)

	// Leading span B (Senkou B): rolling midpoint over twice the base period.
	leadSpanB := DivSlice(AddSlices(highData.RollingMax(configs[1]*2), lowData.RollingMin(configs[1]*2)), 2)

	// Lagging span (Chikou): the price series shifted by configs[3].
	lagSpan := []float64(priceData[configs[3]:])

	// NOTE(review): leftover debug trace — kept to preserve observable
	// behavior (and the fmt import); consider removing both together.
	fmt.Println("Done with cloud")
	return conversionLine, baseLine, leadSpanA, leadSpanB, lagSpan
} | indicators.go | 0.59843 | 0.582966 | indicators.go | starcoder |
package vision
import (
"sort"
"github.com/joaowiciuk/matrix"
)
// Region stores a set of candidate matches as parallel slices: D holds each
// match's distance score and (Xi, Xf, Yi, Yf) its bounding coordinates.
// Region implements sort.Interface, ordering matches by ascending distance.
type Region struct {
	D []float64
	Xi, Xf []int
	Yi, Yf []int
}

// Len reports the number of stored matches (sort.Interface).
func (R *Region) Len() int {
	return len(R.D)
}

// Less orders matches by ascending distance (sort.Interface).
func (R *Region) Less(i, j int) bool {
	return R.D[i] < R.D[j]
}

// Swap exchanges matches i and j across all parallel slices (sort.Interface).
func (R *Region) Swap(i, j int) {
	R.D[i], R.D[j] = R.D[j], R.D[i]
	R.Xi[i], R.Xi[j] = R.Xi[j], R.Xi[i]
	R.Xf[i], R.Xf[j] = R.Xf[j], R.Xf[i]
	R.Yi[i], R.Yi[j] = R.Yi[j], R.Yi[i]
	R.Yf[i], R.Yf[j] = R.Yf[j], R.Yf[i]
}
/* func Find(O, S *matrix.Matrix) (R *Region) {
xc, yc := matrix.Center(S)
ms, ns := S.Size()
mo, no := O.Size()
left, right := xc, ns-xc-1
top, bottom := yc, ms-yc-1
R = &Region{
D: make([]float64, mo*no),
Xi: make([]int, mo*no),
Xf: make([]int, mo*no),
Yi: make([]int, mo*no),
Yf: make([]int, mo*no),
}
P := matrix.Padding(O, left, right, top, bottom)
for x := left; x < no+left; x++ {
for y := top; y < mo+top; y++ {
S := matrix.SubMatrix(P, x-left, x+right+2, y-top, y+bottom+2)
xi := x - left
yi := y - top
fmt.Printf("D[(%d,%d)-(%d,%d)] = %.4f\n", xi-left, xi+right+2, yi-top, yi+bottom+2, matrix.Distance(O, S, matrix.Norm1))
(*R).D[yi*no+xi] = matrix.Distance(O, S, matrix.Norm1)
(*R).Xi[yi*no+xi] = xi - left
(*R).Xf[yi*no+xi] = xi + right + 2
(*R).Yi[yi*no+xi] = yi - top
(*R).Yf[yi*no+xi] = yi + bottom + 2
}
}
for x := left; x < no+left; x++ {
for y := top; y < mo+top; y++ {
S := matrix.SubMatrix(P, x-left, x+right+2, y-top, y+bottom+2)
d := matrix.Distance(O, S, matrix.Norm1)
fmt.Println(d)
}
}
return
} */
// Find finds O in I
/* func Find(O, I *matrix.Matrix) (R *Region) {
xc, yc := matrix.Center(O)
mo, no := O.Size()
mi, ni := I.Size()
left, right := xc, no-xc-1
top, bottom := yc, mo-yc-1
P := matrix.Padding(I, left, right, top, bottom)
R = &Region{
D: make([]float64, mi*ni),
Xi: make([]int, mi*ni),
Xf: make([]int, mi*ni),
Yi: make([]int, mi*ni),
Yf: make([]int, mi*ni),
}
for x := left; x < ni+left; x++ {
for y := top; y < mi+top; y++ {
S := matrix.SubMatrix(P, x-left, x+right+2, y-top, y+bottom+2)
d := matrix.Distance(S, O, matrix.Norm1)
xi := x - left
yi := y - top
(*R).D[yi*ni+xi] = d
(*R).Xi[yi*ni+xi] = xi - left
(*R).Xf[yi*ni+xi] = xi + right + 2
(*R).Yi[yi*ni+xi] = yi - top
(*R).Yf[yi*ni+xi] = yi + bottom + 2
}
}
return
} */
// Find locates occurrences of pattern O inside image I, returning every
// placement whose Norm1 distance to O is at most dist, sorted by ascending
// distance. I is padded so the pattern window can be centered on every pixel;
// for each position a sub-matrix the size of O is extracted and compared.
func Find(O, I *matrix.Matrix, dist float64) (R *Region) {
	xc, yc := O.Center()
	mo, no := O.Size()
	mi, ni := I.Size()
	// Margins of the pattern around its center point.
	left, right := xc, no-xc-1
	top, bottom := yc, mo-yc-1
	P := I.Pad(left, right, top, bottom)
	R = &Region{
		D: make([]float64, 0),
		Xi: make([]int, 0),
		Xf: make([]int, 0),
		Yi: make([]int, 0),
		Yf: make([]int, 0),
	}
	for x := left; x < ni+left; x++ {
		for y := top; y < mi+top; y++ {
			// Window of the padded image centered on (x, y).
			S := P.Submat(x-left, x+right+2, y-top, y+bottom+2)
			d := S.Dist(O, matrix.Norm1)
			xi := x - left
			yi := y - top
			if d <= dist {
				// Record the match; coordinates follow the same offsets
				// used to slice the padded image.
				(*R).D = append((*R).D, d)
				(*R).Xi = append((*R).Xi, xi-left)
				(*R).Xf = append((*R).Xf, xi+right+2)
				(*R).Yi = append((*R).Yi, yi-top)
				(*R).Yf = append((*R).Yf, yi+bottom+2)
			}
		}
	}
	sort.Sort(R)
	return
} | find.go | 0.573678 | 0.542318 | find.go | starcoder |
package aoc2020
/*
Day 07 - Handy Haversacks
https://adventofcode.com/2020/day/7
Sample input
dim red bags contain 2 bright gold bags, 5 striped fuchsia bags.
dotted purple bags contain 5 bright olive bags, 3 faded maroon bags.
plaid chartreuse bags contain 1 vibrant olive bag, 5 bright black bags, 1 clear tomato bag.
wavy orange bags contain 4 dark lavender bags, 4 posh white bags.
light lavender bags contain 4 drab olive bags, 5 dark magenta bags.
Given: a shiny gold bag, how many bag colors can eventually contain at least one shiny gold bag?
etc.
Given
light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.
Then
In the above rules, the following options would be available to you:
A bright white bag, which can hold your shiny gold bag directly.
A muted yellow bag, which can hold your shiny gold bag directly, plus some other bags.
A dark orange bag, which can hold bright white and muted yellow bags, either of which could then hold your shiny gold bag.
A light red bag, which can hold bright white and muted yellow bags, either of which could then hold your shiny gold bag.
So, in this example, the number of bag colors that can eventually contain at least one shiny gold bag is 4.
*/
import (
"fmt"
"strconv"
"strings"
goutils "github.com/simonski/goutils"
)
// Y2020D07P1 is the entrypoint for 2020 day 7 part 1.
func (app *Application) Y2020D07P1() {
	AOC_2020_07_part1_attempt1(app)
}

// Y2020D07P2 is the entrypoint for 2020 day 7 part 2.
func (app *Application) Y2020D07P2() {
	AOC_2020_07_part2_attempt1(app)
}

// AOC_2020_07_part1_attempt1 answers part 1: how many bag colours can
// transitively contain a "shiny gold" bag. Rules are read from the file
// named by the -input flag.
func AOC_2020_07_part1_attempt1(app *Application) {
	cli := app.CLI
	filename := cli.GetFileExistsOrDie("-input")
	g := NewBagGraphFromFilename(filename)
	g.Debug()
	fmt.Printf("There are %v possible combinations.\n", len(g.GetBagsThatCanContain("shiny gold")))
}

// AOC_2020_07_part2_attempt1 answers part 2: how many individual bags a
// "shiny gold" bag must contain in total.
func AOC_2020_07_part2_attempt1(app *Application) {
	cli := app.CLI
	filename := cli.GetFileExistsOrDie("-input")
	g := NewBagGraphFromFilename(filename)
	fmt.Printf("There are %v bags inside the gold.\n", g.GetTotalBagsContainedBy("shiny gold"))
}
// NewBagGraphFromFilename loads rule lines from a file and builds the graph.
func NewBagGraphFromFilename(filename string) *BagGraph {
	lines := goutils.Load_file_to_strings(filename)
	return NewBagGraphFromStrings(lines)
}

// NewBagGraphFromStrings parses puzzle rule lines of the form
//
//	light brown bags contain 1 shiny silver bag, 3 plaid olive bags, 1 clear tan bag.
//	mirrored beige bags contain no other bags.
//
// into a BagGraph linking each bag colour to its (count, child-colour) pairs.
func NewBagGraphFromStrings(lines []string) *BagGraph {
	graph := &BagGraph{bags: make(map[string]*Bag)}
	for index := range lines {
		line := lines[index]
		// Parsing strategy: strip the words "bags"/"bag" and the trailing
		// period, split on "contain", then split the children on commas.
		if strings.TrimSpace(line) == "" {
			// nothing
		} else if strings.HasSuffix(line, "no other bags.") {
			// Leaf rule: register the colour with no children.
			splits := strings.Split(line, "contain")
			colour := strings.TrimSpace(strings.ReplaceAll(splits[0], "bags", ""))
			graph.GetOrCreate(colour) // add this in
		} else {
			// Rule with children: normalise the line, then parse each
			// "<count> <colour>" entry after "contain".
			line = strings.ReplaceAll(line, "bags", "")
			line = strings.ReplaceAll(line, "bag", "")
			line = strings.ReplaceAll(line, ".", "")
			splits := strings.Split(line, "contain") // [ "light brown", "1 shiny silver, 3 plain olive, 1 clear tan"]
			colour := strings.TrimSpace(splits[0])
			bag := graph.GetOrCreate(colour)
			children := strings.Split(splits[1], ",") // [ "1 shiny silver", "3 plain olive", "1 clear tan" ]
			for childIndex := range children {
				child := strings.Split(strings.TrimSpace(children[childIndex]), " ")
				// NOTE(review): the Atoi error is ignored; a malformed
				// count silently becomes 0.
				childCount, _ := strconv.Atoi(child[0])
				childColour := strings.Join(child[1:], " ")
				childBag := graph.GetOrCreate(childColour)
				bag.AddChild(childBag, childCount)
			}
		}
	}
	return graph
}
// BagGraph indexes every bag colour to its Bag node.
type BagGraph struct {
	bags map[string]*Bag
}

// Debug prints every bag colour followed by its direct children.
func (graph *BagGraph) Debug() {
	for key := range graph.bags {
		fmt.Printf("%v\n", key)
		bag := graph.GetOrCreate(key)
		for index := range bag.Children {
			child := bag.Children[index]
			fmt.Printf(" %v\n", child.Colour)
		}
		fmt.Printf("\n")
	}
}

// GetOrCreate returns the Bag registered for colour, creating and
// registering a new empty one on first sight.
func (graph *BagGraph) GetOrCreate(colour string) *Bag {
	value, exists := graph.bags[colour]
	if exists {
		return value
	}
	b := NewBag(colour)
	graph.bags[colour] = b
	return b
}
// GetBagsThatCanContain returns every bag colour that can transitively
// contain a bag of the given colour (the part-1 answer is the map's size).
func (b *BagGraph) GetBagsThatCanContain(colour string) map[string]*Bag {
	bag := b.GetOrCreate(colour)
	// Walk 'up' through the parents, adding each ancestor to the map.
	p := make(map[string]*Bag)
	walkBagContainedBy(bag, p)
	return p
}

// GetTotalBagsContainedBy returns how many bags the given colour must hold
// in total; walkBagTotals counts the bag itself, hence the -1.
func (b *BagGraph) GetTotalBagsContainedBy(colour string) int {
	bag := b.GetOrCreate(colour)
	total := walkBagTotals(bag, 0) - 1
	return total
}
// walkBagContainedBy walks 'up' through every ancestor of bag, recording
// each ancestor colour in results; already-recorded bags act as memoised
// stopping points.
func walkBagContainedBy(bag *Bag, results map[string]*Bag) {
	_, exists := results[bag.Colour]
	if exists {
		return
	}
	for index := range bag.Parents {
		entry := bag.Parents[index]
		walkBagContainedBy(entry, results)
		results[entry.Colour] = entry
	}
}

// walkBagTotals returns the number of bags in the subtree rooted at bag,
// counting bag itself as 1 and each child subtree multiplied by how many
// copies of it the rule requires. depth only tracks recursion depth.
func walkBagTotals(bag *Bag, depth int) int {
	if len(bag.Children) > 0 {
		value := 1
		for index := range bag.Children {
			relation := bag.Children[index]
			value += relation.Number * walkBagTotals(relation.Bag, depth+1)
		}
		return value
	}
	return 1
}
// Bag is a simple tree/DAG node: a colour plus links to the count-annotated
// children it must contain and the parents that contain it.
type Bag struct {
	Colour string
	Children []*BagRelation
	Parents []*Bag
}

// BagRelation is an edge to a child Bag annotated with how many copies of
// that bag are required.
type BagRelation struct {
	*Bag
	Number int
}

// AddChild links child under b with the given multiplicity and records b as
// a parent of child, keeping both directions of the graph in sync.
func (b *Bag) AddChild(child *Bag, number int) {
	relation := &BagRelation{child, number}
	b.Children = append(b.Children, relation)
	child.Parents = append(child.Parents, b)
}

// NewBag returns an empty Bag for the given colour.
func NewBag(line string) *Bag {
	children := make([]*BagRelation, 0)
	parents := make([]*Bag, 0)
	b := &Bag{Colour: line, Children: children, Parents: parents}
	return b
} | app/aoc2020/aoc2020_07.go | 0.72027 | 0.526343 | aoc2020_07.go | starcoder |
package types
import (
"io"
"reflect"
"github.com/lyraproj/pcore/px"
)
// OptionalType represents Optional[T]: values that are either undef or
// instances of the wrapped type.
type OptionalType struct {
	typ px.Type
}

// OptionalMetaType is the Pcore meta type describing OptionalType itself.
var OptionalMetaType px.ObjectType

func init() {
	OptionalMetaType = newObjectType(`Pcore::OptionalType`,
		`Pcore::AnyType {
	attributes => {
		type => {
			type => Optional[Type],
			value => Any
		},
	}
}`, func(ctx px.Context, args []px.Value) px.Value {
			return newOptionalType2(args...)
		})
}
// DefaultOptionalType returns the shared Optional[Any] instance.
func DefaultOptionalType() *OptionalType {
	return optionalTypeDefault
}

// NewOptionalType wraps containedType in an Optional; a nil contained type
// or Any collapses to the shared default instance.
func NewOptionalType(containedType px.Type) *OptionalType {
	if containedType == nil || containedType == anyTypeDefault {
		return DefaultOptionalType()
	}
	return &OptionalType{containedType}
}

// newOptionalType2 builds an OptionalType from Pcore parameter values: no
// argument yields the default; a Type argument wraps that type; a String is
// shorthand for Optional[String['<value>']]. Anything else panics.
func newOptionalType2(args ...px.Value) *OptionalType {
	switch len(args) {
	case 0:
		return DefaultOptionalType()
	case 1:
		if containedType, ok := args[0].(px.Type); ok {
			return NewOptionalType(containedType)
		}
		if containedType, ok := args[0].(stringValue); ok {
			return newOptionalType3(string(containedType))
		}
		panic(illegalArgumentType(`Optional[]`, 0, `Variant[Type,String]`, args[0]))
	default:
		panic(illegalArgumentCount(`Optional[]`, `0 - 1`, len(args)))
	}
}

// newOptionalType3 creates Optional[String['str']] from a literal string.
func newOptionalType3(str string) *OptionalType {
	return &OptionalType{NewStringType(nil, str)}
}
// Accept visits this type and its contained type.
func (t *OptionalType) Accept(v px.Visitor, g px.Guard) {
	v(t)
	t.typ.Accept(v, g)
}

// ContainedType returns the wrapped type T of Optional[T].
func (t *OptionalType) ContainedType() px.Type {
	return t.typ
}

// Default returns the unparameterized Optional type.
func (t *OptionalType) Default() px.Type {
	return optionalTypeDefault
}

// Equals reports whether o is an OptionalType with an equal contained type.
func (t *OptionalType) Equals(o interface{}, g px.Guard) bool {
	if ot, ok := o.(*OptionalType); ok {
		return t.typ.Equals(ot.typ, g)
	}
	return false
}

// Generic returns Optional over the generic form of the contained type.
func (t *OptionalType) Generic() px.Type {
	return NewOptionalType(px.GenericType(t.typ))
}

// Get exposes the `type` attribute for Pcore reflection.
func (t *OptionalType) Get(key string) (value px.Value, ok bool) {
	switch key {
	case `type`:
		return t.typ, true
	}
	return nil, false
}

// IsAssignable accepts the Undef type or anything assignable to the
// contained type.
func (t *OptionalType) IsAssignable(o px.Type, g px.Guard) bool {
	return GuardedIsAssignable(o, undefTypeDefault, g) || GuardedIsAssignable(t.typ, o, g)
}

// IsInstance accepts undef or an instance of the contained type.
func (t *OptionalType) IsInstance(o px.Value, g px.Guard) bool {
	return o == undef || GuardedIsInstance(t.typ, o, g)
}

// MetaType returns the Pcore meta type for OptionalType.
func (t *OptionalType) MetaType() px.ObjectType {
	return OptionalMetaType
}

// Name returns the Pcore type name.
func (t *OptionalType) Name() string {
	return `Optional`
}

// Parameters returns the type parameters used when formatting this type:
// empty for Optional[Any], the literal string for Optional[String['s']],
// otherwise the contained type itself.
func (t *OptionalType) Parameters() []px.Value {
	if t.typ == DefaultAnyType() {
		return px.EmptyValues
	}
	if str, ok := t.typ.(*vcStringType); ok && str.value != `` {
		return []px.Value{stringValue(str.value)}
	}
	return []px.Value{t.typ}
}
// ReflectType returns the Go reflect.Type of the contained type, if any.
func (t *OptionalType) ReflectType(c px.Context) (reflect.Type, bool) {
	return ReflectType(c, t.typ)
}

// Resolve resolves the contained type in the given context and returns t.
func (t *OptionalType) Resolve(c px.Context) px.Type {
	t.typ = resolve(c, t.typ)
	return t
}

// CanSerializeAsString reports whether the contained type can round-trip
// through its string form.
func (t *OptionalType) CanSerializeAsString() bool {
	return canSerializeAsString(t.typ)
}

// SerializationString returns the string used when serializing this type.
func (t *OptionalType) SerializationString() string {
	return t.String()
}

// String returns the Pcore string representation of this type.
func (t *OptionalType) String() string {
	return px.ToString2(t, None)
}

// ToString writes the Pcore representation of this type to b.
func (t *OptionalType) ToString(b io.Writer, s px.FormatContext, g px.RDetect) {
	TypeToString(t, b, s, g)
}

// PType returns the type of this type.
func (t *OptionalType) PType() px.Type {
	return &TypeType{t}
}

// optionalTypeDefault is the shared Optional[Any] instance.
var optionalTypeDefault = &OptionalType{typ: anyTypeDefault} | types/optionaltype.go | 0.733929 | 0.503967 | optionaltype.go | starcoder |
package main
/*
Problem (LeetCode 341 — Flatten Nested List Iterator):
Given a nested list of integers, implement an iterator that flattens it.
Each element is either a single integer or a list whose elements may in
turn be integers or further nested lists.

NOTE(review): the original header here described a different problem
("Counting Bits", LeetCode 338) and did not match the code below, which
implements the nested-list iterator (see the NestedInteger interface
description that follows).

Source: LeetCode
Link: https://leetcode-cn.com/problems/flatten-nested-list-iterator
*/
/**
* // This is the interface that allows for creating nested lists.
* // You should not implement it, or speculate about its implementation
* type NestedInteger struct {
* }
*
* // Return true if this NestedInteger holds a single integer, rather than a nested list.
* func (this NestedInteger) IsInteger() bool {}
*
* // Return the single integer that this NestedInteger holds, if it holds a single integer
* // The result is undefined if this NestedInteger holds a nested list
* // So before calling this method, you should have a check
* func (this NestedInteger) GetInteger() int {}
*
* // Set this NestedInteger to hold a single integer.
* func (n *NestedInteger) SetInteger(value int) {}
*
* // Set this NestedInteger to hold a nested list and adds a nested integer to it.
* func (this *NestedInteger) Add(elem NestedInteger) {}
*
* // Return the nested list that this NestedInteger holds, if it holds a nested list
* // The list length is zero if this NestedInteger holds a single integer
* // You can access NestedInteger's List element directly if you want to modify it
* func (this NestedInteger) GetList() []*NestedInteger {}
*/
// NestedInteger is a local stub of the judge-provided type (described in the
// interface comment above) so this file compiles standalone; the method
// bodies are placeholders with no real behavior.
type NestedInteger struct{}
func (this NestedInteger) IsInteger() bool { return false }
func (this NestedInteger) GetInteger() int { return 0 }
func (n *NestedInteger) SetInteger(value int) {}
func (this *NestedInteger) Add(elem NestedInteger) {}
func (this NestedInteger) GetList() []*NestedInteger { return []*NestedInteger{} }
/*
Approach 1: eager flattening with an explicit work list.
(The original header called this breadth-first, but prepending each nested
list preserves element order, making it a depth-first flattening in effect.)
Time complexity: O(n)
Space complexity: O(n)
Reported: 8 ms runtime, 6 MB memory
*/
// NestedIterator pre-flattens the whole nested list into data at
// construction time.
type NestedIterator struct {
	data []int
}
// Constructor flattens nestedList: an integer is appended to data, while a
// nested list is expanded in place by prepending its elements to the
// remaining work list.
func Constructor(nestedList []*NestedInteger) *NestedIterator {
	if len(nestedList) == 0 {
		return &NestedIterator{
			data: []int{},
		}
	}
	data := []int{}
	for len(nestedList) > 0 {
		p := nestedList[0]
		nestedList = nestedList[1:]
		if p.IsInteger() {
			data = append(data, p.GetInteger())
		} else {
			nestedList = append(p.GetList(), nestedList...)
		}
	}
	return &NestedIterator{
		data: data,
	}
}
// Next returns and consumes the next flattened integer.
func (this *NestedIterator) Next() int {
	val := this.data[0]
	this.data = this.data[1:]
	return val
}
// HasNext reports whether any flattened integers remain.
func (this *NestedIterator) HasNext() bool {
	return len(this.data) != 0
}
/*
Approach 2: eager flattening via recursive depth-first traversal.
Time complexity: O(n)
Space complexity: O(n)
Reported: 4 ms runtime, 5.6 MB memory
*/
// NestedIterator2 pre-flattens the nested list with a recursive DFS.
type NestedIterator2 struct {
	data []int
}
// Constructor2 recursively walks the nested structure, appending every
// integer to data in order.
func Constructor2(nestedList []*NestedInteger) *NestedIterator2 {
	if len(nestedList) == 0 {
		return &NestedIterator2{
			data: []int{},
		}
	}
	data := []int{}
	var dfs func([]*NestedInteger)
	dfs = func(list []*NestedInteger) {
		for _, v := range list {
			if v.IsInteger() {
				data = append(data, v.GetInteger())
			} else {
				dfs(v.GetList())
			}
		}
	}
	dfs(nestedList)
	return &NestedIterator2{
		data: data,
	}
}
// Next returns and consumes the next flattened integer.
func (this *NestedIterator2) Next() int {
	val := this.data[0]
	this.data = this.data[1:]
	return val
}
// HasNext reports whether any flattened integers remain.
func (this *NestedIterator2) HasNext() bool {
	return len(this.data) != 0
}
/*
Approach 3: lazy flattening with a stack of lists.
HasNext unwinds the stack until an integer sits at the front of the top
list; Next then consumes it.
Time complexity: O(n) overall
Space complexity: O(n)
Reported: 8 ms runtime, 5.1 MB memory
*/
// NestedIterator3 keeps a stack of pending (sub)lists instead of
// pre-flattening everything.
type NestedIterator3 struct {
	stack [][]*NestedInteger
}
// Constructor3 seeds the stack with the whole input list.
func Constructor3(nestedList []*NestedInteger) *NestedIterator3 {
	return &NestedIterator3{
		stack: [][]*NestedInteger{nestedList},
	}
}
// Next consumes the integer at the front of the top list. Callers must call
// HasNext first, which guarantees that element is an integer.
func (this *NestedIterator3) Next() int {
	queue := this.stack[len(this.stack)-1]
	val := queue[0].GetInteger()
	this.stack[len(this.stack)-1] = queue[1:]
	return val
}
// HasNext pops exhausted lists and expands leading nested lists until an
// integer is exposed at the front of the top list, or the stack empties.
func (this *NestedIterator3) HasNext() bool {
	for len(this.stack) > 0 {
		queue := this.stack[len(this.stack)-1]
		if len(queue) == 0 {
			this.stack = this.stack[:len(this.stack)-1]
			continue
		}
		next := queue[0]
		if next.IsInteger() {
			return true
		}
		this.stack[len(this.stack)-1] = queue[1:]
		this.stack = append(this.stack, next.GetList())
	}
	return false
} | internal/leetcode/341.flatten-nested-list-iterator/main.go | 0.608012 | 0.534248 | main.go | starcoder |
package store
import (
"fmt"
"math/rand"
"time"
)
// Chooser is a strategy that knows how to choose the next trigram/word to use at a given moment.
type Chooser interface {
	// ChooseInitialTrigram chooses the initial trigram to start a text with, given a TrigramMap of available trigrams.
	ChooseInitialTrigram(availableTrigrams TrigramMap) Trigram
	// ChooseNextWord chooses the next word to use within a text, given the frequencies of each possible word to be used at this point.
	ChooseNextWord(possibleWords map[string]int) string
}

// RandomChooser implements a Chooser that makes random decisions.
type RandomChooser struct{}
// ChooseInitialTrigram chooses a random trigram to start the text: a random
// first word, then a random follower of it, then a random follower of that
// pair. Panics when trigramMap (or a nested map) is empty, since
// rand.Intn(0) panics — matching the previous behavior.
//
// Fix: the previous selection decremented the counter before testing it with
// `<= 0`, so the first visited map key was chosen for two counter values and
// the last key was never reachable by position. Selecting the key at exactly
// the drawn position makes the positional choice uniform without depending on
// Go's randomized map iteration order.
func (c *RandomChooser) ChooseInitialTrigram(trigramMap TrigramMap) Trigram {
	rand.Seed(time.Now().UnixNano())

	var word1 string
	r1 := rand.Intn(len(trigramMap))
	for word := range trigramMap {
		if r1 == 0 {
			word1 = word
			break
		}
		r1--
	}

	var word2 string
	r2 := rand.Intn(len(trigramMap[word1]))
	for word := range trigramMap[word1] {
		if r2 == 0 {
			word2 = word
			break
		}
		r2--
	}

	var word3 string
	r3 := rand.Intn(len(trigramMap[word1][word2]))
	for word := range trigramMap[word1][word2] {
		if r3 == 0 {
			word3 = word
			break
		}
		r3--
	}

	if word1 == "" || word2 == "" || word3 == "" {
		fmt.Println("WARNING: Could not randomly choose initial trigram to make text. Will use a trigram made of empty strings.")
	}
	return Trigram{word1, word2, word3}
}
// ChooseNextWord chooses randomly the next word to complete the text with,
// weighted by each word's observed frequency in the learned texts.
// Panics when wordFreqs is empty (rand.Intn(0)), matching previous behavior.
//
// Fix: the previous `partialFreq <= 0` test gave the first visited word an
// effective weight of frequency+1 and the last word frequency-1. With the
// draw in [0, totalFreqs) and a `< 0` test, a word of frequency f is picked
// for exactly f draw values, making the selection exactly
// frequency-proportional.
func (c *RandomChooser) ChooseNextWord(wordFreqs map[string]int) string {
	// Count total frequencies to size the random draw.
	totalFreqs := 0
	for _, v := range wordFreqs {
		totalFreqs += v
	}

	rand.Seed(time.Now().UnixNano())
	partialFreq := rand.Intn(totalFreqs)
	for word, freq := range wordFreqs {
		partialFreq -= freq
		if partialFreq < 0 {
			return word
		}
	}

	fmt.Println("WARNING: Could not choose the next word. Will use an empty string as the next word.")
	return ""
} | store/chooser.go | 0.693577 | 0.444565 | chooser.go | starcoder |
package quadedge
import (
"context"
"fmt"
"log"
"github.com/go-spatial/geom"
"github.com/go-spatial/geom/planar/intersect"
"github.com/go-spatial/geom/winding"
)
const (
	// precision is a package-level precision constant (6 decimal digits);
	// its use is not visible in this part of the file — see callers.
	precision = 6
)

// glbIdx is a package-global counter, presumably used to hand out unique
// identifiers to edges (assignment sites are outside this chunk).
var glbIdx uint64

// Edge describes a directional edge in a quadedge
type Edge struct {
	glbIdx uint64
	num int // position (0-3) of this edge within its QuadEdge's e array
	next *Edge // next ccw edge around the origin (the ONext ring)
	qe *QuadEdge
	v *geom.Point // origin vertex of this directed edge
}
// New will return a new edge that is part of a newly allocated QuadEdge
// (the quadedge's canonical e[0] entry).
func New() *Edge {
	ql := NewQEdge()
	return &ql.e[0]
}

// NewWithEndPoints creates a new edge with the given end points
func NewWithEndPoints(a, b *geom.Point) *Edge {
	e := New()
	e.EndPoints(a, b)
	return e
}

// QEdge returns the quadedge this edge is part of (nil for a nil edge).
func (e *Edge) QEdge() *QuadEdge {
	if e == nil {
		return nil
	}
	return e.qe
}

// Orig returns the origin end point (nil for a nil edge).
func (e *Edge) Orig() *geom.Point {
	if e == nil {
		return nil
	}
	return e.v
}

// Dest returns the destination end point (the origin of the symmetric edge).
func (e *Edge) Dest() *geom.Point {
	return e.Sym().Orig()
}

// EndPoints sets the origin of this edge and, via Sym, its destination.
func (e *Edge) EndPoints(org, dest *geom.Point) {
	e.v = org
	e.Sym().v = dest
}

// AsLine returns the Edge as a geom.Line from origin to destination; a
// missing endpoint is represented as geom.EmptyPoint.
func (e *Edge) AsLine() geom.Line {
	porig, pdest := e.Orig(), e.Dest()
	orig, dest := geom.EmptyPoint, geom.EmptyPoint
	if porig != nil {
		orig = *porig
	}
	if pdest != nil {
		dest = *pdest
	}
	return geom.Line{[2]float64(orig), [2]float64(dest)}
}
/******** Edge Algebra *********************************************************/
// Rot returns the dual of the current edge, directed from its right
// to its left.
func (e *Edge) Rot() *Edge {
	if e == nil {
		return nil
	}
	// The four edges of a QuadEdge are stored in rotation order, so the
	// dual is simply the next slot, wrapping around after slot 3.
	return &e.qe.e[(e.num+1)%4]
}
// InvRot returns the dual of the current edge, directed from its left
// to its right.
func (e *Edge) InvRot() *Edge {
	if e == nil {
		return nil
	}
	// Previous slot in rotation order; +3 mod 4 avoids a negative index.
	return &e.qe.e[(e.num+3)%4]
}
// Sym returns the edge from the destination to the origin of the current edge.
func (e *Edge) Sym() *Edge {
	if e == nil {
		return nil
	}
	// The symmetric edge sits two slots away in rotation order.
	return &e.qe.e[(e.num+2)%4]
}
// ONext returns the next ccw edge around (from) the origin of the current edge
func (e *Edge) ONext() *Edge {
if e == nil {
return nil
}
return e.next
}
// OPrev returns the next cw edge around (from) the origin of the current edge.
func (e *Edge) OPrev() *Edge {
return e.Rot().ONext().Rot()
}
// DNext returns the next ccw edge around (into) the destination of the current edge.
func (e *Edge) DNext() *Edge {
return e.Sym().ONext().Sym()
}
// DPrev returns the next cw edge around (into) the destination of the current edge.
func (e *Edge) DPrev() *Edge {
return e.InvRot().ONext().InvRot()
}
// LNext returns the ccw edge around the left face following the current edge.
func (e *Edge) LNext() *Edge {
return e.InvRot().ONext().Rot()
}
// LPrev returns the ccw edge around the left face before the current edge.
func (e *Edge) LPrev() *Edge {
return e.ONext().Sym()
}
// RNext returns the edge around the right face ccw following the current edge.
func (e *Edge) RNext() *Edge {
return e.Rot().ONext().InvRot()
}
// RPrev returns the edge around the right face ccw before the current edge.
func (e *Edge) RPrev() *Edge {
return e.Sym().ONext()
}
/*****************************************************************************/
/* Convenience functions to find edges */
/*****************************************************************************/
// FindONextDest will look for and return a ccw edge the given dest point, if it
// exists.
func (e *Edge) FindONextDest(dest geom.Point) *Edge {
if e == nil {
return nil
}
if cmp.GeomPointEqual(dest, *e.Dest()) {
return e
}
for ne := e.ONext(); ne != e; ne = ne.ONext() {
if cmp.GeomPointEqual(dest, *ne.Dest()) {
return ne
}
}
return nil
}
// DumpAllEdges dumps all the edges as a multiline string
func (e *Edge) DumpAllEdges() string {
var ml geom.MultiLineString
e.WalkAllONext(func(ee *Edge) bool {
ln := ee.AsLine()
ml = append(ml, [][2]float64{ln[0], ln[1]})
return true
})
str, err := wkt.EncodeString(ml)
if err != nil {
return err.Error()
}
return str
}
// WalkAllOPrev visits e and then every edge reached by repeatedly taking
// OPrev (clockwise around the origin). The walk stops when fn returns false
// or when an edge's glbIdx has already been visited (ring completed).
func (e *Edge) WalkAllOPrev(fn func(*Edge) (loop bool)) {
	visited := make(map[uint64]bool)
	for edge := e; edge != nil && !visited[edge.glbIdx]; edge = edge.OPrev() {
		if !fn(edge) {
			return
		}
		visited[edge.glbIdx] = true
	}
}
// WalkAllONext visits e and then every edge reached by repeatedly taking
// ONext (counter-clockwise around the origin). The walk stops when fn
// returns false or when an edge's glbIdx has already been visited.
func (e *Edge) WalkAllONext(fn func(*Edge) (loop bool)) {
	visited := make(map[uint64]bool)
	for edge := e; edge != nil && !visited[edge.glbIdx]; edge = edge.ONext() {
		if !fn(edge) {
			return
		}
		visited[edge.glbIdx] = true
	}
}
// IsEqual checks to see if the edges are the same
func (e *Edge) IsEqual(e1 *Edge) bool {
	if e == nil {
		return e1 == nil
	}
	if e1 == nil {
		return e == nil
	}
	// first let's get the edge numbers the same
	// Two edges are equal when they occupy the same slot of the same
	// QuadEdge: compare e against the edge in e1's QuadEdge at e's slot,
	// so edges with different slot numbers can never compare equal.
	return e == &e1.qe.e[e.num]
}
// Validate checks that the quad-edge structure around e is coherent:
// the Rot/Sym/InvRot algebra is self-consistent, every edge in the origin
// ring shares e's origin, ring destinations are unique, the ring winds
// consistently (per the given winding order), and no two ring segments
// self-intersect. Returns nil when valid, otherwise an ErrInvalid listing
// every problem found.
func Validate(e *Edge, order winding.Order) (err1 error) {
	if debug {
		log.Printf("\n\nValidating edge\n%v", wkt.MustEncode(e.AsLine()))
	}

	var err ErrInvalid

	// Applying Rot three times from e yields the left dual, the symmetric
	// edge, and the right dual; a fourth application must return to e.
	el := e.Rot()
	ed := el.Rot()
	er := ed.Rot()

	if ed.Sym() != e {
		// The Sym of Sym should be self
		err = append(err, "invalid Sym")
	}
	if ed != e.Sym() {
		err = append(err, fmt.Sprintf("invalid Rot: left.Rot != e.Sym %p : %p", el, e.Sym()))
	}
	if er != el.Sym() {
		err = append(err, fmt.Sprintf("invalid Rot: rot != e %p : %p", er, el.Sym()))
	}
	if e != el.InvRot() {
		err = append(err, "invalid Rot: rot != esym.InvRot")
	}
	if len(err) != 0 {
		return err
	}

	if e.Orig() == nil {
		err = append(err, "expected edge to have origin")
		return err
	}
	orig := *e.Orig()

	// points collects the destination of every edge in the origin ring so
	// duplicate destinations can be detected.
	points := []geom.Point{}
	didSee := func(pt geom.Point) (int, bool) {
		for i := range points {
			if cmp.GeomPointEqual(pt, points[i]) {
				return i, true
			}
		}
		return -1, false
	}
	// segs collects one line per ring edge for the self-intersection check.
	segs := []geom.Line{}

	var (
		onextCounterClockwiseCount int
		oprevClockwiseCount        int
	)

	if debug {
		log.Print("walking edges\n\n")
	}
	e.WalkAllONext(func(ee *Edge) bool {
		dest := ee.Dest()
		if dest == nil {
			err = append(err, "dest is nil")
			return false
		}
		if ee.Orig() == nil {
			err = append(err, "expected edge to have origin")
			return false
		}
		if debug {
			log.Printf("edge . (%p): %v", ee, wkt.MustEncode(ee.Dest()))
			log.Printf("edge.ONext(%p): %v", ee.ONext(), wkt.MustEncode(ee.ONext().Dest()))
		}
		if i, ok := didSee(*dest); ok {
			err = append(err, fmt.Sprintf("dest[%v] not unique -- %v : %v", wkt.MustEncode(*dest), i, wkt.MustEncode(points[i])))
			err = append(err, ee.DumpAllEdges())
			return false
		}
		points = append(points, *ee.Dest())
		if !cmp.GeomPointEqual(*ee.Orig(), orig) {
			err = append(
				err,
				fmt.Sprintf(
					"expected edge %v to have same origin %v instead of %v",
					len(points), wkt.MustEncode(orig),
					wkt.MustEncode(*ee.Orig()),
				),
			)
		}
		// BUG FIX: collect the segment of the edge being walked (ee), not
		// the starting edge (e). The original appended e.AsLine() on every
		// iteration, so segs held n copies of the same line and the
		// self-intersection check below could never find a real crossing.
		segs = append(segs, ee.AsLine())
		if debug {
			log.Printf("edge . : %v", wkt.MustEncode(ee.AsLine()))
			log.Printf("edge.ONext: %v", wkt.MustEncode(ee.ONext().AsLine()))
			log.Printf("edge.OPrev: %v", wkt.MustEncode(ee.OPrev().AsLine()))
		}
		// Check to see if ONext edge is not clockwise
		onextDest := ee.ONext().Dest()
		onextWinding := order.OfGeomPoints(orig, *dest, *onextDest)
		switch {
		case onextWinding.IsClockwise():
			onextCounterClockwiseCount--
		case onextWinding.IsCounterClockwise():
			onextCounterClockwiseCount++
		}
		oprevDest := ee.OPrev().Dest()
		oprevWinding := order.OfGeomPoints(orig, *dest, *oprevDest)
		switch {
		case oprevWinding.IsClockwise():
			oprevClockwiseCount++
		case oprevWinding.IsCounterClockwise():
			oprevClockwiseCount--
		}
		return true
	})
	if len(err) != 0 {
		return err
	}

	// Winding and intersection checks only make sense for rings of three
	// or more destinations.
	if len(points) > 2 {
		if oprevClockwiseCount <= 0 {
			err = append(err, "expected all points to be clockwise")
		}
		if onextCounterClockwiseCount <= 0 {
			err = append(err, "expected all points to be counter-clockwise")
		}

		// Now we need to check that there are no self intersecting lines.
		eq := intersect.NewEventQueue(segs)
		eq.CMP = cmp
		_ = eq.FindIntersects(
			context.Background(),
			true,
			func(src, dest int, pt [2]float64) error {
				// make sure the point is not an end point
				gpt := geom.Point(pt)
				if (cmp.GeomPointEqual(gpt, *segs[src].Point1()) || cmp.GeomPointEqual(gpt, *segs[src].Point2())) ||
					(cmp.GeomPointEqual(gpt, *segs[dest].Point1()) || cmp.GeomPointEqual(gpt, *segs[dest].Point2())) {
					return nil
				}
				// the second point in each segment should be the vertex we care about.
				// this is because of the way we build up the segments above.
				err = append(err,
					fmt.Sprintf("found self intersection for vertices %v and %v at %v",
						wkt.MustEncode(segs[src]),
						wkt.MustEncode(segs[dest]),
						pt,
					),
				)
				// Returning the accumulated error aborts the intersection scan.
				return err
			},
		)
	}
	if len(err) == 0 {
		return nil
	}
	return err
} | planar/triangulate/delaunay/quadedge/edge.go | 0.731251 | 0.587292 | edge.go | starcoder |
package dfa
import (
"github.com/flapflapio/simulator/core/errors"
"github.com/flapflapio/simulator/core/simulation"
"github.com/flapflapio/simulator/core/simulation/machine"
)
type DFASimulation struct {
machine *DFA
currentState *machine.State
input string
path []string
rejected bool
}
// Step performs a single transition of the simulation: it records the
// current state on the path, then consumes one input symbol. No-op when
// the simulation is already done (rejected or out of input).
func (dfa *DFASimulation) Step() {
	if dfa.Done() {
		return
	}
	// Log the state we are leaving before attempting the transition.
	dfa.logState()
	dfa.takeNextTransition()
	// If that transition finished the simulation, also record the final state.
	if dfa.Done() {
		dfa.logState()
	}
}
// Get the current status (state + other info) of a simulation
func (dfa *DFASimulation) Stat() simulation.Report {
return simulation.Report{
Result: simulation.Result{
Accepted: dfa.isAccepted(),
Path: dfa.path,
RemainingInput: dfa.input,
},
}
}
// Get the final result of your simulation.
// Returns a SimulationIncomplete error if the simulation is not done
func (dfa *DFASimulation) Result() (simulation.Result, error) {
if !dfa.Done() {
return simulation.Result{}, errors.ErrSimulationIncomplete
}
return simulation.Result{
Accepted: dfa.isAccepted(),
Path: dfa.path,
RemainingInput: dfa.input,
}, nil
}
// Check if a simulation is finished
func (dfa *DFASimulation) Done() bool {
return dfa.rejected || len(dfa.input) == 0
}
func (dfa *DFASimulation) takeNextTransition() {
if dfa.rejected {
return
}
next, err := dfa.nextTransition()
if err != nil {
dfa.rejected = true
return
}
dfa.takeTransition(next)
}
func (dfa *DFASimulation) takeTransition(t machine.Transition) {
dfa.currentState = t.End
dfa.input = dfa.input[1:]
}
func (dfa *DFASimulation) nextTransition() (machine.Transition, error) {
for _, t := range dfa.machine.Transitions {
if dfa.shouldTakeTransition(t) {
return t, nil
}
}
return machine.Transition{}, errors.ErrNoTransition
}
// shouldTakeTransition reports whether transition t applies: the simulation
// is still live, there is input left, t starts at the current state, and
// t's symbol matches the next input symbol.
// NOTE(review): the match compares single bytes (input[0] vs Symbol[0]), so
// this assumes one-byte (ASCII) symbols; multi-byte UTF-8 symbols would be
// compared by their first byte only — TODO confirm.
func (dfa *DFASimulation) shouldTakeTransition(t machine.Transition) bool {
	return !dfa.rejected &&
		len(dfa.input) > 0 &&
		len(t.Symbol) > 0 &&
		dfa.currentState == t.Start &&
		dfa.input[0] == t.Symbol[0]
}
func (dfa *DFASimulation) isAccepted() bool {
return !dfa.rejected &&
len(dfa.input) == 0 &&
dfa.currentState.Ending
}
// Appends the current state of the DFA onto the path
func (dfa *DFASimulation) logState() {
dfa.path = append(dfa.path, dfa.currentState.Id)
} | core/simulation/automata/dfa/dfa_simulation.go | 0.601477 | 0.403391 | dfa_simulation.go | starcoder |
package gointegration
import (
"fmt"
"net/http"
"regexp"
"strings"
"testing"
"github.com/btm6084/gojson"
"github.com/stretchr/testify/assert"
)
// ClientResponse holds the pertinent information returned from a third party request.
type ClientResponse struct {
Body string `json:"body"`
Cookies []*http.Cookie `json:"cookies"`
Error error `json:"error"`
Headers map[string]string `json:"headers"`
RequestTime string `json:"request_time"`
RequestURL string `json:"request_url"`
Status string `json:"status"`
StatusCode int `json:"status_code"`
}
// ExpectError is used to assert that a certain error condition has occured.
// A nil err asserts that no error happened; a non-nil err asserts that an
// equal error was recorded on the response.
func (c ClientResponse) ExpectError(t *testing.T, err error) ClientResponse {
	// Handle nil operands explicitly so assert never compares against nil.
	switch {
	case err == nil && c.Error == nil:
		// Both nil: expectation met, nothing to assert.
	case err == nil:
		assert.True(t, false, fmt.Sprintf("expected no error, got error `%v` instead", c.Error))
	case c.Error == nil:
		assert.True(t, false, fmt.Sprintf("expected error `%v`, had nil instead", err))
	default:
		assert.Equal(t, err, c.Error, fmt.Sprintf("expected error with message `%v`, got error with message `%v`", err, c.Error))
	}
	return c
}
// Expect allows custom assertions to be run.
// A error returned from the eval function will cause the test to be failed.
func (c ClientResponse) Expect(t *testing.T, eval func(c ClientResponse) error) ClientResponse {
if c.Error != nil {
return c
}
err := eval(c)
msg := ""
if err != nil {
msg = err.Error()
}
assert.Nil(t, err, msg)
return c
}
// ExpectStatus asserts that a specific status code was received.
func (c ClientResponse) ExpectStatus(t *testing.T, status int) ClientResponse {
if c.Error != nil {
return c
}
assert.Equal(t, status, c.StatusCode, fmt.Sprintf("expected statuscode '%d', got '%d' instead", status, c.StatusCode))
return c
}
// ExpectHeaderEmpty asserts that there was no header value set at a given key.
func (c ClientResponse) ExpectHeaderEmpty(t *testing.T, key string) ClientResponse {
if c.Error != nil {
return c
}
if _, isset := c.Headers[key]; !isset {
return c
}
assert.Fail(t, fmt.Sprintf("expected no header with key '%s' set", key))
return c
}
// ExpectHeaderValue asserts that the header value at the given key will match the given value.
func (c ClientResponse) ExpectHeaderValue(t *testing.T, key string, value string) ClientResponse {
if c.Error != nil {
return c
}
if _, isset := c.Headers[key]; !isset {
assert.True(t, isset, fmt.Sprintf("no header with key '%s' set", key))
return c
}
assert.Equal(t, value, c.Headers[key], fmt.Sprintf("expected header '%s' to have value '%s', got '%s' instead", key, value, c.Headers[key]))
return c
}
// OptionalHeaderValue differs from ExpectHeaderValue in that it can only fail if the given key exists. If the key is missing entirely, the test will pass.
func (c ClientResponse) OptionalHeaderValue(t *testing.T, key string, value string) ClientResponse {
if _, isset := c.Headers[key]; !isset {
return c
}
return c.ExpectHeaderValue(t, key, value)
}
// ExpectHeaderMatch asserts that the header value at the given key will match the given regular expression.
func (c ClientResponse) ExpectHeaderMatch(t *testing.T, key string, re *regexp.Regexp) ClientResponse {
if c.Error != nil {
return c
}
if _, isset := c.Headers[key]; !isset {
assert.True(t, isset, fmt.Sprintf("no header with key '%s' set", key))
return c
}
val := c.Headers[key]
assert.True(t, re.Match([]byte(val)), fmt.Sprintf("expect header match error: '%s' did not pass the regex test `%s`", val, re.String()))
return c
}
// OptionalHeaderMatch differs from ExpectHeaderMatch in that it can only fail if the given key exists. If the key is missing entirely, the test will pass.
func (c ClientResponse) OptionalHeaderMatch(t *testing.T, key string, re *regexp.Regexp) ClientResponse {
if _, isset := c.Headers[key]; !isset {
return c
}
return c.ExpectHeaderMatch(t, key, re)
}
// JSONResponse is a ClientResponse with added functionality specifically for dealing with json API responses.
type JSONResponse struct {
ClientResponse
Reader *gojson.JSONReader `json:"-"`
}
// ExpectError is used to assert that a certain error condition has occured.
func (c JSONResponse) ExpectError(t *testing.T, err error) JSONResponse {
// To avoid a panic inside assert, we will handle nil values explicitly
if err == nil {
if c.Error == nil {
return c
}
assert.True(t, false, fmt.Sprintf("expected no error, got error `%v` instead", c.Error))
return c
}
if c.Error == nil {
assert.True(t, false, fmt.Sprintf("expected error `%v`, had nil instead", err))
return c
}
assert.Equal(t, err, c.Error, fmt.Sprintf("expected error with message `%v`, got error with message `%v`", err, c.Error))
return c
}
// Expect allows custom assertions to be run.
// A error returned from the eval function will cause the test to be failed.
func (c JSONResponse) Expect(t *testing.T, eval func(c JSONResponse) error) JSONResponse {
if c.Error != nil {
return c
}
err := eval(c)
msg := ""
if err != nil {
msg = err.Error()
}
assert.Nil(t, err, msg)
return c
}
// ExpectStatus asserts that a specific status code was received.
func (c JSONResponse) ExpectStatus(t *testing.T, status int) JSONResponse {
if c.Error != nil {
return c
}
assert.Equal(t, status, c.StatusCode, fmt.Sprintf("expected statuscode '%d', got '%d' instead", status, c.StatusCode))
return c
}
// ExpectType asserts the data type at the given key will match the given JSON data type.
func (c JSONResponse) ExpectType(t *testing.T, key, typ string) JSONResponse {
if c.Error != nil {
return c
}
r := c.Reader.Get(key)
// Allow for int or float when it's not important.
if typ == "number" && (r.Type == gojson.JSONInt || r.Type == gojson.JSONFloat) {
return c
}
assert.Equal(t, typ, r.Type, fmt.Sprintf("expected value at key `%s` to be `%s`, got `%s` instead", key, typ, r.Type))
return c
}
// ExpectTypes asserts the data type at the given key will match the given JSON data types.
func (c JSONResponse) ExpectTypes(t *testing.T, key string, typ ...string) JSONResponse {
if c.Error != nil {
return c
}
r := c.Reader.Get(key)
for _, check := range typ {
if check == r.Type {
return c
}
// Allow for int or float when it's not important.
if check == "number" && (r.Type == gojson.JSONInt || r.Type == gojson.JSONFloat) {
return c
}
}
assert.Equal(t, typ, r.Type, fmt.Sprintf("expected value at key `%s` to be `%s`, got `%s` instead", key, strings.Join(typ, `, `), r.Type))
return c
}
// OptionalType differs from ExpectType in that it can only fail if the given key exists. If the key is missing entirely, the test will pass.
func (c JSONResponse) OptionalType(t *testing.T, key, typ string) JSONResponse {
if !c.Reader.KeyExists(key) {
return c
}
return c.ExpectType(t, key, typ)
}
// OptionalTypes differs from ExpectTypes in that it can only fail if the given key exists. If the key is missing entirely, the test will pass.
func (c JSONResponse) OptionalTypes(t *testing.T, key string, typ ...string) JSONResponse {
if !c.Reader.KeyExists(key) {
return c
}
return c.ExpectTypes(t, key, typ...)
}
// ExpectValue asserts the value at the given key will match the given value.
func (c JSONResponse) ExpectValue(t *testing.T, key string, b interface{}) JSONResponse {
if c.Error != nil {
return c
}
a := c.Reader.GetInterface(key)
assert.Equal(t, b, a, fmt.Sprintf("expected '%s' to equal '%s'", b, a))
return c
}
// ExpectValueString asserts the value at the given key will match the given value. All comparisons are done as string comparisons.
func (c JSONResponse) ExpectValueString(t *testing.T, key, b string) JSONResponse {
if c.Error != nil {
return c
}
a := c.Reader.GetString(key)
assert.Equal(t, b, a, fmt.Sprintf("expected '%s' to equal '%s'", b, a))
return c
}
// OptionalValue differs from ExpectValue in that it can only fail if the given key exists. If the key is missing entirely, the test will pass.
func (c JSONResponse) OptionalValue(t *testing.T, key string, b interface{}) JSONResponse {
if !c.Reader.KeyExists(key) {
return c
}
return c.ExpectValue(t, key, b)
}
// ExpectValueMatch asserts that the value at the given key will match the given regular expression.
func (c JSONResponse) ExpectValueMatch(t *testing.T, key string, re *regexp.Regexp) JSONResponse {
if c.Error != nil {
return c
}
val := c.Reader.GetString(key)
assert.True(t, re.Match([]byte(val)), fmt.Sprintf("expect value match error: '%s' did not pass the regex test `%s`", val, re.String()))
return c
}
// OptionalValueMatch differs from ExpectValueMatch in that it can only fail if the given key exists. If the key is missing entirely, the test will pass.
func (c JSONResponse) OptionalValueMatch(t *testing.T, key string, re *regexp.Regexp) JSONResponse {
if !c.Reader.KeyExists(key) {
return c
}
return c.ExpectValueMatch(t, key, re)
}
// ExpectValueCountCompare asserts the aggregate data type at the given key will have the given number of child nodes.
func (c JSONResponse) ExpectValueCountCompare(t *testing.T, key string, comp string, count int) JSONResponse {
if c.Error != nil {
return c
}
r := c.Reader.Get(key)
switch comp {
case "=":
assert.Equal(t, count, len(r.Keys), fmt.Sprintf("expected count to not be %d items, found %d", count, len(r.Keys)))
case "!=":
assert.NotEqual(t, count, len(r.Keys), fmt.Sprintf("expected exactly %d items, found %d", count, len(r.Keys)))
case ">":
assert.True(t, len(r.Keys) > count, fmt.Sprintf("expected more than %d items, found %d", count, len(r.Keys)))
case ">=":
assert.True(t, len(r.Keys) >= count, fmt.Sprintf("expected at least %d items, found %d", count, len(r.Keys)))
case "<":
assert.True(t, len(r.Keys) < count, fmt.Sprintf("expected less than %d items, found %d", count, len(r.Keys)))
case "<=":
assert.True(t, len(r.Keys) <= count, fmt.Sprintf("expected a minimum of %d items, found %d", count, len(r.Keys)))
}
return c
}
// ExpectValueCount asserts the aggregate data type at the given key will have the given number of child nodes.
func (c JSONResponse) ExpectValueCount(t *testing.T, key string, count int) JSONResponse {
if c.Error != nil {
return c
}
r := c.Reader.Get(key)
assert.Equal(t, count, len(r.Keys), fmt.Sprintf("expected exactly %d items, found %d", count, len(r.Keys)))
return c
}
// ExpectHeaderEmpty asserts that there was no header value set at a given key.
func (c JSONResponse) ExpectHeaderEmpty(t *testing.T, key string) JSONResponse {
if c.Error != nil {
return c
}
if _, isset := c.Headers[key]; !isset {
return c
}
assert.Fail(t, fmt.Sprintf("expected no header with key '%s' set", key))
return c
}
// ExpectHeaderValue asserts that the header value at the given key will match the given value.
func (c JSONResponse) ExpectHeaderValue(t *testing.T, key string, value string) JSONResponse {
if c.Error != nil {
return c
}
if _, isset := c.Headers[key]; !isset {
assert.True(t, isset, fmt.Sprintf("no header with key '%s' set", key))
return c
}
assert.Equal(t, value, c.Headers[key], fmt.Sprintf("expected header '%s' to have value '%s', got '%s' instead", key, value, c.Headers[key]))
return c
}
// OptionalHeaderValue differs from ExpectHeaderValue in that it can only fail if the given key exists. If the key is missing entirely, the test will pass.
func (c JSONResponse) OptionalHeaderValue(t *testing.T, key string, value string) JSONResponse {
if _, isset := c.Headers[key]; !isset {
return c
}
return c.ExpectHeaderValue(t, key, value)
}
// ExpectHeaderMatch asserts that the header value at the given key will match the given regular expression.
func (c JSONResponse) ExpectHeaderMatch(t *testing.T, key string, re *regexp.Regexp) JSONResponse {
if c.Error != nil {
return c
}
if _, isset := c.Headers[key]; !isset {
assert.True(t, isset, fmt.Sprintf("no header with key '%s' set", key))
return c
}
val := c.Headers[key]
assert.True(t, re.Match([]byte(val)), fmt.Sprintf("expect header match error: '%s' did not pass the regex test `%s`", val, re.String()))
return c
}
// OptionalHeaderMatch differs from ExpectHeaderMatch in that it can only fail if the given key exists. If the key is missing entirely, the test will pass.
func (c JSONResponse) OptionalHeaderMatch(t *testing.T, key string, re *regexp.Regexp) JSONResponse {
if _, isset := c.Headers[key]; !isset {
return c
}
return c.ExpectHeaderMatch(t, key, re)
} | response.go | 0.769773 | 0.436022 | response.go | starcoder |
package dax
// AttributeBuffer holds per-vertex attribute. There is one AttributeBuffer per
// kind of data we want to keep with each vertex.
type AttributeBuffer struct {
Name string
NumComponents int
Data []float32
}
func NewAttributeBuffer(name string, size int, NumComponents int) *AttributeBuffer {
ab := new(AttributeBuffer)
ab.Init(name, size, NumComponents)
return ab
}
func (ab *AttributeBuffer) Init(name string, size int, NumComponents int) {
data := make([]float32, size*NumComponents, size*NumComponents)
ab.InitFromData(name, data, NumComponents)
}
func (ab *AttributeBuffer) InitFromData(name string, data []float32, NumComponents int) {
ab.Name = name
ab.NumComponents = NumComponents
ab.Data = data
}
// Len returns the number of elements in the AttributeBuffer. Because each
// element has a number of components, the length of the Data array is then
// Len() * NumComponents.
func (ab *AttributeBuffer) Len() int {
return len(ab.Data) / ab.NumComponents
}
func (ab *AttributeBuffer) SetX(index int, x float32) {
ab.Data[index*ab.NumComponents+0] = x
}
func (ab *AttributeBuffer) GetX(index int) (x float32) {
x = ab.Data[index*ab.NumComponents+0]
return
}
func (ab *AttributeBuffer) SetXY(index int, x, y float32) {
ab.Data[index*ab.NumComponents+0] = x
ab.Data[index*ab.NumComponents+1] = y
}
func (ab *AttributeBuffer) GetXY(index int) (x, y float32) {
x = ab.Data[index*ab.NumComponents+0]
y = ab.Data[index*ab.NumComponents+1]
return
}
func (ab *AttributeBuffer) SetXYZ(index int, x, y, z float32) {
ab.Data[index*ab.NumComponents+0] = x
ab.Data[index*ab.NumComponents+1] = y
ab.Data[index*ab.NumComponents+2] = z
}
func (ab *AttributeBuffer) GetXYZ(index int) (x, y, z float32) {
x = ab.Data[index*ab.NumComponents+0]
y = ab.Data[index*ab.NumComponents+1]
z = ab.Data[index*ab.NumComponents+2]
return
}
func (ab *AttributeBuffer) SetXYZW(index int, x, y, z, w float32) {
ab.Data[index*ab.NumComponents+0] = x
ab.Data[index*ab.NumComponents+1] = y
ab.Data[index*ab.NumComponents+2] = z
ab.Data[index*ab.NumComponents+3] = w
}
func (ab *AttributeBuffer) GetXYZW(index int) (x, y, z, w float32) {
x = ab.Data[index*ab.NumComponents+0]
y = ab.Data[index*ab.NumComponents+1]
z = ab.Data[index*ab.NumComponents+2]
w = ab.Data[index*ab.NumComponents+3]
return
}
type IndexBuffer struct {
data16 []uint16
data32 []uint32
}
// Init allocates storage for size indices, choosing between 16-bit and
// 32-bit element types.
// NOTE(review): the threshold is based on the number of indices (size), not
// on the largest vertex index that will be stored; an index value > 65535
// written into a small buffer is truncated by the uint16 conversion in Set.
// TODO confirm the intended invariant (index count vs max index value).
func (ib *IndexBuffer) Init(size int) {
	if size > 65536 {
		ib.data32 = make([]uint32, size, size)
	} else {
		ib.data16 = make([]uint16, size, size)
	}
}
func (ib *IndexBuffer) Len() int {
if len(ib.data16) > 0 {
return len(ib.data16)
}
return len(ib.data32)
}
func (ib *IndexBuffer) InitFromData(data []uint) {
ib.Init(len(data))
for i, v := range data {
ib.Set(i, v)
}
}
func (ib *IndexBuffer) Set(nth int, index uint) {
if ib.data16 != nil {
ib.data16[nth] = uint16(index)
} else {
ib.data32[nth] = uint32(index)
}
}
// VertexMode defines how vertices should be interpreted by the draw call.
type VertexMode int
const (
// VertexModePoints draws a single dot for each vertex.
VertexModePoints VertexMode = iota
VertexModeLineStrip
VertexModeLineLoop
VertexModeLines
VertexModeTriangleStrip
VertexModeTriangleFan
VertexModeTriangles
)
type Mesh struct {
flags uint32
mode VertexMode
attributes []AttributeBuffer
indices IndexBuffer
}
func NewMesh() *Mesh {
m := &Mesh{
mode: VertexModeTriangles,
}
return m
}
// GetVertexMode returns how vertices in the Mesh are interpreted. New meshes
// default to VertexModeTriangles.
func (m *Mesh) GetVertexMode() VertexMode {
return m.mode
}
// SetVertexMode sets how vertices in the should be interpreted.
func (m *Mesh) SetVertexMode(mode VertexMode) {
m.mode = mode
}
// GetAttribute returns a pointer to the stored attribute buffer with the
// given name, or nil if the mesh has no such attribute.
func (m *Mesh) GetAttribute(name string) *AttributeBuffer {
	// BUG FIX: index the slice directly. Ranging by value and returning &ab
	// yields the address of a loop-local copy, so callers that mutate the
	// result (getNewAttribute / AddAttribute re-initializing an existing
	// attribute) never updated the buffer actually stored on the mesh.
	for i := range m.attributes {
		if m.attributes[i].Name == name {
			return &m.attributes[i]
		}
	}
	return nil
}
func (m *Mesh) getNewAttribute(name string) *AttributeBuffer {
ab := m.GetAttribute(name)
if ab != nil {
return ab
}
var buffer AttributeBuffer
m.attributes = append(m.attributes, buffer)
return &m.attributes[len(m.attributes)-1]
}
func (m *Mesh) AddAttribute(name string, data []float32, NumComponents int) {
ab := m.getNewAttribute(name)
ab.InitFromData(name, data, NumComponents)
}
func (m *Mesh) AddAttributeBuffer(buffer *AttributeBuffer) {
ab := m.getNewAttribute(buffer.Name)
*ab = *buffer
}
func (m *Mesh) HasIndices() bool {
return m.indices.data16 != nil || m.indices.data32 != nil
}
func (m *Mesh) AddIndices(data []uint) {
m.indices.InitFromData(data)
} | mesh.go | 0.803945 | 0.510252 | mesh.go | starcoder |
package wm
import (
"fmt"
"github.com/aaronjanse/3mux/ecma48"
)
// A Split splits a region of the screen into a areas reserved for multiple child nodes
type split struct {
verticallyStacked bool
elements []SizedNode
selectionIdx int
renderer ecma48.Renderer
renderRect Rect
selected bool
onDeath func(error)
Dead bool
newPane NewPaneFunc
u *Universe
}
func newSplit(renderer ecma48.Renderer, u *Universe, onDeath func(error), rect Rect, verticallyStacked bool, selectionIdx int, children []Node, newPane NewPaneFunc) *split {
s := &split{
verticallyStacked: verticallyStacked,
renderer: renderer,
onDeath: onDeath,
newPane: newPane,
selectionIdx: selectionIdx,
renderRect: rect,
u: u,
}
if children == nil {
children = []Node{newPane(renderer)}
}
childSize := 1 / float32(len(children))
s.elements = make([]SizedNode, len(children))
for i, child := range children {
child.SetDeathHandler(s.handleChildDeath)
s.elements[i] = SizedNode{
size: childSize,
contents: child,
}
}
return s
}
func (s *split) IsDead() bool {
return s.Dead
}
func (s *split) SetDeathHandler(onDeath func(error)) {
s.onDeath = onDeath
}
func (s *split) Serialize() string {
var out string
if s.verticallyStacked {
out = "VSplit"
} else {
out = "HSplit"
}
out += fmt.Sprintf("[%d]", s.selectionIdx)
out += "("
for i, e := range s.elements {
if i != 0 {
out += ", "
}
out += e.contents.Serialize()
}
out += ")"
return out
}
// setRenderRect updates the Split's renderRect cache after which it calls refreshRenderRect
// this for when a split is reshaped
func (s *split) SetRenderRect(fullscreen bool, x, y, w, h int) {
s.renderRect = Rect{x, y, w, h}
// NOTE: should we clear the screen?
s.refreshRenderRect(fullscreen)
}
func (s *split) GetRenderRect() Rect {
return s.renderRect
}
// refreshRenderRect recalculates the coordinates of a Split's elements and calls setRenderRect on each of its children
// this is for when one or more of a split's children are reshaped
func (s *split) refreshRenderRect(fullscreen bool) {
x := s.renderRect.X
y := s.renderRect.Y
w := s.renderRect.W
h := s.renderRect.H
s.redrawLines()
var area int
if s.verticallyStacked {
area = h
} else {
area = w
}
dividers := getDividerPositions(area, s.elements)
if len(s.elements) == 1 {
dividers = []int{area}
}
for idx, pos := range dividers {
lastPos := -1
if idx > 0 {
lastPos = dividers[idx-1]
}
childArea := pos - lastPos - 1
if idx == len(dividers)-1 && idx != 0 {
childArea = area - lastPos - 1
}
childNode := s.elements[idx]
if s.verticallyStacked {
childNode.contents.SetRenderRect(fullscreen, x, y+lastPos+1, w, childArea)
} else {
childNode.contents.SetRenderRect(fullscreen, x+lastPos+1, y, childArea, h)
}
}
}
func (s *split) redrawLines() {
x := s.renderRect.X
y := s.renderRect.Y
w := s.renderRect.W
h := s.renderRect.H
var area int
if s.verticallyStacked {
area = h
} else {
area = w
}
dividers := getDividerPositions(area, s.elements)
for idx, pos := range dividers {
if idx == len(dividers)-1 {
break
}
if s.verticallyStacked {
for i := 0; i < w; i++ {
s.renderer.HandleCh(ecma48.PositionedChar{
Rune: '─',
Cursor: ecma48.Cursor{X: x + i, Y: y + pos},
})
}
} else {
for j := 0; j < h; j++ {
s.renderer.HandleCh(ecma48.PositionedChar{
Rune: '│',
Cursor: ecma48.Cursor{X: x + pos, Y: y + j},
})
}
}
}
}
// getDividerPositions computes, for each child, the coordinate (within
// area) of the divider that follows it: a running sum of each child's
// fractional size scaled to the available area.
func getDividerPositions(area int, contents []SizedNode) []int {
	var positions []int
	offset := 0
	for _, node := range contents {
		offset += int(node.size * float32(area))
		positions = append(positions, offset)
	}
	return positions
} | wm/split.go | 0.514156 | 0.412234 | split.go | starcoder |
package main
import (
"math"
"math/rand"
"time"
)
type Boid struct {
position Vector2d
velocity Vector2d
id int
}
// calcAcceleration computes the boid's steering acceleration from its
// neighbours within viewRadius: alignment (match average velocity),
// cohesion (move toward average position) and separation (push away from
// close boids), plus a border-repulsion term. Shared state (boidMap, boids)
// is read under the read lock.
func (b *Boid) calcAcceleration() Vector2d {
	// Bounding box of grid cells to scan for neighbours.
	upper, lower := b.position.AddV(viewRadius), b.position.AddV(-viewRadius)
	sumVelocity := Vector2d{0, 0}
	sumPosition := Vector2d{0, 0}
	separation := Vector2d{0, 0}
	count := 0
	rwLock.RLock()
	// NOTE(review): the scan runs while i <= min(upper.x, screenWidth); if
	// boidMap is sized exactly screenWidth x screenHeight this indexes one
	// past the end at the border — confirm the map includes a border cell.
	for i := math.Max(lower.x, 0); i <= math.Min(upper.x, screenWidth); i++ {
		for j := math.Max(lower.y, 0); j <= math.Min(upper.y, screenHeight); j++ {
			// -1 marks an empty cell; skip ourselves as well.
			if otherBoidId := boidMap[int(i)][int(j)]; otherBoidId != -1 && otherBoidId != b.id {
				if distance := boids[otherBoidId].position.Distance(b.position); distance < viewRadius {
					count++
					sumVelocity = sumVelocity.Add(boids[otherBoidId].velocity)
					sumPosition = sumPosition.Add(boids[otherBoidId].position)
					// Separation contribution is inversely weighted by distance.
					separation = separation.Add(b.position.Subtract(boids[otherBoidId].position).DivisionV(distance))
				}
			}
		}
	}
	rwLock.RUnlock()
	// Border repulsion always applies.
	accel := Vector2d{b.borderBounce(b.position.x, screenWidth), b.borderBounce(b.position.y, screenHeight)}
	// With no neighbours — or with 80% probability — skip the flocking terms.
	if count == 0 || rand.Intn(100) > 20 {
		return accel
	}
	avgVelocity := sumVelocity.DivisionV(float64(count))
	avgPosition := sumPosition.DivisionV(float64(count))
	accelAlignment := avgVelocity.Subtract(b.velocity).MultiplyV(adjRate)
	accelCohesion := avgPosition.Subtract(b.position).MultiplyV(adjRate)
	accelSeparation := separation.MultiplyV(adjRate)
	accel = accel.Add(accelAlignment).Add(accelCohesion).Add(accelSeparation)
	// Occasionally damp the combined acceleration.
	if rand.Intn(100) > 90 {
		accel = accel.MultiplyV(adjRate)
	}
	return accel
}
// borderBounce returns a repulsion force along one axis, pushing the boid
// away from the screen edge when it is within viewRadius of it; the force
// magnitude grows as 1/distance. Returns 0 away from the borders.
// NOTE(review): pos exactly 0 (or exactly maxBorderPos) makes the division
// yield +/-Inf — presumably positions never land exactly on the border;
// verify against moveOne. TODO confirm.
func (b *Boid) borderBounce(pos, maxBorderPos float64) float64 {
	if pos < viewRadius {
		return 1 / pos
	} else if pos > maxBorderPos-viewRadius {
		// Negative: pos - maxBorderPos < 0 pushes back toward the interior.
		return 1 / (pos - maxBorderPos)
	}
	return 0
}
// moveOne advances the boid by one simulation tick: it recomputes the
// acceleration, integrates velocity (clamped per-axis via limit(-1, 1)) and
// position, and moves the boid's id to its new cell in the shared boidMap.
// The map and position updates happen under the write lock.
func (b * Boid) moveOne() {
	acc := b.calcAcceleration()
	rwLock.Lock()
	b.velocity = b.velocity.Add(acc).limit(-1, 1)
	// Vacate the old cell before claiming the new one.
	boidMap[int(b.position.x)][int(b.position.y)] = -1
	b.position = b.position.Add(b.velocity)
	// NOTE(review): indices are not re-clamped here; this relies on
	// borderBounce keeping positions inside the screen bounds — confirm
	// boidMap cannot be indexed out of range.
	boidMap[int(b.position.x)][int(b.position.y)] = b.id
	rwLock.Unlock()
}
// start runs the boid's simulation loop forever, moving it once roughly every
// millisecond (plus the cost of moveOne itself).
// NOTE(review): there is no stop/cancellation mechanism, so each boid
// goroutine lives for the lifetime of the process.
func (b *Boid) start() {
	for {
		b.moveOne()
		time.Sleep(1 * time.Millisecond)
	}
}
// createBoid constructs boid bid at a random on-screen position with a random
// per-axis velocity in [-1, 1), registers it in the shared boids slice and
// boidMap grid, and starts its simulation goroutine.
// NOTE(review): these shared-state writes are not guarded by rwLock — confirm
// this only runs during setup, before the simulation goroutines contend.
func createBoid(bid int) {
	b := Boid {
		position: Vector2d{x: rand.Float64()* screenWidth, y: rand.Float64()* screenHeight},
		velocity: Vector2d{x: (rand.Float64() * 2) - 1.0, y: (rand.Float64() * 2) - 1.0},
		id: bid,
	}
	boids[bid] = &b
	boidMap[int(b.position.x)][int(b.position.y)] = b.id
	go b.start()
} | boid.go | 0.524395 | 0.462473 | boid.go | starcoder |
package simulation
import (
"fmt"
"log"
"math/rand"
"strings"
_ "embed"
)
// alienNames holds the embedded newline-separated list of pre-generated
// alien names consumed by getAliens.
//go:embed alien-names.txt
var alienNames string
// Simulation stores the state of the simulation.
type Simulation struct {
	// iterationCounter counts completed Step calls; iterationLimit bounds them.
	iterationCounter int
	iterationLimit   int
	// worldMap represents the world graph as an adjacency list of cities.
	worldMap WorldMap
	// alienPositions maps alien name to its current position (city).
	alienPositions map[Alien]City
}
// NewSimulation builds a Simulation with the given iteration limit and a
// random initial placement of aliensCount aliens, working on a private copy
// of worldMap so the caller's map is never mutated. It returns an error when
// the map is empty.
func NewSimulation(iterationLimit, aliensCount int, worldMap WorldMap) (*Simulation, error) {
	if len(worldMap) == 0 {
		return nil, fmt.Errorf("map cannot be empty")
	}
	return &Simulation{
		iterationCounter: 0,
		iterationLimit:   iterationLimit,
		worldMap:         copyMap(worldMap),
		alienPositions:   generateAlienPlacement(aliensCount, worldMap),
	}, nil
}
// Run executes simulation steps until a stop condition is met and returns
// the resulting WorldMap. The returned error is currently always nil.
func (s *Simulation) Run() (WorldMap, error) {
	log.Println("Alien invasion started!")
	for {
		if s.ShouldStop() {
			break
		}
		s.Step()
	}
	log.Println("Alien invasion finished!")
	return s.worldMap, nil
}
// ShouldStop reports whether the simulation is finished: the iteration
// budget is spent, every alien has been destroyed, or no city remains.
func (s *Simulation) ShouldStop() bool {
	if s.iterationCounter >= s.iterationLimit {
		return true
	}
	return len(s.alienPositions) == 0 || len(s.worldMap) == 0
}
// Step advances the simulation one iteration: every alien moves to a random
// neighboring city, then the fight rules are evaluated. The very first call
// additionally evaluates the rules for the initial placement.
func (s *Simulation) Step() {
	if s.iterationCounter == 0 {
		// The random starting placement may already co-locate aliens.
		s.evaluateRules()
	}
	s.updateAlienPositions()
	s.evaluateRules()
	s.iterationCounter++
}
// generateAlienPlacement assigns each of aliensCount aliens to a uniformly
// random city of worldMap (several aliens may land in the same city).
func generateAlienPlacement(aliensCount int, worldMap WorldMap) AlienPositions {
	cities := make([]City, 0, len(worldMap))
	for city := range worldMap {
		cities = append(cities, city)
	}
	positions := make(AlienPositions, aliensCount)
	for _, alien := range getAliens(aliensCount) {
		positions[alien] = cities[rand.Intn(len(cities))]
	}
	return positions
}
// getAliens returns a slice of aliens with a provided count.
// Pre-generated names are returned for up to 75 aliens (one per line of the
// embedded alien-names.txt).
// For greater counts, aliens are named ["Alien 1", "Alien 2",...]
func getAliens(count int) []Alien {
	result := make([]Alien, count)
	if count <= 75 {
		// Assumes the embedded file contains at least 75 lines — a shorter
		// file would panic on the index below. TODO confirm.
		names := strings.Split(strings.TrimSpace(alienNames), "\n")
		for i := 0; i < count; i++ {
			result[i] = Alien(names[i])
		}
	} else {
		for i := 0; i < count; i++ {
			result[i] = Alien(fmt.Sprintf("Alien %d", i+1))
		}
	}
	return result
}
// updateAlienPositions moves every alien to a randomly chosen neighboring
// city; an alien whose city has no outgoing roads is trapped and stays put.
func (s *Simulation) updateAlienPositions() {
	next := make(AlienPositions)
	for alien, city := range s.alienPositions {
		neighbors := s.worldMap[city]
		// Collect the non-empty directions in N/S/E/W order.
		options := make([]City, 0, 4)
		for _, dest := range []City{neighbors.North, neighbors.South, neighbors.East, neighbors.West} {
			if dest != "" {
				options = append(options, dest)
			}
		}
		if len(options) == 0 {
			// Trapped: no road leads out of this city.
			next[alien] = city
			continue
		}
		next[alien] = options[rand.Intn(len(options))]
	}
	s.alienPositions = next
}
// evaluateRules check for cities where two or more aliens are currently located in.
// Such cities and aliens are deleted from the simulation state, together with
// every road leading into the destroyed city, and the fight is logged.
func (s *Simulation) evaluateRules() {
	// Count how many aliens currently stand in each city.
	cityAlienCount := make(map[City]int)
	for _, city := range s.alienPositions {
		cityAlienCount[city] += 1
	}
	// destroy aliens and cities
	for city, count := range cityAlienCount {
		if count >= 2 {
			// Delete every alien standing in the doomed city.
			// (Deleting map entries while ranging over the map is
			// well-defined in Go.)
			var destroyedAlienNames []string
			for alien, position := range s.alienPositions {
				if position == city {
					destroyedAlienNames = append(destroyedAlienNames, string(alien))
					delete(s.alienPositions, alien)
				}
			}
			// delete city
			delete(s.worldMap, city)
			// Delete all roads leading to this city by blanking the matching
			// direction on every remaining city's neighbor entry.
			for c, neighbors := range s.worldMap {
				if neighbors.North != "" && neighbors.North == city {
					neighbors.North = ""
				}
				if neighbors.South != "" && neighbors.South == city {
					neighbors.South = ""
				}
				if neighbors.East != "" && neighbors.East == city {
					neighbors.East = ""
				}
				if neighbors.West != "" && neighbors.West == city {
					neighbors.West = ""
				}
				s.worldMap[c] = neighbors
			}
			// count >= 2 guarantees at least two names, so the slicing below
			// is safe. Name order follows map iteration, i.e. it is
			// nondeterministic between runs.
			log.Printf(
				"%s has been destroyed by %s and %s!",
				city,
				strings.Join(destroyedAlienNames[:len(destroyedAlienNames)-1], ", "),
				destroyedAlienNames[len(destroyedAlienNames)-1],
			)
		}
	}
}
// copyMap returns a shallow copy of worldMap so the simulation can mutate
// its own map without affecting the caller's original.
func copyMap(worldMap WorldMap) WorldMap {
	newWorldMap := make(WorldMap, len(worldMap))
	for c, n := range worldMap {
		newWorldMap[c] = n
	}
	return newWorldMap
} | internal/simulation/simulation.go | 0.63624 | 0.510192 | simulation.go | starcoder |
package histogram
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
"github.com/360EntSecGroup-Skylar/excelize"
"github.com/grokify/gocharts/data/table"
"github.com/grokify/gocharts/data/table/format"
"github.com/grokify/gocharts/data/table/sheet"
"github.com/grokify/gocharts/data/timeseries"
"github.com/grokify/simplego/time/timeutil"
"github.com/grokify/simplego/type/stringsutil"
)
// HistogramSet is a named collection of histograms keyed by histogram name.
// KeyIsTime indicates that the keys are RFC-3339 timestamps, which several
// methods use to parse keys back into time.Time values.
type HistogramSet struct {
	Name string
	HistogramMap map[string]*Histogram
	KeyIsTime bool
}
// NewHistogramSet returns an empty, named set of histograms.
func NewHistogramSet(name string) *HistogramSet {
	hset := &HistogramSet{Name: name}
	hset.HistogramMap = map[string]*Histogram{}
	return hset
}
// NewHistogramSetWithData returns a named HistogramSet populated from a map
// of histogram name to bin-name/count pairs.
func NewHistogramSetWithData(name string, data map[string]map[string]int) *HistogramSet {
	hset := NewHistogramSet(name)
	for histName, bins := range data {
		for binName, binCount := range bins {
			hset.Add(histName, binName, binCount)
		}
	}
	return hset
}
// AddDateUidCount adds a count for a uid bin to the histogram keyed by the
// RFC-3339 rendering of dt, and marks the set's keys as time-based.
func (hset *HistogramSet) AddDateUidCount(dt time.Time, uid string, count int) {
	hset.Add(dt.Format(time.RFC3339), uid, count)
	// The previous `if !hset.KeyIsTime` guard around this assignment was
	// redundant; setting the flag unconditionally is equivalent.
	hset.KeyIsTime = true
}
// Add provides an easy method to add a histogram bin name
// and count for an existing or new histogram in the set.
func (hset *HistogramSet) Add(histName, binName string, binCount int) {
	hist, exists := hset.HistogramMap[histName]
	if !exists {
		hist = NewHistogram(histName)
		hset.HistogramMap[histName] = hist
	}
	hist.Add(binName, binCount)
}
// ItemCount returns the number of histograms in the set.
func (hset *HistogramSet) ItemCount() uint {
	n := len(hset.HistogramMap)
	return uint(n)
}
// ItemCounts returns a Histogram mapping each histogram name in the set to
// that histogram's number of bins (not the sum of its counts).
func (hset *HistogramSet) ItemCounts() *Histogram {
	histCount := NewHistogram("histogram counts counts")
	for histName, hist := range hset.HistogramMap {
		histCount.Bins[histName] = len(hist.Bins)
	}
	histCount.Inflate()
	return histCount
}
// ItemNames returns the sorted names of the histograms in the set.
// Alias for `HistogramNames()`.
func (hset *HistogramSet) ItemNames() []string {
	return hset.HistogramNames()
}
// HistogramNames returns the names of the histograms in the set, sorted
// lexically.
func (hset *HistogramSet) HistogramNames() []string {
	names := make([]string, 0, len(hset.HistogramMap))
	for name := range hset.HistogramMap {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}
// HistogramNameExists returns a boolean indicating if
// the supplied histogram name exists.
func (hset *HistogramSet) HistogramNameExists(histName string) bool {
	// Idiomatic comma-ok lookup instead of an if/return-true/return-false chain.
	_, ok := hset.HistogramMap[histName]
	return ok
}
// ValueSum returns the sum of all the histogram bin values.
func (hset *HistogramSet) ValueSum() int {
	total := 0
	for _, hist := range hset.HistogramMap {
		total += hist.ValueSum()
	}
	return total
}
// BinNameExists reports whether any histogram in the set contains a bin with
// the supplied name.
func (hset *HistogramSet) BinNameExists(binName string) bool {
	found := false
	for _, hist := range hset.HistogramMap {
		if hist.BinNameExists(binName) {
			found = true
			break
		}
	}
	return found
}
// BinNames returns the distinct bin names used across all the histograms,
// condensed and sorted by stringsutil.SliceCondenseSpace.
func (hset *HistogramSet) BinNames() []string {
	var all []string
	for _, hist := range hset.HistogramMap {
		all = append(all, hist.BinNames()...)
	}
	return stringsutil.SliceCondenseSpace(all, true, true)
}
// HistogramBinNames returns the bin names for the single histogram named
// setName, or an empty slice when no such histogram exists.
func (hset *HistogramSet) HistogramBinNames(setName string) []string {
	hist, ok := hset.HistogramMap[setName]
	if !ok {
		return []string{}
	}
	return hist.BinNames()
}
// LeafStats flattens the set into a single Histogram by summing bin counts
// across all histograms, discarding the per-histogram grouping. When name is
// empty the result is called "leaf stats".
func (hset *HistogramSet) LeafStats(name string) *Histogram {
	if name == "" {
		name = "leaf stats"
	}
	combined := NewHistogram(name)
	for _, hist := range hset.HistogramMap {
		for binName, count := range hist.Bins {
			combined.Add(binName, count)
		}
	}
	return combined
}
// ToTimeSeriesDistinct converts the set to a TimeSeries where each histogram
// key is parsed as an RFC-3339 time and the value is that histogram's number
// of distinct bins. It fails on the first key that does not parse.
func (hset *HistogramSet) ToTimeSeriesDistinct() (timeseries.TimeSeries, error) {
	ds := timeseries.NewTimeSeries(hset.Name)
	for rfc3339, hist := range hset.HistogramMap {
		dt, err := time.Parse(time.RFC3339, rfc3339)
		if err != nil {
			return ds, err
		}
		ds.AddItems(timeseries.TimeItem{
			SeriesName: hset.Name,
			Time: dt,
			Value: int64(len(hist.Bins))})
	}
	return ds, nil
}
// WriteXLSXMatrix creates an XLSX file where the first column is the
// histogram name and the other columns are the bin names. This is
// useful for easy visualization of a table and also creating
// charts such as grouped bar charts.
func (hset *HistogramSet) WriteXLSXMatrix(filename, sheetName, histColName string) error {
	matrix, err := hset.TableMatrix(sheetName, histColName)
	if err != nil {
		return err
	}
	return matrix.WriteXLSX(filename, sheetName)
}
// TableMatrix returns a `*table.Table` where the first column is the
// histogram name and the other columns are the bin names. This is
// useful for easy visualization of a table and also creating
// charts such as grouped bar charts. Bins missing from a histogram are
// written as "0".
func (hset *HistogramSet) TableMatrix(tableName, histColName string) (*table.Table, error) {
	// Default the table name to the set name and the first column header
	// to "Histogram Name".
	if len(strings.TrimSpace(tableName)) == 0 {
		tableName = strings.TrimSpace(hset.Name)
	}
	tbl := table.NewTable(tableName)
	if len(strings.TrimSpace(histColName)) == 0 {
		histColName = "Histogram Name"
	}
	// One column per distinct bin name, after the histogram-name column.
	binNames := hset.BinNames()
	tbl.Columns = append(tbl.Columns, histColName)
	tbl.Columns = append(tbl.Columns, binNames...)
	// Bin columns are ints; column 0 is a time when the keys are times.
	tbl.FormatMap = map[int]string{
		-1: table.FormatInt}
	if hset.KeyIsTime {
		tbl.FormatMap[0] = table.FormatTime
	} else {
		tbl.FormatMap[0] = table.FormatString
	}
	hnames := hset.HistogramNames()
	for _, hname := range hnames {
		row := []string{hname}
		hist, ok := hset.HistogramMap[hname]
		if !ok {
			// Should be unreachable: HistogramNames derives from HistogramMap.
			return nil, fmt.Errorf("histogram name present without histogram [%s]", hname)
		}
		for _, binName := range binNames {
			if binVal, ok := hist.Bins[binName]; ok {
				row = append(row, strconv.Itoa(binVal))
			} else {
				row = append(row, "0")
			}
		}
		tbl.Rows = append(tbl.Rows, row)
	}
	return &tbl, nil
}
// WriteXLSX creates an XLSX file where the first column is the
// histogram name, the second column is the bin name and the
// third column is the bin count. Blank sheet/column names fall back to
// defaults derived from the set and its histograms.
func (hset *HistogramSet) WriteXLSX(filename, sheetName, colName1, colName2, colNameCount string) error {
	f := excelize.NewFile()
	// Create a new sheet, defaulting its name to the set name.
	if len(strings.TrimSpace(sheetName)) == 0 {
		sheetName = strings.TrimSpace(hset.Name)
	}
	if len(sheetName) == 0 {
		sheetName = "Sheet0"
	}
	index := f.NewSheet(sheetName)
	colName1 = strings.TrimSpace(colName1)
	if len(colName1) == 0 {
		colName1 = hset.Name
	}
	if len(colName1) == 0 {
		colName1 = "Column1"
	}
	colName2 = strings.TrimSpace(colName2)
	// BUG FIX: this condition previously re-checked colName1, so an empty
	// colName2 was never defaulted from the histogram names.
	if len(colName2) == 0 {
		for _, fstats := range hset.HistogramMap {
			fstats.Name = strings.TrimSpace(fstats.Name)
			if len(fstats.Name) > 0 {
				colName2 = fstats.Name
				break
			}
		}
	}
	colNameCount = strings.TrimSpace(colNameCount)
	if len(colNameCount) == 0 {
		colNameCount = "Count"
	}
	header := []interface{}{colName1, colName2, colNameCount}
	sheet.SetRowValues(f, sheetName, 0, header)
	var err error
	rowIdx := uint(1)
	for fstatsName, fstats := range hset.HistogramMap {
		fstatsNameDt := time.Now()
		if hset.KeyIsTime {
			// Time-keyed sets write the key as a real time value.
			fstatsNameDt, err = time.Parse(time.RFC3339, fstatsName)
			if err != nil {
				return err
			}
		}
		for binName, binCount := range fstats.Bins {
			var rowVals []interface{}
			if hset.KeyIsTime {
				rowVals = []interface{}{fstatsNameDt, binName, binCount}
			} else {
				rowVals = []interface{}{fstatsName, binName, binCount}
			}
			sheet.SetRowValues(f, sheetName, rowIdx, rowVals)
			rowIdx++
		}
	}
	f.SetActiveSheet(index)
	// Delete the default sheet created by excelize so only ours remains.
	f.DeleteSheet(f.GetSheetName(0))
	// Save xlsx file by the given path.
	return f.SaveAs(filename)
}
// DatetimeKeyToQuarter converts a HistogramSet keyed by RFC-3339 times into
// one keyed by the RFC-3339 start of each calendar quarter, summing bin
// counts that land in the same quarter. It fails on the first unparsable key.
func (hset *HistogramSet) DatetimeKeyToQuarter(name string) (*HistogramSet, error) {
	fsetQtr := NewHistogramSet(name)
	for rfc3339, hist := range hset.HistogramMap {
		dt, err := time.Parse(time.RFC3339, rfc3339)
		if err != nil {
			return fsetQtr, err
		}
		// Collapse the timestamp to the start of its quarter.
		dt = timeutil.QuarterStart(dt)
		rfc3339Qtr := dt.Format(time.RFC3339)
		for binName, binCount := range hist.Bins {
			fsetQtr.Add(rfc3339Qtr, binName, binCount)
		}
	}
	return fsetQtr, nil
}
// DatetimeKeyCount returns a TimeSeries when the first key is a RFC3339 time
// and a sum of items is desired per time. The value at each time is the
// number of distinct bins in that histogram.
// NOTE(review): this duplicates ToTimeSeriesDistinct — consider delegating
// one to the other.
func (hset *HistogramSet) DatetimeKeyCount() (timeseries.TimeSeries, error) {
	ts := timeseries.NewTimeSeries(hset.Name)
	for rfc3339, hist := range hset.HistogramMap {
		dt, err := time.Parse(time.RFC3339, rfc3339)
		if err != nil {
			return ts, err
		}
		ts.AddItems(timeseries.TimeItem{
			SeriesName: hset.Name,
			Time: dt,
			Value: int64(len(hist.Bins))})
	}
	return ts, nil
}
// DatetimeKeyCountTable renders DatetimeKeyCount as a table using the given
// interval; countColName defaults to "Count" when blank.
func (hset *HistogramSet) DatetimeKeyCountTable(interval timeutil.Interval, countColName string) (table.Table, error) {
	ts, err := hset.DatetimeKeyCount()
	if err != nil {
		return table.NewTable(hset.Name), err
	}
	ts.Interval = interval
	if strings.TrimSpace(countColName) == "" {
		countColName = "Count"
	}
	return ts.ToTable(hset.Name, "", countColName, timeseries.TimeFormatRFC3339), nil
}
// HistogramSetTimeKeyCountWriteXLSX writes the per-time histogram counts to
// an XLSX file, formatting the first column as times and the rest as ints.
func (hset *HistogramSet) HistogramSetTimeKeyCountWriteXLSX(filename string, interval timeutil.Interval, countColName string) error {
	tbl, err := hset.DatetimeKeyCountTable(interval, countColName)
	if err != nil {
		return err
	}
	tbl.FormatFunc = format.FormatTimeAndInts
	return table.WriteXLSX(filename, &tbl)
} | data/histogram/histogram_set.go | 0.801237 | 0.505737 | histogram_set.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.