code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package model
import "fmt"
// Threshold defines how close two values can be and still be considered identical (e.g. for de-duplicating points).
// NOTE(review): this is an absolute epsilon — appropriate for coordinate-scale values; confirm for large magnitudes.
const Threshold = 0.0000001
// Circuit provides an abstract representation of a set of points (locations, vertices) for the TSP solver to interact with.
// This allows it to ignore whether the implementation is a set of N-dimensional points, a graph, or any other representation of points.
type Circuit interface {
	// FindNextVertexAndEdge determines the next vertex to add to the circuit, along with which edge it should be added to.
	// For example, in the ClosestGreedy algorithm this returns the vertex and edge with the minimum distance increase.
	// This should return (nil,nil) when the circuit is complete.
	FindNextVertexAndEdge() (CircuitVertex, CircuitEdge)
	// GetAttachedVertices returns all vertices that have been added to the circuit (either as part of BuildPerimeter or Update).
	// This returns them in the order they should be traversed as part of the circuit (ignoring any unattached vertices).
	GetAttachedVertices() []CircuitVertex
	// GetLength returns the length of the circuit (at the current stage of processing).
	GetLength() float64
	// GetUnattachedVertices returns the set of vertices that have not been added to the circuit yet. (all of these points are internal to the perimeter)
	GetUnattachedVertices() map[CircuitVertex]bool
	// Update adds the supplied vertex to circuit by splitting the supplied edge and creating two edges with the supplied point as the common vertex of the edges.
	// Implementations are expected to move the vertex out of the unattached set as part of this call.
	Update(vertexToAdd CircuitVertex, edgeToSplit CircuitEdge)
}
// CircuitVertex provides an abstract representation of a single point (location, vertex) for the TSP solver to interact with.
// It embeds Equal (project-defined) and fmt.Stringer so vertices can be compared and printed.
type CircuitVertex interface {
	Equal
	fmt.Stringer
	// DistanceTo returns the distance between the two vertices; this should always be a positive number.
	DistanceTo(other CircuitVertex) float64
	// EdgeTo creates a new CircuitEdge from this point (start) to the supplied point (end).
	EdgeTo(end CircuitVertex) CircuitEdge
}
// CircuitEdge provides an abstract representation of an edge for the TSP solver to interact with.
// (Fixed: comment previously mis-named this interface "CircuitVertex".)
type CircuitEdge interface {
	Equal
	fmt.Stringer
	// DistanceIncrease returns the difference in length between the edge
	// and the two edges formed by inserting the vertex between the edge's start and end.
	// For example, if start->end has a length of 5, start->vertex has a length of 3,
	// and vertex->end has a length of 6, this will return 4 (i.e. 6 + 3 - 5)
	DistanceIncrease(vertex CircuitVertex) float64
	// GetEnd returns the ending point of this edge.
	GetEnd() CircuitVertex
	// GetLength returns the distance between the start and end vertices.
	GetLength() float64
	// GetStart returns the starting point of this edge.
	GetStart() CircuitVertex
	// Intersects checks if the two edges go through at least one identical point.
	// Note: Edges may share multiple points if they are co-linear, or in the use-case of graphs.
	Intersects(other CircuitEdge) bool
	// Merge creates a new edge starting from this edge's start vertex and ending at the supplied edge's end vertex.
	Merge(CircuitEdge) CircuitEdge
	// Split creates two new edges "start-to-vertex" and "vertex-to-end" based on this edge and the supplied vertex.
	Split(vertex CircuitVertex) (CircuitEdge, CircuitEdge)
}
// Deduplicator is a function that takes in an array of vertices, and returns a copy of the array without duplicate points.
// Implementations typically use Threshold to decide whether two points are "identical".
type Deduplicator func([]CircuitVertex) []CircuitVertex
// PerimeterBuilder creates an initial circuit, using the minimum vertices required to fully enclose the other (interior) vertices.
// For example, when using 2-D points, this constructs a convex polygon such that all points are either vertices or inside the polygon.
// This returns the perimeter as an array of edges, and a map of unattached vertices.
type PerimeterBuilder func(verticesArg []CircuitVertex) (edges []CircuitEdge, unattached map[CircuitVertex]bool) | model/circuit.go | 0.849035 | 0.867261 | circuit.go | starcoder |
package main
import (
"bufio"
"fmt"
"math"
"os"
"strconv"
"strings"
)
// sum returns the total of all values in numbers (0 for an empty or nil slice).
func sum(numbers []int) int {
	total := 0
	for _, n := range numbers {
		total += n
	}
	return total
}
// indexOf returns the position of needle within haystack, or -1 if absent.
func indexOf(needle int, haystack []int) int {
	for i := 0; i < len(haystack); i++ {
		if haystack[i] == needle {
			return i
		}
	}
	return -1
}
// min returns the index of the smallest value in numbers (0 for an empty slice).
func min(numbers []int) int {
	index, best := 0, math.MaxInt
	for i, v := range numbers {
		if v < best {
			index, best = i, v
		}
	}
	return index
}
// max returns the index of the largest value in numbers (0 for an empty slice).
func max(numbers []int) int {
	index, best := 0, math.MinInt
	for i, v := range numbers {
		if v > best {
			index, best = i, v
		}
	}
	return index
}
// Create the boards given the lines of numbers.
func makeBoards(scanner *bufio.Scanner) (boards [][][]int) {
boards = make([][][]int, 0)
board := make([][]int, 0)
var numbers []int
for scanner.Scan() {
line := scanner.Text()
if len(strings.TrimSpace(line)) == 0 {
if len(board) > 0 {
boards = append(boards, board)
board = make([][]int, 0, len(board))
}
continue
}
numbers = make([]int, 0)
for _, key := range strings.Split(line, " ") {
if len(key) == 0 {
continue
}
key, _ := strconv.Atoi(key)
numbers = append(numbers, key)
}
board = append(board, numbers)
}
if len(board) > 0 {
boards = append(boards, board)
}
return boards
}
// Finds the lowest index to win for each board.
// For every board, the winning index is the smallest "draw index" at which any
// complete row or column is fully marked — i.e. the minimum over all lines of
// the maximum index (in numbers) of that line's values.
// NOTE(review): assumes every board value appears in numbers; indexOf returns
// -1 for missing values, which would make a line appear to win immediately.
func solve(numbers []int, boards [][][]int) (winningIndex []int) {
	winningIndex = make([]int, 0, len(boards))
	for _, board := range boards {
		lowestIdx := len(numbers)
		highestColIdx := make([]int, 0)
		// Get lowest index to win for row.
		for _, row := range board {
			highestIdx := -1
			for colIdx, value := range row {
				valIdx := indexOf(value, numbers)
				if valIdx > highestIdx {
					highestIdx = valIdx
				}
				// Track, per column, the highest draw index seen so far.
				if colIdx == len(highestColIdx) {
					highestColIdx = append(highestColIdx, valIdx)
				} else if valIdx > highestColIdx[colIdx] {
					highestColIdx[colIdx] = valIdx
				}
			}
			if highestIdx < lowestIdx {
				lowestIdx = highestIdx
			}
		}
		// Compare with lowest index to win for column.
		for _, colIdx := range highestColIdx {
			if colIdx < lowestIdx {
				lowestIdx = colIdx
			}
		}
		winningIndex = append(winningIndex, lowestIdx)
	}
	return winningIndex
}
// Finds unmarked numbers in a board.
func findUnmarked(board [][]int, marked []int) (unmarked []int) {
unmarked = make([]int, 0)
for _, rows := range board {
for _, value := range rows {
if indexOf(value, marked) < 0 {
unmarked = append(unmarked, value)
}
}
}
return unmarked
}
func main() {
input, _ := os.Open("var/aoc4_input.txt")
defer input.Close()
scanner := bufio.NewScanner(input)
scanner.Scan()
numberStr := strings.Split(scanner.Text(), ",")
numbers := make([]int, 0, len(numberStr))
for _, numStr := range numberStr {
numStr, _ := strconv.Atoi(numStr)
numbers = append(numbers, numStr)
}
boards := makeBoards(scanner)
results := solve(numbers, boards)
firstIdx, lastIdx := min(results), max(results)
firstWinningNum, lastWinningNum := numbers[results[firstIdx]], numbers[results[lastIdx]]
firstBoardUnmarked := findUnmarked(boards[firstIdx], numbers[:results[firstIdx]+1])
lastBoardUnmarked := findUnmarked(boards[lastIdx], numbers[:results[lastIdx]+1])
firstBoardUnmarkedSum, lastBoardUnmarkedSum := sum(firstBoardUnmarked), sum(lastBoardUnmarked)
fmt.Printf("Score at first board: %d * %d = %d\n", firstWinningNum, firstBoardUnmarkedSum, firstWinningNum*firstBoardUnmarkedSum)
fmt.Printf("Score at last board: %d * %d = %d\n", lastWinningNum, lastBoardUnmarkedSum, lastWinningNum*lastBoardUnmarkedSum)
} | aoc4.go | 0.63273 | 0.44746 | aoc4.go | starcoder |
package cryptderivekey
import (
"crypto/md5"
"crypto/sha1"
"fmt"
"strings"
)
func md5HashSlice(data []byte) []byte {
md5Hash := md5.Sum(data)
return md5Hash[:]
}
func sha1HashSlice(data []byte) []byte {
sha1Hash := sha1.Sum(data)
return sha1Hash[:]
}
func CryptDeriveKey(key []byte, hashType string) ([]byte, error) {
// https://docs.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-cryptderivekey
// https://www.fireeye.com/content/dam/fireeye-www/global/en/blog/threat-research/flareon2016/challenge2-solution.pdf
var key_hash, b0_hash, b1_hash []byte
// Initialise byte arrays with required padding bytes
b0 := [64]byte{0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36}
b1 := [64]byte{0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C}
if strings.ToLower(hashType) == "md5" {
key_hash = md5HashSlice(key)
} else if strings.ToLower(hashType) == "sha1" {
key_hash = sha1HashSlice(key)
} else {
return []byte{0}, fmt.Errorf("%s is not a valid hash type to derive key for.", hashType)
}
for i, x := range key_hash {
b0[i] = x ^ 0x36
b1[i] = x ^ 0x5C
}
if strings.ToLower(hashType) == "md5" {
b0_hash = md5HashSlice(b0[:])
b1_hash = md5HashSlice(b1[:])
} else if strings.ToLower(hashType) == "sha1" {
b0_hash = sha1HashSlice(b0[:])
b1_hash = sha1HashSlice(b1[:])
}
// Return the whole derived key, which the amount required can be determined by the caller
// Need the '...' at the end to tell it to append the whole slice
return append(b0_hash[:], b1_hash[:]...), nil
} | cryptderivekey.go | 0.627837 | 0.472988 | cryptderivekey.go | starcoder |
package lcs
// Myers represents an implementation of Myer's longest common subsequence
// and shortest edit script algorithm as as documented in:
// An O(ND) Difference Algorithm and Its Variations, 1986.
type Myers[T comparable] struct {
	// a and b are the two sequences being compared.
	a, b []T
	/* na, nb int
	slicer func(v interface{}, from, to int32) interface{}
	edits func(v interface{}, op EditOp, cx, cy int) []Edit*/
	// NOTE(review): the commented-out fields above are leftovers from the
	// pre-generics implementation and look safe to delete.
}
// NewMyers returns a new instance of Myers. The implementation supports slices
// of bytes/uint8, rune/int32 and int64s.
// NOTE(review): with generics any comparable element type works now.
func NewMyers[T comparable](a, b []T) *Myers[T] {
	return &Myers[T]{a: a, b: b}
}
// Details on the implementation and details of the algorithms can be found
// here:
// http://xmailserver.org/diff2.pdf
// http://simplygenius.net/Article/DiffTutorial1
// https://blog.robertelder.org/diff-algorithm/

// forwardSearch runs one forward pass (edit distance d) of the bidirectional
// middle-snake search over all diagonals k in [-d, d]. forward/reverse record
// the furthest-reaching x per diagonal (indexed by offset+k). If a forward
// snake overlaps a reverse one, ok is true and nd is the total edit distance,
// (mx,my)-(x,y) the middle snake.
func forwardSearch[T comparable](a, b []T, d int32, forward, reverse []int32, offset int32) (nd, mx, my, x, y int32, ok bool) {
	na, nb := int32(len(a)), int32(len(b))
	delta := na - nb
	odd := delta%2 != 0
	for k := -d; k <= d; k += 2 {
		// Edge cases are:
		//   k == -d - move down
		//   k == d * 2 - move right
		// Normal case:
		//   move down or right depending on how far the move would be.
		if k == -d || k != d && forward[offset+k-1] < forward[offset+k+1] {
			x = forward[offset+k+1]
		} else {
			x = forward[offset+k-1] + 1
		}
		y = x - k
		mx, my = x, y
		// Follow the diagonal (the "snake") while elements match.
		for x < na && y < nb && a[x] == b[y] {
			x++
			y++
		}
		forward[offset+k] = x
		// Can this snake potentially overlap with one of the reverse ones?
		// Going forward only odd paths can be the longest ones.
		if odd && (-(k - delta)) >= -(d-1) && (-(k - delta)) <= (d-1) {
			// Doe this snake overlap with one of the reverse ones? If so,
			// the last snake is the longest one.
			if forward[offset+k]+reverse[offset-(k-delta)] >= na {
				return 2*d - 1, mx, my, x, y, true
			}
		}
	}
	return 0, 0, 0, 0, 0, false
}
// reverseSearch is the mirror image of forwardSearch: it extends the
// furthest-reaching reverse paths (working from the ends of a and b) for edit
// distance d, and reports a middle snake when a reverse snake overlaps a
// forward one. Coordinates are translated back into forward orientation on
// return.
func reverseSearch[T comparable](a, b []T, d int32, forward, reverse []int32, offset int32) (nd, mx, my, x, y int32, ok bool) {
	na, nb := int32(len(a)), int32(len(b))
	delta := na - nb
	even := delta%2 == 0
	for k := -d; k <= d; k += 2 {
		// Edge cases as per forward search, but looking at the reverse
		// stored values.
		if k == -d || k != d && reverse[offset+k-1] < reverse[offset+k+1] {
			x = reverse[offset+k+1]
		} else {
			x = reverse[offset+k-1] + 1
		}
		y = x - k
		mx, my = x, y
		// Snake backwards from the ends of a and b.
		for x < na && y < nb && a[na-x-1] == b[nb-y-1] {
			x++
			y++
		}
		reverse[offset+k] = x
		// Can this snake potentially overlap with one of the forward ones?
		// Going backward only even paths can be the longest ones.
		if even && (-(k - delta)) >= -d && (-(k - delta)) <= d {
			// Doe this snake overlap with one of the forward ones? If so,
			// the last snake is the longest one.
			if reverse[offset+k]+forward[offset-(k-delta)] >= na {
				return 2 * d, na - x, nb - y, na - mx, nb - my, true
			}
		}
	}
	return 0, 0, 0, 0, 0, false
}
// middleSnake locates the "middle snake" of an optimal path through the edit
// graph of a and b by alternating forward and reverse frontier expansions
// until they overlap. It returns the edit distance d and the snake endpoints
// (x1,y1)-(x2,y2). The frontiers must overlap by at most D/2 iterations, so
// reaching the end of the loop is impossible for valid input.
func middleSnake[T comparable](a, b []T) (d, x1, y1, x2, y2 int32) {
	max := int32(len(a) + len(b)) // max # edits (delete all a, insert all of b)
	// forward and reverse are accessed using k which is in the
	// range -d .. +d, hence offset must be added to k.
	forward := make([]int32, max+2)
	reverse := make([]int32, max+2)
	offset := int32(len(forward) / 2)
	// Only need to search for D halfway through the table.
	halfway := max / 2
	if max%2 != 0 {
		halfway++
	}
	for d := int32(0); d <= halfway; d++ {
		if nd, mx, my, x, y, ok := forwardSearch(a, b, d, forward, reverse, offset); ok {
			return nd, mx, my, x, y
		}
		if nd, mx, my, x, y, ok := reverseSearch(a, b, d, forward, reverse, offset); ok {
			return nd, mx, my, x, y
		}
	}
	panic("unreachable")
}
// myersLCS computes the longest common subsequence of a and b by
// divide-and-conquer on the middle snake: recurse on the sub-problems before
// and after the snake, keeping the snake itself (which is common to both).
// When the edit distance is 0 or 1 the shorter input is itself the LCS.
func myersLCS[T comparable](a, b []T) []T {
	if len(a) == 0 || len(b) == 0 {
		return []T{}
	}
	d, x, y, u, v := middleSnake(a, b)
	if d > 1 {
		nd := myersLCS(a[:x], b[:y])
		nd = append(nd, a[x:u]...)
		nd = append(nd, myersLCS(a[u:], b[v:])...)
		return nd
	}
	// d <= 1: the shorter sequence is the LCS; return a copy.
	if len(b) > len(a) {
		return append([]T{}, a...)
	}
	return append([]T{}, b...)
}
// LCS returns the longest common subsquence.
// The result is a fresh slice; the receiver's inputs are not modified.
func (m *Myers[T]) LCS() []T {
	return myersLCS(m.a, m.b)
}
// ses recursively builds the shortest edit script for a vs b by
// divide-and-conquer on the middle snake. na/nb are the lengths of the
// current slices; cx/cy are their absolute offsets within the original
// inputs so that emitted Edit positions are global.
func (m *Myers[T]) ses(idx int, a, b []T, na, nb, cx, cy int32) []Edit[T] {
	var ses []Edit[T]
	if na > 0 && nb > 0 {
		d, x, y, u, v := middleSnake(a, b)
		if d > 1 || (x != u && y != v) {
			ses = append(ses,
				m.ses(idx+1, a[0:x], b[0:y], x, y, cx, cy)...)
			if x != u && y != v {
				// middle snake is part of the lcs.
				ses = append(ses, m.edits(a[x:u], Identical, int(cx+x), int(cy+y))...)
			}
			return append(ses,
				m.ses(idx+1, a[u:na], b[v:nb], na-u, nb-v, cx+u, cy+v)...)
		}
		if nb > na {
			// a is part of the LCS.
			ses = append(ses, m.edits(a[0:na], Identical, int(cx), int(cy))...)
			// Both cursors advance by na: the first na elements of b matched a.
			return append(ses,
				m.ses(idx+1, nil, b[na:nb], 0, nb-na, cx+na, cy+na)...)
		}
		if na > nb {
			// b is part of the LCS.
			ses = append(ses, m.edits(b[0:nb], Identical, int(cx), int(cy))...)
			return append(ses,
				m.ses(idx+1, a[nb:na], nil, na-nb, 0, cx+nb, cy+nb)...)
		}
		return ses
	}
	// Only one side remains: pure deletions from a or insertions from b.
	if na > 0 {
		return m.edits(a, Delete, int(cx), int(cy))
	}
	return m.edits(b, Insert, int(cx), int(cy))
}
// edits converts a run of values into one Edit per value with operation op,
// starting at positions (cx, cy). For insertions the x position stays fixed
// at cx (nothing in a is consumed) while y advances.
func (m *Myers[T]) edits(vals []T, op EditOp, cx, cy int) []Edit[T] {
	createEdit := func(cx, cy, i int, op EditOp, val T) Edit[T] {
		atx := cx + i
		if op == Insert {
			atx = cx
		}
		return Edit[T]{op, atx, cy + i, val}
	}
	var edits []Edit[T]
	for i, v := range vals {
		edits = append(edits, createEdit(cx, cy, i, op, v))
	}
	return edits
}
// SES returns the shortest edit script.
func (m *Myers[T]) SES() *EditScript[T] {
var es EditScript[T] = m.ses(0, m.a, m.b, int32(len(m.a)), int32(len(m.b)), 0, 0)
return &es
} | algo/lcs/myers.go | 0.639961 | 0.533276 | myers.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// ParseExpressionResponse
type ParseExpressionResponse struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Error details, if expression evaluation resulted in an error.
error PublicErrorable
// A collection of values produced by the evaluation of the expression.
evaluationResult []string
// true if the evaluation was successful.
evaluationSucceeded *bool
// An attributeMappingSource object representing the parsed expression.
parsedExpression AttributeMappingSourceable
// true if the expression was parsed successfully.
parsingSucceeded *bool
}
// NewParseExpressionResponse instantiates a new parseExpressionResponse and sets the default values.
func NewParseExpressionResponse()(*ParseExpressionResponse) {
m := &ParseExpressionResponse{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// CreateParseExpressionResponseFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateParseExpressionResponseFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewParseExpressionResponse(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ParseExpressionResponse) GetAdditionalData()(map[string]interface{}) {
if m == nil {
return nil
} else {
return m.additionalData
}
}
// GetError gets the error property value. Error details, if expression evaluation resulted in an error.
func (m *ParseExpressionResponse) GetError()(PublicErrorable) {
if m == nil {
return nil
} else {
return m.error
}
}
// GetEvaluationResult gets the evaluationResult property value. A collection of values produced by the evaluation of the expression.
func (m *ParseExpressionResponse) GetEvaluationResult()([]string) {
if m == nil {
return nil
} else {
return m.evaluationResult
}
}
// GetEvaluationSucceeded gets the evaluationSucceeded property value. true if the evaluation was successful.
func (m *ParseExpressionResponse) GetEvaluationSucceeded()(*bool) {
if m == nil {
return nil
} else {
return m.evaluationSucceeded
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *ParseExpressionResponse) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["error"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetObjectValue(CreatePublicErrorFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetError(val.(PublicErrorable))
}
return nil
}
res["evaluationResult"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetCollectionOfPrimitiveValues("string")
if err != nil {
return err
}
if val != nil {
res := make([]string, len(val))
for i, v := range val {
res[i] = *(v.(*string))
}
m.SetEvaluationResult(res)
}
return nil
}
res["evaluationSucceeded"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetEvaluationSucceeded(val)
}
return nil
}
res["parsedExpression"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetObjectValue(CreateAttributeMappingSourceFromDiscriminatorValue)
if err != nil {
return err
}
if val != nil {
m.SetParsedExpression(val.(AttributeMappingSourceable))
}
return nil
}
res["parsingSucceeded"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetParsingSucceeded(val)
}
return nil
}
return res
}
// GetParsedExpression gets the parsedExpression property value. An attributeMappingSource object representing the parsed expression.
func (m *ParseExpressionResponse) GetParsedExpression()(AttributeMappingSourceable) {
if m == nil {
return nil
} else {
return m.parsedExpression
}
}
// GetParsingSucceeded gets the parsingSucceeded property value. true if the expression was parsed successfully.
func (m *ParseExpressionResponse) GetParsingSucceeded()(*bool) {
if m == nil {
return nil
} else {
return m.parsingSucceeded
}
}
// Serialize serializes information the current object
func (m *ParseExpressionResponse) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteObjectValue("error", m.GetError())
if err != nil {
return err
}
}
if m.GetEvaluationResult() != nil {
err := writer.WriteCollectionOfStringValues("evaluationResult", m.GetEvaluationResult())
if err != nil {
return err
}
}
{
err := writer.WriteBoolValue("evaluationSucceeded", m.GetEvaluationSucceeded())
if err != nil {
return err
}
}
{
err := writer.WriteObjectValue("parsedExpression", m.GetParsedExpression())
if err != nil {
return err
}
}
{
err := writer.WriteBoolValue("parsingSucceeded", m.GetParsingSucceeded())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ParseExpressionResponse) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetError sets the error property value. Error details, if expression evaluation resulted in an error.
func (m *ParseExpressionResponse) SetError(value PublicErrorable)() {
if m != nil {
m.error = value
}
}
// SetEvaluationResult sets the evaluationResult property value. A collection of values produced by the evaluation of the expression.
func (m *ParseExpressionResponse) SetEvaluationResult(value []string)() {
if m != nil {
m.evaluationResult = value
}
}
// SetEvaluationSucceeded sets the evaluationSucceeded property value. true if the evaluation was successful.
func (m *ParseExpressionResponse) SetEvaluationSucceeded(value *bool)() {
if m != nil {
m.evaluationSucceeded = value
}
}
// SetParsedExpression sets the parsedExpression property value. An attributeMappingSource object representing the parsed expression.
func (m *ParseExpressionResponse) SetParsedExpression(value AttributeMappingSourceable)() {
if m != nil {
m.parsedExpression = value
}
}
// SetParsingSucceeded sets the parsingSucceeded property value. true if the expression was parsed successfully.
func (m *ParseExpressionResponse) SetParsingSucceeded(value *bool)() {
if m != nil {
m.parsingSucceeded = value
}
} | models/parse_expression_response.go | 0.67854 | 0.41947 | parse_expression_response.go | starcoder |
package quadtree
const (
	// World bounding box used by NewQuadTree, in degrees.
	MinLongitude float32 = -180.0
	MaxLongitude float32 = 180.0
	MinLatitude float32 = -90.0
	MaxLatitude float32 = 90.0
	// Capacity is the maximum number of points a node holds before it splits.
	Capacity = 8
	// MaxDepth is the maximum tree depth; nodes at this depth never split.
	MaxDepth = 8
)
type (
	// Node is a single quadtree node: an axis-aligned bounding box holding up
	// to Capacity points directly, or four child quadrants once split.
	// childs is nil-filled until split() runs; points is nil after a split.
	Node struct {
		bounds *Rectangle
		depth int
		points []*Point
		parent *Node
		childs [4]*Node
	}
)
// NewQuadTree creates QuadTree. With bounding box for whole world and maximum depth 8.
// In case you need special settings for QuadTree you can use NewQuadTreeNode directly.
func NewQuadTree() *Node {
	return NewNode(
		NewRectangle(MinLongitude, MaxLongitude, MinLatitude, MaxLatitude),
		0, // root depth
		nil, // root has no parent
	)
}
// NewNode creates a QuadTree node covering rectangle at the given depth, with
// parent as its enclosing node (nil for the root).
// (Fixed: comment previously referred to this function as "NewQuadTreeNode".)
func NewNode(rectangle *Rectangle, depth int, parent *Node) *Node {
	return &Node{
		bounds: rectangle,
		depth: depth,
		parent: parent,
	}
}
// split creates 4 quadrants in current node and moves its points down to the
// children. It is a no-op when the node is already split.
// BUG FIX: every quadrant's X midpoint was computed as
// (minimumX+minimumX)/2 instead of (minimumX+maximumX)/2, producing
// degenerate/incorrect X ranges for all four children.
func (n *Node) split() {
	if n.childs[0] != nil {
		return
	}
	// Midpoints of the current bounds; each child covers one quadrant.
	midX := (n.bounds.minimumX + n.bounds.maximumX) / 2
	midY := (n.bounds.minimumY + n.bounds.maximumY) / 2
	// Top left quadrant (north-west).
	n.childs[0] = NewNode(NewRectangle(n.bounds.minimumX, midX, midY, n.bounds.maximumY), n.depth+1, n)
	// Top right quadrant (north-east).
	n.childs[1] = NewNode(NewRectangle(midX, n.bounds.maximumX, midY, n.bounds.maximumY), n.depth+1, n)
	// Bottom left quadrant (south-west).
	n.childs[2] = NewNode(NewRectangle(n.bounds.minimumX, midX, n.bounds.minimumY, midY), n.depth+1, n)
	// Bottom right quadrant (south-east).
	n.childs[3] = NewNode(NewRectangle(midX, n.bounds.maximumX, n.bounds.minimumY, midY), n.depth+1, n)
	// Reinsert points into whichever child contains each of them.
	for _, p := range n.points {
		for _, child := range n.childs {
			if child.Insert(p) {
				break
			}
		}
	}
	n.points = nil
}
// Search will return all the points within bounding box definition.
// Returns nil when a does not intersect this node's bounds.
func (n *Node) Search(a *Rectangle) []*Point {
	if !n.bounds.Intersect(a) {
		return nil
	}
	// Add points in current node.
	var results []*Point
	for _, p := range n.points {
		if a.FallsIn(p) {
			results = append(results, p)
		}
	}
	// Exit if child node does not exists.
	if n.childs[0] == nil {
		return results
	}
	// Range over child nodes recursively and search for points.
	for _, child := range n.childs {
		results = append(results, child.Search(a)...)
	}
	return results
}
// Insert will try to insert point into the QuadTree.
// Returns false when p lies outside this node's bounds.
func (n *Node) Insert(p *Point) bool {
	// Check if point falls in bound of current node.
	if !n.bounds.FallsIn(p) {
		return false
	}
	// We insert point in current node, if there is space available or if we are on MaxDepth.
	if n.childs[0] == nil {
		if len(n.points) < Capacity || n.depth == MaxDepth {
			n.points = append(n.points, p)
			return true
		}
		// Split full node in 4 quadrants.
		n.split()
	}
	// Range over child nodes recursively and try to insert point.
	for _, child := range n.childs {
		if child.Insert(p) {
			return true
		}
	}
	return false
}
// Update will update the location of a point within the tree.
// p is matched by pointer identity; its coordinates are overwritten with np's
// and, if it no longer falls within its leaf, it is removed and reinserted
// into the nearest enclosing node that can hold it.
func (n *Node) Update(p *Point, np *Point) bool {
	// Check if point falls in bound of current node.
	if !n.bounds.FallsIn(p) {
		return false
	}
	// Update point in current node, if there are no child nodes.
	if n.childs[0] == nil {
		for i, val := range n.points {
			if val != p {
				continue
			}
			// Update coordinates to new point.
			p.x = np.x
			p.y = np.y
			// Check if new position still falls in current node.
			if n.bounds.FallsIn(np) {
				return true
			}
			// Remove and reinsert point to matching node.
			n.points = append(n.points[:i], n.points[i+1:]...)
			return n.reInsert(p)
		}
		return false
	}
	// Range over child nodes recursively and try to update point.
	for _, child := range n.childs {
		if child.Update(p, np) {
			return true
		}
	}
	return false
}
// reInsert inserts point in to matching node, climbing toward the root until
// some ancestor's subtree accepts it. Returns false if even the root rejects
// the point (it is outside the tree's bounds).
func (n *Node) reInsert(p *Point) bool {
	// Try to insert point in child nodes.
	if n.Insert(p) {
		return true
	}
	// We climbed to root node.
	if n.parent == nil {
		return false
	}
	// Try to insert point in parent node.
	return n.parent.reInsert(p)
}
// Remove recursively over nodes and tries to remove a point from the QuadTree.
func (n *Node) Remove(p *Point) bool {
// Check if point falls in bound of current node.
if !n.bounds.FallsIn(p) {
return false
}
// Remove point in current node, if there are no child nodes.
if n.childs[0] == nil {
for i, val := range n.points {
if val != p {
continue
}
// Remove point from points array.
n.points = append(n.points[:i], n.points[i+1:]...)
return true
}
return false
}
// Range over child nodes recursively and try to delete point.
for _, child := range n.childs {
if child.Remove(p) {
return true
}
}
return false
} | node.go | 0.847763 | 0.546859 | node.go | starcoder |
package data
import (
"fmt"
"regexp"
)
// Entry is a single label/value pair stored in a LabelMap.
type Entry[T any] struct {
	Label string
	Val T
}
// LabelMap is an ordered multimap of label/value pairs: duplicate labels are
// allowed and insertion order is preserved.
type LabelMap[T any] struct {
	labels []Entry[T]
}
// EmptyLabelMap returns a LabelMap with no entries.
func EmptyLabelMap[T any]() LabelMap[T] {
	return LabelMap[T]{labels: make([]Entry[T], 0)}
}
// LabelMapFrom builds a LabelMap from the given entries, preserving order.
func LabelMapFrom[T any](labels ...Entry[T]) LabelMap[T] {
	return LabelMap[T]{labels: labels}
}
// LabelMapSingleton returns a LabelMap containing exactly one entry.
func LabelMapSingleton[T any](label string, val T) LabelMap[T] {
	entry := Entry[T]{Label: label, Val: val}
	return LabelMap[T]{labels: []Entry[T]{entry}}
}
// ShowRaw renders lm as "{label: value, ...}" using each value's String method.
func ShowRaw[T fmt.Stringer](lm LabelMap[T]) string {
	return fmt.Sprintf("{%s}", lm.Show(func(k string, v T) string {
		return fmt.Sprintf("%s: %s", k, v.String())
	}))
}
// labelRegex matches "bare" labels that need no quoting: a lowercase letter
// followed by any mix of word characters and underscores.
// (Fixed: raw string literal instead of a double-escaped regular string.)
var labelRegex = regexp.MustCompile(`^[a-z](?:\w+|_)*$`)

// ShowLabel renders a label, wrapping it in double quotes unless it is a bare
// identifier per labelRegex.
func ShowLabel(label string) string {
	if labelRegex.MatchString(label) {
		return label
	}
	return fmt.Sprintf(`"%s"`, label)
}
// Show renders the entries joined by ", ", formatting each pair with f; labels
// are passed to f already quoted/escaped via ShowLabel.
func (lm LabelMap[T]) Show(f func(string, T) string) string {
	return JoinToStringFunc(lm.labels, ", ", func(tu Entry[T]) string {
		return f(ShowLabel(tu.Label), tu.Val)
	})
}
// Values returns the values of all entries, in insertion order.
func (lm LabelMap[T]) Values() []T {
	vals := make([]T, 0, len(lm.labels))
	for _, e := range lm.labels {
		vals = append(vals, e.Val)
	}
	return vals
}
// IsEmpty reports whether the map has no entries.
func (lm LabelMap[T]) IsEmpty() bool {
	return len(lm.labels) == 0
}
// Size returns the number of entries (duplicates counted).
func (lm LabelMap[T]) Size() int {
	return len(lm.labels)
}
// Entries returns the underlying entry slice; callers must not mutate it.
func (lm LabelMap[T]) Entries() []Entry[T] {
	return lm.labels
}
// Copy returns a shallow copy of the map: entries are duplicated, values are
// shared with the receiver.
func (lm LabelMap[T]) Copy() LabelMap[T] {
	dup := make([]Entry[T], len(lm.labels))
	copy(dup, lm.labels)
	return LabelMap[T]{labels: dup}
}
// Put returns a new LabelMap containing the receiver's entries plus one entry
// per value in vals under key.
// BUG FIX: the original appended directly to lm.labels, which can overwrite
// the shared backing array (slice aliasing) when the receiver's slice has
// spare capacity; the result now uses freshly allocated storage.
func (lm LabelMap[T]) Put(key string, vals []T) LabelMap[T] {
	labels := make([]Entry[T], 0, len(lm.labels)+len(vals))
	labels = append(labels, lm.labels...)
	for _, v := range vals {
		labels = append(labels, Entry[T]{Label: key, Val: v})
	}
	return LabelMap[T]{labels: labels}
}
// Merge concatenates the entries of both maps into a new LabelMap, keeping
// duplicated keys.
func (lm LabelMap[T]) Merge(other LabelMap[T]) LabelMap[T] {
	merged := make([]Entry[T], 0, len(lm.labels)+len(other.labels))
	merged = append(merged, lm.labels...)
	merged = append(merged, other.labels...)
	return LabelMap[T]{labels: merged}
}
// LabelMapValues returns a new LabelMap with every value transformed by
// mapper; labels and order are preserved.
func LabelMapValues[T, R any](lm LabelMap[T], mapper func(T) R) LabelMap[R] {
	mapped := make([]Entry[R], 0, len(lm.labels))
	for _, e := range lm.labels {
		mapped = append(mapped, Entry[R]{Label: e.Label, Val: mapper(e.Val)})
	}
	return LabelMap[R]{labels: mapped}
}
// LabelFlatMapValues applies mapper to every value and concatenates the
// resulting slices, in entry order.
func LabelFlatMapValues[T, R any](lm LabelMap[T], mapper func(T) []R) []R {
	out := make([]R, 0, len(lm.labels))
	for _, e := range lm.labels {
		out = append(out, mapper(e.Val)...)
	}
	return out
}
// Like LabelMapValues but short circuits on error
func LabelMapValuesErr[T, R any](lm LabelMap[T], mapper func(T) (R, error)) (LabelMap[R], error) {
res := make([]Entry[R], len(lm.labels))
for i, elem := range lm.labels {
val, err := mapper(elem.Val)
if err != nil {
return LabelMap[R]{}, err
}
res[i] = Entry[R]{Label: elem.Label, Val: val}
}
return LabelMap[R]{labels: res}, nil
} | data/label_map.go | 0.591015 | 0.5425 | label_map.go | starcoder |
package isc
// ListAll reports whether f holds for every element of list (true when empty).
func ListAll[T any](list []T, f func(T) bool) bool {
	for i := range list {
		if !f(list[i]) {
			return false
		}
	}
	return true
}
// ListAny reports whether f holds for at least one element of list.
func ListAny[T any](list []T, f func(T) bool) bool {
	for i := range list {
		if f(list[i]) {
			return true
		}
	}
	return false
}
// ListNone reports whether f holds for no element of list (true when empty).
func ListNone[T any](list []T, f func(T) bool) bool {
	result := true
	for _, elem := range list {
		if f(elem) {
			result = false
			break
		}
	}
	return result
}
// ListCount returns how many elements of list satisfy f.
func ListCount[T any](list []T, f func(T) bool) int {
	total := 0
	for _, elem := range list {
		if f(elem) {
			total++
		}
	}
	return total
}
// MapAll reports whether f holds for every key/value pair in m (true when empty).
func MapAll[K comparable, V any](m map[K]V, f func(K, V) bool) bool {
	for key, val := range m {
		if !f(key, val) {
			return false
		}
	}
	return true
}
// MapAny reports whether f holds for at least one key/value pair in m.
func MapAny[K comparable, V any](m map[K]V, f func(K, V) bool) bool {
	for key, val := range m {
		if f(key, val) {
			return true
		}
	}
	return false
}
// MapNone reports whether f holds for no key/value pair in m (true when empty).
func MapNone[K comparable, V any](m map[K]V, f func(K, V) bool) bool {
	result := true
	for key, val := range m {
		if f(key, val) {
			result = false
			break
		}
	}
	return result
}
// MapCount returns how many key/value pairs of m satisfy f.
func MapCount[K comparable, V any](m map[K]V, f func(K, V) bool) int {
	total := 0
	for key, val := range m {
		if f(key, val) {
			total++
		}
	}
	return total
}
// MapAllKey reports whether f holds for every key in m (true when empty).
func MapAllKey[K comparable, V any](m map[K]V, f func(K) bool) bool {
	for key := range m {
		if !f(key) {
			return false
		}
	}
	return true
}
// MapAnyKey reports whether f returns true for at least one key of m.
func MapAnyKey[K comparable, V any](m map[K]V, f func(K) bool) bool {
	for key := range m {
		if f(key) {
			return true
		}
	}
	return false
}
// MapNoneKey reports whether f returns false for every key of m.
func MapNoneKey[K comparable, V any](m map[K]V, f func(K) bool) bool {
	for key := range m {
		if f(key) {
			return false
		}
	}
	return true
}
// MapCountKey returns how many keys of m satisfy f.
func MapCountKey[K comparable, V any](m map[K]V, f func(K) bool) int {
	count := 0
	for key := range m {
		if f(key) {
			count++
		}
	}
	return count
}
// MapAllValue reports whether f returns true for every value of m.
func MapAllValue[K comparable, V any](m map[K]V, f func(V) bool) bool {
	for _, value := range m {
		if !f(value) {
			return false
		}
	}
	return true
}
// MapAnyValue reports whether f returns true for at least one value of m.
func MapAnyValue[K comparable, V any](m map[K]V, f func(V) bool) bool {
	for _, value := range m {
		if f(value) {
			return true
		}
	}
	return false
}
// MapNoneValue reports whether f returns false for every value of m.
func MapNoneValue[K comparable, V any](m map[K]V, f func(V) bool) bool {
	for _, value := range m {
		if f(value) {
			return false
		}
	}
	return true
}
// MapCountValue returns how many values of m satisfy f.
func MapCountValue[K comparable, V any](m map[K]V, f func(V) bool) int {
	count := 0
	for _, value := range m {
		if f(value) {
			count++
		}
	}
	return count
}
package main
import (
"machine"
"time"
"tinygo.org/x/drivers/dht"
)
var (
	// RGB LED 1 (temperature indicator) pins, ordered {R, G, B}.
	pinsTemp = [3]machine.Pin{machine.GP0, machine.GP1, machine.GP2}
	// RGB LED 2 (humidity indicator) pins, ordered {R, G, B}.
	pinsHumidity = [3]machine.Pin{machine.GP3, machine.GP4, machine.GP5}
	// Data pin of the DHT11 temperature/humidity sensor.
	pinDHT = machine.GP15
	// dht11 is the sensor driver handle; initialised in setupPins.
	dht11 dht.Device
)
const (
	// +/- bands around the calibrated "normal" values within which the
	// corresponding LED stays green (see checkMetric).
	tempThreshold     = 0.2 // degrees celsius
	humidityThreshold = 2.0 // percent
)
// setupPins configures the six LED GPIO pins as outputs and initialises the
// DHT11 driver for manual (on-demand) measurement updates.
func setupPins() {
	ledConfig := machine.PinConfig{Mode: machine.PinOutput}
	// RGB LED 1 (temperature).
	for i := range pinsTemp {
		pinsTemp[i].Configure(ledConfig)
	}
	// RGB LED 2 (humidity).
	for i := range pinsHumidity {
		pinsHumidity[i].Configure(ledConfig)
	}
	// The DHT11 is polled explicitly via readMeasurements rather than on a
	// background timer.
	dht11 = dht.New(pinDHT, dht.DHT11)
	dht11.Configure(dht.UpdatePolicy{
		UpdateTime:          time.Second * 2,
		UpdateAutomatically: false})
}
// initialWait gives the DHT11 a couple of seconds to power up before the
// first read, flashing the two LEDs red alternately so the device is
// visibly busy; both LEDs are switched off when the wait completes.
func initialWait() {
	for cycle := 0; cycle < 2; cycle++ {
		ledRed(pinsTemp)
		ledOff(pinsHumidity)
		time.Sleep(time.Millisecond * 500)
		ledOff(pinsTemp)
		ledRed(pinsHumidity)
		time.Sleep(time.Millisecond * 500)
	}
	ledOff(pinsTemp)
	ledOff(pinsHumidity)
}
// calibrate samples the sensor five times, two seconds apart, and returns
// the running averages as the ambient "normal" temperature and humidity.
// Yellow LEDs alternate during calibration; both turn green when done.
func calibrate() (temp, humidity float32) {
	temp, humidity = 0.0, 0.0
	for i := 0; i < 5; i++ {
		readMeasurements()
		newTemp, err := dht11.TemperatureFloat(dht.C)
		if err == nil {
			// Incremental mean: avg_{i+1} = (avg_i*i + sample) / (i+1).
			// NOTE(review): when a read errors, i still advances, which
			// skews the weighting of later samples — confirm acceptable.
			temp = (temp*float32(i) + newTemp) / float32(i+1)
		}
		newHumidity, err := dht11.HumidityFloat()
		if err == nil {
			humidity = (humidity*float32(i) + newHumidity) / float32(i+1)
		}
		// Show alternating yellow LEDs while waiting 2s for the next reading.
		for j := 0; j < 2; j++ {
			ledYellow(pinsTemp)
			ledOff(pinsHumidity)
			time.Sleep(time.Millisecond * 500)
			ledYellow(pinsHumidity)
			ledOff(pinsTemp)
			time.Sleep(time.Millisecond * 500)
		}
	}
	ledGreen(pinsTemp)
	ledGreen(pinsHumidity)
	return
}
// readMeasurements polls the DHT11 until a measurement is read successfully,
// pausing briefly between failed attempts.
func readMeasurements() {
	for {
		if err := dht11.ReadMeasurements(); err == nil {
			return
		}
		// NOTE(review): the retry pause is only 10µs — confirm this busy
		// retry interval is intentional for the DHT11.
		time.Sleep(time.Microsecond * 10)
	}
}
// checkMetric compares value against its calibrated norm within a given
// threshold tolerance and colours the LED on ledPins accordingly:
// blue when below norm-threshold, red when above norm+threshold, and
// green when within the band.
func checkMetric(value, norm, threshold float32, ledPins [3]machine.Pin) {
	switch {
	case value < norm-threshold:
		ledBlue(ledPins)
	case value > norm+threshold:
		ledRed(ledPins)
	default:
		ledGreen(ledPins)
	}
}
// monitor polls the DHT11 every two seconds forever, updating each LED to
// show how the current reading compares with its calibrated normal value.
func monitor(normalTemp, normalHumidity float32) {
	for {
		// Read every two seconds.
		time.Sleep(time.Second * 2)
		readMeasurements()
		// Only update an LED when its reading succeeded; previously the
		// error was discarded, so a failed read yielded 0 and wrongly
		// turned the LED blue ("far below normal").
		if temp, err := dht11.TemperatureFloat(dht.C); err == nil {
			checkMetric(temp, normalTemp, tempThreshold, pinsTemp)
		}
		if humidity, err := dht11.HumidityFloat(); err == nil {
			checkMetric(humidity, normalHumidity, humidityThreshold, pinsHumidity)
		}
	}
}
// main wires the whole device together: configure pins, wait for the sensor
// to power up, calibrate to ambient conditions, then monitor forever.
func main() {
	// Configure all peripherals.
	setupPins()
	// Initial setup: let the DHT11 initialise before the first read.
	initialWait()
	// Calibrate to ambient conditions.
	normalTemp, normalHumidity := calibrate()
	// Finally monitor for any changes (never returns).
	monitor(normalTemp, normalHumidity)
}
package main
// Given a robot cleaner in a room modeled as a grid.
// Each cell in the grid can be empty or blocked.
// The robot cleaner with 4 given APIs can move forward,
// turn left or turn right. Each turn it made is 90 degrees.
// When it tries to move into a blocked cell, its bumper sensor
// detects the obstacle and it stays on the current cell.
// Design an algorithm to clean the entire room using only the 4 given APIs shown below.
// interface Robot {
// // returns true if next cell is open and robot moves into the cell.
// // returns false if next cell is obstacle and robot stays on the current cell.
// boolean move();
// // Robot will stay on the same cell after calling turnLeft/turnRight.
// // Each turn will be 90 degrees.
// void turnLeft();
// void turnRight();
// // Clean the current cell.
// void clean();
// }
// Input:
// room = [
// [1,1,1,1,1,0,1,1],
// [1,1,1,1,1,0,1,1],
// [1,0,1,1,1,1,1,1],
// [0,0,0,1,0,0,0,0],
// [1,1,1,1,1,1,1,1]
// ],
// row = 1,
// col = 3
// Explanation:
// All grids in the room are marked by either 0 or 1.
// 0 means the cell is blocked, while 1 means the cell is accessible.
// The robot initially starts at the position of row=1, col=3.
// From the top left corner, its position is one row below and three columns right.
// main is intentionally empty: cleanRoom is meant to be driven by a judge
// that supplies a real Robot. The commented-out grid is the sample room
// from the problem statement (1 = open, 0 = blocked).
func main() {
	// rm := [][]int{
	// {1, 1, 1, 1, 1, 0, 1, 1},
	// {1, 1, 1, 1, 1, 0, 1, 1},
	// {1, 0, 1, 1, 1, 1, 1, 1},
	// {0, 0, 0, 1, 0, 0, 0, 0},
	// {1, 1, 1, 1, 1, 1, 1, 1}}
}
// Robot is a local stub of the judge-provided robot API (move/turn/clean).
type Robot struct{}

// location identifies a cell by its coordinates relative to the start cell.
type location struct {
	x int
	y int
}

// directions lists the four unit moves in clockwise order (up, right, down,
// left); the index into this table is the robot's current heading.
var directions = [][]int{{-1, 0}, {0, 1}, {1, 0}, {0, -1}}
// track performs a depth-first traversal from cell (r, c) with the robot
// currently at that cell facing direction d. It cleans the cell, recursively
// visits each unvisited open neighbour (clockwise, starting from d), and
// finally backtracks one cell so the caller finds the robot back where it
// launched the recursion, with its original heading.
func track(robot *Robot, r int, c int, d int, visited map[location]bool) {
	currentLocation := location{r, c}
	visited[currentLocation] = true
	robot.Clean()
	for i := 0; i < 4; i++ {
		dx := r + directions[d][0]
		dy := c + directions[d][1]
		// Move() is only attempted for unvisited cells (short-circuit) and
		// succeeds only when the neighbour is open; on success the robot is
		// standing on (dx, dy) facing d, matching the recursive call.
		if !visited[location{dx, dy}] && robot.Move() {
			track(robot, dx, dy, d, visited)
		}
		d = (d + 1) % 4
		robot.TurnRight()
	}
	// Backtrack: the four turns above left the heading unchanged (net 360°),
	// so turn around, step back into the previous cell, and restore the
	// entry heading.
	robot.TurnRight()
	robot.TurnRight()
	robot.Move()
	robot.TurnLeft()
	robot.TurnLeft()
}
// cleanRoom cleans every cell reachable by the robot, treating its starting
// cell as the origin (0, 0) with heading 0 (up).
func cleanRoom(robot *Robot) {
	track(robot, 0, 0, 0, map[location]bool{})
}
// Move advances one cell in the current heading; the real implementation
// returns false (and stays put) when the next cell is blocked.
func (robot *Robot) Move() bool { return true }

// TurnLeft rotates the robot 90 degrees counter-clockwise in place.
func (robot *Robot) TurnLeft() {}

// TurnRight rotates the robot 90 degrees clockwise in place.
func (robot *Robot) TurnRight() {}

// Clean cleans the cell the robot currently occupies.
func (robot *Robot) Clean() {}
package main
import (
"encoding/csv"
"errors"
"fmt"
"io"
"os"
)
/*
Problem taken from: https://www.fluentcpp.com/2017/10/23/results-expressive-cpp17-coding-challenge/
The task proposed in the challenge was to write a command line tool that takes in a CSV file, overwrites all the
data of a given column by a given value, and outputs the results into a new CSV file.
More specifically, this command line tool should accept the following arguments:
the filename of a CSV file, the name of the column to overwrite in that file, the string that will be used as a
replacement for that column, the filename where the output will be written.
For instance, if the CSV file had a column “City” with various values for the entries in the file, calling the tool
with the name of the input file, City, London and the name of output file would result in a copy of the initial
file, but with all cities set equal to “London”:
Input file:
First Name,Last Name,Age,City,Eyes color,Species
John,Doe,32,Tokyo,Blue,Human
Flip,Helm,12,Canberra,Red,Unknown
Terdos,Bendarian,165,Cracow,Blue,Magic tree
Dominik,Elpos,33,Paris,Blue,Human
Ewan,Grath,51,New Delhi,Green,Human
will be converted to (City -> London)
Output file:
First Name,Last Name,Age,City,Eyes color,Species
John,Doe,32,London,Blue,Human
Flip,Helm,12,London,Red,Unknown
Terdos,Bendarian,165,London,Blue,Magic tree
Dominik,Elpos,33,London,Blue,Human
Ewan,Grath,51,London,Green,Human
Here was how to deal with edge cases:
if the input file is empty, the program should write “input file missing” to the console. if the input file does
not contain the specified column, the program should write “column name doesn’t exists in the input file” to the
console.
In both cases, there shouldn’t be any output file generated.
And if the program succeeds but there is already a file having the name specified for output, the program should
overwrite this file.
The goal of the challenge was double: using as many C++17 features as possible (as long as they were useful to solve
the case), and write the clearest code possible with them.
*/
var ErrColumnNotFound = errors.New("column not found")
// main parses the four positional arguments — input file, column name,
// replacement value, output file — and runs the rewrite. Usage is printed
// when too few arguments are supplied.
func main() {
	// Guard against fewer than four arguments: the original only checked
	// for zero, so invoking with 1-3 arguments panicked on os.Args indexing.
	if len(os.Args) < 5 {
		printUsage()
		return
	}
	var (
		inputFile   = os.Args[1]
		column      = os.Args[2]
		replacement = os.Args[3]
		outputFile  = os.Args[4]
	)
	run(inputFile, column, replacement, outputFile)
}
// run copies the CSV at inputFile to outputFile, overwriting every value of
// the named column with replacement. The first row is treated as the header
// and echoed unchanged; any error is reported to the console and aborts the
// copy.
func run(inputFile, column, replacement, outputFile string) {
	inFile, err := os.Open(inputFile)
	if err != nil {
		reportError("cannot open file", err)
		return
	}
	defer inFile.Close()
	outFile, err := os.Create(outputFile)
	if err != nil {
		reportError("cannot open file", err)
		return
	}
	defer outFile.Close()
	csvReader := csv.NewReader(inFile)
	csvWriter := csv.NewWriter(outFile)
	defer csvWriter.Flush()
	colIndex := -1 // resolved from the header row on the first iteration
	for {
		row, err := csvReader.Read()
		if err == io.EOF {
			return
		}
		if err != nil {
			reportError("error reading row", err)
			return
		}
		if colIndex < 0 {
			// Header row: locate the column to replace, then echo the header.
			colIndex, err = columnIndex(row, column)
			if err != nil {
				reportError("column not found", err)
				return
			}
		} else {
			row[colIndex] = replacement
		}
		// The message used to be "cannot write row %s": reportError does not
		// format its message, so the stray %s verb was printed verbatim.
		// The two previously duplicated write branches are also merged here.
		if err := csvWriter.Write(row); err != nil {
			reportError("cannot write row", err)
			return
		}
	}
}
// columnIndex returns the position of columnName within the header row, or
// ErrColumnNotFound when the row has no such column.
func columnIndex(row []string, columnName string) (int, error) {
	for i, name := range row {
		if name == columnName {
			return i, nil
		}
	}
	return 0, ErrColumnNotFound
}
// printUsage writes the expected command-line invocation to stdout.
func printUsage() {
	fmt.Println("<this program> <input file> <column> <replacement> <output file>")
}
// reportError prints a short message to stdout followed by the underlying
// error detail on its own line.
func reportError(message string, err error) {
	fmt.Printf("%s\ndetails:\n%s\n", message, err)
}
package optimizer
import (
"strconv"
"github.com/rhwilr/lemur/ast"
"github.com/rhwilr/lemur/token"
)
// Optimizer performs constant folding on a parsed program, producing a new
// AST in which compile-time-computable expressions are replaced by literals.
type Optimizer struct {
	program   *ast.Program // input AST, read but never appended to
	optimized *ast.Program // output AST, built statement by statement
}
// New returns an Optimizer that will fold constants in program.
func New(program *ast.Program) *Optimizer {
	o := &Optimizer{program: program}
	o.optimized = &ast.Program{}
	return o
}
// Optimize folds every top-level statement and returns the resulting
// program. The returned error is currently always nil.
func (o *Optimizer) Optimize() (*ast.Program, error) {
	for _, stmt := range o.program.Statements {
		o.optimizeStatement(stmt)
	}
	return o.optimized, nil
}
// optimizeStatement folds the expression inside a single statement where
// possible and appends the (possibly mutated) statement to the optimized
// program. Statements of other kinds pass through unchanged.
func (o *Optimizer) optimizeStatement(statement ast.Statement) {
	switch statement := statement.(type) {
	case *ast.LetStatement:
		// Only replace the bound value when folding produced something.
		// (The original wrapped the condition in redundant, non-gofmt
		// parentheses.)
		if value := optimizeLetStatement(statement.Value); value != nil {
			statement.Value = value
		}
		o.optimized.Statements = append(o.optimized.Statements, statement)
	case *ast.ExpressionStatement:
		value, _ := evaluateExpression(statement.Expression)
		statement.Expression = value
		o.optimized.Statements = append(o.optimized.Statements, statement)
	default:
		o.optimized.Statements = append(o.optimized.Statements, statement)
	}
}
// optimizeLetStatement folds the right-hand side of a let binding. It
// returns the folded expression, or the node unchanged when no folding
// rule applies.
func optimizeLetStatement(node ast.Expression) ast.Expression {
	if infix, ok := node.(*ast.InfixExpression); ok {
		return optimizeInfixExpression(infix)
	}
	return node
}
// optimizeInfixExpression constant-folds an infix expression when both
// operands fold to literals of the same kind (integer, string, or boolean).
// When either operand cannot be evaluated, or no folding rule matches the
// operator, the node is returned unchanged.
func optimizeInfixExpression(node *ast.InfixExpression) ast.Expression {
	left, ok := evaluateExpression(node.Left)
	if !ok {
		return node
	}
	right, ok := evaluateExpression(node.Right)
	if !ok {
		return node
	}
	// Integers: both operands must be integer literals.
	_, okL := left.(*ast.IntegerLiteral)
	_, okR := right.(*ast.IntegerLiteral)
	if okL && okR {
		if opt := optimizeIntegerInfixExpression(node.Operator, left, right); opt != nil {
			return opt
		}
	}
	// Strings: both operands must be string literals.
	_, okL = left.(*ast.StringLiteral)
	_, okR = right.(*ast.StringLiteral)
	if okL && okR {
		if opt := optimizeStringInfixExpression(node.Operator, left, right); opt != nil {
			return opt
		}
	}
	// Booleans: both operands must be boolean literals.
	_, okL = left.(*ast.Boolean)
	_, okR = right.(*ast.Boolean)
	if okL && okR {
		if opt := optimizeBooleanInfixExpression(node.Operator, left, right); opt != nil {
			return opt
		}
	}
	return node
}
// optimizeIntegerInfixExpression folds an infix expression whose operands
// are both integer literals, returning the resulting literal, or nil when
// the operator cannot (or should not) be folded at compile time.
func optimizeIntegerInfixExpression(operator string, left, right ast.Expression) ast.Expression {
	leftVal := left.(*ast.IntegerLiteral).Value
	rightVal := right.(*ast.IntegerLiteral).Value
	switch operator {
	case "==":
		return nativeBoolToBooleanAst(leftVal == rightVal)
	case "!=":
		return nativeBoolToBooleanAst(leftVal != rightVal)
	case "<":
		return nativeBoolToBooleanAst(leftVal < rightVal)
	case ">":
		return nativeBoolToBooleanAst(leftVal > rightVal)
	case "<=":
		return nativeBoolToBooleanAst(leftVal <= rightVal)
	case ">=":
		return nativeBoolToBooleanAst(leftVal >= rightVal)
	case "||":
		return nativeBoolToBooleanAst(leftVal != 0 || rightVal != 0)
	case "&&":
		return nativeBoolToBooleanAst(leftVal != 0 && rightVal != 0)
	case "+", "+=":
		return nativeIntegerToIntegerAst(leftVal + rightVal)
	case "-", "-=":
		return nativeIntegerToIntegerAst(leftVal - rightVal)
	case "*", "*=":
		return nativeIntegerToIntegerAst(leftVal * rightVal)
	case "/", "/=":
		// Do not fold division by zero: previously this panicked inside the
		// optimizer; leaving the expression intact defers the error to
		// runtime semantics.
		if rightVal == 0 {
			return nil
		}
		return nativeIntegerToIntegerAst(leftVal / rightVal)
	default:
		return nil
	}
}
// optimizeStringInfixExpression folds an infix expression whose operands are
// both string literals, returning the resulting literal, or nil when the
// operator cannot be folded.
func optimizeStringInfixExpression(operator string, left, right ast.Expression) ast.Expression {
	l := left.(*ast.StringLiteral).Value
	r := right.(*ast.StringLiteral).Value
	switch operator {
	case "+":
		return nativeStringToStringAst(l + r)
	case "==":
		return nativeBoolToBooleanAst(l == r)
	case "!=":
		return nativeBoolToBooleanAst(l != r)
	case "<":
		return nativeBoolToBooleanAst(l < r)
	case "<=":
		return nativeBoolToBooleanAst(l <= r)
	case ">":
		return nativeBoolToBooleanAst(l > r)
	case ">=":
		return nativeBoolToBooleanAst(l >= r)
	}
	return nil
}
// optimizeBooleanInfixExpression folds an infix expression whose operands
// are both boolean literals, returning the resulting literal, or nil when
// the operator cannot be folded.
func optimizeBooleanInfixExpression(operator string, left, right ast.Expression) ast.Expression {
	l := left.(*ast.Boolean).Value
	r := right.(*ast.Boolean).Value
	switch operator {
	case "==":
		return nativeBoolToBooleanAst(l == r)
	case "!=":
		return nativeBoolToBooleanAst(l != r)
	case "||":
		return nativeBoolToBooleanAst(l || r)
	case "&&":
		return nativeBoolToBooleanAst(l && r)
	}
	return nil
}
// optimizeWhileLoopExpression folds the loop condition in place; the loop
// body is left untouched.
func optimizeWhileLoopExpression(node *ast.WhileLoopExpression) ast.Expression {
	cond, _ := evaluateExpression(node.Condition)
	node.Condition = cond
	return node
}
// optimizeCallExpression folds each call argument and replaces the argument
// list with the folded versions.
func optimizeCallExpression(node *ast.CallExpression) ast.Expression {
	optimizedArgs := []ast.Expression{}
	for _, arg := range node.Arguments {
		folded, _ := evaluateExpression(arg)
		optimizedArgs = append(optimizedArgs, folded)
	}
	node.Arguments = optimizedArgs
	return node
}
// evaluateExpression attempts to fold node into a literal. It returns the
// (possibly folded) expression and whether node was of a foldable kind;
// plain literals fold to themselves.
func evaluateExpression(node ast.Expression) (ast.Expression, bool) {
	switch node := node.(type) {
	case *ast.IntegerLiteral, *ast.Boolean, *ast.StringLiteral:
		return node, true
	case *ast.InfixExpression:
		return optimizeInfixExpression(node), true
	case *ast.WhileLoopExpression:
		return optimizeWhileLoopExpression(node), true
	case *ast.CallExpression:
		return optimizeCallExpression(node), true
	default:
		return node, false
	}
}
/*
** Helper
*/
// nativeBoolToBooleanAst wraps a Go bool in the equivalent AST boolean
// literal, including its token.
func nativeBoolToBooleanAst(input bool) *ast.Boolean {
	tok := token.Token{
		Type:    token.FALSE,
		Literal: "false",
	}
	if input {
		tok = token.Token{
			Type:    token.TRUE,
			Literal: "true",
		}
	}
	return &ast.Boolean{Token: tok, Value: input}
}
// nativeIntegerToIntegerAst wraps a Go int64 in the equivalent AST integer
// literal, rendering the literal text in base 10.
// (Also fixes the non-gofmt "Type:token.INT" field spacing.)
func nativeIntegerToIntegerAst(value int64) *ast.IntegerLiteral {
	return &ast.IntegerLiteral{
		Token: token.Token{
			Type:    token.INT,
			Literal: strconv.FormatInt(value, 10),
		},
		Value: value,
	}
}
func nativeStringToStringAst(value string) *ast.StringLiteral {
return &ast.StringLiteral{
Token: token.Token{
Type:token.STRING,
Literal: value,
},
Value: value,
}
} | optimizer/optimizer.go | 0.720958 | 0.410166 | optimizer.go | starcoder |
package three
// NewSphericalHarmonics3 :
// Primary reference:
// https://graphics.stanford.edu/papers/envmap/envmap.pdf
// Secondary reference:
// https://www.ppsloan.org/publications/StupidSH36.pdf
// 3-band SH defined by 9 coefficients, all starting at the zero vector.
func NewSphericalHarmonics3() *SphericalHarmonics3 {
	// The zero value of [9]Vector3 is already nine zero vectors, so the
	// previous element-by-element initialisation loop was redundant.
	return &SphericalHarmonics3{}
}
// SphericalHarmonics3 is a 3-band spherical harmonic, represented by its
// nine per-channel (Vector3) coefficients.
type SphericalHarmonics3 struct {
	Coefficients [9]Vector3
}
// Set copies the nine supplied coefficients into s and returns the result.
// Note the value receiver: the returned pointer refers to a modified copy,
// so callers must use the return value.
func (s SphericalHarmonics3) Set(coefficients [9]Vector3) *SphericalHarmonics3 {
	for i := range s.Coefficients {
		s.Coefficients[i].Copy(coefficients[i])
	}
	return &s
}
// Zero resets all nine coefficients to (0, 0, 0) and returns the result.
func (s SphericalHarmonics3) Zero() *SphericalHarmonics3 {
	for i := range s.Coefficients {
		s.Coefficients[i].Set(0, 0, 0)
	}
	return &s
}
// GetAt get the radiance in the direction of the normal
// target is a Vector3
// Note: target is received by value, so the result must be taken from the
// returned pointer (a pointer to the modified copy).
func (s SphericalHarmonics3) GetAt(normal, target Vector3) *Vector3 {
	// normal is assumed to be unit length
	x, y, z := normal.X, normal.Y, normal.Z
	coeff := s.Coefficients
	// band 0 (constant term)
	target.Copy(coeff[0]).MultiplyScalar(0.282095)
	// band 1 (linear terms)
	target.AddScaledVector(coeff[1], 0.488603*y)
	target.AddScaledVector(coeff[2], 0.488603*z)
	target.AddScaledVector(coeff[3], 0.488603*x)
	// band 2 (quadratic terms)
	target.AddScaledVector(coeff[4], 1.092548*(x*y))
	target.AddScaledVector(coeff[5], 1.092548*(y*z))
	target.AddScaledVector(coeff[6], 0.315392*(3.0*z*z-1.0))
	target.AddScaledVector(coeff[7], 1.092548*(x*z))
	target.AddScaledVector(coeff[8], 0.546274*(x*x-y*y))
	return &target
}

// GetIrradianceAt get the irradiance (radiance convolved with cosine lobe)
// in the direction of the normal.
// target is a Vector3 (received by value; use the returned pointer).
// https://graphics.stanford.edu/papers/envmap/envmap.pdf
func (s SphericalHarmonics3) GetIrradianceAt(normal, target Vector3) *Vector3 {
	// normal is assumed to be unit length
	x, y, z := normal.X, normal.Y, normal.Z
	var coeff = s.Coefficients
	// band 0
	target.Copy(coeff[0]).MultiplyScalar(0.886227) // π * 0.282095
	// band 1
	target.AddScaledVector(coeff[1], 2.0*0.511664*y) // ( 2 * π / 3 ) * 0.488603
	target.AddScaledVector(coeff[2], 2.0*0.511664*z)
	target.AddScaledVector(coeff[3], 2.0*0.511664*x)
	// band 2
	target.AddScaledVector(coeff[4], 2.0*0.429043*x*y) // ( π / 4 ) * 1.092548
	target.AddScaledVector(coeff[5], 2.0*0.429043*y*z)
	target.AddScaledVector(coeff[6], 0.743125*z*z-0.247708) // ( π / 4 ) * 0.315392 * 3
	target.AddScaledVector(coeff[7], 2.0*0.429043*x*z)
	target.AddScaledVector(coeff[8], 0.429043*(x*x-y*y)) // ( π / 4 ) * 0.546274
	return &target
}
// Add accumulates sh's coefficients into s element-wise and returns the
// result (value receiver: the returned pointer refers to a modified copy).
func (s SphericalHarmonics3) Add(sh SphericalHarmonics3) *SphericalHarmonics3 {
	for i := range s.Coefficients {
		s.Coefficients[i].Add(sh.Coefficients[i])
	}
	return &s
}
// AddScaledSH adds sh scaled by t into s element-wise and returns the result.
func (s SphericalHarmonics3) AddScaledSH(sh SphericalHarmonics3, t float64) *SphericalHarmonics3 {
	for i := range s.Coefficients {
		s.Coefficients[i].AddScaledVector(sh.Coefficients[i], t)
	}
	return &s
}
// Scale multiplies every coefficient by t and returns the result.
func (s SphericalHarmonics3) Scale(t float64) *SphericalHarmonics3 {
	for i := range s.Coefficients {
		s.Coefficients[i].MultiplyScalar(t)
	}
	return &s
}
// Lerp linearly interpolates each coefficient towards sh's by alpha and
// returns the result.
func (s SphericalHarmonics3) Lerp(sh SphericalHarmonics3, alpha float64) *SphericalHarmonics3 {
	for i := range s.Coefficients {
		s.Coefficients[i].Lerp(sh.Coefficients[i], alpha)
	}
	return &s
}
// Equals reports whether every coefficient of s equals the matching
// coefficient of sh.
func (s SphericalHarmonics3) Equals(sh SphericalHarmonics3) bool {
	for i := range s.Coefficients {
		if !s.Coefficients[i].Equals(sh.Coefficients[i]) {
			return false
		}
	}
	return true
}
// Copy overwrites s's coefficients with sh's and returns the result.
func (s SphericalHarmonics3) Copy(sh SphericalHarmonics3) *SphericalHarmonics3 {
	return s.Set(sh.Coefficients)
}
// Clone returns a fresh SphericalHarmonics3 with the same coefficients as s.
func (s SphericalHarmonics3) Clone() *SphericalHarmonics3 {
	return NewSphericalHarmonics3().Copy(s)
}
// FromArray reads nine vectors (27 floats) from array starting at offset
// into s's coefficients and returns the result. It panics when array is
// too short.
func (s SphericalHarmonics3) FromArray(array []float64, offset int) *SphericalHarmonics3 {
	if len(array) < offset+27 {
		panic("array length should be greater than offset+27")
	}
	// Write through s.Coefficients directly. The previous code assigned
	// "coefficients := s.Coefficients", which copies the [9]Vector3 array,
	// so the parsed values were written into a discarded local copy and
	// never reached the returned value.
	for i := 0; i < 9; i++ {
		s.Coefficients[i].FromArray(array, offset+(i*3))
	}
	return &s
}
// ToArray writes s's nine coefficients (27 floats) into array starting at
// offset and returns array. It panics when array is too short.
func (s SphericalHarmonics3) ToArray(array []float64, offset int) []float64 {
	if len(array) < offset+27 {
		panic("array length should be greater than offset+27")
	}
	// Index the receiver's array directly instead of taking a full local
	// copy of all nine coefficients (arrays are value types in Go).
	for i := 0; i < 9; i++ {
		s.Coefficients[i].ToArray(array, offset+(i*3))
	}
	return array
}
// getBasisAt evaluate the basis functions
// shBasis is an Array[ 9 ]
// NOTE(review): shBasis is a [9]float64 passed BY VALUE, so every write
// below mutates a local copy that is discarded at return — callers cannot
// observe any result. This likely needs a *[9]float64 or []float64
// parameter; confirm against the call sites before relying on it.
func getBasisAt(normal Vector3, shBasis [9]float64) {
	// normal is assumed to be unit length
	x, y, z := normal.X, normal.Y, normal.Z
	// band 0
	shBasis[0] = 0.282095
	// band 1
	shBasis[1] = 0.488603 * y
	shBasis[2] = 0.488603 * z
	shBasis[3] = 0.488603 * x
	// band 2
	shBasis[4] = 1.092548 * x * y
	shBasis[5] = 1.092548 * y * z
	shBasis[6] = 0.315392 * (3*z*z - 1)
	shBasis[7] = 1.092548 * x * z
	shBasis[8] = 0.546274 * (x*x - y*y)
}
package inject
import (
"fmt"
"reflect"
"sort"
)
// Graph describes a dependency graph that resolves nodes using well defined relationships.
// These relationships are defined with node pointers and Providers.
type Graph interface {
	Finalizable
	// Add registers a prebuilt Definition in the graph.
	Add(Definition)
	// Define registers ptr as being resolved by provider, returning the
	// resulting Definition.
	Define(ptr interface{}, provider Provider) Definition
	// Resolve resolves a single pointer into its value.
	Resolve(ptr interface{}) reflect.Value
	// ResolveByType resolves every defined pointer whose element type is
	// exactly ptrType.
	ResolveByType(ptrType reflect.Type) []reflect.Value
	// ResolveByAssignableType resolves every defined pointer whose element
	// type is assignable to ptrType.
	ResolveByAssignableType(ptrType reflect.Type) []reflect.Value
	// ResolveAll resolves every definition in the graph.
	ResolveAll() []reflect.Value
	fmt.Stringer
}
// graph is the default Graph implementation; definitions are keyed by the
// pointer they resolve.
type graph struct {
	definitions map[interface{}]Definition
}
// NewGraph constructs a new Graph, initializing the definition map and
// registering any supplied definitions.
func NewGraph(defs ...Definition) Graph {
	g := &graph{definitions: make(map[interface{}]Definition, len(defs))}
	for _, def := range defs {
		g.Add(def)
	}
	return g
}
// Add registers def, keyed by its target pointer; a later definition for
// the same pointer replaces the earlier one.
func (g *graph) Add(def Definition) {
	g.definitions[def.Ptr()] = def
}
// Define a pointer as being resolved by a provider, returning the new
// Definition.
func (g *graph) Define(ptr interface{}, provider Provider) Definition {
	definition := NewDefinition(ptr, provider)
	g.Add(definition)
	return definition
}
// Resolve a pointer into a value by recursively resolving its dependencies
// and/or returning the cached result. Panics when ptr is not a pointer.
// A pointer with no registered definition resolves to its current value.
func (g *graph) Resolve(ptr interface{}) reflect.Value {
	ptrType := reflect.TypeOf(ptr)
	if ptrType.Kind() != reflect.Ptr {
		panic(fmt.Sprintf("ptr (%v) is not a pointer", ptrType))
	}
	if def, found := g.definitions[ptr]; found {
		return def.Resolve(g)
	}
	// No known definition - fall back to the pointer's current value.
	return reflect.ValueOf(ptr).Elem()
}
// ResolveByType resolves every defined pointer whose element type is
// exactly ptrType, returning the resolved values.
func (g *graph) ResolveByType(ptrType reflect.Type) []reflect.Value {
	var values []reflect.Value
	for ptr, def := range g.definitions {
		if reflect.TypeOf(ptr).Elem() != ptrType {
			continue
		}
		values = append(values, def.Resolve(g))
	}
	return values
}
// ResolveByAssignableType resolves every defined pointer whose element type
// is assignable to ptrType, returning the resolved values.
func (g *graph) ResolveByAssignableType(ptrType reflect.Type) []reflect.Value {
	var values []reflect.Value
	for ptr, def := range g.definitions {
		if !reflect.TypeOf(ptr).Elem().AssignableTo(ptrType) {
			continue
		}
		values = append(values, def.Resolve(g))
	}
	return values
}
// ResolveAll known pointers into values, caching and returning the results.
func (g *graph) ResolveAll() []reflect.Value {
	var resolved []reflect.Value
	for _, definition := range g.definitions {
		resolved = append(resolved, definition.Resolve(g))
	}
	return resolved
}
// Finalize obscures (finalizes) all the resolved definitions.
func (g *graph) Finalize() {
	for _, definition := range g.definitions {
		definition.Obscure(g)
	}
}
// String returns a multiline string representation of the dependency graph.
func (g graph) String() string {
	body := indent(fmt.Sprintf("definitions: %s", g.fmtDefinitions()), 1)
	return fmt.Sprintf("&graph{\n%s\n}", body)
}
func (g graph) fmtDefinitions() string {
a := make([]string, 0, len(g.definitions))
for _, def := range g.definitions {
a = append(a, def.String())
}
sort.Strings(a)
return arrayString(a)
} | vendor/github.com/karlkfi/inject/graph.go | 0.628863 | 0.422922 | graph.go | starcoder |
package graph
import (
"github.com/DmitryBogomolov/algorithms/graph/internal/utils"
)
// In a DFS tree edge "u-v" is bridge if "v" subtree has no back edges to ancestors of "u".
func findCutEdgesCore(
	gr Graph,
	// original vertex distances (DFS discovery depth; -1 means unvisited)
	distances []int,
	// updated vertex distances ("low" values: the shallowest depth reachable
	// from a vertex's subtree using at most one back edge)
	updatedDistances []int,
	cutEdges *[]Edge,
	// distance from DFS root to current vertex
	distance int,
	parentVertexID int, vertexID int,
) {
	distances[vertexID] = distance
	updatedDistances[vertexID] = distances[vertexID]
	for _, adjacentVertexID := range gr.AdjacentVertices(vertexID) {
		if distances[adjacentVertexID] == -1 {
			findCutEdgesCore(gr, distances, updatedDistances, cutEdges, distance+1, vertexID, adjacentVertexID)
			// If child vertex distance is less than current vertex distance
			// then there is back edge from child vertex to ancestors of current vertex.
			updatedDistances[vertexID] = utils.Min(updatedDistances[vertexID], updatedDistances[adjacentVertexID])
			// If child vertex had a back edge, its updated distance would be
			// less than its original distance; equality therefore means no
			// back edge escapes the child's subtree, so the edge is a bridge.
			if updatedDistances[adjacentVertexID] == distances[adjacentVertexID] {
				*cutEdges = append(*cutEdges, NewEdge(vertexID, adjacentVertexID))
			}
		} else if adjacentVertexID != parentVertexID {
			// Back edge: current vertex can be reached faster through it.
			updatedDistances[vertexID] = utils.Min(updatedDistances[vertexID], distances[adjacentVertexID])
		}
	}
}
// FindCutEdges finds cut-edges in a graph.
// Cut-edge is an edge whose deletion increases number of connected components.
// An edge is a bridge iif it is not contained in any cycle.
// https://algs4.cs.princeton.edu/41graph/Bridge.java.html
func FindCutEdges(gr Graph) []Edge {
distances := make([]int, gr.NumVertices())
updatedDistances := make([]int, gr.NumVertices())
utils.ResetList(distances)
utils.ResetList(updatedDistances)
var cutEdges []Edge
for vertexID := 0; vertexID < gr.NumVertices(); vertexID++ {
if distances[vertexID] == -1 {
findCutEdgesCore(gr, distances, updatedDistances, &cutEdges, 0, vertexID, vertexID)
}
}
return cutEdges
} | graph/graph/cut_edges.go | 0.680348 | 0.434881 | cut_edges.go | starcoder |
package ch5
// maxCompare orders a max-heap: an element wins when it is greater.
var maxCompare = func(a, b int) bool {
	return a > b
}

// minCompare orders a min-heap: an element wins when it is smaller.
var minCompare = func(a, b int) bool {
	return a < b
}
// heap is an array-backed binary heap whose ordering is decided by the
// compare function (e.g. maxCompare or minCompare).
type heap struct {
	arr     []int
	compare func(a1, a2 int) bool
}
// NewHeap returns an empty heap ordered by com.
func NewHeap(com func(a1, a2 int) bool) *heap {
	h := &heap{}
	h.compare = com
	return h
}
// Clear drops all elements, releasing the backing array.
func (h *heap) Clear() {
	h.arr = nil
}

// parentIndex returns the array index of index's parent node.
func (h *heap) parentIndex(index int) int {
	return (index - 1) / 2
}

// leftIndex returns the array index of index's left child.
func (h *heap) leftIndex(index int) int {
	return 2*index + 1
}

// rightIndex returns the array index of index's right child.
func (h *heap) rightIndex(index int) int {
	return 2*index + 2
}

// append places item at the end of the array and returns its index.
func (h *heap) append(item int) int {
	h.arr = append(h.arr, item)
	return len(h.arr) - 1
}

// deleteTail removes the last element; it reports false on an empty heap.
func (h *heap) deleteTail() bool {
	if h.empty() {
		return false
	}
	h.arr = h.arr[:len(h.arr)-1]
	return true
}

// empty reports whether the heap holds no elements.
func (h *heap) empty() bool {
	return len(h.arr) == 0
}
// Add inserts each of items into the heap, one at a time.
func (h *heap) Add(items ...int) {
	for _, item := range items {
		h.add(item)
	}
}
// add inserts item by appending it and sifting the resulting "hole" up:
// ancestors that lose the comparison against item are shifted down, and
// item itself is written exactly once into its final slot.
func (h *heap) add(item int) {
	index := h.append(item)
	for index > 0 {
		parent := h.parentIndex(index)
		if h.compare(item, h.arr[parent]) {
			// Parent loses to item: move it down into the hole.
			h.arr[index] = h.arr[parent]
			index = parent
		} else {
			break
		}
	}
	h.arr[index] = item
}
// Delete removes and returns the root (top) element, reporting false when
// the heap is empty. The last element replaces the root and is sifted down.
func (h *heap) Delete() (int, bool) {
	if h.empty() {
		return 0, false
	}
	node := h.arr[0]
	length := len(h.arr)
	h.arr[0] = h.arr[length-1]
	h.deleteTail()
	h.down(0)
	return node, true
}

// problem_3
// Change overwrites the element at index with val and restores the heap
// property; at most one of the two sift calls actually moves the element.
func (h *heap) Change(index int, val int) {
	h.arr[index] = val
	h.up(index)
	h.down(index)
}

// up sifts the element at index towards the root, swapping with its parent
// while it wins the comparison. (Value receiver is fine: arr is a slice,
// so the swaps act on the shared backing array.)
func (h heap) up(index int) {
	for index > 0 {
		parent := h.parentIndex(index)
		if h.compare(h.arr[index], h.arr[parent]) {
			h.arr[index], h.arr[parent] = h.arr[parent], h.arr[index]
			index = parent
		} else {
			break
		}
	}
}

// down sifts the element at index towards the leaves, swapping with the
// winning child while that child beats it.
func (h heap) down(index int) {
	length := len(h.arr)
	var left, right int
	for {
		left = h.leftIndex(index)
		if left >= length {
			// No children: the heap property holds below index.
			break
		}
		right = left + 1
		if right >= length {
			// Only a left child exists; one final comparison decides.
			if h.compare(h.arr[left], h.arr[index]) {
				h.arr[left], h.arr[index] = h.arr[index], h.arr[left]
				index = left
			}
			break
		}
		// Both children exist: compare against the winning child.
		if h.compare(h.arr[left], h.arr[right]) {
			if h.compare(h.arr[left], h.arr[index]) {
				h.arr[left], h.arr[index] = h.arr[index], h.arr[left]
				index = left
			} else {
				break
			}
		} else {
			if h.compare(h.arr[right], h.arr[index]) {
				h.arr[right], h.arr[index] = h.arr[index], h.arr[right]
				index = right
			} else {
				break
			}
		}
	}
}

// problem_4
// DeleteWithIndex removes and returns the element at index, reporting false
// when index is out of range. The tail element takes its place and is then
// sifted up or down as required.
func (h *heap) DeleteWithIndex(index int) (int, bool) {
	length := len(h.arr)
	if length <= index {
		return 0, false
	}
	node := h.arr[index]
	h.arr[index] = h.arr[length-1]
	h.deleteTail()
	h.up(index)
	h.down(index)
	return node, true
}

// problem_5
// Find returns the index of the first element equal to val (array order),
// reporting false when val is absent.
func (h *heap) Find(val int) (int, bool) {
	for k, v := range h.arr {
		if v == val {
			return k, true
		}
	}
	return 0, false
}
// -------------------------------------problem 6--------------------------------------------------------------
// headNode is a node of a pointer-based binary heap. Each node carries four
// links — left/right children and parent — plus next, which points to the
// following node in level order (breadth-first order).
type headNode struct {
	val                 int
	left, right, parent *headNode
	next                *headNode
}

// linkHeap is a binary heap built from linked headNodes instead of an
// array; length counts the stored elements, compare defines the ordering.
type linkHeap struct {
	length  int
	root    *headNode
	compare func(a1, a2 int) bool
}
// NewLinkHeap returns an empty linked heap ordered by compare.
func NewLinkHeap(compare func(a1, a2 int) bool) *linkHeap {
	h := &linkHeap{}
	h.compare = compare
	return h
}
// up sifts the value at node towards the root, swapping values (not nodes)
// with the parent while node's value wins the comparison.
func (l *linkHeap) up(node *headNode) {
	for node.parent != nil {
		if l.compare(node.val, node.parent.val) {
			node.val, node.parent.val = node.parent.val, node.val
			node = node.parent
		} else {
			break
		}
	}
}

// down sifts the value at node towards the leaves, swapping values with the
// winning child while that child beats it.
func (l *linkHeap) down(node *headNode) {
	for node.left != nil {
		if node.right == nil {
			// Only a left child exists; one final comparison decides.
			if l.compare(node.left.val, node.val) {
				node.val, node.left.val = node.left.val, node.val
				node = node.left
			}
			break
		}
		// Both children exist: compare against the winning child.
		if l.compare(node.left.val, node.right.val) {
			if l.compare(node.left.val, node.val) {
				node.val, node.left.val = node.left.val, node.val
				node = node.left
			} else {
				break
			}
		} else {
			if l.compare(node.right.val, node.val) {
				node.val, node.right.val = node.right.val, node.val
				node = node.right
			} else {
				break
			}
		}
	}
}
// insertTail creates a node for val and attaches it at the next free slot
// of the bottom level, keeping the tree complete: the parent of the new
// node (level-order index length) is found by walking (length-1)/2 next
// links from the root. The node is also appended to the level-order next
// chain. Returns the new node.
func (l *linkHeap) insertTail(val int) *headNode {
	node := &headNode{
		val: val,
	}
	if l.length == 0 {
		l.root = node
		l.length++
		return node
	}
	// In level order, the parent of the n-th node (0-based) is node (n-1)/2.
	tailParent := l.root
	for i := 0; i < (l.length-1)/2; i++ {
		tailParent = tailParent.next
	}
	if tailParent.left == nil {
		tailParent.left = node
	} else {
		tailParent.right = node
	}
	node.parent = tailParent
	// Walk to the end of the level-order chain and append the new node.
	for tailParent.next != nil {
		tailParent = tailParent.next
	}
	tailParent.next = node
	l.length++
	return node
}

// deleteTail detaches and returns the last node in level order (the
// deepest, rightmost leaf), unlinking it both from its parent and from the
// next chain. Returns nil when the heap is empty.
func (l *linkHeap) deleteTail() *headNode {
	if l.length == 0 {
		return nil
	}
	var node *headNode
	if l.length == 1 {
		node = l.root
		l.root = nil
		l.length--
		return node
	}
	// The parent of the last node (index length-1) sits (length-2)/2 next
	// links from the root.
	tailParent := l.root
	for i := 0; i < (l.length-2)/2; i++ {
		tailParent = tailParent.next
	}
	if tailParent.right != nil {
		node = tailParent.right
		tailParent.right = nil
	} else {
		node = tailParent.left
		tailParent.left = nil
	}
	// Unlink node from the level-order chain by finding its predecessor.
	for tailParent.next != node {
		tailParent = tailParent.next
	}
	tailParent.next = nil
	l.length--
	return node
}
// Add inserts each of items into the heap, one at a time.
func (l *linkHeap) Add(items ...int) {
	for _, item := range items {
		l.add(item)
	}
}
// add appends item at the tail position and sifts its value up.
func (l *linkHeap) add(item int) {
	l.up(l.insertTail(item))
}
// Delete removes and returns the root value, reporting false when the heap
// is empty. The tail node's value replaces the root value and is sifted down.
func (l *linkHeap) Delete() (int, bool) {
	if l.length == 0 {
		return 0, false
	}
	node := l.deleteTail()
	if l.length == 0 {
		// The tail was the root itself; its value is the answer.
		return node.val, true
	}
	result := l.root.val
	l.root.val = node.val
	l.down(l.root)
	return result, true
}
func (l *linkHeap) Arr() []int {
var arr []int
temp := l.root
for temp != nil {
arr = append(arr, temp.val)
temp = temp.next
}
return arr
} | adt/ch5/part6.go | 0.587825 | 0.400515 | part6.go | starcoder |
package iso20022
// Chain of parties involved in the settlement of a transaction, including receipts and deliveries, book transfers, treasury deals, or other activities, resulting in the movement of a security or amount of money from one account to another.
type SettlementParties5 struct {
// First party in the settlement chain. In a plain vanilla settlement, it is the Central Securities Depository where the counterparty requests to receive the financial instrument or from where the counterparty delivers the financial instruments.
Depository *PartyIdentification2 `xml:"Dpstry,omitempty"`
// Party that, in a settlement chain interacts with the depository.
Party1 *PartyIdentificationAndAccount1 `xml:"Pty1,omitempty"`
// Party that, in a settlement chain interacts with the party 1.
Party2 *PartyIdentificationAndAccount1 `xml:"Pty2,omitempty"`
// Party that, in a settlement chain interacts with the party 2.
Party3 *PartyIdentificationAndAccount1 `xml:"Pty3,omitempty"`
// Party that, in a settlement chain interacts with the party 3.
Party4 *PartyIdentificationAndAccount1 `xml:"Pty4,omitempty"`
// Party that, in a settlement chain interacts with the party 4.
Party5 *PartyIdentificationAndAccount1 `xml:"Pty5,omitempty"`
}
func (s *SettlementParties5) AddDepository() *PartyIdentification2 {
s.Depository = new(PartyIdentification2)
return s.Depository
}
func (s *SettlementParties5) AddParty1() *PartyIdentificationAndAccount1 {
s.Party1 = new(PartyIdentificationAndAccount1)
return s.Party1
}
func (s *SettlementParties5) AddParty2() *PartyIdentificationAndAccount1 {
s.Party2 = new(PartyIdentificationAndAccount1)
return s.Party2
}
func (s *SettlementParties5) AddParty3() *PartyIdentificationAndAccount1 {
s.Party3 = new(PartyIdentificationAndAccount1)
return s.Party3
}
func (s *SettlementParties5) AddParty4() *PartyIdentificationAndAccount1 {
s.Party4 = new(PartyIdentificationAndAccount1)
return s.Party4
}
func (s *SettlementParties5) AddParty5() *PartyIdentificationAndAccount1 {
s.Party5 = new(PartyIdentificationAndAccount1)
return s.Party5
} | SettlementParties5.go | 0.682468 | 0.512693 | SettlementParties5.go | starcoder |
package numbers
import (
"log"
"math"
"math/rand"
//DEBUG: "fmt"
)
//SampleInverseNormal draws one value from a normal distribution with
//mean mu and standard deviation sigma.
func SampleInverseNormal(mu float64, sigma float64) float64 {
	return mu + sigma*rand.NormFloat64()
}
//InitializeFastRejectionSampler precomputes the envelope for the fast
//rejection sampler: it divides [xLeft, xRight] into `bins` equal-width
//bins and records, for each bin, the larger of f evaluated at the bin's
//two edges, returning the per-bin heights and their sum.
func InitializeFastRejectionSampler(xLeft float64, xRight float64, f func(float64) float64, bins int) ([]float64, float64) {
	if xLeft >= xRight {
		log.Fatalf("Error in FastRejectionSample: xRight must be greater than xLeft.")
	}
	binHeights := make([]float64, bins)
	stepSize := (xRight - xLeft) / float64(bins)
	// Evaluate f once per bin edge by sliding the right edge along and
	// reusing the previous right-edge value as the next left-edge value.
	currRight := xLeft + stepSize
	fLeft := f(xLeft)
	fRight := f(currRight)
	var sumHeights float64
	for i := 0; i < bins; i++ {
		if i > 0 {
			fLeft = fRight
			currRight += stepSize
			fRight = f(currRight)
		}
		binHeights[i] = MaxFloat64(fLeft, fRight)
		sumHeights += binHeights[i]
	}
	return binHeights, sumHeights
}
//FastRejectionSampler returns simulated values from an a func(float64) float64 between a left and right value using an optimized rejection sampler
//that divides the function support into discrete bins with optimized sampling heights.
//maxSampleDepth triggers the log.Fatalf in the RejectionSample func, and samples is the number of values to be returned.
func FastRejectionSampler(xLeft float64, xRight float64, f func(float64) float64, bins int, maxSampleDepth int, samples int) []float64 {
	answer := make([]float64, samples)
	stepSize := (xRight - xLeft) / float64(bins)
	// Build the per-bin envelope once; it is reused for every sample.
	binHeights, sumHeights := InitializeFastRejectionSampler(xLeft, xRight, f, bins)
	for j := 0; j < samples; j++ {
		answer[j] = RejectionSampleChooseBin(xLeft, xRight, stepSize, f, maxSampleDepth, sumHeights, binHeights)
	}
	return answer
}
//RejectionSampleChooseBin is a helper function of FastRejectionSampler.
//It draws one sample: pick a bin weighted by its envelope area, propose x
//uniformly within the bin, and accept x with probability f(x)/binHeight.
//Fatals if no proposal is accepted within maxIteration attempts.
func RejectionSampleChooseBin(xLeft float64, xRight float64, stepSize float64, f func(float64) float64, maxIteration int, sumHeights float64, binHeights []float64) float64 {
	var x, y float64
	var currBin int
	var currLeft, currRight float64
	for i := 0; i < maxIteration; i++ {
		currBin = chooseBin(sumHeights, binHeights)
		// Bounds of the chosen bin within [xLeft, xRight].
		currLeft = xLeft + float64(currBin)*stepSize
		currRight = currLeft + stepSize
		x = RandFloat64InRange(currLeft, currRight)
		y = f(x)
		// Accept x in proportion to the true density under the bin envelope.
		if RandFloat64InRange(0.0, binHeights[currBin]) < y {
			return x
		}
	}
	log.Fatalf("Exceeded max iteration in RejectionSampleChooseBin.")
	return -1.0
}
//chooseBin picks which bin should be used for the FastRejectionSampler,
//where the choice of bin is weighted by its relative contribution
//(binHeights[i] / sumHeights) to the overall integral of f.
//Fatals if the cumulative weights never exceed the drawn uniform value.
func chooseBin(sumHeights float64, binHeights []float64) int {
	// BUG FIX: the local variable was previously named `rand`, shadowing
	// the math/rand package for the rest of the function.
	r := rand.Float64()
	cumulative := 0.0
	for i := 0; i < len(binHeights); i++ {
		cumulative += binHeights[i] / sumHeights
		if cumulative > r {
			return i
		}
	}
	log.Fatalf("Error in chooseBin: failed to choose a bin.")
	return -1
}
//RejectionSample returns a simulated value from an arbitrary density f
//between xLeft and xRight using a simple rejection sampling method:
//propose x uniformly on [xLeft, xRight) and accept with probability
//f(x)/yMax. yMax must bound f from above on the interval.
//Fatals if no proposal is accepted within maxIteration attempts.
func RejectionSample(xLeft float64, xRight float64, yMax float64, f func(float64) float64, maxIteration int) float64 {
	var x, y float64
	for i := 0; i < maxIteration; i++ {
		x = RandFloat64InRange(xLeft, xRight) //rand float64 in range xleft to xright
		y = f(x)
		if RandFloat64InRange(0.0, yMax) < y {
			// BUG FIX: return the accepted sample x, not the density value
			// y. A rejection sampler's output is the proposal point —
			// compare RejectionSampleChooseBin, which returns x.
			return x
		}
	}
	log.Fatalf("Exceeded max iterations.")
	return -1.0
}
//BoundedRejectionSample returns a rejection sample of a function f using a bounding function boundingSampler between a specified left and right bound.
//boundingSampler must return an (x, ySampler) pair with ySampler >= f(x);
//the draw is accepted with probability f(x)/ySampler.
//Returns the accepted (x, f(x)); fatals on an invalid bound or if no
//proposal is accepted within maxIteration attempts.
func BoundedRejectionSample(boundingSampler func() (float64, float64), f func(float64) float64, xLeft float64, xRight float64, maxIteration int) (float64, float64) {
	var xSampler, ySampler, y float64
	for i := 0; i < maxIteration; i++ {
		xSampler, ySampler = boundingSampler()
		y = f(xSampler)
		// Sanity check: the envelope must dominate f everywhere it samples.
		if y > ySampler {
			log.Fatalf("BoundedRejectionSample: function was not a valid bounding function, ySampler is greater than y. xSampler: %e. ySampler: %e. y: %e.", xSampler, ySampler, y)
		}
		if RandFloat64InRange(0.0, ySampler) < y {
			return xSampler, y
		}
	}
	log.Fatalf("BoundedRejectionSample: Exceeded max iteration.")
	return -1.0, -1.0
}
//RandExp Returns a random variable as a float64 from a standard exponential distribution. f(x)=e**-x.
//The second return value is the density e**-x evaluated at the sample (via ExpDist).
//Algorithm from Ahrens and Dieter (1972). Computer methods for sampling from the exponential and normal distributions. Comm. ACM, 15, 873-882.
func RandExp() (float64, float64) {
	//q series where q[k-1] = sum(log(2)^k / k!) for k=1,2,...n
	q := [16]float64{0.6931471805599453, 0.9333736875190459, 0.9888777961838675, 0.9984959252914960, 0.9998292811061389, 0.9999833164100727, 0.9999985691438767, 0.9999998906925558, 0.9999999924734159, 0.9999999995283275, 0.9999999999728814, 0.9999999999985598, 0.9999999999999289, 0.9999999999999968, 0.9999999999999999, 1.0000000000000000}
	var a float64 = 0.0
	var r float64 = rand.Float64()
	for r <= 0.0 || r >= 1.0 { //prevents the case where u is exactly 0 or 1, which breaks the code.
		r = rand.Float64()
	}
	// Repeatedly double r; each doubling that stays <= 1 adds ln(2) (q[0])
	// to the offset a, decomposing the exponential deviate into
	// a multiple of ln(2) plus a remainder.
	for 1 > 0 {
		r += r
		if r > 1.0 {
			break
		}
		a += q[0]
	}
	r -= 1
	// Fast path: the remainder falls below q[0] and is used directly.
	if r <= q[0] {
		return a + r, ExpDist(a + r)
	}
	// Slow path: draw uniforms until r <= q[i], keeping the minimum draw.
	var i int = 0
	ustart := rand.Float64()
	umin := ustart
	for r > q[i] {
		ustart = rand.Float64()
		if umin > ustart {
			umin = ustart
		}
		i++
	}
	return a + umin*q[0], ExpDist(a + umin*q[0])
}
//ScaledBetaSampler builds a sampler closure around RandBeta(a, b) whose
//reported density is scaled by the input variable 'multiplier'.
func ScaledBetaSampler(a float64, b float64, multiplier float64) func() (float64, float64) {
	return func() (float64, float64) {
		x := RandBeta(a, b)
		y := multiplier * BetaDist(x, a, b)
		return x, y
	}
}
//BetaSampler builds a sampler closure that draws from a beta
//distribution with fixed parameters a and b, returning the sample and
//its density.
func BetaSampler(a float64, b float64) func() (float64, float64) {
	return func() (float64, float64) {
		x := RandBeta(a, b)
		return x, BetaDist(x, a, b)
	}
}
//RandGamma returns a random x,y point drawn from a gamma distribution with parameters alpha and beta. y corresponds to the function density at that x value.
//a > 1 uses the method from Marsaglia and Tsang 2000. Written for k, theta parameters, so the first step converts b to 1 / b to evaluate gamma in terms of alpha and beta parameters.
//a < 1 uses the method from Ahrens and Dieter (1974). Computer methods for sampling from gamma, beta, poisson and binomial distributions. Computing, 12, 223-246.
func RandGamma(a float64, b float64) (float64, float64) {
	if a < 0 || b < 0 {
		log.Fatalf("Error: The gamma distribution is defined with alpha and beta parameters greater than zero.")
	}
	b = 1 / b //convert parameter system
	var x, v, u, rExp float64
	if a < 1 {
		/* Marsaglia and Tsang code, does not appear to work.
		u = rand.Float64()
		return RandGamma(1.0+a, b) * math.Pow(u, 1.0/a)
		*/
		// Ahrens-Dieter acceptance-rejection for shape < 1.
		e1 := 0.36787944117144232159 //exp(-1), left as a constant to speed up computation
		e := 1.0 + e1*a
		for 1 > 0 { //repeat loop until breaks
			p := e * rand.Float64()
			rExp, _ = RandExp()
			if p >= 1.0 {
				// Tail branch: propose from the exponential part.
				x = -1 * math.Log((e-p)/a)
				if rExp >= (1.0-a)*math.Log(x) {
					break
				}
			} else {
				// Body branch: propose from the power part.
				x = math.Exp(math.Log(p) / a)
				if rExp >= x {
					break
				}
			}
		}
		return b * x, GammaDist(a, b, b*x)
	}
	// Marsaglia-Tsang squeeze method for shape >= 1.
	var d float64 = a - (1.0 / 3.0)
	var c float64 = (1.0 / 3.0) / math.Sqrt(d)
	for 1 > 0 {
		// Draw a standard normal restricted so that v = (1+cx)^3 > 0.
		x = rand.NormFloat64()
		v = 1.0 + c*x
		for v <= 0 { //do while loop
			x = rand.NormFloat64()
			v = 1.0 + c*x
		}
		v = v * v * v
		u = rand.Float64()
		// Fast squeeze test, then the exact log acceptance test.
		if u < 1-0.0331*x*x*x*x {
			break
		}
		if math.Log(u) < 0.5*x*x+d*(1-v+math.Log(v)) {
			break
		}
	}
	return b * d * v, GammaDist(a, b, b*d*v)
}
//GammaSampler returns an instantiation of RandGamma for specified a and b parameters.
func GammaSampler(a float64, b float64) func() (float64, float64) {
return func() (float64, float64) {
return RandGamma(a, b)
}
} | numbers/monteCarlo.go | 0.772273 | 0.588032 | monteCarlo.go | starcoder |
package main
import (
	"bufio"
	"flag"
	"fmt"
	"log"
	"os"
	"strconv"
	"strings"
)
// main selects which puzzle part to run via the -method flag.
func main() {
	methodP := flag.String("method", "p1", "The method/part that should be run, valid are p1,p2 and test")
	flag.Parse()
	// Go switch cases do not fall through, so the redundant `break`
	// statements were removed.
	switch *methodP {
	case "p1":
		partOne()
	case "p2":
		partTwo()
	case "test":
	}
}
func partOne() {
input := readInput()
grid1 := make(map[point]bool)
currentPoint := point{x: 0, y: 0}
// Create a map of points that we have moved accross
for _, move := range strings.Split(input[0], ",") {
// first character is move direction
// second is distance
moveDir := string(move[0])
moveDist, _ := strconv.Atoi(move[1:])
// Move
moveGrid(moveDir, moveDist, grid1, ¤tPoint)
}
grid2 := make(map[point]bool)
currentPoint = point{x: 0, y: 0}
// Create a map of points that we have moved accross
for _, move := range strings.Split(input[1], ",") {
// first character is move direction
// second is distance
moveDir := string(move[0])
moveDist, _ := strconv.Atoi(move[1:])
// Move
moveGrid(moveDir, moveDist, grid2, ¤tPoint)
}
// check both lists and find any same points
matchGrid := make(map[point]bool)
for i, _ := range grid1 {
if grid2[i] == true {
matchGrid[i] = true
}
}
// find closest point to origin
closest := point{x: 1000000, y: 1000000}
origin := point{x: 0, y: 0}
for p, _ := range matchGrid {
if p.distanceBetween(origin) <= closest.distanceBetween(origin) {
closest = p
}
}
fmt.Println("The closest point is", closest)
}
// moveGrid advances currentPoint moveDist steps in direction moveDir
// ("R", "U", "L" or "D"), marking every visited point in grid.
// Redundant `break` statements were removed from the switch.
func moveGrid(moveDir string, moveDist int, grid map[point]bool, currentPoint *point) {
	xMove, yMove := 0, 0
	switch moveDir {
	case "R":
		xMove = 1
	case "U":
		yMove = 1
	case "L":
		xMove = -1
	case "D":
		yMove = -1
	}
	for i := 0; i < moveDist; i++ {
		// Step first, then mark: the origin itself is never recorded.
		currentPoint.x += xMove
		currentPoint.y += yMove
		grid[*currentPoint] = true
	}
}
func partTwo() {
input := readInput()
grid1 := make(map[point]int)
currentPoint := point{x: 0, y: 0}
// Create a map of points that we have moved accross
totalSteps := 0
for _, move := range strings.Split(input[0], ",") {
// first character is move direction
// second is distance
moveDir := string(move[0])
moveDist, _ := strconv.Atoi(move[1:])
// Move
moveGridCountingSteps(moveDir, moveDist, grid1, ¤tPoint, &totalSteps)
}
grid2 := make(map[point]int)
currentPoint = point{x: 0, y: 0}
totalSteps = 0
// Create a map of points that we have moved accross
for _, move := range strings.Split(input[1], ",") {
// first character is move direction
// second is distance
moveDir := string(move[0])
moveDist, _ := strconv.Atoi(move[1:])
// Move
moveGridCountingSteps(moveDir, moveDist, grid2, ¤tPoint, &totalSteps)
}
// check both lists and find any same points
matchGrid := make(map[point]int)
for i, _ := range grid1 {
if grid2[i] > 0 {
matchGrid[i] = grid1[i] + grid2[i]
}
}
// find closest point to origin
fmt.Println(matchGrid)
closest := 100000
var closestPoint point
var steps int
for p, v := range matchGrid {
if v <= closest {
closest = v
closestPoint = p
steps = v
}
}
fmt.Println("The closest point is", closestPoint, " with steps", steps)
}
// moveGridCountingSteps advances currentPoint moveDist steps in direction
// moveDir, incrementing *totalSteps per step and recording, for each
// point, the step count of the FIRST visit only.
// Redundant `break` statements were removed from the switch.
func moveGridCountingSteps(moveDir string, moveDist int, grid map[point]int, currentPoint *point, totalSteps *int) {
	xMove, yMove := 0, 0
	switch moveDir {
	case "R":
		xMove = 1
	case "U":
		yMove = 1
	case "L":
		xMove = -1
	case "D":
		yMove = -1
	}
	for i := 0; i < moveDist; i++ {
		*totalSteps++
		currentPoint.x += xMove
		currentPoint.y += yMove
		// Keep only the first (shortest) step count for a revisited point.
		if grid[*currentPoint] == 0 {
			grid[*currentPoint] = *totalSteps
		}
	}
}
// point is a 2D grid coordinate.
type point struct {
	x int
	y int
}

// toString renders the point as "x,y".
func (p point) toString() string {
	return strconv.Itoa(p.x) + "," + strconv.Itoa(p.y)
}

// distanceBetween returns the Manhattan distance between two points.
// BUG FIX: the previous implementation summed raw signed coordinate
// differences, so components could cancel or go negative; Manhattan
// distance requires absolute values.
func (p point) distanceBetween(p2 point) int {
	return absInt(p.x-p2.x) + absInt(p.y-p2.y)
}

// absInt returns the absolute value of n.
func absInt(n int) int {
	if n < 0 {
		return -n
	}
	return n
}
// Read data from input.txt
// Return the string, so that we can deal with it however
func readInput() []string {
var input []string
f, _ := os.Open("input.txt")
scanner := bufio.NewScanner(f)
for scanner.Scan() {
input = append(input, scanner.Text())
}
return input
} | 2019/3-CrossedWires/main.go | 0.600071 | 0.446434 | main.go | starcoder |
package leetcode
import (
"github.com/halfrost/LeetCode-Go/structures"
)
// Interval is an alias for the shared structures.Interval type: a closed
// [Start, End] integer range.
type Interval = structures.Interval

// SummaryRanges summarises a stream of integers as a set of disjoint
// closed intervals (LeetCode 352 "Data Stream as Disjoint Intervals").
type SummaryRanges struct {
	// intervals is kept sorted by Start and pairwise disjoint, with at
	// least a gap of one between consecutive intervals.
	intervals []Interval
}
// Constructor352 returns an empty SummaryRanges ready to accept values.
func Constructor352() SummaryRanges {
	var sr SummaryRanges
	return sr
}
// AddNum inserts val into the disjoint interval set, extending or merging
// neighbouring intervals whenever a gap closes.
func (sr *SummaryRanges) AddNum(val int) {
	// First value ever: start with a single-point interval.
	if sr.intervals == nil {
		sr.intervals = []Interval{
			{
				Start: val,
				End:   val,
			},
		}
		return
	}
	// Binary search for an interval containing val; if none contains it,
	// `low` ends up as the index of the first interval starting after val.
	low, high := 0, len(sr.intervals)-1
	for low <= high {
		mid := low + (high-low)>>1
		if sr.intervals[mid].Start <= val && val <= sr.intervals[mid].End {
			// Already covered; nothing to do.
			return
		} else if val < sr.intervals[mid].Start {
			high = mid - 1
		} else {
			low = mid + 1
		}
	}
	// val precedes every interval: extend the first one leftwards if
	// adjacent, otherwise prepend a new single-point interval.
	if low == 0 {
		if sr.intervals[0].Start-1 == val {
			sr.intervals[0].Start--
			return
		}
		ni := Interval{Start: val, End: val}
		sr.intervals = append(sr.intervals, ni)
		copy(sr.intervals[1:], sr.intervals)
		sr.intervals[0] = ni
		return
	}
	// val follows every interval: extend the last one rightwards if
	// adjacent, otherwise append a new single-point interval.
	if low == len(sr.intervals) {
		if sr.intervals[low-1].End+1 == val {
			sr.intervals[low-1].End++
			return
		}
		sr.intervals = append(sr.intervals, Interval{Start: val, End: val})
		return
	}
	// val sits strictly between intervals low-1 and low with a gap on both
	// sides: insert a fresh single-point interval at position low.
	if sr.intervals[low-1].End+1 < val && val < sr.intervals[low].Start-1 {
		sr.intervals = append(sr.intervals, Interval{})
		copy(sr.intervals[low+1:], sr.intervals[low:])
		sr.intervals[low] = Interval{Start: val, End: val}
		return
	}
	// val exactly bridges the two neighbours: merge them into one.
	if sr.intervals[low-1].End == val-1 && val+1 == sr.intervals[low].Start {
		sr.intervals[low-1].End = sr.intervals[low].End
		n := len(sr.intervals)
		copy(sr.intervals[low:], sr.intervals[low+1:])
		sr.intervals = sr.intervals[:n-1]
		return
	}
	// val touches exactly one neighbour: grow that side by one.
	if sr.intervals[low-1].End == val-1 {
		sr.intervals[low-1].End++
	} else {
		sr.intervals[low].Start--
	}
}
// GetIntervals returns the current disjoint intervals as [start, end]
// pairs, in ascending order.
func (sr *SummaryRanges) GetIntervals() [][]int {
	// Pre-size the result to avoid repeated slice growth.
	intervals := make([][]int, 0, len(sr.intervals))
	for _, interval := range sr.intervals {
		intervals = append(intervals, []int{interval.Start, interval.End})
	}
	return intervals
}
/**
* Your SummaryRanges object will be instantiated and called as such:
* obj := Constructor();
* obj.AddNum(val);
* param_2 := obj.GetIntervals();
*/ | leetcode/9990352.Data-Stream-as-Disjoint-Intervals/352. Data Stream as Disjoint Intervals.go | 0.686265 | 0.419767 | 352. Data Stream as Disjoint Intervals.go | starcoder |
package statsgod
import (
"errors"
"math"
"sort"
)
// ValueSlice provides a storage for float64 values.
type ValueSlice []float64

// Get is a getter for internal values.
func (values ValueSlice) Get(i int) float64 { return values[i] }

// Len gets the length of the internal values (sort.Interface).
func (values ValueSlice) Len() int { return len(values) }

// Less reports whether element i sorts before element j (sort.Interface).
func (values ValueSlice) Less(i, j int) bool { return values[i] < values[j] }

// Swap exchanges elements i and j (sort.Interface).
func (values ValueSlice) Swap(i, j int) { values[i], values[j] = values[j], values[i] }
// UniqueCount reports how many distinct values are present.
// Note: it sorts the receiver in place as a side effect.
func (values ValueSlice) UniqueCount() int {
	sort.Sort(values)
	unique := 0
	for i, v := range values {
		// The first element, and any element differing from its
		// predecessor in the sorted order, is a new distinct value.
		if i == 0 || values[i-1] != v {
			unique++
		}
	}
	return unique
}
// Minmax provides the minimum and maximum values for a ValueSlice structure.
// err is non-nil for an empty slice (previously this panicked on Get(0))
// or when a NaN value is encountered; in the NaN case min/max still hold
// the best-effort extremes of the non-NaN values.
func (values ValueSlice) Minmax() (min float64, max float64, err error) {
	// Guard the empty slice before touching element 0.
	if values.Len() == 0 {
		err = errors.New("empty slice")
		return
	}
	length := values.Len()
	min = values.Get(0)
	max = values.Get(0)
	// Start at 0 deliberately so element 0 is also NaN-checked.
	for i := 0; i < length; i++ {
		xi := values.Get(i)
		if math.IsNaN(xi) {
			err = errors.New("NaN value")
		} else if xi < min {
			min = xi
		} else if xi > max {
			max = xi
		}
	}
	return
}
// Median finds the median value from a ValueSlice.
// Returns 0.0 for an empty slice. NOTE(review): this indexes the slice
// directly, so it assumes the values are already sorted — confirm callers
// sort first.
func (values ValueSlice) Median() (median float64) {
	length := values.Len()
	// For odd lengths both boundaries coincide on the middle element;
	// for even lengths they straddle the two middle elements.
	leftBoundary := (length - 1) / 2
	rightBoundary := length / 2
	if length == 0 {
		return 0.0
	}
	if leftBoundary == rightBoundary {
		median = values.Get(leftBoundary)
	} else {
		median = (values.Get(leftBoundary) + values.Get(rightBoundary)) / 2.0
	}
	return
}
// Mean finds the mean value from a ValueSlice.
// Uses an incremental (running) mean update rather than sum/len.
// Returns 0 for an empty slice.
func (values ValueSlice) Mean() (mean float64) {
	length := values.Len()
	for i := 0; i < length; i++ {
		// mean_{i+1} = mean_i + (x_i - mean_i) / (i+1)
		mean += (values.Get(i) - mean) / float64(i+1)
	}
	return
}
// Quantile finds a specified quantile value from a ValueSlice, linearly
// interpolating between the two nearest elements. quantile is expected in
// [0, 1]; returns 0.0 for an empty slice.
// NOTE(review): this indexes the slice directly, so it assumes the values
// are already sorted — confirm callers sort first.
func (values ValueSlice) Quantile(quantile float64) float64 {
	length := values.Len()
	// index is the (fractional) position of the quantile; boundary is its
	// integer part and delta the interpolation weight.
	index := quantile * float64(length-1)
	boundary := int(index)
	delta := index - float64(boundary)
	if length == 0 {
		return 0.0
	} else if boundary == length-1 {
		return values.Get(boundary)
	} else {
		return (1-delta)*values.Get(boundary) + delta*values.Get(boundary+1)
	}
}
// Sum finds the total of all values.
func (values ValueSlice) Sum() float64 {
sum := float64(0)
length := values.Len()
for i := 0; i < length; i++ {
sum += values.Get(i)
}
return sum
} | statsgod/statistics.go | 0.789112 | 0.654122 | statistics.go | starcoder |
package math
// Represents the specific planes/sides of the frustum; SIDE_TOTAL is the
// count of real sides and sizes the `sides` array below.
const (
	NEAR byte = iota
	FAR byte = iota
	TOP byte = iota
	RIGHT byte = iota
	BOTTOM byte = iota
	LEFT byte = iota
	SIDE_TOTAL byte = iota
)

// Represents the possible intersection attributes of a sphere or other solid object.
const (
	OUTSIDE int = iota
	INSIDE int = iota
	INTERSECT int = iota
)

// Frustum, as defined by wikipedia, (plural: frusta or frustums) is the portion of a solid (normally a cone or pyramid) that lies between two parallel planes cutting it.
// In this case it represents a section of a pyramid lying between two parallel planes.
// It can also be visualized as a cube that has been applied a perspective projection.
type Frustum struct {
	// fov/aspect/nearDist/farDist are the perspective parameters used to
	// build perspMat; lookAt is the world-to-view transform.
	fov, aspect, nearDist, farDist float32
	lookAt, perspMat Mat4x4
	// sides holds one clipping plane per NEAR..LEFT constant, rebuilt by
	// init() whenever lookAt or perspMat changes.
	sides [SIDE_TOTAL]Plane
}
// MakeFrustum builds a Frustum from near/far clip distances, field of
// view and aspect ratio, with an identity view transform, and returns a
// pointer to it.
func MakeFrustum(nearDist, farDist, fov, aspect float32) *Frustum {
	frust := &Frustum{
		fov:      fov,
		aspect:   aspect,
		nearDist: nearDist,
		farDist:  farDist,
		perspMat: MakePerspectiveMatrix(nearDist, farDist, fov, aspect),
	}
	frust.lookAt.MakeIdentity()
	frust.init()
	return frust
}
// init reinitializes the planes of the frustum to match changed lookat or projection matrices.
// Each plane's coefficients are formed by adding or subtracting one of
// the first three rows/columns of the combined world-to-clip matrix to or
// from the fourth (elements 12..15) — the standard clip-matrix
// plane-extraction technique — then normalizing.
func (frust *Frustum) init() {
	worldToClip := Mult4m4m(frust.perspMat, frust.lookAt)
	for i := 0; i < 4; i++ {
		frust.sides[LEFT][i] = worldToClip[12+i] + worldToClip[i]
		frust.sides[RIGHT][i] = worldToClip[12+i] - worldToClip[i]
		frust.sides[BOTTOM][i] = worldToClip[12+i] + worldToClip[4+i]
		frust.sides[TOP][i] = worldToClip[12+i] - worldToClip[4+i]
		frust.sides[NEAR][i] = worldToClip[12+i] + worldToClip[8+i]
		frust.sides[FAR][i] = worldToClip[12+i] - worldToClip[8+i]
	}
	// Normalize so plane distances are meaningful for sphere tests.
	for i := range frust.sides {
		frust.sides[i].Normalize()
	}
}
// LookAt will orient the frustum's planes to accomodate a change in the target to face, location of the frustum in world space, or the direction of up in world space.
// This function takes parameters of the target location for the frustum to face in world space, the location of the frustum in world space, and a vector that represents the direction of up in the world space.
func (frust *Frustum) LookAt(target, eye, up Vec3) {
	// Build an orthonormal basis: f points at the target, s is the side
	// (right) axis, and u is recomputed so the three are exactly
	// perpendicular even if `up` was not orthogonal to f.
	u := Normalize3v(up)
	f := Normalize3v(Sub3v3v(target, eye))
	s := Normalize3v(Cross3v3v(f, u))
	u = Cross3v3v(s, f)
	// Standard look-at matrix: rotation rows from the basis, translation
	// from the eye position projected onto each axis.
	frust.lookAt = Mat4x4{s[0], s[1], s[2], -Dot3v3v(s, eye),
		u[0], u[1], u[2], -Dot3v3v(u, eye),
		-f[0], -f[1], -f[2], Dot3v3v(f, eye),
		0, 0, 0, 1}
	// Rebuild the clipping planes from the new view transform.
	frust.init()
}
// ContainsPoint returns true if a point lies within the bounds of the frustum.
// This is used for culling points for rendering.
func (frust *Frustum) ContainsPoint(vec Vec3) bool {
	// NOTE(review): this multiplies lookAt * perspMat, while init() builds
	// the clip matrix as perspMat * lookAt — confirm the intended order.
	worldToClip := Mult4m4m(frust.lookAt, frust.perspMat)
	vec = Mult4m3v(worldToClip, vec)
	lookAt := Mult4m3v(frust.lookAt, Vec3{})
	// Quick distance reject against the far plane distance.
	if Dist3v3v(lookAt, vec) > frust.farDist {
		return false
	}
	// The point must be on the inside of all six planes.
	// (Replaced the `!= true` anti-idiom with a plain negation.)
	for i := range frust.sides {
		if !frust.sides[i].IsInside(vec) {
			return false
		}
	}
	return true
}
// ContainsSphere returns one state of OUTSIDE, INTERSECT, or INSIDE that represents how a sphere interacts with the frustum.
// If no points within the sphere lie within the frustum, then the function returns OUTSIDE.
// If any point within the sphere is inside the frustum, and any one other point within the sphere is outside the frustum, then the function returns INTERSECT.
// If none of the previous conditions are met, then the sphere lies fully within the frustum, and the function returns INSIDE.
func (frust *Frustum) ContainsSphere(sp Sphere) int {
	for i := range frust.sides {
		// Signed distance of the sphere center to the plane (planes are
		// normalized in init(), so this is a true distance).
		distance := frust.sides[i].Distance(sp.Center)
		if distance < -sp.Radius {
			// Entirely on the outside of this plane.
			return OUTSIDE
		}
		if distance < sp.Radius {
			// Straddles this plane.
			return INTERSECT
		}
	}
	return INSIDE
}
// LookAtMatrix returns the lookAt matrix of the frustum (by value, so
// mutating the result does not affect the frustum).
func (frust *Frustum) LookAtMatrix() Mat4x4 {
	return frust.lookAt
}

// Projection returns the projection matrix of the frustum (by value, so
// mutating the result does not affect the frustum).
func (frust *Frustum) Projection() Mat4x4 {
	return frust.perspMat
}
package main
import (
"errors"
"math"
"time"
)
const (
	// Gravitational constant [m^3 kg^-1 s^-2]
	G float64 = 6.67430e-11
	// Mass of Earth [kg]
	M_E float64 = 5.97219e+24
	// Mean radius of Earth [m]
	R_E float64 = 6.371008e+6
)

// Error returned if eccentric anomaly solution does not converge after 100 iterations.
var errNoConvergence = errors.New("Kepler's equation solution failed to converge.")
/* Calculates orbital period from mean motion.
 * Assumes mnm is in revolutions per day (TLE convention) and returns
 * seconds — confirm units with callers. */
func Period(mnm float64) float64 {
	return 86400 / mnm
}

/* Calculates semi-major axis from orbital period and dominant body mass
 * via Kepler's third law: a = cbrt(G*M*t^2 / (4*pi^2)). */
func SemiMajorAxis(t, dominantMass float64) float64 {
	return math.Cbrt((G * dominantMass * math.Pow(t, 2)) / (4 * math.Pow(math.Pi, 2)))
}

/* Returns semi-minor axis given semi-major-axis and orbital eccentricity:
 * b = a*sqrt(1 - e^2). */
func SemiMinorAxis(sma, ecc float64) float64 {
	return sma * math.Sqrt(1 - math.Pow(ecc, 2))
}

/* Converts argument of periapsis to longitude of periapsis given longitude
 * of ascending node. Angles in degrees; result wrapped to [0, 360). */
func LongitudeOfPeriapsis(lan, agp float64) float64 {
	return math.Mod(lan + agp, 360)
}

/* Returns the radius of apoapsis calculated from semi-major axis and eccentricity. */
func ApoapsisRadius(sma, ecc float64) float64 {
	return sma * (1 + ecc)
}

/* Returns the radius of periapsis calculated from semi-major axis and eccentricity. */
func PeriapsisRadius(sma, ecc float64) float64 {
	return sma * (1 - ecc)
}

/* Calculates time to periapsis from mean anomaly (degrees) and orbital
 * period: the remaining fraction of the period at the mean sweep rate. */
func TimeToPeriapsis(mna, t float64) float64 {
	return t - (mna / sweepRate(t))
}

/* Calculates time to apoapsis from mean anomaly, orbital period and time
 * to periapsis: apoapsis lies half a period from periapsis, before or
 * after depending on which side of mna = 180 deg we are. */
func TimeToApoapsis(mna, t, pet float64) float64 {
	if mna <= 180 {
		return pet - t / 2
	}
	return pet + t / 2
}

/* Calculates mean longitude given mean anomaly and longitude of periapsis.
 * Angles in degrees; result wrapped to [0, 360). */
func MeanLongitude(mna, lpe float64) float64 {
	return math.Mod(lpe + mna, 360)
}
/* Eccentric anomaly solution adapted from:
 *
 * Murison, M.A. 2006. A Practical Method for Solving the Kepler Equation.
 * For the full source description, see the top of the file or README.
 *
 * The function returns an error if the solution is not within tolerance
 * (1e-14 rad) after 100 iterations.
 *
 * Accepts orbital eccentricity and mean anomaly (degrees) as arguments;
 * the result is returned in degrees.
 */
func EccentricAnomaly(ecc, mna float64) (float64, error) {
	const (
		maxIter int = 100
		tolerance float64 = 1.0e-14
	)
	// Work in radians, with the mean anomaly normalized to [0, 2*pi).
	mnaNorm := math.Mod(Rad(mna), 2 * math.Pi)
	// Starting value of the eccentric anomaly
	eca0 := keplerStart(ecc, mnaNorm)
	// The difference in eccentric anomaly value between iterations
	dE := tolerance + 1
	var eca float64
	// Iterate the eps correction until successive values agree to tolerance.
	for i := 0; dE > tolerance; i++ {
		if i >= maxIter {
			return Deg(eca), errNoConvergence
		}
		eca = eca0 - eps(ecc, mnaNorm, eca0)
		dE = math.Abs(eca - eca0)
		eca0 = eca
	}
	return Deg(eca), nil
}
/* Calculates the starting value for the eccentric anomaly solution
 * (Murison, 2006), a third-order series expansion in the eccentricity.
 * Intakes orbital eccentricity and mean anomaly [rad].
 */
func keplerStart(ecc, mna float64) float64 {
	t34 := math.Pow(ecc, 2) // e^2
	t35 := ecc * t34        // e^3
	t33 := math.Cos(mna)
	return mna + (-0.5 * t35 + ecc + (t34 + 1.5 * t33 * t35) * t33) * math.Sin(mna)
}
/* Iteration function for the eccentric anomaly solution (Murison, 2006):
 * a higher-order Newton-style correction term for Kepler's equation.
 * Accepts orbital eccentricity, mean anomaly and the result of the last
 * iteration or the starting value. Angles are expressed in radians.
 */
func eps(ecc, mna, x float64) float64 {
	t1 := math.Cos(x)
	t2 := -1 + ecc * t1        // -f'(x), derivative-related term
	t3 := math.Sin(x)
	t4 := ecc * t3
	t5 := -x + t4 + mna        // residual of Kepler's equation at x
	t6 := t5 / (0.5 * t5 * t4 / t2 + t2)
	return t5 / ((0.5 * t3 - t1 * t6 / 6) * ecc * t6 + t2)
}
/* Calculates true anomaly from orbital eccentricity and eccentric anomaly
 * (degrees) using the half-angle atan2 form, which is well-behaved across
 * all quadrants. Result in degrees. */
func TrueAnomaly(ecc, eca float64) float64 {
	ecaRad := Rad(eca)
	return 2 * Deg(math.Atan2(math.Sqrt(1 + ecc) * math.Sin(ecaRad / 2), math.Sqrt(1 - ecc) * math.Cos(ecaRad / 2)))
}
/* Returns orbital radius given semi-major axis, orbital eccentricity and
 * true anomaly (degrees): the polar equation of the conic orbit. */
func OrbitalRadius(sma, ecc, tra float64) float64 {
	return sma * ((1 - math.Pow(ecc, 2)) / (1 + ecc * math.Cos(Rad(tra))))
}

/* Calculates orbital velocity from semi-major axis, orbital radius and
 * dominant body mass via the vis-viva equation v = sqrt(GM(2/r - 1/a)). */
func OrbitalVelocity(r, sma, dominantMass float64) float64 {
	return math.Sqrt(G * dominantMass * (2 / r - 1 / sma))
}

/* Calculates true longitude from true anomaly and longitude of periapsis.
 * Angles in degrees; result wrapped to [0, 360). */
func TrueLongitude(tra, lpe float64) float64 {
	return math.Mod(tra + lpe, 360)
}

/* Converts epoch year and fraction of a day extracted from TLE to unix time in seconds.
 * Day 0 of January is December 31 of the previous year, matching the TLE
 * convention where epoch day 1.0 is the start of January 1. */
func EpochToUnix(epochYear int, epochDay float64) int64 {
	return time.Date(epochYear, time.January, 0, 0, 0, 0, 0, time.UTC).Unix() + int64(86400 * epochDay)
}

/* Calculates the average rate of sweep from orbital period [deg/sec]. */
func sweepRate(t float64) float64 {
	return 360 / t
}
package validator
import (
"context"
"strconv"
"github.com/mitchellh/mapstructure"
"github.com/alibaba/ilogtail/pkg/doc"
"github.com/alibaba/ilogtail/pkg/logger"
"github.com/alibaba/ilogtail/pkg/protocol"
)
const counterSysValidatorName = "sys_counter"

// counterSystemValidator checks received/raw/processed/flushed log and
// log-group counters against configured minimums and equality
// expectations. Configuration is decoded from a spec map via the
// mapstructure tags below.
type counterSystemValidator struct {
	ExpectReceivedMinimumLogNum int `mapstructure:"expect_received_minimum_log_num" comment:"the expected minimum received log number"`
	ExpectReceivedMinimumLogGroupNum int `mapstructure:"expect_received_minimum_log_group_num" comment:"the expected minimum received log group number"`
	ExpectMinimumRawLogNum int `mapstructure:"expect_minimum_raw_log_num" comment:"the expected minimum raw log number"`
	ExpectMinimumProcessedLogNum int `mapstructure:"expect_minimum_processed_log_num" comment:"the expected minimum processed log number"`
	ExpectMinimumFlushLogNum int `mapstructure:"expect_minimum_flush_log_num" comment:"the expected minimum flushed log number"`
	ExpectMinimumFlushLogGroupNum int `mapstructure:"expect_minimum_flush_log_group_num" comment:"the expected minimum flushed log group number"`
	ExpectEqualRawLog bool `mapstructure:"expect_equal_raw_log" comment:"whether the received log number equal to the raw log number"`
	ExpectEqualProcessedLog bool `mapstructure:"expect_equal_processed_log" comment:"whether the received log number equal to the processed log number"`
	ExpectEqualFlushLog bool `mapstructure:"expect_equal_flush_log" comment:"whether the received log number equal to the flush log number"`

	// groupCounter and logsCounter accumulate the received log groups and
	// logs observed through Valid.
	groupCounter int
	logsCounter int
}
// Description returns a human-readable summary of this validator.
func (c *counterSystemValidator) Description() string {
	// BUG FIX: the previous text described a "log field validator" — a
	// copy-paste from another validator; this one validates log counters.
	return "this is a system validator to check the counters of logs received from the subscriber"
}
// Name returns the registered name of this validator.
func (c *counterSystemValidator) Name() string {
	return counterSysValidatorName
}

// Valid accumulates the counters for one received log group.
func (c *counterSystemValidator) Valid(group *protocol.LogGroup) {
	c.groupCounter++
	c.logsCounter += len(group.Logs)
}

// Start implements the SystemValidator interface; nothing to initialize.
func (c *counterSystemValidator) Start() error {
	return nil
}
// FetchResult compares the accumulated counters against the configured
// expectations and returns one Report per violated expectation; an empty
// result means every expectation was satisfied.
// BUG FIX: the debug messages previously read "bug got" (and "want got")
// instead of "but got"; all nine messages are corrected.
func (c *counterSystemValidator) FetchResult() (res []*Report) {
	if c.ExpectReceivedMinimumLogGroupNum > c.groupCounter {
		logger.Debugf(context.Background(), "want log group number over %d, but got %d", c.ExpectReceivedMinimumLogGroupNum, c.groupCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "loggroup_minimum_number", Want: strconv.Itoa(c.ExpectReceivedMinimumLogGroupNum), Got: strconv.Itoa(c.groupCounter)})
	}
	if c.ExpectReceivedMinimumLogNum > c.logsCounter {
		logger.Debugf(context.Background(), "want log number over %d, but got %d", c.ExpectReceivedMinimumLogNum, c.logsCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "log_minimum_number", Want: strconv.Itoa(c.ExpectReceivedMinimumLogNum), Got: strconv.Itoa(c.logsCounter)})
	}
	if c.ExpectMinimumRawLogNum > RawLogCounter {
		logger.Debugf(context.Background(), "want raw log number over %d, but got %d", c.ExpectMinimumRawLogNum, RawLogCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "raw_log_minimum_number", Want: strconv.Itoa(c.ExpectMinimumRawLogNum), Got: strconv.Itoa(RawLogCounter)})
	}
	if c.ExpectMinimumProcessedLogNum > ProcessedLogCounter {
		logger.Debugf(context.Background(), "want processed log number over %d, but got %d", c.ExpectMinimumProcessedLogNum, ProcessedLogCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "processed_log_minimum_number", Want: strconv.Itoa(c.ExpectMinimumProcessedLogNum), Got: strconv.Itoa(ProcessedLogCounter)})
	}
	if c.ExpectMinimumFlushLogNum > FlushLogCounter {
		logger.Debugf(context.Background(), "want flushed log number over %d, but got %d", c.ExpectMinimumFlushLogNum, FlushLogCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "flushed_log_minimum_number", Want: strconv.Itoa(c.ExpectMinimumFlushLogNum), Got: strconv.Itoa(FlushLogCounter)})
	}
	if c.ExpectMinimumFlushLogGroupNum > FlushLogGroupCounter {
		logger.Debugf(context.Background(), "want flushed log group number over %d, but got %d", c.ExpectMinimumFlushLogGroupNum, FlushLogGroupCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "flushed_log_group_minimum_number", Want: strconv.Itoa(c.ExpectMinimumFlushLogGroupNum), Got: strconv.Itoa(FlushLogGroupCounter)})
	}
	if c.ExpectEqualRawLog && c.logsCounter != RawLogCounter {
		logger.Debugf(context.Background(), "want raw log number %d, but got %d", RawLogCounter, c.logsCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "equal_raw_log", Want: strconv.Itoa(RawLogCounter), Got: strconv.Itoa(c.logsCounter)})
	}
	if c.ExpectEqualProcessedLog && c.logsCounter != ProcessedLogCounter {
		logger.Debugf(context.Background(), "want processed log number %d, but got %d", ProcessedLogCounter, c.logsCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "equal_processed_log", Want: strconv.Itoa(ProcessedLogCounter), Got: strconv.Itoa(c.logsCounter)})
	}
	if c.ExpectEqualFlushLog && c.logsCounter != FlushLogCounter {
		logger.Debugf(context.Background(), "want flush log number %d, but got %d", FlushLogCounter, c.logsCounter)
		res = append(res, &Report{Validator: counterSysValidatorName, Name: "equal_flush_log", Want: strconv.Itoa(FlushLogCounter), Got: strconv.Itoa(c.logsCounter)})
	}
	return
}
func init() {
RegisterSystemValidatorCreator(counterSysValidatorName, func(spec map[string]interface{}) (SystemValidator, error) {
f := new(counterSystemValidator)
err := mapstructure.Decode(spec, f)
if err != nil {
return nil, err
}
return f, nil
})
doc.Register("sys_validator", counterSysValidatorName, new(counterSystemValidator))
} | test/engine/validator/sys_counter.go | 0.567697 | 0.450118 | sys_counter.go | starcoder |
package flagger
import (
"fmt"
"math"
//"strings"
)
// BuildDecTree constructs a decision tree over leafCount leaves (with
// spareLevels of extra depth) from the supplied trait matrix and trait map.
// splitPercTolerance is currently unused by the construction algorithm.
func BuildDecTree(leafCount, spareLevels uint32,
	traits *TraitMatrix,
	tm TraitMap,
	splitPercTolerance float32) DecTree {

	tree := GetEmptyDecTree(leafCount, spareLevels)

	// Invert the trait map so tags can be looked up by numeric id.
	tags := make([]string, len(tm))
	for tag, node := range tm {
		tags[node.Id] = tag
	}

	calculateNode(1, 1, leafCount, GetOnes256(leafCount), traits, &tree, tags)
	return tree
}
// calculateNode recursively fills in tree node `index` at depth `level`.
// `mask` selects which leaves are still candidates at this node and `count`
// is the number of set bits in that mask.
func calculateNode(level, index, count uint32, mask Bits256,
	traits *TraitMatrix, dt *DecTree, tags []string) {
	//fmt.Println("LEVEL:", level, "NODE:", index, "COUNT:", count)

	// A single remaining candidate becomes a leaf node.
	if count == 1 {
		createLeaf(index, mask, dt, tags)
		return
	}
	// No levels left but more than one candidate: the tree is too shallow.
	if level == dt.Levels {
		panic(fmt.Errorf("Reached the bottom"))
	}

	newMask, traitId, newCount, quality := findBestStdFit(count, mask, traits)

	// quality is |count/2 - popcount| of the best split; a value above
	// count/2 - 0.25 means even the best trait leaves (nearly) everything on
	// one side, i.e. no trait discriminates — record a deadlock instead.
	if quality > ((float64(count) / 2) - 0.25) {
		createDeadlock(index, count, mask, dt, tags)
		return
	}

	createStandard(index, traitId, mask, dt)

	// Left child: candidates matching the trait; right child: the rest.
	calculateNode(level+1, index<<1, newCount, newMask, traits, dt, tags)
	calculateNode(level+1, (index<<1)+1, count-newCount,
		And(Not(newMask), mask), traits, dt, tags)
}
// findBestStdFit scans all 64 traits and returns the masked trait bits,
// trait index, population count and split quality of the trait that comes
// closest to splitting the candidate set in half. Quality is
// |count/2 - popcount|, so lower is better; the scan short-circuits as soon
// as a near-perfect split (quality < 0.75) has been found.
func findBestStdFit(count uint32,
	mask Bits256,
	traits *TraitMatrix) (Bits256, uint32, uint32, float64) {

	var (
		bestMask             Bits256
		bestIndex, bestCount uint32
	)
	best := float64(count)
	half := float64(count) / 2

	for i := uint32(0); i < 64; i++ {
		candidate := And(GetBits256FromTMX(traits, i), mask)
		n := candidate.Count()
		q := math.Abs(half - float64(n))
		if q < best {
			best, bestIndex, bestCount, bestMask = q, i, n, candidate
		}
		if best < 0.75 {
			break
		}
	}
	return bestMask, bestIndex, bestCount, best
}
// createLeaf writes a final "F_<tag>" node for the single candidate
// remaining in mask.
func createLeaf(index uint32, mask Bits256, dt *DecTree, tags []string) {
	dt.Nodes[index] = DTNode{"F_" + tags[mask.OnlyIndex()], mask}
}
// createDeadlock handles the case where no trait usefully splits the
// remaining candidates: it collects the tags still covered by mask and
// delegates to solveDeadlock to resolve them (presumably via tiebreaker
// nodes — see createTiebreaker; confirm against solveDeadlock).
func createDeadlock(index, count uint32, mask Bits256, dt *DecTree,
	tags []string) {
	dlTags := []string{}
	for _, cid := range mask.AllIndices() {
		dlTags = append(dlTags, tags[cid])
	}
	//rule := fmt.Sprintf("DEADLOCK(%s)", strings.Join(dlTags, ";"))
	//dt.Nodes[index] = DTNode{ rule, mask }
	// fmt.Println("------------------->", rule)
	solveDeadlock(index, dlTags, tags, dt)
}
func createStandard( index, traitId uint32, mask Bits256, dt *DecTree ) {
dt.Nodes[index] = DTNode{ fmt.Sprintf("S_%02d", traitId), mask }
}
func createTiebreaker( index, tbId uint32, mask Bits256, dt *DecTree ) {
dt.Nodes[index] = DTNode{ fmt.Sprintf("T_%02d", tbId), mask }
} | pkg/flagger/dtbuilder.go | 0.629205 | 0.42471 | dtbuilder.go | starcoder |
package goop2
// Var represents a variable in an optimization problem. The variable is
// identified with an uint64.
type Var struct {
	ID    uint64  // unique identifier of the variable
	Lower float64 // lower bound on the variable's value
	Upper float64 // upper bound on the variable's value
	Vtype VarType // continuous, binary, integer, etc. (Gurobi encoding)
}
// NumVars returns the number of variables in the expression. For a variable,
// it always returns one (the variable itself).
func (v *Var) NumVars() int {
	return 1
}

// Vars returns a slice of the Var ids in the expression. For a variable, it
// always returns a singleton slice with the given variable ID.
func (v *Var) Vars() []uint64 {
	return []uint64{v.ID}
}

// Coeffs returns a slice of the coefficients in the expression. For a
// variable, it always returns a singleton slice containing the value one
// (a bare variable is the term 1*v).
func (v *Var) Coeffs() []float64 {
	return []float64{1}
}

// Constant returns the constant additive value in the expression. For a
// variable, it always returns zero.
func (v *Var) Constant() float64 {
	return 0
}
// Plus adds the current expression to another and returns the resulting
// expression. The variable contributes the single term 1*v; the other
// expression's terms and constant are appended after it.
func (v *Var) Plus(e Expr) Expr {
	variables := append([]uint64{v.ID}, e.Vars()...)
	coefficients := append([]float64{1}, e.Coeffs()...)
	return &LinearExpr{
		variables:    variables,
		coefficients: coefficients,
		constant:     e.Constant(),
	}
}
// Mult multiplies the variable by a constant scalar and returns the
// resulting single-term linear expression m*v.
//
// Cleanup: the previous version multiplied m by v.Coeffs()[0], which is
// always 1 for a bare variable, and carried commented-out dead code for an
// unimplemented Var*Var case.
func (v *Var) Mult(m float64) Expr {
	return &LinearExpr{
		variables:    []uint64{v.ID},
		coefficients: []float64{m},
		constant:     0,
	}
}
// LessEq returns a less than or equal to (<=) constraint between the
// current expression and another. It delegates to the package-level LessEq.
func (v *Var) LessEq(other Expr) *Constr {
	return LessEq(v, other)
}

// GreaterEq returns a greater than or equal to (>=) constraint between the
// current expression and another. It delegates to the package-level GreaterEq.
func (v *Var) GreaterEq(other Expr) *Constr {
	return GreaterEq(v, other)
}

// Eq returns an equality (==) constraint between the current expression
// and another. It delegates to the package-level Eq.
func (v *Var) Eq(other Expr) *Constr {
	return Eq(v, other)
}
/*
// ID returns the ID of the variable
func (v *Var) ID() uint64 {
return v.ID
}
// Lower returns the lower value limit of the variable
func (v *Var) Lower() float64 {
return v.Lower
}
// Upper returns the upper value limit of the variable
func (v *Var) Upper() float64 {
return v.Upper
}
// Type returns the type of variable (continuous, binary, integer, etc)
func (v *Var) Type() VarType {
return v.Vtype
}
*/
// VarType represents the type of the variable (continuous, binary,
// integer, etc) and uses Gurobi's encoding.
type VarType byte

// Multiple common variable types have been included as constants that conform
// to Gurobi's encoding. Each constant is explicitly typed: previously only
// Continuous was a VarType while Binary and Integer were untyped rune
// constants, losing the type safety the named type is meant to provide.
const (
	Continuous VarType = 'C'
	Binary     VarType = 'B'
	Integer    VarType = 'I'
)
package problem6
/**
* Sum square difference
*
* https://projecteuler.net/problem=6
* The sum of the squares of the first ten natural numbers is,
* 1^2 + 2^2 + ... + 10^2 = 385
* The square of the sum of the first ten natural numbers is,
* (1 + 2 + ... + 10)^2 = 55^2 = 3025
* Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.
* Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
*
* http://odz.sakura.ne.jp/projecteuler/index.php?cmd=read&page=Problem%206
* 最初の10個の自然数について, その二乗の和は,
* 1^2 + 2^2 + ... + 10^2 = 385
* 最初の10個の自然数について, その和の二乗は,
* (1 + 2 + ... + 10)^2 = 55^2 = 3025
* これらの数の差は 3025 - 385 = 2640 となる.
* 同様にして, 最初の100個の自然数について二乗の和と和の二乗の差を求めよ.
*
* Contents of Project Euler are licenced under a Creative Commons Licence: Attribution-NonCommercial-ShareAlike 2.0 UK: England & Wales.
* http://creativecommons.org/licenses/by-nc-sa/2.0/uk/
*/
// Answer0 returns the difference between the square of the sum and the sum
// of the squares of the first max natural numbers (brute-force loop version).
func Answer0(max int64) int64 {
	var squareSum, sum int64
	for n := int64(1); n <= max; n++ {
		squareSum += n * n
		sum += n
	}
	return sum*sum - squareSum
}
// Answer1 returns the same difference via closed-form formulas:
// sum of squares = n(n+1)(2n+1)/6 and sum = n(n+1)/2.
func Answer1(max int64) int64 {
	squareSum := max * (max + 1) * (2*max + 1) / 6
	sum := max * (max + 1) / 2
	return sum*sum - squareSum
}
/* Copyright 2018 Spiegel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
package query
import (
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/spoke-d/thermionic/internal/db/database"
)
// SelectStrings executes a statement which must yield rows with a single
// string column. It returns the list of column values.
func SelectStrings(tx database.Tx, query string, args ...interface{}) ([]string, error) {
	var result []string
	collect := func(rows database.Rows) error {
		var s string
		if err := rows.Scan(&s); err != nil {
			return errors.WithStack(err)
		}
		result = append(result, s)
		return nil
	}
	err := scanSingleColumn(tx, query, args, "TEXT", collect)
	return result, errors.WithStack(err)
}
// SelectIntegers executes a statement which must yield rows with a single
// integer column. It returns the list of column values.
func SelectIntegers(tx database.Tx, query string, args ...interface{}) ([]int, error) {
	var result []int
	collect := func(rows database.Rows) error {
		var n int
		if err := rows.Scan(&n); err != nil {
			return errors.WithStack(err)
		}
		result = append(result, n)
		return nil
	}
	err := scanSingleColumn(tx, query, args, "INTEGER", collect)
	return result, errors.WithStack(err)
}
// InsertStrings inserts a new row for each of the given strings, using the
// given insert statement template, which must define exactly one insertion
// column and one substitution placeholder for the values. For example:
// InsertStrings(tx, "INSERT INTO foo(name) VALUES %s", []string{"bar"}).
// With no values it is a no-op.
func InsertStrings(tx database.Tx, stmt string, values []string) error {
	if len(values) == 0 {
		return nil
	}
	placeholders := make([]string, 0, len(values))
	args := make([]interface{}, 0, len(values))
	for _, v := range values {
		placeholders = append(placeholders, "(?)")
		args = append(args, v)
	}
	stmt = fmt.Sprintf(stmt, strings.Join(placeholders, ", "))
	_, err := tx.Exec(stmt, args...)
	return errors.WithStack(err)
}
// scanFunc processes a single row of a result set.
type scanFunc func(database.Rows) error

// scanSingleColumn executes the given query, verifies that it yields rows
// with a single column of the given database type, and invokes scan once per
// row. The rows handle is always closed before returning.
func scanSingleColumn(tx database.Tx, query string, args []interface{}, typeName string, scan scanFunc) error {
	rows, err := tx.Query(query, args...)
	if err != nil {
		return errors.WithStack(err)
	}
	defer rows.Close()
	if err := checkRowsHaveOneColumnOfSpecificType(rows, typeName); err != nil {
		return errors.WithStack(err)
	}
	for rows.Next() {
		if err := scan(rows); err != nil {
			return errors.WithStack(err)
		}
	}
	// Surface any error encountered during iteration (WithStack is a no-op
	// for a nil error).
	err = rows.Err()
	return errors.WithStack(err)
}
// Check that the given result set yields rows with a single column of a
// specific type.
func checkRowsHaveOneColumnOfSpecificType(rows database.Rows, typeName string) error {
types, err := rows.ColumnTypes()
if err != nil {
return errors.WithStack(err)
}
if len(types) != 1 {
return errors.Errorf("query yields %d columns, not 1", len(types))
}
actualTypeName := strings.ToUpper(types[0].DatabaseTypeName())
// If the actualTypeName is empty, there is nothing we can check against,
// so in that instance we should just return as valid.
if actualTypeName == "" {
return nil
}
if actualTypeName != typeName {
return errors.Errorf("query yields %q column, not %q", actualTypeName, typeName)
}
return nil
} | internal/db/query/slices.go | 0.730578 | 0.466056 | slices.go | starcoder |
package main
import (
"fmt"
)
// Cell is a "one" in our sparse matrix, it's an element of a 2D doubly linked list.
type Cell struct {
	Left, Right, Up, Down *Cell       // circular neighbours in the row and column lists
	Row, Column           *Cell       // header cells of the row/column this cell belongs to
	data                  interface{} // payload used for identification and printing
}

// String renders the cell's payload with the default %v formatting.
func (cell Cell) String() string {
	return fmt.Sprintf("%v", cell.data)
}
// Matrix defines a sparse matrix, it's the root element of the doubly linked list.
// Constraints are columns in our matrix; choices are the rows of our matrix.
type Matrix struct {
	root *Cell // sentinel cell anchoring both the column and the row header lists
}
// newMatrix builds an empty sparse matrix: a lone root cell linked
// circularly to itself in both dimensions.
func newMatrix() *Matrix {
	root := &Cell{data: "root"}
	root.Left, root.Right = root, root
	root.Up, root.Down = root, root
	return &Matrix{root: root}
}
// createConstraint appends a new constraint (column header) to the matrix.
// The new column starts with an empty vertical list (linked to itself) and
// is inserted as the right-most column, just before root.
func (matrix Matrix) createConstraint(data interface{}) *Cell {
	constraint := Cell{}
	constraint.data = data
	// initially no choice covers this constraint
	constraint.Up = &constraint
	constraint.Down = &constraint
	constraint.Column = &constraint
	// add the constraint to the matrix as the last column
	constraint.Left = matrix.root.Left
	constraint.Right = matrix.root
	constraint.Row = matrix.root
	//TODO: should be constraint.restore
	constraint.Left.Right = &constraint
	constraint.Right.Left = &constraint
	return &constraint
}
// createChoice appends a new choice (row) covering the given constraints.
// For every constraint a new Cell is linked into both the choice's row list
// and the constraint's column list.
func (matrix Matrix) createChoice(data interface{}, constraints []*Cell) *Cell {
	choice := Cell{}
	choice.data = data
	// initially this choice doesn't cover any constraint
	choice.Left = &choice
	choice.Right = &choice
	choice.Row = &choice
	// add the choice to the matrix as the last row
	choice.Up = matrix.root.Up
	choice.Down = matrix.root
	choice.Column = matrix.root
	//TODO: should be row.restore
	choice.Up.Down = &choice
	choice.Down.Up = &choice
	for _, constraint := range constraints {
		cell := Cell{}
		// add it to the choice, as the new last cell of the row
		cell.Left = choice.Left
		cell.Right = choice.Left.Right
		cell.Left.Right = &cell
		cell.Right.Left = &cell
		// order-sensitive: the previous line relinked choice.Left to &cell,
		// so choice.Left.Right now resolves to the row header (&choice)
		cell.Row = choice.Left.Right
		// add it to the constraint, as the new last cell of the column
		// (constraint.Up.Down is the column header, since the list is circular)
		cell.Up = constraint.Up
		cell.Down = constraint.Up.Down
		cell.Up.Down = &cell
		cell.Down.Up = &cell
		cell.Column = constraint
		cell.data = fmt.Sprintf("%v: %v", cell.Row.data, cell.Column.data)
	}
	return &choice
}
// coverColumn unlinks the given column from the header list and removes
// every row having a cell in this column from all other columns it appears
// in (Knuth's Dancing Links "cover" operation). The unlinked cells keep
// their own pointers so uncoverColumn can restore them.
func (matrix Matrix) coverColumn(column *Cell) {
	column = column.Column
	column.Left.Right = column.Right
	column.Right.Left = column.Left
	for row := column.Down; row != column; row = row.Down {
		for cell := row.Right; cell != row; cell = cell.Right {
			cell.Up.Down = cell.Down
			cell.Down.Up = cell.Up
		}
	}
}
// uncoverColumn is the exact inverse of coverColumn: it relinks all the
// removed cells, walking rows bottom-up and cells right-to-left so that the
// restore happens in precisely the reverse order of the cover.
func (matrix Matrix) uncoverColumn(column *Cell) {
	column = column.Column
	for row := column.Up; row != column; row = row.Up {
		for cell := row.Left; cell != row; cell = cell.Left {
			cell.Up.Down = cell
			cell.Down.Up = cell
		}
	}
	column.Left.Right = column
	column.Right.Left = column
}
// solve searches for an exact cover using Algorithm X with Dancing Links.
// It returns whether a solution was found plus the chosen row headers.
// Note the rows are appended after each recursive call returns, so the
// slice lists the solution in reverse order of selection.
func (matrix Matrix) solve() (bool, []*Cell) {
	// TODO: use a heuristic to take the constraint satisfied by fewest choices
	// choose an unsolved constraint: aka a column
	constraint := matrix.root.Right
	// no more constraints (root points to itself), we are done!
	if constraint == constraint.Right {
		return true, make([]*Cell, 0)
	}
	// cover the chosen column
	matrix.coverColumn(constraint)
	// try every row that satisfies this constraint
	for row := constraint.Down; row != constraint; row = row.Down {
		rowHead := row.Row
		// cover every other constraint satisfied by this row
		for cell := rowHead.Right; cell != rowHead; cell = cell.Right {
			matrix.coverColumn(cell)
		}
		// solve recursively
		found, partial := matrix.solve()
		if found {
			partial = append(partial, rowHead)
			return true, partial
		}
		// backtrack: uncover in reverse order
		for cell := rowHead.Left; cell != rowHead; cell = cell.Left {
			matrix.uncoverColumn(cell)
		}
	}
	matrix.uncoverColumn(constraint)
	return false, make([]*Cell, 0)
}
func main() {
mat := newMatrix()
/* sample from wikipedia
A = {1, 4, 7}
B = {1, 4}
C = {4, 5, 7}
D = {3, 5, 6}
E = {2, 3, 6, 7}
F = {2, 7}
*/
ctr := make([]*Cell, 7)
for i := 0; i < 7; i++ {
ctr[i] = mat.createConstraint(i + 1)
}
mat.createChoice("A", []*Cell{ctr[0], ctr[3], ctr[6]})
mat.createChoice("B", []*Cell{ctr[0], ctr[3]})
mat.createChoice("C", []*Cell{ctr[3], ctr[4], ctr[6]})
mat.createChoice("D", []*Cell{ctr[2], ctr[4], ctr[5]})
mat.createChoice("E", []*Cell{ctr[1], ctr[2], ctr[5], ctr[6]})
mat.createChoice("F", []*Cell{ctr[1], ctr[6]})
found, result := mat.solve()
fmt.Println(found, result)
} | main.go | 0.587707 | 0.676889 | main.go | starcoder |
package proto
import (
"math"
"github.com/go-faster/errors"
)
// Compile-time assertions that ColLowCardinalityOf satisfies the column
// interfaces (value receiver for input, pointer receiver for result/column).
var (
	_ ColInput  = ColLowCardinalityOf[string]{}
	_ ColResult = (*ColLowCardinalityOf[string])(nil)
	_ Column    = (*ColLowCardinalityOf[string])(nil)
)
// DecodeState implements StateDecoder, ensuring state for index column.
//
// It reads and validates the key serialization version, then forwards the
// remaining state to the index column if that column carries state itself.
func (c *ColLowCardinalityOf[T]) DecodeState(r *Reader) error {
	keySerialization, err := r.Int64()
	if err != nil {
		return errors.Wrap(err, "version")
	}
	// Only the "shared dictionaries with additional keys" serialization is
	// supported.
	if keySerialization != int64(sharedDictionariesWithAdditionalKeys) {
		return errors.Errorf("got version %d, expected %d",
			keySerialization, sharedDictionariesWithAdditionalKeys,
		)
	}
	if s, ok := c.index.(StateDecoder); ok {
		if err := s.DecodeState(r); err != nil {
			return errors.Wrap(err, "index state")
		}
	}
	return nil
}
// EncodeState implements StateEncoder, ensuring state for index column.
// It writes the key serialization version first, then delegates any
// additional state to the index column.
func (c ColLowCardinalityOf[T]) EncodeState(b *Buffer) {
	b.PutInt64(int64(sharedDictionariesWithAdditionalKeys))
	if enc, ok := c.index.(StateEncoder); ok {
		enc.EncodeState(b)
	}
}
// ColLowCardinalityOf is generic LowCardinality(T) column.
type ColLowCardinalityOf[T comparable] struct {
	Values []T            // decoded (or to-be-encoded) row values
	index  ColumnOf[T]    // dictionary of distinct values
	key    CardinalityKey // integer width used for the key column

	// Keeping all key column variants as fields to reuse
	// memory more efficiently.
	// Values[T], kv and keys columns adds memory overhead, but simplifies
	// implementation.
	// TODO(ernado): revisit tradeoffs
	keys8  ColUInt8
	keys16 ColUInt16
	keys32 ColUInt32
	keys64 ColUInt64

	kv   map[T]int // value -> dictionary index (built by Prepare)
	keys []int     // per-row dictionary indexes
}
// DecodeColumn reads `rows` values of the LowCardinality column from r.
//
// Wire layout: a meta int64 (flags plus key integer width), the dictionary
// ("index") preceded by its size, then the per-row key indexes preceded by
// their count. Decoded values are materialized into c.Values by looking
// each key up in the dictionary.
func (c *ColLowCardinalityOf[T]) DecodeColumn(r *Reader, rows int) error {
	if rows == 0 {
		// Skipping entirely if no rows.
		return nil
	}
	meta, err := r.Int64()
	if err != nil {
		return errors.Wrap(err, "meta")
	}
	// NOTE(review): this compares the masked flag against 1, which only
	// detects the flag if cardinalityNeedGlobalDictionaryBit is bit 0 —
	// confirm against the constant's value.
	if (meta & cardinalityNeedGlobalDictionaryBit) == 1 {
		return errors.New("global dictionary is not supported")
	}
	if (meta & cardinalityHasAdditionalKeysBit) == 0 {
		return errors.New("additional keys bit is missing")
	}

	// The low bits of meta select the key integer width.
	key := CardinalityKey(meta & cardinalityKeyMask)
	if !key.IsACardinalityKey() {
		return errors.Errorf("invalid low cardinality keys type %d", key)
	}
	c.key = key

	indexRows, err := r.Int64()
	if err != nil {
		return errors.Wrap(err, "index size")
	}
	if err := checkRows(int(indexRows)); err != nil {
		return errors.Wrap(err, "index size")
	}
	if err := c.index.DecodeColumn(r, int(indexRows)); err != nil {
		return errors.Wrap(err, "index column")
	}

	keyRows, err := r.Int64()
	if err != nil {
		return errors.Wrap(err, "keys size")
	}
	// NOTE(review): wrap message says "index size" for the keys check —
	// likely a copy/paste slip. Also, the key column below is decoded with
	// `rows` rather than `keyRows`; presumably the two always match.
	if err := checkRows(int(keyRows)); err != nil {
		return errors.Wrap(err, "index size")
	}
	switch c.key {
	case KeyUInt8:
		if err := c.keys8.DecodeColumn(r, rows); err != nil {
			return errors.Wrap(err, "keys")
		}
		c.keys = fillValues(c.keys, c.keys8)
	case KeyUInt16:
		if err := c.keys16.DecodeColumn(r, rows); err != nil {
			return errors.Wrap(err, "keys")
		}
		c.keys = fillValues(c.keys, c.keys16)
	case KeyUInt32:
		if err := c.keys32.DecodeColumn(r, rows); err != nil {
			return errors.Wrap(err, "keys")
		}
		c.keys = fillValues(c.keys, c.keys32)
	case KeyUInt64:
		if err := c.keys64.DecodeColumn(r, rows); err != nil {
			return errors.Wrap(err, "keys")
		}
		c.keys = fillValues(c.keys, c.keys64)
	default:
		return errors.Errorf("invalid key format %s", c.key)
	}

	// Materialize row values by dictionary lookup.
	c.Values = c.Values[:0]
	for _, idx := range c.keys {
		c.Values = append(c.Values, c.index.Row(idx))
	}
	return nil
}
// Type returns the LowCardinality(T) column type, where T is taken from
// the index column's type.
func (c ColLowCardinalityOf[T]) Type() ColumnType {
	return ColumnTypeLowCardinality.Sub(c.index.Type())
}

// EncodeColumn writes meta, the dictionary with its size, and the per-row
// keys to b, mirroring the layout read by DecodeColumn. Prepare must have
// been called first so the dictionary and key column are populated. Empty
// columns produce no output at all.
func (c ColLowCardinalityOf[T]) EncodeColumn(b *Buffer) {
	if c.Rows() == 0 {
		// Skipping encoding entirely.
		return
	}

	// Meta encodes whether reader should update
	// low cardinality metadata, plus the keys column width.
	meta := cardinalityUpdateAll | int64(c.key)
	b.PutInt64(meta)

	// Writing index (dictionary), size first.
	b.PutInt64(int64(c.index.Rows()))
	c.index.EncodeColumn(b)

	// Per-row keys, count first, in the width chosen by Prepare.
	b.PutInt64(int64(c.Rows()))
	switch c.key {
	case KeyUInt8:
		c.keys8.EncodeColumn(b)
	case KeyUInt16:
		c.keys16.EncodeColumn(b)
	case KeyUInt32:
		c.keys32.EncodeColumn(b)
	case KeyUInt64:
		c.keys64.EncodeColumn(b)
	}
}
// Reset clears all rows and the cached key/dictionary state while keeping
// the allocated capacity for reuse.
func (c *ColLowCardinalityOf[T]) Reset() {
	for key := range c.kv {
		delete(c.kv, key)
	}
	c.Values = c.Values[:0]
	c.keys = c.keys[:0]
	c.keys8 = c.keys8[:0]
	c.keys16 = c.keys16[:0]
	c.keys32 = c.keys32[:0]
	c.keys64 = c.keys64[:0]
	c.index.Reset()
}
// cardinalityKeyValue constrains the unsigned integer widths usable as
// low-cardinality key representations.
type cardinalityKeyValue interface {
	~uint8 | ~uint16 | ~uint32 | ~uint64
}

// fillKeys appends every index in values onto keys, converted to the key
// type K, and returns the extended slice.
func fillKeys[K cardinalityKeyValue](values []int, keys []K) []K {
	for _, idx := range values {
		keys = append(keys, K(idx))
	}
	return keys
}

// fillValues appends every key onto values, widened back to int, and
// returns the extended slice.
func fillValues[K cardinalityKeyValue](values []int, keys []K) []int {
	for _, key := range keys {
		values = append(values, int(key))
	}
	return values
}
// Append adds v as a new row. The dictionary is rebuilt later by Prepare.
func (c *ColLowCardinalityOf[T]) Append(v T) {
	c.Values = append(c.Values, v)
}

// AppendArr appends every element of v as new rows.
func (c *ColLowCardinalityOf[T]) AppendArr(v []T) {
	c.Values = append(c.Values, v...)
}

// Row returns the value of the i-th row.
func (c ColLowCardinalityOf[T]) Row(i int) T {
	return c.Values[i]
}

// Rows returns the number of rows in the column.
func (c ColLowCardinalityOf[T]) Rows() int {
	return len(c.Values)
}
// Prepare column for ingestion: builds the dictionary (index) from Values,
// selects an integer width able to address it, and fills the key column
// with per-row dictionary indexes. Must be called before EncodeColumn.
func (c *ColLowCardinalityOf[T]) Prepare() error {
	// Select minimum possible size for key.
	// NOTE(review): sizing uses the total row count rather than the number
	// of distinct values; conservative, but may pick a wider key than
	// strictly necessary.
	if n := len(c.Values); n < math.MaxUint8 {
		c.key = KeyUInt8
	} else if n < math.MaxUint16 {
		c.key = KeyUInt16
	} else if uint32(n) < math.MaxUint32 {
		c.key = KeyUInt32
	} else {
		c.key = KeyUInt64
	}

	// Allocate keys slice (reusing existing capacity when possible).
	c.keys = append(c.keys[:0], make([]int, len(c.Values))...)
	if c.kv == nil {
		c.kv = map[T]int{}
	}

	// Fill keys with value indexes, assigning a fresh dictionary slot to
	// each previously unseen value.
	var last int
	for i, v := range c.Values {
		idx, ok := c.kv[v]
		if !ok {
			c.index.Append(v)
			c.kv[v] = last
			idx = last
			last++
		}
		c.keys[i] = idx
	}

	// Fill the key column of the chosen width with the key indexes.
	switch c.key {
	case KeyUInt8:
		c.keys8 = fillKeys(c.keys, c.keys8)
	case KeyUInt16:
		c.keys16 = fillKeys(c.keys, c.keys16)
	case KeyUInt32:
		c.keys32 = fillKeys(c.keys, c.keys32)
	case KeyUInt64:
		c.keys64 = fillKeys(c.keys, c.keys64)
	}
	return nil
}
// LowCardinalityOf creates new LowCardinality column from another column for T.
func LowCardinalityOf[T comparable](c ColumnOf[T]) *ColLowCardinalityOf[T] {
return &ColLowCardinalityOf[T]{
index: c,
}
} | proto/col_low_cardinality_of.go | 0.515864 | 0.421909 | col_low_cardinality_of.go | starcoder |
package tempAll
import (
"fmt"
"math"
"reflect"
)
import (
"github.com/tflovorn/scExplorer/bzone"
"github.com/tflovorn/scExplorer/serialize"
vec "github.com/tflovorn/scExplorer/vector"
)
// Environment is a container for variables relevant at all temperatures.
type Environment struct {
	// Program parameters:
	PointsPerSide int // length of one side of the lattice
	// Constant physical parameters:
	X        float64 // average density of holons
	T0       float64 // nn one-holon hopping energy
	Thp      float64 // direct nnn one-holon hopping energy
	Tz       float64 // inter-planar one-holon hopping energy
	Alpha    int     // SC gap symmetry parameter (s-wave = +1, d-wave = -1)
	Be_field float64 // magnetic field (flux density) times e along the c axis (eB is unitless)
	// Dynamically determined physical parameters:
	D1   float64 // nnn hopping parameter generated by two-holon hopping
	Mu_h float64 // holon chemical potential
	// May be constant or dynamically determined:
	Beta float64 // inverse temperature
	F0   float64 // superconducting order parameter (0 if T >= Tc)
	Mu_b float64 // holon pair (bosonic) chemical potential (0 if T <= Tc)
	A, B float64 // pair spectrum parameters
	// Data for plotting
	Temp float64 // 1/Beta -- to only be used for plotting; may otherwise be invalid
	// Behavior flags:
	// Iterate solution for Mu_b in tempFluc.SolveD1Mu_hMu_b.
	IterateD1Mu_hMu_b bool
	// Use kz^2 in pair spectrum - incompatible with finite magnetic field.
	// If false, use cosine spectrum in tempCrit/tempFluc; cosine spectrum not implemented in tempLow.
	PairKzSquaredSpectrum bool
	// If true, include poles giving omega_- spectrum in the calculation.
	// Has no effect if PairKzSquaredSpectrum = false (TODO - extend to cos(kz) spectrum).
	// Expect negligible difference between this being on or off.
	OmegaMinusPoles bool
	// Fix pair spectrum coefficients to their values at Tc (helps match F0 = 0 to Tc in T < Tc calculation).
	FixedPairCoeffs bool
	// If FixedPairCoeffs = true, stop varying pair spectrum coefficients after PairCoeffsReady is set to true.
	PairCoeffsReady bool
	// Cached values (managed by getEpsilonMin/setEpsilonMinCache):
	epsilonMinCache  float64 // minimum of epsilonBar over the Brillouin zone
	lastEpsilonMinD1 float64 // D1 value the cache was computed for
}
// Wrappable is a function of an Environment and a k-vector, e.g. a band
// energy to be integrated over the Brillouin zone.
type Wrappable func(*Environment, vec.Vector) float64

// ===== Utility functions =====

// WrapFunc binds env into fn, yielding a function of the k-vector alone
// (the form expected by the bzone routines).
func WrapFunc(env *Environment, fn Wrappable) bzone.BzFunc {
	return func(k vec.Vector) float64 {
		return fn(env, k)
	}
}
// NewEnvironment creates an Environment from the given serialized JSON data.
// A Beta of math.MaxFloat64 is translated back to +Inf (JSON cannot encode
// infinities), and the epsilon-minimum cache is initialized for the parsed D1.
func NewEnvironment(jsonData string) (*Environment, error) {
	// initialize env with input data
	env := new(Environment)
	err := serialize.CopyFromJSON(jsonData, env)
	if err != nil {
		return nil, err
	}
	// hack to get around JSON's lack of support for Inf
	if env.Beta == math.MaxFloat64 {
		env.Beta = math.Inf(1)
	}
	// initialize cache
	env.setEpsilonMinCache()
	env.lastEpsilonMinD1 = env.D1
	return env, nil
}
// String serializes env to JSON. An infinite Beta is temporarily replaced by
// math.MaxFloat64 (JSON cannot represent Inf) and restored before returning;
// note this briefly mutates env, so the method is not safe for concurrent use.
func (env *Environment) String() string {
	if env.Beta == math.Inf(1) {
		// hack to get around JSON's choice to not allow Inf
		env.Beta = math.MaxFloat64
	}
	marshalled, err := serialize.MakeJSON(env)
	if err != nil {
		panic(err)
	}
	if env.Beta == math.MaxFloat64 {
		env.Beta = math.Inf(1)
	}
	return marshalled
}
// Copy returns a deep copy of env obtained by a JSON round trip.
func (env *Environment) Copy() *Environment {
	serialized := env.String()
	duplicate, err := NewEnvironment(serialized)
	if err != nil {
		// An Environment can always be re-parsed from its own serialization.
		panic(err)
	}
	return duplicate
}
// Set iterates through v and vars simultaneously, assigning env.<vars[i]> =
// v[i] for each name. Panics if a name does not correspond to a field of
// Environment, or if the field is not of type float64.
func (env *Environment) Set(v vec.Vector, vars []string) {
	ev := reflect.ValueOf(env).Elem()
	for i := 0; i < len(vars); i++ {
		field := ev.FieldByName(vars[i])
		// FieldByName returns the zero (invalid) Value for an unknown name.
		// The previous check compared against reflect.Zero(reflect.TypeOf(env)),
		// a valid *Environment value that can never equal the invalid zero
		// Value, so missing fields slipped through and panicked later inside
		// the reflect package with a confusing message.
		if !field.IsValid() {
			panic(fmt.Sprintf("Field %v not present in Environment", vars[i]))
		}
		if field.Kind() != reflect.Float64 {
			panic(fmt.Sprintf("Field %v is non-float", vars[i]))
		}
		field.SetFloat(v[i])
	}
}
// Split returns N copies of env in which the field named varName is set to N
// evenly spaced values from min to max (inclusive). With N == 1 the single
// copy receives the value min.
func (env *Environment) Split(varName string, N int, min, max float64) []*Environment {
	// Guard N == 1 before dividing: the old code computed (max-min)/0 first
	// (yielding Inf/NaN) and then overwrote it, which worked but obscured
	// the intent.
	step := 0.0
	if N > 1 {
		step = (max - min) / float64(N-1)
	}
	rets := make([]*Environment, N)
	for i := 0; i < N; i++ {
		x := min + float64(i)*step
		thisCopy := env.Copy()
		thisCopy.Set([]float64{x}, []string{varName})
		rets[i] = thisCopy
	}
	return rets
}
// MultiSplit applies Split for each variable in varNames in turn, producing
// the "Cartesian product" of all the requested parameter sweeps.
func (env *Environment) MultiSplit(varNames []string, Ns []int, mins, maxs []float64) []*Environment {
	if len(varNames) == 0 {
		return nil
	}
	first := env.Split(varNames[0], Ns[0], mins[0], maxs[0])
	if len(varNames) == 1 {
		return first
	}
	ret := make([]*Environment, 0)
	for _, e := range first {
		ret = append(ret, e.MultiSplit(varNames[1:], Ns[1:], mins[1:], maxs[1:])...)
	}
	return ret
}
// ===== Physics functions =====
// Th returns the scaled nearest-neighbour hopping energy T0*(1-X).
func (env *Environment) Th() float64 {
	return env.T0 * (1.0 - env.X)
}

// Epsilon_h returns the single-holon energy, shifted so that its minimum
// over the Brillouin zone is 0. The shift comes from the cached minimum of
// epsilonBar (see getEpsilonMin), so callers need no manual setup.
func (env *Environment) Epsilon_h(k vec.Vector) float64 {
	return env.epsilonBar(k) - env.getEpsilonMin()
}

// epsilonBar is the single-holon band energy without the fixed-minimum shift.
func (env *Environment) epsilonBar(k vec.Vector) float64 {
	sx, sy := math.Sin(k[0]), math.Sin(k[1])
	return 2.0*env.Th()*((sx+sy)*(sx+sy)-1.0) + 4.0*(2.0*env.D1*env.T0-env.Thp)*sx*sy
}

// getEpsilonMin returns the minimum of epsilonBar, recomputing the cached
// value only when env.D1 has changed since the last call (D1 is the only
// dynamically-determined parameter entering epsilonBar, per the Environment
// field comments).
func (env *Environment) getEpsilonMin() float64 {
	if env.D1 != env.lastEpsilonMinD1 {
		env.setEpsilonMinCache()
		env.lastEpsilonMinD1 = env.D1
	}
	return env.epsilonMinCache
}

// setEpsilonMinCache finds the Brillouin-zone minimum of epsilonBar and
// stores it in the cache.
func (env *Environment) setEpsilonMinCache() {
	worker := func(k vec.Vector) float64 {
		return env.epsilonBar(k)
	}
	env.epsilonMinCache = bzone.Min(env.PointsPerSide, 2, worker)
	//println(env.epsilonMinCache)
}

// Xi_h returns the single-holon energy measured from the chemical
// potential; its minimum is -env.Mu_h.
func (env *Environment) Xi_h(k []float64) float64 {
	return env.Epsilon_h(k) - env.Mu_h
}

// Delta_h is the superconducting gap function; Alpha selects s-wave (+1)
// or d-wave (-1) symmetry.
func (env *Environment) Delta_h(k vec.Vector) float64 {
	return 4.0 * (env.T0 + env.Tz) * env.F0 * (math.Sin(k[0]) + float64(env.Alpha)*math.Sin(k[1]))
}

// BogoEnergy returns the Bogolyubov quasiparticle energy sqrt(xi^2 + delta^2).
func (env *Environment) BogoEnergy(k vec.Vector) float64 {
	xi := env.Xi_h(k)
	delta := env.Delta_h(k)
	return math.Sqrt(xi*xi + delta*delta)
}
// Fermi evaluates the Fermi-Dirac distribution 1/(e^{Beta*E}+1) at the
// given energy, with guards for the zero-temperature limit and for
// arguments whose exponential would overflow float64.
func (env *Environment) Fermi(energy float64) float64 {
	// Exactly 1/2 at the chemical potential.
	if energy == 0.0 {
		return 0.5
	}
	// Temperature is 0 or e^(Beta*energy) is too big to calculate:
	// fall back to the step function (1 below the Fermi level, 0 above).
	if env.Beta == math.Inf(1) || env.Beta >= math.Abs(math.MaxFloat64/energy) || math.Abs(env.Beta*energy) >= math.Log(math.MaxFloat64) {
		if energy <= 0 {
			return 1.0
		}
		return 0.0
	}
	// nonzero temperature
	return 1.0 / (math.Exp(energy*env.Beta) + 1.0)
}
// Extract the temperature from env
func GetTemp(data interface{}) float64 {
env := data.(Environment)
return 1.0 / env.Beta
} | tempAll/environment.go | 0.585812 | 0.509215 | environment.go | starcoder |
package pigo
import (
"bytes"
"encoding/binary"
"math"
"math/rand"
"sort"
"unsafe"
)
// Puploc contains all the information resulted from the pupil detection
// needed for accessing from a global scope.
type Puploc struct {
	Row      int     // row (y) coordinate of the detection center
	Col      int     // column (x) coordinate of the detection center
	Scale    float32 // size of the search region the localizer perturbs around
	Perturbs int     // number of random perturbation runs used by RunDetector
}
// PuplocCascade is a general struct for storing
// the cascade tree values encoded into the binary file.
type PuplocCascade struct {
	stages    uint32    // number of boosting stages
	scales    float32   // scale multiplier applied after each stage
	trees     uint32    // number of trees per stage
	treeDepth uint32    // depth of each binary tree
	treeCodes []int8    // flattened bytecoded comparison nodes
	treePreds []float32 // leaf predictions, stored as (row, col) pairs
}
// UnpackCascade unpacks the pupil localization cascade file.
//
// Binary layout (little endian): number of stages, scale multiplier, trees
// per stage, tree depth, then for every stage/tree the bytecoded comparison
// nodes followed by the leaf predictions (row/col float32 pairs).
//
// NOTE(review): dataView is written to but never read back — the
// binary.LittleEndian reads on `packet` are what actually decode the data.
// NOTE(review): the uint32->float32 reinterpretations via unsafe.Pointer
// could be expressed with math.Float32frombits; confirm before changing.
func (plc *PuplocCascade) UnpackCascade(packet []byte) (*PuplocCascade, error) {
	var (
		stages    uint32
		scales    float32
		trees     uint32
		treeDepth uint32
		treeCodes []int8
		treePreds []float32
	)
	pos := 0
	buff := make([]byte, 4)
	dataView := bytes.NewBuffer(buff)
	// Read the depth (size) of each tree and write it into the buffer array.
	_, err := dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
	if err != nil {
		return nil, err
	}
	// dataView was seeded with 4 bytes above, so this branch is always taken.
	if dataView.Len() > 0 {
		// Get the number of stages as 32-bit uint and write it into the buffer array.
		stages = binary.LittleEndian.Uint32(packet[pos:])
		_, err := dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
		if err != nil {
			return nil, err
		}
		pos += 4
		// Obtain the scale multiplier (applied after each stage) and write it into the buffer array.
		u32scales := binary.LittleEndian.Uint32(packet[pos:])
		// Reinterpret the uint32 bit pattern as a float32.
		scales = *(*float32)(unsafe.Pointer(&u32scales))
		_, err = dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
		if err != nil {
			return nil, err
		}
		pos += 4
		// Obtain the number of trees per stage and write it into the buffer array.
		trees = binary.LittleEndian.Uint32(packet[pos:])
		_, err = dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
		if err != nil {
			return nil, err
		}
		pos += 4
		// Obtain the depth of each tree and write it into the buffer array.
		treeDepth = binary.LittleEndian.Uint32(packet[pos:])
		_, err = dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
		if err != nil {
			return nil, err
		}
		pos += 4
		// Traverse all the stages of the binary tree
		for s := 0; s < int(stages); s++ {
			// Traverse the branches of each stage
			for t := 0; t < int(trees); t++ {
				// Each tree stores 4 bytes per internal node: 2^depth - 1
				// nodes, hence 4*2^depth - 4 bytes of comparison codes.
				code := packet[pos : pos+int(4*math.Pow(2, float64(treeDepth))-4)]
				// Convert unsigned bytecodes to signed ones.
				i8code := *(*[]int8)(unsafe.Pointer(&code))
				treeCodes = append(treeCodes, i8code...)

				pos = pos + int(4*math.Pow(2, float64(treeDepth))-4)

				// Read predictions from the tree's 2^depth leaf nodes,
				// two float32 values (row and col displacement) per leaf.
				for i := 0; i < int(math.Pow(2, float64(treeDepth))); i++ {
					for l := 0; l < 2; l++ {
						_, err := dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
						if err != nil {
							return nil, err
						}
						u32pred := binary.LittleEndian.Uint32(packet[pos:])
						// Reinterpret the uint32 bit pattern as a float32.
						f32pred := *(*float32)(unsafe.Pointer(&u32pred))
						treePreds = append(treePreds, f32pred)
						pos += 4
					}
				}
			}
		}
	}
	return &PuplocCascade{
		stages:    stages,
		scales:    scales,
		trees:     trees,
		treeDepth: treeDepth,
		treeCodes: treeCodes,
		treePreds: treePreds,
	}, nil
}
// RunDetector runs the pupil localization function.
// It perturbs the initial location/scale pl.Perturbs times, runs the boosted
// tree regressor from each perturbation, and returns the per-axis median of
// the sorted results as the final pupil estimate.
func (plc *PuplocCascade) RunDetector(pl Puploc, img ImageParams) *Puploc {
	// localization runs the full cascade once from starting row r, column c and
	// scale s over the grayscale pixel buffer, returning {row, col, scale}.
	localization := func(r, c, s float32, pixels []uint8, rows, cols, dim int) []float32 {
		root := 0
		// pTree is the number of leaves per tree (2^depth).
		pTree := int(math.Pow(2, float64(plc.treeDepth)))
		for i := 0; i < int(plc.stages); i++ {
			var dr, dc float32 = 0.0, 0.0
			for j := 0; j < int(plc.trees); j++ {
				idx := 0
				for k := 0; k < int(plc.treeDepth); k++ {
					// Each internal node compares two pixels at code offsets scaled
					// by s (fixed-point, >>8); coordinates are clamped to the image.
					r1 := min(rows-1, max(0, (256*int(r)+int(plc.treeCodes[root+4*idx+0])*int(round(float64(s))))>>8))
					c1 := min(cols-1, max(0, (256*int(c)+int(plc.treeCodes[root+4*idx+1])*int(round(float64(s))))>>8))
					r2 := min(rows-1, max(0, (256*int(r)+int(plc.treeCodes[root+4*idx+2])*int(round(float64(s))))>>8))
					c2 := min(cols-1, max(0, (256*int(c)+int(plc.treeCodes[root+4*idx+3])*int(round(float64(s))))>>8))
					bintest := func(r1, r2 uint8) uint8 {
						if r1 > r2 {
							return 1
						}
						return 0
					}
					// Descend left (0) or right (1) depending on the pixel test.
					idx = 2*idx + 1 + int(bintest(pixels[r1*dim+c1], pixels[r2*dim+c2]))
				}
				// Look up the (dr, dc) prediction stored for the reached leaf.
				lutIdx := 2 * (int(plc.trees)*pTree*i + pTree*j + idx - (pTree - 1))
				dr += plc.treePreds[lutIdx+0]
				dc += plc.treePreds[lutIdx+1]
				root += 4*pTree - 4
			}
			// Apply the stage's accumulated displacement and shrink the scale.
			r += dr * s
			c += dc * s
			s *= plc.scales
		}
		return []float32{r, c, s}
	}
	rows, cols, scale := []float32{}, []float32{}, []float32{}
	for i := 0; i < pl.Perturbs; i++ {
		// Randomly jitter position (±7.5% of scale) and scale (25%..125%).
		rt := float32(pl.Row) + float32(pl.Scale)*0.15*(0.5-rand.Float32())
		ct := float32(pl.Col) + float32(pl.Scale)*0.15*(0.5-rand.Float32())
		st := float32(pl.Scale) * (0.25 + rand.Float32())
		res := localization(rt, ct, st, img.Pixels, img.Rows, img.Cols, img.Dim)
		rows = append(rows, res[0])
		cols = append(cols, res[1])
		scale = append(scale, res[2])
	}
	// sort each axis's perturbation results in ascending order
	sort.Sort(plocSort(rows))
	sort.Sort(plocSort(cols))
	sort.Sort(plocSort(scale))
	// get the median value of the sorted perturbation results
	// NOTE(review): medians are taken independently per axis, so the returned
	// triple may not correspond to any single localization run — confirm.
	return &Puploc{
		Row:   int(rows[int(round(float64(pl.Perturbs)/2))]),
		Col:   int(cols[int(round(float64(pl.Perturbs)/2))]),
		Scale: scale[int(round(float64(pl.Perturbs)/2))],
	}
}
// min returns the minimum value between two numbers
func min(val1, val2 int) int {
	if val2 < val1 {
		return val2
	}
	return val1
}
// max returns the maximum value between two numbers
func max(val1, val2 int) int {
	if val2 > val1 {
		return val2
	}
	return val1
}
// round returns the nearest integer as a float64, rounding ties away from zero.
// math.Round (Go 1.10+) implements exactly this semantic, so the previous
// hand-rolled Trunc/Copysign version is replaced with the standard library call.
func round(x float64) float64 {
	return math.Round(x)
}
// Implement custom sorting function on detection values.
type plocSort []float32
func (q plocSort) Len() int { return len(q) }
func (q plocSort) Swap(i, j int) { q[i], q[j] = q[j], q[i] }
func (q plocSort) Less(i, j int) bool {
if q[i] < q[j] {
return true
}
if q[i] > q[j] {
return false
}
return q[i] < q[j]
} | vendor/github.com/esimov/pigo/core/puploc.go | 0.567218 | 0.504516 | puploc.go | starcoder |
package game
import (
"image/color"
"math"
"github.com/hajimehoshi/ebiten/v2"
"github.com/lucasb-eyer/go-colorful"
)
// Bresenham algorithm for rasterizing a circle
// Draw a circle that fills given image
// Before drawing, ensure that the image has odd width and height for best
// results:
// if width%2 == 0 {
// width++
// height = width
// }
func bresenham(color color.Color, img *ebiten.Image) {
	// Center the circle in the image; the radius spans half the width.
	width, height := img.Size()
	x := width / 2
	y := height / 2
	r := width / 2
	// Defensive check: width/2 is never negative for a valid image.
	if r < 0 {
		return
	}
	// x1/y1 walk one octant from (-r, 0); err is the midpoint error term.
	// NOTE(review): r is reused below as the error snapshot, shadowing the
	// radius — intentional but easy to misread.
	x1, y1, err := -r, 0, 2-2*r
	for {
		// Mirror the current octant point into all four quadrants.
		img.Set(x-x1, y+y1, color)
		img.Set(x-y1, y-x1, color)
		img.Set(x+x1, y-y1, color)
		img.Set(x+y1, y+x1, color)
		r = err
		// NOTE(review): the x/y step order and conditions differ slightly from
		// the canonical midpoint-circle formulation — verify edge pixels.
		if r > x1 {
			x1++
			err += x1*2 + 1
		}
		if r <= y1 {
			y1++
			err += y1*2 + 1
		}
		if x1 >= 0 {
			break
		}
	}
}
// drawCircleToImage fills the whole image with a circle rendered by the
// given shader, passing the image origin and size as shader uniforms.
func drawCircleToImage(img *ebiten.Image, shader *ebiten.Shader) {
	const x, y = 0.0, 0.0
	w, h := img.Size()
	op := &ebiten.DrawRectShaderOptions{}
	op.GeoM.Translate(x, y)
	op.Uniforms = map[string]interface{}{
		"Translate": []float32{x, y},
		"Size":      []float32{float32(w), float32(h)},
	}
	img.DrawRectShader(w, h, shader, op)
}
// NewCircle creates a new circle at position x,y with radius r,
// pre-rendering its sprite with the supplied shader.
func NewCircle(x, y, r float64, shader *ebiten.Shader) *Circle {
	// Image is square, slightly larger than the diameter.
	side := int(r)*2 + 3
	img := ebiten.NewImage(side, side)
	drawCircleToImage(img, shader)
	c := &Circle{
		selected: false,
		pos:      Vec2{x, y},
		radius:   r,
		area:     math.Pi * r * r,
		// maxMod controls the accumulation of activity based on speed.
		maxMod: remap(r, 5, 70, 5, 2),
		// dimRate controls how fast activity fades.
		dimRate: remap(r, 5, 70, 0.07, 0.01),
		// maxCharge is the cap on accumulated activity.
		maxCharge: 1.5,
		color:     randomCircleColor(),
		image:     img,
	}
	return c
}
// randomCircleColor picks a random hue with fixed chroma and lightness in
// the HCL color space.
func randomCircleColor() colorful.Color {
	const (
		chroma    = 0.45 // -1 .. 1
		lightness = 0.45 // 0 .. 1
	)
	hue := randFloat(0, 360)
	// Defensive wrap-around; randFloat(0, 360) should already stay in range.
	if hue > 360 {
		hue -= 360
	}
	return colorful.Hcl(hue, chroma, lightness)
}
// Circle represents a circle
type Circle struct {
	selected bool // whether the circle is currently selected
	id int
	pos Vec2 // current center position
	prevPos Vec2 // position on the previous frame (used for motion blur)
	vel Vec2 // velocity
	acc Vec2 // acceleration
	radius float64
	area float64 // cached area (pi * r^2), set at construction
	speed float64 // cached |vel|, refreshed in postUpdate
	activity float64 // "charge" driving brightness; grows with speed/collisions, decays over time
	maxMod float64 // max activity gain per update, derived from radius
	dimRate float64 // activity decay per update, derived from radius
	maxCharge float64 // upper bound on activity
	color colorful.Color
	image *ebiten.Image // pre-rendered circle sprite
}
// postUpdate refreshes the cached speed and advances the activity level:
// activity gains proportionally to speed (capped at maxMod), decays by
// dimRate, and is clamped to [0, maxCharge].
func (c *Circle) postUpdate() {
	c.speed = c.vel.Len()
	charge := c.activity + remap(c.speed, 0, 100, 0, c.maxMod) - c.dimRate
	c.activity = math.Min(math.Max(charge, 0), c.maxCharge)
}
// addCollisionEnergy raises the circle's activity in proportion to the
// collision energy (0..500 mapped onto 0..maxMod).
func (c *Circle) addCollisionEnergy(energy float64) {
	c.activity += remap(energy, 0, 500, 0, c.maxMod)
}
// Draw the circle to the screen.
// NOTE(review): value receiver — the activity/color mutations below affect
// only this call's local copy of the Circle, not the stored instance.
func (c Circle) Draw(screen *ebiten.Image) {
	op := &ebiten.DrawImageOptions{}
	// set chroma and lightness based on speed
	if c.selected {
		c.activity = math.Max(c.activity, 1.0)
	}
	hue, chroma, lightness := c.color.Hcl()
	chroma = remap(math.Min(c.activity, 1), 0, 1, 0, 1)
	lightness = remap(math.Min(c.activity, 1), 0, 1, 0.45, 0.9)
	c.color = colorful.Hcl(hue, chroma, lightness)
	r := c.color.R
	g := c.color.G
	b := c.color.B
	// Selected circles get an extra brightness boost in HSV space.
	if c.selected {
		h, s, v := c.color.Hsv()
		col := colorful.Hsv(h, s, math.Min(v+0.25, 1))
		r = col.R
		g = col.G
		b = col.B
	}
	// Draw motion blur effect that fades as the circle slows
	if c.speed > 10 {
		a := remap(clamp(c.speed, 10, 75), 10, 75, 0, 0.95)
		op.GeoM.Translate(c.prevPos.X-c.radius, c.prevPos.Y-c.radius)
		op.ColorM.Scale(r, g, b, a)
		screen.DrawImage(c.image, op)
		drawLine(c.pos, c.prevPos, c.radius*1.9, screen, c.color, a)
	}
	// Draw the circle
	op.GeoM.Reset()
	op.ColorM.Reset()
	op.ColorM.Scale(r, g, b, 1)
	op.GeoM.Translate(c.pos.X-c.radius, c.pos.Y-c.radius)
	screen.DrawImage(c.image, op)
} | game/circle.go | 0.691081 | 0.47317 | circle.go | starcoder
// card.go contains the Suit and Face types, as well as the Card struct.
// Card contains basic card variables, including both logic and UI information.
package card
import (
"golang.org/x/mobile/exp/f32"
"golang.org/x/mobile/exp/sprite"
"hearts/img/coords"
)
type Suit int

const (
	Club Suit = iota
	Diamond
	Spade
	Heart
	UnknownSuit
)

// Converts a Suit type to string type; unknown suits become "?".
func (s Suit) String() string {
	switch s {
	case Club:
		return "c"
	case Diamond:
		return "d"
	case Spade:
		return "s"
	case Heart:
		return "h"
	default:
		return "?"
	}
}

// Converts a string type to Suit type; unrecognized strings become UnknownSuit.
func ConvertToSuit(s string) Suit {
	switch s {
	case "c":
		return Club
	case "d":
		return Diamond
	case "s":
		return Spade
	case "h":
		return Heart
	default:
		return UnknownSuit
	}
}
type Face int

const (
	Two Face = iota + 2
	Three
	Four
	Five
	Six
	Seven
	Eight
	Nine
	Ten
	Jack
	Queen
	King
	// note: in Hearts, Aces are high
	Ace
	UnknownFace
)

// faceToString maps each known Face to its string form (Ace is rendered "1").
var faceToString = map[Face]string{
	Ace: "1", Two: "2", Three: "3", Four: "4", Five: "5", Six: "6",
	Seven: "7", Eight: "8", Nine: "9", Ten: "10", Jack: "j", Queen: "q", King: "k",
}

// stringToFace is the inverse of faceToString.
var stringToFace = map[string]Face{
	"1": Ace, "2": Two, "3": Three, "4": Four, "5": Five, "6": Six,
	"7": Seven, "8": Eight, "9": Nine, "10": Ten, "j": Jack, "q": Queen, "k": King,
}

// Converts a Face type to string type; unknown faces become "?".
func (f Face) String() string {
	if s, ok := faceToString[f]; ok {
		return s
	}
	return "?"
}

// Converts a string type to Face type; unrecognized strings become UnknownFace.
func ConvertToFace(s string) Face {
	if f, ok := stringToFace[s]; ok {
		return f
	}
	return UnknownFace
}
// NewCard returns a new card with the suit and face variables set.
// Image-related fields are left zero-valued for the caller to populate.
func NewCard(face Face, suit Suit) *Card {
	c := &Card{f: face, s: suit}
	return c
}
// Card holds a playing card's logical identity (suit and face) together
// with the UI state used to render it.
type Card struct {
	s Suit
	f Face
	node *sprite.Node // scene-graph node that renders this card
	image sprite.SubTex // texture for the card front
	back sprite.SubTex // texture for the card back
	// XY coordinates of the initial placement of the card
	initial *coords.Vec
	// current XY coordinates of the card
	current *coords.Vec
	// current width and height of the card
	dimensions *coords.Vec
}
// GetSuit returns the suit of c.
func (c *Card) GetSuit() Suit {
	return c.s
}
// GetFace returns the face of c.
func (c *Card) GetFace() Face {
	return c.f
}
// GetNode returns the sprite node of c.
func (c *Card) GetNode() *sprite.Node {
	return c.node
}
// GetImage returns the front image of c.
func (c *Card) GetImage() sprite.SubTex {
	return c.image
}
// GetBack returns the image of the back of c.
func (c *Card) GetBack() sprite.SubTex {
	return c.back
}
// GetCurrent returns a vector containing the current x- and y-coordinate of the upper left corner of c.
func (c *Card) GetCurrent() *coords.Vec {
	return c.current
}
// GetInitial returns a vector containing the initial x- and y-coordinate of the upper left corner of c.
func (c *Card) GetInitial() *coords.Vec {
	return c.initial
}
// GetDimensions returns a vector containing the width and height of c.
func (c *Card) GetDimensions() *coords.Vec {
	return c.dimensions
}
// SetNode sets the node of c to n.
func (c *Card) SetNode(n *sprite.Node) {
	c.node = n
}
// SetImage sets the front image of c to s.
func (c *Card) SetImage(s sprite.SubTex) {
	c.image = s
}
// SetBack sets the card back of c to s.
func (c *Card) SetBack(s sprite.SubTex) {
	c.back = s
}
// SetFrontDisplay shows the front of c.
func (c *Card) SetFrontDisplay(eng sprite.Engine) {
	eng.SetSubTex(c.node, c.image)
}
// SetBackDisplay shows the back of c.
func (c *Card) SetBackDisplay(eng sprite.Engine) {
	eng.SetSubTex(c.node, c.back)
}
// Move repositions and resizes c by rebuilding the node's affine transform
// (scale by newDimensions, translate to newXY), then caches the new
// position and size on the card.
func (c *Card) Move(newXY, newDimensions *coords.Vec, eng sprite.Engine) {
	eng.SetTransform(c.node, f32.Affine{
		{newDimensions.X, 0, newXY.X},
		{0, newDimensions.Y, newXY.Y},
	})
	c.current = newXY
	c.dimensions = newDimensions
}
// SetInitial records the initial x and y coordinates of c.
func (c *Card) SetInitial(newInitial *coords.Vec) {
	c.initial = newInitial
}
// WorthPoints reports whether c scores points in Hearts: every Hearts card,
// plus the Queen of Spades.
func (c *Card) WorthPoints() bool {
	if c.s == Heart {
		return true
	}
	return c.s == Spade && c.f == Queen
}
// Used to sort an array of cards
type CardSorter []*Card
// Returns the length of the array of cards
func (cs CardSorter) Len() int {
return len(cs)
}
// Swaps the positions of two cards in the array
func (cs CardSorter) Swap(i, j int) {
cs[i], cs[j] = cs[j], cs[i]
}
// Compares two cards-- one card is less than another if it has a lower suit, or if it has the same suit and a lower face
func (cs CardSorter) Less(i, j int) bool {
if cs[i].GetSuit() == cs[j].GetSuit() {
return cs[i].GetFace() < cs[j].GetFace()
}
return cs[i].GetSuit() < cs[j].GetSuit()
} | go/src/hearts/logic/card/card.go | 0.68342 | 0.457621 | card.go | starcoder |
package im2a
import (
"errors"
"fmt"
"image"
"image/color"
"io"
"math"
"net/http"
"os"
"strings"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"github.com/disintegration/imaging"
"golang.org/x/image/draw"
)
// Pixel is one output cell: a 256-color palette index, the rune used to
// render it, and whether the source pixel was treated as fully transparent.
type Pixel struct {
	Color int
	Rune rune
	Transparent bool
}
// Asciifier converts an image into colored ASCII art according to Options.
type Asciifier struct {
	Options *Options
}
// NewAsciifier creates an Asciifier bound to the given options.
func NewAsciifier(options *Options) *Asciifier {
	return &Asciifier{
		Options: options,
	}
}
// Asciify loads the configured image (local path or http(s) URL), scales it
// according to Options (falling back to the terminal size), converts every
// pixel to a colored character cell, and writes the result to out.
func (a *Asciifier) Asciify(out io.Writer) error {
	var file io.ReadCloser
	if strings.HasPrefix(a.Options.Image, "http://") || strings.HasPrefix(a.Options.Image, "https://") {
		// Open URL.
		response, err := http.Get(a.Options.Image)
		if err != nil {
			return err
		}
		file = response.Body
	} else {
		// Open local file.
		f, err := os.Open(a.Options.Image)
		if err != nil {
			return err
		}
		file = f
	}
	defer file.Close()
	// Load image.
	src, _, err := image.Decode(file)
	if err != nil {
		return err
	}
	// Invert?
	if a.Options.Invert {
		src = imaging.Invert(src)
	}
	// We don't need the terminal in all cases.
	var terminal *Terminal
	termWidth := 0
	if a.Options.Pixel || (!a.Options.HTML && a.Options.Width == 0 && a.Options.Height == 0) {
		terminal = NewTerminal()
		if terminal.Width == 0 || terminal.Height == 0 {
			return errors.New("Cannot determine terminal size")
		}
		termWidth = terminal.Width
	}
	// Determine how to scale the image.
	width, height := 0, 0
	if a.Options.Width > 0 && a.Options.Height > 0 {
		// Use provided values
		width, height = a.Options.Width, a.Options.Height
	} else if a.Options.Width > 0 {
		// Calculate height from width.
		prop := float64(src.Bounds().Dx()) / float64(a.Options.Width)
		width = a.Options.Width
		height = roundInt(float64(src.Bounds().Dy()) / prop)
		if !a.Options.HTML && !a.Options.Pixel {
			// Terminal character cells are roughly twice as tall as wide.
			height /= 2
		}
	} else if a.Options.Height > 0 {
		// Calculate width from height.
		prop := float64(src.Bounds().Dy()) / float64(a.Options.Height)
		width = roundInt(float64(src.Bounds().Dx()) / prop)
		height = a.Options.Height
		if !a.Options.HTML && !a.Options.Pixel {
			width *= 2
		}
	} else if !a.Options.HTML {
		// Infer size from terminal.
		if a.Options.Pixel {
			// Scale to Width:Height*2.
			prop := float64(src.Bounds().Dy()) / float64(terminal.Height*2-2)
			width = roundInt(float64(src.Bounds().Dx()) / prop)
			height = terminal.Height*2 - 2
			// Fit width.
			if width > terminal.Width {
				prop = float64(src.Bounds().Dx()) / float64(terminal.Width-1)
				width = terminal.Width - 1
				height = roundInt(float64(src.Bounds().Dy()) / prop)
			}
		} else {
			// Scale to Width/2:Height.
			prop := float64(src.Bounds().Dy()) / float64(terminal.Height-1) / 2.0
			width = roundInt(float64(src.Bounds().Dx()) / prop)
			height = terminal.Height - 1
			// Fit width.
			if width > terminal.Width {
				prop = float64(src.Bounds().Dx()) / float64(terminal.Width-1) * 2.0
				width = terminal.Width - 1
				height = roundInt(float64(src.Bounds().Dy()) / prop)
			}
		}
	}
	// In pixel mode we need an even amount of rows.
	if a.Options.Pixel && height&1 == 1 {
		height++
	}
	// Scale the image.
	if width > 0 && height > 0 {
		dst := image.NewRGBA(image.Rect(0, 0, width, height))
		draw.Draw(dst, dst.Bounds(), image.Transparent, image.ZP, draw.Src)
		draw.ApproxBiLinear.Scale(dst, dst.Bounds(), src, src.Bounds(), draw.Src, nil)
		src = dst
	}
	width, height = src.Bounds().Dx(), src.Bounds().Dy()
	// Allocate pixel buffer.
	pixels := make([]*Pixel, height*width)
	// Minimum opacity to consider a pixel fully transparent.
	minOpacity := uint32((1.0 - a.Options.TransparencyThreshold) * 0xFFFF)
	// Walk the image.
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			// Get pixel.
			col := src.At(x, y)
			pixel := &Pixel{}
			// Grayscale it using the configured channel weights.
			// NOTE(review): if the three weights sum to more than 1.0 this
			// uint16 conversion can wrap — confirm weights are normalized.
			r, g, b, aa := col.RGBA()
			v := uint16((float64(r)*a.Options.RedWeight +
				float64(g)*a.Options.GreenWeight +
				float64(b)*a.Options.BlueWeight))
			grayscale := &color.RGBA64{R: v, G: v, B: v, A: 0}
			// Find nearest grayscale terminal color to assign character.
			minDistance := math.Inf(0)
			idx := 0
			for i, c := range ColorsG {
				if distance := colorDistance(c, grayscale); distance < minDistance {
					minDistance = distance
					idx = i
				}
			}
			// Assign character.
			pixel.Rune = a.Options.Charset[idx%len(a.Options.Charset)]
			if a.Options.Transparent && aa <= minOpacity {
				// Pixel is transparent.
				pixel.Transparent = true
			} else if a.Options.Grayscale {
				// Assign color index from character index.
				if idx == 1 {
					idx = 15
				} else if idx != 0 {
					idx += 230
				}
				pixel.Color = idx
			} else {
				// Find nearest color from the 256-color map.
				minDistance := math.Inf(0)
				idx := 0
				for i, c := range ColorsT {
					if distance := colorDistance(c, col); distance < minDistance {
						minDistance = distance
						idx = i
					}
				}
				pixel.Color = idx
			}
			// Store the pixel.
			pixels[y*width+x] = pixel
		}
	}
	// Header.
	a.PrintHeader(out)
	// Print the buffer.
	for y := 0; y < height; y++ {
		a.BeginLine(out, termWidth, width)
		// We can only optimize colors on the same line.
		prev1 := &Pixel{Color: -1}
		prev2 := &Pixel{Color: -1}
		for x := 0; x < width; x++ {
			if a.Options.Pixel {
				// Pixel mode - box drawing characters, 2 lines at a time.
				current1 := pixels[y*width+x]
				current2 := pixels[y*width+width+x]
				a.PrintPixel(out, current1, current2, prev1, prev2)
				prev1 = current1
				prev2 = current2
			} else {
				current1 := pixels[y*width+x]
				a.PrintRune(out, current1, prev1)
				prev1 = current1
			}
		}
		a.EndLine(out)
		if a.Options.Pixel {
			// Two source rows were consumed per printed line.
			y++
		}
	}
	// Footer.
	a.PrintFooter(out)
	return nil
}
// PrintHeader writes the HTML preamble (doctype, per-palette CSS classes,
// opening <pre>) when HTML output is enabled; it writes nothing otherwise.
func (a *Asciifier) PrintHeader(out io.Writer) {
	if a.Options.HTML {
		fmt.Fprintln(out, "<!DOCTYPE html>")
		fmt.Fprintln(out, "<html>")
		fmt.Fprintln(out, "<head>")
		fmt.Fprintln(out, " <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />")
		fmt.Fprintln(out, " <title>im2a asciified image</title>")
		fmt.Fprintln(out, " <style type=\"text/css\">")
		fmt.Fprintln(out, " body { background: #000000; }")
		fmt.Fprintln(out, " pre { font: normal 12px/9px Menlo, monospace; }")
		if a.Options.Center {
			fmt.Fprintln(out, " pre { text-align: center; }")
		}
		if a.Options.Grayscale {
			// One CSS class per grayscale palette entry.
			for idx, color := range ColorsGG {
				fmt.Fprintf(out, " .c_%d { color: #%06x }\n", idx, color)
			}
		} else {
			// One CSS class per 256-color palette entry.
			for idx, color := range ColorsTT {
				fmt.Fprintf(out, " .c_%d { color: #%06x }\n", idx, color)
			}
		}
		fmt.Fprintln(out, " </style>")
		fmt.Fprintln(out, "</head>")
		fmt.Fprintln(out, "<body>")
		fmt.Fprintln(out, "<pre>")
	}
}
// PrintFooter writes the closing HTML markup and a version comment when HTML
// output is enabled; it is a no-op in terminal mode.
func (a *Asciifier) PrintFooter(out io.Writer) {
	if !a.Options.HTML {
		return
	}
	fmt.Fprintln(out, "</pre>")
	fmt.Fprintln(out, "</body>")
	fmt.Fprintln(out, "</html>")
	fmt.Fprintf(out, "<!-- im2a-go v%s -->\n", Version)
}
// BeginLine writes the left padding needed to center the image within the
// terminal width; it writes nothing in HTML mode or when centering is off.
func (a *Asciifier) BeginLine(out io.Writer, termWidth int, imageWidth int) {
	if !a.Options.Center || a.Options.HTML {
		return
	}
	pad := (termWidth - imageWidth) / 2
	fmt.Fprint(out, strings.Repeat(" ", pad))
}
// EndLine terminates an output row: a plain newline in HTML mode, or an ANSI
// attribute reset followed by a newline in terminal mode.
func (a *Asciifier) EndLine(out io.Writer) {
	if a.Options.HTML {
		fmt.Fprintln(out, "")
		return
	}
	fmt.Fprintln(out, "\x1b[0;0m")
}
// PrintRune writes one character cell for the given pixel, emitting a color
// change (CSS span in HTML mode, ANSI escape in terminal mode) only when the
// color differs from the previous cell's.
func (a *Asciifier) PrintRune(out io.Writer, current *Pixel, prev *Pixel) {
	if a.Options.HTML {
		idx := current.Color
		if a.Options.Grayscale {
			// Invert the grayscale terminal-color mapping applied in Asciify
			// (char idx 1 -> color 15, char idx >1 -> color idx+230) to recover
			// the CSS class index into ColorsGG.
			// Fix: the previous code tested idx == 1 (unreachable — Asciify
			// stores 0, 15, or 232..255) and mapped color 15 to -215.
			if idx == 15 {
				idx = 1
			} else if idx != 0 {
				idx -= 230
			}
		}
		if current.Transparent {
			fmt.Fprint(out, " ")
		} else {
			fmt.Fprintf(out, "<span class=\"c_%d\">%c</span>", idx, current.Rune)
		}
	} else {
		if current.Transparent {
			if !prev.Transparent {
				// Reset to the default background before a transparent run.
				fmt.Fprint(out, "\x1b[49m")
			}
			fmt.Fprint(out, " ")
		} else {
			if current.Color != prev.Color {
				fmt.Fprintf(out, "\x1b[38;5;%dm", current.Color)
			}
			fmt.Fprintf(out, "%c", current.Rune)
		}
	}
}
// PrintPixel renders two vertically stacked pixels as one terminal cell using
// half-block characters: the ANSI background color carries the top pixel
// (current1) and the foreground color the bottom pixel (current2). Escape
// codes are emitted only when a color differs from the previous cell's.
func (a *Asciifier) PrintPixel(out io.Writer, current1 *Pixel, current2 *Pixel, prev1 *Pixel, prev2 *Pixel) {
	if current1.Color != prev1.Color || current1.Transparent != prev1.Transparent {
		if current1.Transparent {
			fmt.Fprint(out, "\x1b[49m") // default background
		} else {
			fmt.Fprintf(out, "\x1b[48;5;%dm", current1.Color)
		}
	}
	if current2.Color != prev2.Color || current2.Transparent != prev2.Transparent {
		if current2.Transparent {
			fmt.Fprint(out, "\x1b[39m") // default foreground
		} else {
			fmt.Fprintf(out, "\x1b[38;5;%dm", current2.Color)
		}
	}
	// NOTE(review): when only the top pixel is transparent, the upper half
	// block paints the bottom pixel's (foreground) color into the top half —
	// confirm this orientation is intended.
	if current1.Color == current2.Color || (current1.Transparent && current2.Transparent) {
		fmt.Fprint(out, " ")
	} else if current1.Transparent {
		fmt.Fprint(out, "▀")
	} else {
		fmt.Fprint(out, "▄")
	}
} | asciifier.go | 0.617974 | 0.458834 | asciifier.go | starcoder
package coordinate
import (
"fmt"
"math"
"strings"
)
// EuclideanDistance returns the straight-line (L2) distance between two
// coordinates of equal cardinality.
func EuclideanDistance(c1, c2 Coordinate) float64 {
	// Sum the squared component differences directly; the previous version
	// built an intermediate Multiply coordinate and applied redundant
	// float64() conversions to values that were already float64.
	d := c1.Subtract(c2)
	sum := 0.0
	for i := 0; i < d.Cardinality(); i++ {
		v := d.Get(i)
		sum += v * v
	}
	return math.Sqrt(sum)
}

// ManhattanDistance returns the taxicab (L1) distance between two
// coordinates of equal cardinality.
func ManhattanDistance(c1, c2 Coordinate) float64 {
	d := c1.Subtract(c2)
	sum := 0.0
	for i := 0; i < d.Cardinality(); i++ {
		sum += math.Abs(d.Get(i))
	}
	return sum
}
// Coordinate is an immutable point in n-dimensional space. All arithmetic
// methods return a new Coordinate and leave the receiver untouched.
type Coordinate interface {
	// Get returns the i-th component.
	Get(i int) float64
	// Cardinality returns the number of dimensions.
	Cardinality() int
	// Add returns the component-wise sum with addend.
	Add(addend Coordinate) Coordinate
	// Subtract returns the component-wise difference with subtrahend.
	Subtract(subtrahend Coordinate) Coordinate
	// Multiply returns the component-wise product with multiplier.
	Multiply(multiplier Coordinate) Coordinate
	// Equal reports whether both coordinates have the same cardinality and
	// identical components.
	Equal(other Coordinate) bool
}

// coordinate is the slice-backed implementation of Coordinate.
type coordinate struct {
	coordinates []float64
}

// New creates a Coordinate from the given components.
func New(coordinates ...float64) Coordinate {
	return &coordinate{coordinates}
}

func (c *coordinate) Cardinality() int { return len(c.coordinates) }
func (c *coordinate) Get(i int) float64 { return c.coordinates[i] }

// String renders the coordinate as "(x,y,...)" using %f formatting.
func (c *coordinate) String() string {
	str := make([]string, len(c.coordinates))
	for i, coordinate := range c.coordinates {
		str[i] = fmt.Sprintf("%f", coordinate)
	}
	return fmt.Sprintf("(%s)", strings.Join(str, ","))
}

// compute applies a binary operation component-wise against other and
// returns the result as a new Coordinate.
func (c *coordinate) compute(other Coordinate, computer func(lhs, rhs float64) float64) Coordinate {
	nextCoordinate := &coordinate{
		coordinates: make([]float64, len(c.coordinates)),
	}
	for i, coordinate := range c.coordinates {
		nextCoordinate.coordinates[i] = computer(coordinate, other.Get(i))
	}
	return nextCoordinate
}

func (c *coordinate) Add(addend Coordinate) Coordinate {
	return c.compute(addend, func(lhs, rhs float64) float64 { return lhs + rhs })
}

func (c *coordinate) Subtract(subtrahend Coordinate) Coordinate {
	return c.compute(subtrahend, func(lhs, rhs float64) float64 { return lhs - rhs })
}

func (c *coordinate) Multiply(multiplier Coordinate) Coordinate {
	return c.compute(multiplier, func(lhs, rhs float64) float64 { return lhs * rhs })
}

func (c *coordinate) Equal(other Coordinate) bool {
	if c == other {
		return true
	}
	if c == nil || other == nil {
		return false
	}
	// Fix: coordinates of differing cardinality are never equal. Previously a
	// shorter `other` caused an index-out-of-range panic in other.Get, and a
	// longer `other` with a matching prefix compared as equal.
	if len(c.coordinates) != other.Cardinality() {
		return false
	}
	for i, coordinate := range c.coordinates {
		if coordinate != other.Get(i) {
			return false
		}
	}
	return true
}
// NewSegment creates a line segment between the two given coordinates,
// pre-computing the direction vector End-Start used by Intersection.
func NewSegment(start, end Coordinate) *Segment {
	seg := &Segment{
		Start: start,
		End: end,
		s: end.Subtract(start),
	}
	return seg
}
// Segment is a directed line segment from Start to End.
type Segment struct {
	Start Coordinate
	End Coordinate
	s Coordinate // direction vector End-Start, cached at construction
}
// String renders the segment as "start -> end".
func (seg *Segment) String() string {
	return fmt.Sprintf("%v -> %v", seg.Start, seg.End)
}
// Coincident reports whether both segments share the same start and end
// points; direction matters, so A->B is not coincident with B->A.
func (seg *Segment) Coincident(other *Segment) bool {
	return seg.Start.Equal(other.Start) && seg.End.Equal(other.End)
}
// Intersection returns the point at which the two segments cross, if any.
// The parametric solution is derived from the first two dimensions only;
// all dimensions are then verified component-by-component, and the
// candidate point must lie within both segments' bounding ranges.
func (seg *Segment) Intersection(other *Segment) (Coordinate, bool) {
	// d is the 2D cross product of the direction vectors; zero means the
	// segments are parallel (or degenerate) in the first two dimensions.
	d := float64((other.s.Get(1)*seg.s.Get(0) - other.s.Get(0)*seg.s.Get(1)))
	m1 := float64((other.s.Get(0) * (seg.Start.Get(1) - other.Start.Get(1))) - (other.s.Get(1) * (seg.Start.Get(0) - other.Start.Get(0))))
	m2 := float64(seg.s.Get(0)*(seg.Start.Get(1)-other.Start.Get(1)) - (seg.s.Get(1) * (seg.Start.Get(0) - other.Start.Get(0))))
	if d == 0 {
		return nil, false
	}
	// m1/m2 become the line parameters along seg and other respectively.
	m1 = m1 / d
	m2 = m2 / d
	// find the intersection of the *lines* (not segment)
	intersection := &coordinate{
		coordinates: make([]float64, seg.Start.Cardinality()),
	}
	for i := 0; i < seg.Start.Cardinality(); i++ {
		a := seg.Start.Get(i) + seg.s.Get(i)*m1
		b := other.Start.Get(i) + other.s.Get(i)*m2
		// NOTE(review): exact float equality is used here; near-intersections
		// in 3+ dimensions may be rejected due to rounding error — confirm.
		if a != b {
			return nil, false
		}
		intersection.coordinates[i] = a
	}
	// determine if the intersection is within both lines
	if seg.Contains(intersection) && other.Contains(intersection) {
		return intersection, true
	}
	return nil, false
}
// Contains reports whether the point lies within the segment's axis-aligned
// bounding box (each component within the [Start, End] range for that axis).
func (seg *Segment) Contains(point Coordinate) bool {
	for i := 0; i < seg.Start.Cardinality(); i++ {
		lo, hi := seg.Start.Get(i), seg.End.Get(i)
		if hi < lo {
			lo, hi = hi, lo
		}
		if p := point.Get(i); p < lo || p > hi {
			return false
		}
	}
	return true
}
// Length returns the Euclidean length of the segment as an int.
// NOTE(review): the float64 distance is truncated, not rounded — confirm
// callers expect integer truncation here.
func (seg *Segment) Length() int {
	return int(EuclideanDistance(seg.Start, seg.End))
} | coordinate/coordinate.go | 0.854854 | 0.497253 | coordinate.go | starcoder
package output
import (
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/output/writer"
"github.com/Jeffail/benthos/v3/lib/types"
)
//------------------------------------------------------------------------------
// init registers the zmq4 output type, its documentation strings, and its
// configuration field specs in the global Constructors table.
func init() {
	Constructors[TypeZMQ4] = TypeSpec{
		constructor: fromSimpleConstructor(NewZMQ4),
		Summary: `
The zmq4 output type attempts to send messages to a ZMQ4 port, currently only
PUSH and PUB sockets are supported.`,
		Description: `
ZMQ4 is supported but currently depends on C bindings. Since this is an
annoyance when building or using Benthos it is not compiled by default.
There is a specific docker tag postfix ` + "`-cgo`" + ` for C builds containing
ZMQ support.
You can also build it into your project by getting libzmq installed on your
machine, then build with the tag:
` + "```sh" + `
go install -tags "ZMQ4" github.com/Jeffail/benthos/v3/cmd/benthos
` + "```" + ``,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("urls", "A list of URLs to connect to. If an item of the list contains commas it will be expanded into multiple URLs.", []string{"tcp://localhost:5556"}),
			docs.FieldCommon("bind", "Whether the URLs listed should be bind (otherwise they are connected to)."),
			docs.FieldCommon("socket_type", "The socket type to send with.").HasOptions("PUSH", "PUB"),
			docs.FieldAdvanced("high_water_mark", "The message high water mark to use."),
			docs.FieldCommon("poll_timeout", "The maximum period of time to wait for a message to send before the request is abandoned and reattempted."),
		},
		Categories: []Category{
			CategoryNetwork,
		},
	}
}
//------------------------------------------------------------------------------
// NewZMQ4 creates a new ZMQ4 output type: it builds the underlying ZMQ4
// writer, wraps it in a generic output writer, and restricts it to
// single-payload messages.
func NewZMQ4(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	zWriter, err := writer.NewZMQ4(conf.ZMQ4, log, stats)
	if err != nil {
		return nil, err
	}
	out, err := NewWriter("zmq4", zWriter, log, stats)
	if err != nil {
		return nil, err
	}
	return OnlySinglePayloads(out), nil
}
//------------------------------------------------------------------------------
package gohome
import (
"github.com/PucklaMotzer09/mathgl/mgl32"
)
// toLine3D builds a 3D line vertex pair from two positions, tagging both
// endpoints with the current DrawColor.
func toLine3D(pos1 mgl32.Vec3, pos2 mgl32.Vec3) (line Line3D) {
	col := ColorToVec4(DrawColor)
	for i, p := range [...]mgl32.Vec3{pos1, pos2} {
		// Layout per vertex: x, y, z, r, g, b, a.
		line[i][0], line[i][1], line[i][2] = p[0], p[1], p[2]
		line[i][3], line[i][4], line[i][5], line[i][6] = col[0], col[1], col[2], col[3]
	}
	return
}
// toPoint2D builds a single 2D vertex at the given position, colored with
// the current DrawColor.
func toPoint2D(point mgl32.Vec2) Shape2DVertex {
	col := ColorToVec4(DrawColor)
	// Layout: x, y, r, g, b, a.
	return Shape2DVertex{point[0], point[1], col[0], col[1], col[2], col[3]}
}
// toLine2D builds a 2D line vertex pair from two positions, tagging both
// endpoints with the current DrawColor.
func toLine2D(pos1 mgl32.Vec2, pos2 mgl32.Vec2) (line Line2D) {
	col := ColorToVec4(DrawColor)
	for i, p := range [...]mgl32.Vec2{pos1, pos2} {
		// Layout per vertex: x, y, r, g, b, a.
		line[i][0], line[i][1] = p[0], p[1]
		line[i][2], line[i][3], line[i][4], line[i][5] = col[0], col[1], col[2], col[3]
	}
	return
}
// toTriangle2D builds three 2D vertices from the given corner positions,
// all colored with the current DrawColor.
func toTriangle2D(pos1 mgl32.Vec2, pos2 mgl32.Vec2, pos3 mgl32.Vec2) (tri Triangle2D) {
	col := ColorToVec4(DrawColor)
	for i, p := range [...]mgl32.Vec2{pos1, pos2, pos3} {
		// Layout per vertex: x, y, r, g, b, a.
		tri[i][0], tri[i][1] = p[0], p[1]
		tri[i][2], tri[i][3], tri[i][4], tri[i][5] = col[0], col[1], col[2], col[3]
	}
	return
}
// toRectangle2D builds four 2D vertices from the given corner positions,
// all colored with the current DrawColor.
func toRectangle2D(pos1, pos2, pos3, pos4 mgl32.Vec2) (rect Rectangle2D) {
	col := ColorToVec4(DrawColor)
	for i, p := range [...]mgl32.Vec2{pos1, pos2, pos3, pos4} {
		// Layout per vertex: x, y, r, g, b, a.
		rect[i][0], rect[i][1] = p[0], p[1]
		rect[i][2], rect[i][3], rect[i][4], rect[i][5] = col[0], col[1], col[2], col[3]
	}
	return
}
// toPolygon2D builds one 2D vertex per input position, all colored with the
// current DrawColor.
func toPolygon2D(positions ...mgl32.Vec2) (poly Polygon2D) {
	col := ColorToVec4(DrawColor)
	// Allocate the vertex slice directly; the previous version appended a
	// slice of zero values to the nil Points slice and then overwrote them.
	poly.Points = make([]Shape2DVertex, len(positions))
	for i, p := range positions {
		poly.Points[i] = Shape2DVertex{
			p[0], p[1],
			col[0], col[1], col[2], col[3],
		}
	}
	return
}
// toVertex3D builds a single 3D vertex at the given position, colored with
// the current DrawColor. Layout: x, y, z, r, g, b, a.
func toVertex3D(pos mgl32.Vec3) (vert Shape3DVertex) {
	col := ColorToVec4(DrawColor)
	for i, p := range pos {
		vert[i] = p
	}
	for i, c := range col {
		vert[3+i] = c
	}
	return
}
// toTriangle3D builds a triangle from three corner positions, each converted
// to a colored vertex via toVertex3D.
func toTriangle3D(pos1, pos2, pos3 mgl32.Vec3) (tri Triangle3D) {
	tri[0] = toVertex3D(pos1)
	tri[1] = toVertex3D(pos2)
	tri[2] = toVertex3D(pos3)
	return
}
// cubeToTriangle3Ds builds the 12 triangles (2 per face) of an axis-aligned
// box centered at the origin with the given width, height, and depth.
func cubeToTriangle3Ds(width, height, depth float32) (tris [6 * 2]Triangle3D) {
	// Corner naming: L/R = left/right (x), D/U = down/up (y), B/F = back/front (z).
	const LDB = 0
	const RDB = 1
	const RDF = 2
	const LDF = 3
	const LUB = 4
	const RUB = 5
	const RUF = 6
	const LUF = 7
	p := [8]mgl32.Vec3{
		mgl32.Vec3{-width / 2.0, -height / 2.0, -depth / 2.0}, // LDB
		mgl32.Vec3{+width / 2.0, -height / 2.0, -depth / 2.0}, // RDB
		mgl32.Vec3{+width / 2.0, -height / 2.0, +depth / 2.0}, // RDF
		mgl32.Vec3{-width / 2.0, -height / 2.0, +depth / 2.0}, // LDF
		mgl32.Vec3{-width / 2.0, +height / 2.0, -depth / 2.0}, // LUB
		mgl32.Vec3{+width / 2.0, +height / 2.0, -depth / 2.0}, // RUB
		mgl32.Vec3{+width / 2.0, +height / 2.0, +depth / 2.0}, // RUF
		mgl32.Vec3{-width / 2.0, +height / 2.0, +depth / 2.0}, // LUF
	}
	// Two triangles per face, corners listed in consistent winding order.
	tris = [6 * 2]Triangle3D{
		toTriangle3D(p[LUF], p[LDF], p[RDF]), // FRONT
		toTriangle3D(p[RDF], p[RUF], p[LUF]),
		toTriangle3D(p[RUF], p[RDF], p[RDB]), // RIGHT
		toTriangle3D(p[RDB], p[RUB], p[RUF]),
		toTriangle3D(p[RUB], p[RDB], p[LDB]), // BACK
		toTriangle3D(p[LDB], p[LUB], p[RUB]),
		toTriangle3D(p[LUB], p[LDB], p[LDF]), // LEFT
		toTriangle3D(p[LDF], p[LUF], p[LUB]),
		toTriangle3D(p[LUB], p[LUF], p[RUF]), // UP
		toTriangle3D(p[RUF], p[RUB], p[LUB]),
		toTriangle3D(p[LDF], p[LDB], p[RDB]), // DOWN
		toTriangle3D(p[RDB], p[RDF], p[LDF]),
	}
	return
} | src/gohome/drawutils.go | 0.675122 | 0.446857 | drawutils.go | starcoder
package svg
import (
"github.com/goki/gi/gi"
"github.com/goki/ki/ki"
"github.com/goki/ki/kit"
"github.com/goki/mat32"
)
// Circle is a SVG circle
type Circle struct {
	NodeBase
	Pos mat32.Vec2 `xml:"{cx,cy}" desc:"position of the center of the circle"`
	Radius float32 `xml:"r" desc:"radius of the circle"`
}
// KiT_Circle registers the Circle type with the ki type registry.
var KiT_Circle = kit.Types.AddType(&Circle{}, ki.Props{"EnumType:Flag": gi.KiT_NodeFlags})
// AddNewCircle adds a new circle to given parent node, with given name, x,y pos, and radius.
func AddNewCircle(parent ki.Ki, name string, x, y, radius float32) *Circle {
	g := parent.AddNewChild(KiT_Circle, name).(*Circle)
	g.Pos.Set(x, y)
	g.Radius = radius
	return g
}
// SVGName returns the SVG element name for this node type.
func (g *Circle) SVGName() string { return "circle" }
// CopyFieldsFrom copies the circle-specific fields (and the embedded
// NodeBase fields) from another Circle.
func (g *Circle) CopyFieldsFrom(frm interface{}) {
	fr := frm.(*Circle)
	g.NodeBase.CopyFieldsFrom(&fr.NodeBase)
	g.Pos = fr.Pos
	g.Radius = fr.Radius
}
// SetPos sets the circle's center position.
// NOTE(review): subtracting the radius treats pos as center+radius
// (bounding-box lower-right) — confirm the intended anchor point.
func (g *Circle) SetPos(pos mat32.Vec2) {
	g.Pos = pos.SubScalar(g.Radius)
}
// SetSize sets the radius to the mean of the given half-width and
// half-height (0.25 * (X + Y)).
func (g *Circle) SetSize(sz mat32.Vec2) {
	g.Radius = 0.25 * (sz.X + sz.Y)
}
// SVGLocalBBox returns the local bounding box of the circle, expanded by
// half the stroke width on every side.
func (g *Circle) SVGLocalBBox() mat32.Box2 {
	ext := g.Radius + 0.5*g.LocalLineWidth()
	return mat32.Box2{Min: g.Pos.SubScalar(ext), Max: g.Pos.AddScalar(ext)}
}
// Render2D pushes the node's transform, strokes/fills the circle, computes
// its bounding box, renders children, then restores the transform stack.
func (g *Circle) Render2D() {
	vis, rs := g.PushXForm()
	// Skip rendering entirely when the node is not visible.
	if !vis {
		return
	}
	pc := &g.Pnt
	pc.DrawCircle(rs, g.Pos.X, g.Pos.Y, g.Radius)
	pc.FillStrokeClear(rs)
	g.ComputeBBoxSVG()
	g.Render2DChildren()
	rs.PopXFormLock()
}
// ApplyXForm applies the given 2D transform to the geometry of this node
// each node must define this for itself
func (g *Circle) ApplyXForm(xf mat32.Mat2) {
	rot := xf.ExtractRot()
	if rot != 0 || !g.Pnt.XForm.IsIdentity() {
		// Rotation (or a pre-existing transform) cannot be folded into a
		// circle's center+radius, so accumulate it in the node transform.
		g.Pnt.XForm = g.Pnt.XForm.Mul(xf)
		g.SetProp("transform", g.Pnt.XForm.String())
		g.GradientApplyXForm(xf)
	} else {
		// Pure translate/scale is applied directly to the geometry; a
		// non-uniform scale is averaged since a circle has a single radius.
		g.Pos = xf.MulVec2AsPt(g.Pos)
		scx, scy := xf.ExtractScale()
		g.Radius *= 0.5 * (scx + scy)
		g.GradientApplyXForm(xf)
	}
}
// ApplyDeltaXForm applies the given 2D delta transforms to the geometry of this node
// relative to given point. Trans translation and point are in top-level coordinates,
// so must be transformed into local coords first.
// Point is upper left corner of selection box that anchors the translation and scaling,
// and for rotation it is the center point around which to rotate
func (g *Circle) ApplyDeltaXForm(trans mat32.Vec2, scale mat32.Vec2, rot float32, pt mat32.Vec2) {
	if rot != 0 {
		// Rotation goes into the node transform, anchored at the local point.
		xf, lpt := g.DeltaXForm(trans, scale, rot, pt, false) // exclude self
		mat := g.Pnt.XForm.MulCtr(xf, lpt)
		g.Pnt.XForm = mat
		g.SetProp("transform", g.Pnt.XForm.String())
	} else {
		// Translate/scale is applied directly to the geometry; a non-uniform
		// scale is averaged into the single radius.
		xf, lpt := g.DeltaXForm(trans, scale, rot, pt, true) // include self
		g.Pos = xf.MulVec2AsPtCtr(g.Pos, lpt)
		scx, scy := xf.ExtractScale()
		g.Radius *= 0.5 * (scx + scy)
		g.GradientApplyXFormPt(xf, lpt)
	}
}
// WriteGeom writes the geometry of the node to a slice of floating point numbers
// the length and ordering of which is specific to each node type.
// Slice must be passed and will be resized if not the correct length.
// Layout: [0]=Pos.X, [1]=Pos.Y, [2]=Radius, [3:9]=transform, then gradient points.
func (g *Circle) WriteGeom(dat *[]float32) {
	SetFloat32SliceLen(dat, 3+6) // 3 geometry values + 6 transform values
	(*dat)[0] = g.Pos.X
	(*dat)[1] = g.Pos.Y
	(*dat)[2] = g.Radius
	g.WriteXForm(*dat, 3)
	g.GradientWritePts(dat)
}
// ReadGeom reads the geometry of the node from a slice of floating point numbers
// the length and ordering of which is specific to each node type.
// Must match the layout produced by WriteGeom.
func (g *Circle) ReadGeom(dat []float32) {
	g.Pos.X = dat[0]
	g.Pos.Y = dat[1]
	g.Radius = dat[2]
	g.ReadXForm(dat, 3)
	g.GradientReadPts(dat)
}
package interpolation
import (
"github.com/edwardbrowncross/naturalneighbour/delaunay"
"github.com/edwardbrowncross/naturalneighbour/voronoi"
)
// Interpolator provides natural neighbour interpolation within a set of points.
type Interpolator struct {
	t *delaunay.Triangulation
	// areaCache memoizes the voronoi-cell area of each triangulation point
	// (without any query point inserted); those areas never change.
	areaCache map[*delaunay.Point]float64
}
// New creates a new Interpolator using the given points.
// On triangulation failure it returns (nil, err); previously a
// partially-constructed Interpolator was returned alongside the error.
func New(points []*delaunay.Point) (*Interpolator, error) {
	t, err := delaunay.NewTriangulation(points)
	if err != nil {
		return nil, err
	}
	return &Interpolator{t: t, areaCache: map[*delaunay.Point]float64{}}, nil
}
// Interpolate returns the interpolated value at the given x and y coordinates using natural neighbour interpolation.
// https://pdfs.semanticscholar.org/52ca/255573eded0e4371fe2ced980b196636718d.pdf
func (i *Interpolator) Interpolate(x, y float64) (float64, error) {
// Create a new point and add it to the triangulation.
p := delaunay.NewPoint(x, y, 0)
undo, err := i.t.AddPoint(p)
if err != nil {
return 0, err
}
// Calculate the area of the voronoi cells of the points linked to the new point.
neighbours := p.GetConnected()
areasAfter := make([]float64, len(neighbours))
// Calculate the area of the new test point's voronoi cells.
for i, n := range neighbours {
areasAfter[i] = voronoi.NewRegion(n).GetArea()
}
totalArea := voronoi.NewRegion(p).GetArea()
undo()
// Calculate the area of the same points without the new point in the triangulation.
areasBefore := make([]float64, len(neighbours))
for idx, n := range neighbours {
if value, found := i.areaCache[n]; !found {
area := voronoi.NewRegion(n).GetArea()
i.areaCache[n] = area
areasBefore[idx] = area
} else {
areasBefore[idx] = value
}
}
// Take a weighted average of the values of the points the new test point was connected to.
// Weighting is the percentage of the test point's voronoi cell that was stolen from each neighbour point.
total := 0.0
for i, n := range neighbours {
total += n.Value * (areasBefore[i] - areasAfter[i])
}
return total / totalArea, nil
} | interpolation/interpolator.go | 0.738952 | 0.441854 | interpolator.go | starcoder |
package routerrpc
import (
"time"
"github.com/Actinium-project/acmutil"
)
// RoutingConfig contains the configurable parameters that control routing.
// All fields are exposed as long-form CLI/config flags via the struct tags.
type RoutingConfig struct {
	// MinRouteProbability is the minimum required route success probability
	// to attempt the payment.
	MinRouteProbability float64 `long:"minrtprob" description:"Minimum required route success probability to attempt the payment"`
	// AprioriHopProbability is the assumed success probability of a hop in
	// a route when no other information is available.
	AprioriHopProbability float64 `long:"apriorihopprob" description:"Assumed success probability of a hop in a route when no other information is available."`
	// AprioriWeight is a value in the range [0, 1] that defines to what
	// extent historical results should be extrapolated to untried
	// connections. Setting it to one will completely ignore historical
	// results and always assume the configured a priori probability for
	// untried connections. A value of zero will ignore the a priori
	// probability completely and only base the probability on historical
	// results, unless there are none available.
	AprioriWeight float64 `long:"aprioriweight" description:"Weight of the a priori probability in success probability estimation. Valid values are in [0, 1]."`
	// PenaltyHalfLife defines after how much time a penalized node or
	// channel is back at 50% probability.
	PenaltyHalfLife time.Duration `long:"penaltyhalflife" description:"Defines the duration after which a penalized node or channel is back at 50% probability"`
	// AttemptCost is the virtual cost in path finding weight units of
	// executing a payment attempt that fails. It is used to trade off
	// potentially better routes against their probability of succeeding.
	// The value is expressed in satoshis.
	AttemptCost btcutil.Amount `long:"attemptcost" description:"The (virtual) cost in sats of a failed payment attempt"`
	// MaxMcHistory defines the maximum number of payment results that
	// are held on disk by mission control.
	MaxMcHistory int `long:"maxmchistory" description:"the maximum number of payment results that are held on disk by mission control"`
}
package lib
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/dunelang/dune"
)
// init registers the "assert" native library with the dune runtime, together
// with the TypeScript declarations that describe the namespace to scripts.
func init() {
	dune.RegisterLib(Assert, `
declare namespace assert {
    export function contains(search: string, value: string): void
    export function equal(a: any, b: any, errorMessage?: string): void
    export function isTrue(a: boolean): void
    export function isNull(a: any): void
    export function isNotNull(a: any): void
    export function exception(msg: string, func: Function): void
    export function int(a: any, msg: string): number
    export function float(a: any, msg: string): number
    export function string(a: any, msg: string): string
    export function bool(a: any, msg: string): boolean
    export function object(a: any, msg: string): any
}
`)
}
// Assert implements the "assert" native namespace exposed to dune scripts.
// Each entry validates its arguments and returns a Go error on assertion
// failure, which the VM surfaces to the script as an exception.
var Assert = []dune.NativeFunction{
	{
		// assert.contains(search, value) fails unless value contains search.
		Name:      "assert.contains",
		Arguments: 2,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			if err := ValidateArgs(args, dune.String, dune.String); err != nil {
				return dune.NullValue, err
			}
			a := args[0].String()
			b := args[1].String()
			if !strings.Contains(b, a) {
				return dune.NullValue, fmt.Errorf("'%s' not contained in '%s'", a, b)
			}
			return dune.NullValue, nil
		},
	},
	{
		// assert.equal(a, b, [message]) fails when the values differ
		// (element-wise for arrays; see areEqual).
		Name:      "assert.equal",
		Arguments: -1,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			var msg string
			ln := len(args)
			switch ln {
			case 2:
			case 3:
				a3 := args[2]
				if a3.Type != dune.String {
					return dune.NullValue, fmt.Errorf("expected error message to be a string, got %v", a3.TypeName())
				}
				msg = a3.String()
			default:
				return dune.NullValue, fmt.Errorf("expected 2 or 3 args, got %d", ln)
			}
			a := args[0]
			b := args[1]
			if !areEqual(a, b) {
				if msg != "" {
					return dune.NullValue, errors.New(msg)
				}
				return dune.NullValue, fmt.Errorf("values are different: %v, %v", serializeOrErr(a), serializeOrErr(b))
			}
			return dune.NullValue, nil
		},
	},
	{
		// NOTE(review): despite the name, assert.isTrue never returns an
		// error; it reports the result as a boolean. Verify whether callers
		// expect it to throw like the other assertions before changing it.
		Name:      "assert.isTrue",
		Arguments: 1,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			a := args[0]
			switch a.Type {
			case dune.Bool:
				if a.ToBool() {
					return dune.TrueValue, nil
				}
			}
			return dune.FalseValue, nil
		},
	},
	{
		// assert.isNull fails unless the value is null/undefined.
		Name:      "assert.isNull",
		Arguments: 1,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			a := args[0]
			switch a.Type {
			case dune.Null, dune.Undefined:
			default:
				return dune.NullValue, fmt.Errorf("expected null, got %v", a)
			}
			return dune.NullValue, nil
		},
	},
	{
		// assert.isNotNull fails when the value is null/undefined.
		Name:      "assert.isNotNull",
		Arguments: 1,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			a := args[0]
			switch a.Type {
			case dune.Null, dune.Undefined:
				return dune.NullValue, fmt.Errorf("%v is null", a)
			default:
			}
			return dune.NullValue, nil
		},
	},
	{
		// assert.exception(expectedMsg, fn) runs fn and fails unless it
		// throws an error whose text contains expectedMsg.
		Name:      "assert.exception",
		Arguments: 2,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			a := args[0]
			if a.Type != dune.String {
				return dune.NullValue, fmt.Errorf("expected argument 1 to be a string, got %s", a.TypeName())
			}
			expected := a.String()
			v := args[1]
			err := runFuncOrClosure(vm, v)
			if err == nil {
				return dune.NullValue, fmt.Errorf("expected exception: %s", expected)
			}
			if expected != "" && !strings.Contains(err.Error(), expected) {
				return dune.NullValue, fmt.Errorf("invalid exception, does not contain '%s': %s", expected, err.Error())
			}
			// clear the error: the exception was expected
			vm.Error = nil
			return dune.NullValue, nil
		},
	},
	{
		// assert.int coerces ints and numeric strings to an int value,
		// failing with the caller-provided message otherwise.
		Name:      "assert.int",
		Arguments: 2,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			if args[1].Type != dune.String {
				return dune.NullValue, fmt.Errorf("expected argument 2 to be a string, got %s", args[1].TypeName())
			}
			a := args[0]
			msg := args[1].String()
			var v int64
			switch a.Type {
			case dune.Int:
				v = a.ToInt()
			case dune.String:
				var err error
				v, err = strconv.ParseInt(a.String(), 0, 64)
				if err != nil {
					// BUGFIX: msg is plain user text, not a format string;
					// it was previously passed to fmt.Errorf as a format
					// (go vet printf violation). Build the message with
					// showAssertMessage instead.
					return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not int", a.TypeName())))
				}
			default:
				// previously returned fmt.Errorf(msg) with no detail; made
				// consistent with the parse-failure branch above.
				return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not int", a.TypeName())))
			}
			return dune.NewInt64(v), nil
		},
	},
	{
		// assert.float coerces floats, ints and numeric strings to a float
		// value, failing with the caller-provided message otherwise.
		Name:      "assert.float",
		Arguments: 2,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			if args[1].Type != dune.String {
				return dune.NullValue, fmt.Errorf("expected argument 2 to be a string, got %s", args[1].TypeName())
			}
			a := args[0]
			msg := args[1].String()
			var v float64
			switch a.Type {
			case dune.Float:
				// BUGFIX: float inputs were previously rejected.
				v = a.ToFloat()
			case dune.Int:
				v = float64(a.ToInt())
			case dune.String:
				// BUGFIX: was strconv.ParseInt, which rejected "1.5" etc.
				f, err := strconv.ParseFloat(a.String(), 64)
				if err != nil {
					return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not float", a.TypeName())))
				}
				v = f
			default:
				return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not float", a.TypeName())))
			}
			// BUGFIX: was dune.NewInt64, which truncated the value.
			return dune.NewFloat(v), nil
		},
	},
	{
		// assert.string coerces scalar values to their string representation.
		Name:      "assert.string",
		Arguments: 2,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			if args[1].Type != dune.String {
				return dune.NullValue, fmt.Errorf("expected argument 2 to be a string, got %s", args[1].TypeName())
			}
			a := args[0]
			msg := args[1].String()
			var v string
			switch a.Type {
			case dune.Int, dune.Float, dune.Bool, dune.String:
				v = a.String()
			default:
				return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not a string", a.TypeName())))
			}
			return dune.NewString(v), nil
		},
	},
	{
		// assert.bool coerces bools, 0/1 ints and "true"/"false"/"1"/"0"
		// strings to a boolean value.
		Name:      "assert.bool",
		Arguments: 2,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			if args[1].Type != dune.String {
				return dune.NullValue, fmt.Errorf("expected argument 2 to be a string, got %s", args[1].TypeName())
			}
			a := args[0]
			msg := args[1].String()
			var v dune.Value
			switch a.Type {
			case dune.Bool:
				v = a
			case dune.Int:
				switch a.ToInt() {
				case 0:
					v = dune.FalseValue
				case 1:
					v = dune.TrueValue
				default:
					return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not bool", a.TypeName())))
				}
			case dune.String:
				s := strings.Trim(a.String(), " ")
				switch s {
				case "true", "1":
					v = dune.TrueValue
				case "false", "0":
					v = dune.FalseValue
				default:
					return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not bool", a.TypeName())))
				}
			default:
				return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not bool", a.TypeName())))
			}
			return v, nil
		},
	},
	{
		// assert.object fails unless the value is a map/object.
		Name:      "assert.object",
		Arguments: 2,
		Function: func(this dune.Value, args []dune.Value, vm *dune.VM) (dune.Value, error) {
			if args[1].Type != dune.String {
				return dune.NullValue, fmt.Errorf("expected argument 2 to be a string, got %s", args[1].TypeName())
			}
			a := args[0]
			msg := args[1].String()
			switch a.Type {
			case dune.Map:
			default:
				return dune.NullValue, errors.New(showAssertMessage(msg, fmt.Sprintf("%v is not an object", a.TypeName())))
			}
			return a, nil
		},
	},
}
// showAssertMessage formats an assertion failure message. When the caller's
// format string contains no %s/%v verb, a ": %s" suffix is appended so the
// detail arguments still appear in the output.
func showAssertMessage(format string, args ...interface{}) string {
	hasVerb := strings.Contains(format, "%s") || strings.Contains(format, "%v")
	if !hasVerb {
		format += ": %s"
	}
	return fmt.Sprintf(format, args...)
}
// areEqual reports whether two dune values are equal, extending Value.Equals
// with an element-wise comparison for arrays (one level deep only -- nested
// arrays are compared by Equals, not recursively by this function).
func areEqual(a, b dune.Value) bool {
	if a.Equals(b) {
		return true
	}
	if a.Type != dune.Array || b.Type != dune.Array {
		return false
	}
	aa := a.ToArrayObject().Array
	bb := b.ToArrayObject().Array
	if len(aa) != len(bb) {
		return false
	}
	for i := range aa {
		if !bb[i].Equals(aa[i]) {
			return false
		}
	}
	return true
}
// serializeOrErr serializes the value for display in error messages;
// if serialization itself fails, the error text is returned instead.
func serializeOrErr(v dune.Value) string {
	s, err := serialize(v)
	if err != nil {
		return err.Error()
	}
	return s
}
package plantest
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/stdlib/universe"
)
// SimpleRule is a simple rule whose pattern matches any plan node and
// just stores the NodeIDs of nodes it has visited in SeenNodes.
type SimpleRule struct {
	// SeenNodes records each distinct node ID visited by Rewrite, in order.
	SeenNodes []plan.NodeID
}
// Pattern matches any plan node.
func (sr *SimpleRule) Pattern() plan.Pattern {
	return plan.Any()
}
// Rewrite records the node's ID (at most once) and leaves the plan unchanged.
func (sr *SimpleRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	id := node.ID()
	seen := false
	for _, nid := range sr.SeenNodes {
		if nid == id {
			seen = true
			break
		}
	}
	if !seen {
		sr.SeenNodes = append(sr.SeenNodes, id)
	}
	return node, false, nil
}
// Name returns the rule's identifier.
func (sr *SimpleRule) Name() string {
	return "simple"
}
// FunctionRule is a simple rule intended to invoke a Rewrite function.
type FunctionRule struct {
	// RewriteFn is invoked for every node matched by the (match-anything) pattern.
	RewriteFn func(ctx context.Context, node plan.Node) (plan.Node, bool, error)
}
// Name returns the rule's identifier.
func (fr *FunctionRule) Name() string {
	return "function"
}
// Pattern matches any plan node.
func (fr *FunctionRule) Pattern() plan.Pattern {
	return plan.Any()
}
// Rewrite delegates to the configured RewriteFn.
func (fr *FunctionRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	return fr.RewriteFn(ctx, node)
}
// SmashPlanRule adds an `Intruder` as predecessor of the given `Node` without
// marking it as successor of it. It breaks the integrity of the plan.
// If `Kind` is specified, it takes precedence over `Node`, and the rule will use it
// to match.
type SmashPlanRule struct {
	// Node is the target node to corrupt (used when Kind is empty).
	Node plan.Node
	// Intruder is attached as an extra predecessor of the matched node.
	Intruder plan.Node
	// Kind, when non-empty, selects nodes by procedure kind instead of identity.
	Kind plan.ProcedureKind
}
// Name returns the rule's identifier.
func (SmashPlanRule) Name() string {
	return "SmashPlanRule"
}
// Pattern matches on Kind when it is set; otherwise on the target Node's kind.
// (Node.Kind() is only consulted when Kind is empty, so Node may be nil when
// Kind is provided.)
func (spp SmashPlanRule) Pattern() plan.Pattern {
	k := spp.Kind
	if len(k) == 0 {
		k = spp.Node.Kind()
	}
	return plan.Pat(k, plan.Any())
}
// Rewrite injects the Intruder as a one-way predecessor of the matched node,
// deliberately corrupting the plan for integrity-check tests.
func (spp SmashPlanRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	var changed bool
	if len(spp.Kind) > 0 || node == spp.Node {
		node.AddPredecessors(spp.Intruder)
		changed = true
	}
	// it is not necessary to return a copy of the node, because the rule changes the number
	// of predecessors and it won't be re-triggered again.
	return node, changed, nil
}
// CreateCycleRule creates a cycle between the given `Node` and its predecessor.
// It creates exactly one cycle. After the rule is triggered once, it won't have any effect later.
// This rule breaks the integrity of the plan.
// If `Kind` is specified, it takes precedence over `Node`, and the rule will use it
// to match.
type CreateCycleRule struct {
	// Node is the target node (used when Kind is empty).
	Node plan.Node
	// Kind, when non-empty, selects nodes by procedure kind instead of identity.
	Kind plan.ProcedureKind
}
// Name returns the rule's identifier.
func (CreateCycleRule) Name() string {
	return "CreateCycleRule"
}
// Pattern matches on Kind when it is set; otherwise on the target Node's kind.
// (Node.Kind() is only consulted when Kind is empty, so Node may be nil when
// Kind is provided.)
func (ccr CreateCycleRule) Pattern() plan.Pattern {
	k := ccr.Kind
	if len(k) == 0 {
		k = ccr.Node.Kind()
	}
	return plan.Pat(k, plan.Any())
}
// Rewrite links the matched node and its first predecessor into a cycle,
// deliberately corrupting the plan for integrity-check tests.
func (ccr CreateCycleRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	var changed bool
	if len(ccr.Kind) > 0 || node == ccr.Node {
		node.Predecessors()[0].AddPredecessors(node)
		node.AddSuccessors(node.Predecessors()[0])
		changed = true
	}
	// just return a copy of the node, otherwise the rule will be triggered an infinite number of times
	// (it doesn't change the number of predecessors, indeed).
	return node.ShallowCopy(), changed, nil
}
// MultiRootRule matches a set of plan nodes at the root and stores the NodeIDs of
// nodes it has visited in SeenNodes.
type MultiRootRule struct {
	// SeenNodes records every node ID visited by Rewrite (duplicates allowed).
	SeenNodes []plan.NodeID
}
// Pattern matches any min, max, or mean node with any predecessor.
func (sr *MultiRootRule) Pattern() plan.Pattern {
	return plan.OneOf(
		[]plan.ProcedureKind{
			universe.MinKind,
			universe.MaxKind,
			universe.MeanKind,
		},
		plan.Any())
}
// Rewrite records the visited node's ID and leaves the plan unchanged.
func (sr *MultiRootRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) {
	sr.SeenNodes = append(sr.SeenNodes, node.ID())
	return node, false, nil
}
// Name returns the rule's identifier.
func (sr *MultiRootRule) Name() string {
	return "multiroot"
}
// RuleTestCase allows for concise creation of test cases that exercise rules
type RuleTestCase struct {
	Name string
	// Context is optional; context.Background() is used when nil.
	Context context.Context
	// Rules are the only rules the planner will run for this case.
	Rules []plan.Rule
	// Before is the input plan; After the expected output plan.
	Before *PlanSpec
	After *PlanSpec
	// NoChange asserts the plan is returned unmodified (After is ignored).
	NoChange bool
	// ValidateError, when set, asserts the planner fails with this error.
	ValidateError error
}
// PhysicalRuleTestHelper will run a rule test case: it builds the "before"
// plan, runs the physical planner restricted to the case's rules, and asserts
// the resulting plan matches "after" (or is unchanged / fails validation).
func PhysicalRuleTestHelper(t *testing.T, tc *RuleTestCase) {
	t.Helper()
	before := CreatePlanSpec(tc.Before)
	var after *plan.Spec
	if tc.NoChange || tc.ValidateError != nil {
		// the expected output is the untouched input
		after = CreatePlanSpec(tc.Before.Copy())
	} else {
		after = CreatePlanSpec(tc.After)
	}
	opts := []plan.PhysicalOption{
		plan.OnlyPhysicalRules(tc.Rules...),
	}
	if tc.ValidateError == nil {
		// Disable validation so that we can avoid having to push a range into every from
		opts = append(opts, plan.DisableValidation())
	}
	physicalPlanner := plan.NewPhysicalPlanner(opts...)
	ctx := tc.Context
	if ctx == nil {
		ctx = context.Background()
	}
	pp, err := physicalPlanner.Plan(ctx, before)
	if err != nil {
		if tc.ValidateError != nil {
			if got, want := err, tc.ValidateError; !cmp.Equal(want, got) {
				t.Fatalf("unexpected planner error -want/+got:\n%s", cmp.Diff(want, got))
			}
			return
		}
		t.Fatal(err)
	} else if tc.ValidateError != nil {
		t.Fatal("expected planner error")
	}
	// Compare plans as (node ID, procedure spec) pairs in bottom-up walk order.
	type testAttrs struct {
		ID plan.NodeID
		Spec plan.PhysicalProcedureSpec
	}
	want := make([]testAttrs, 0)
	after.BottomUpWalk(func(node plan.Node) error {
		want = append(want, testAttrs{
			ID: node.ID(),
			Spec: node.ProcedureSpec().(plan.PhysicalProcedureSpec),
		})
		return nil
	})
	got := make([]testAttrs, 0)
	pp.BottomUpWalk(func(node plan.Node) error {
		got = append(got, testAttrs{
			ID: node.ID(),
			Spec: node.ProcedureSpec().(plan.PhysicalProcedureSpec),
		})
		return nil
	})
	if !cmp.Equal(want, got, CmpOptions...) {
		t.Errorf("transformed plan not as expected, -want/+got:\n%v",
			cmp.Diff(want, got, CmpOptions...))
	}
}
// LogicalRuleTestHelper will run a rule test case through the logical planner
// (no ValidateError support here, unlike PhysicalRuleTestHelper).
func LogicalRuleTestHelper(t *testing.T, tc *RuleTestCase) {
	t.Helper()
	before := CreatePlanSpec(tc.Before)
	var after *plan.Spec
	if tc.NoChange {
		// the expected output is the untouched input
		after = CreatePlanSpec(tc.Before.Copy())
	} else {
		after = CreatePlanSpec(tc.After)
	}
	logicalPlanner := plan.NewLogicalPlanner(
		plan.OnlyLogicalRules(tc.Rules...),
	)
	ctx := tc.Context
	if ctx == nil {
		ctx = context.Background()
	}
	pp, err := logicalPlanner.Plan(ctx, before)
	if err != nil {
		t.Fatal(err)
	}
	// Compare plans as (node ID, procedure spec) pairs in bottom-up walk order.
	type testAttrs struct {
		ID plan.NodeID
		Spec plan.ProcedureSpec
	}
	want := make([]testAttrs, 0)
	after.BottomUpWalk(func(node plan.Node) error {
		want = append(want, testAttrs{
			ID: node.ID(),
			Spec: node.ProcedureSpec(),
		})
		return nil
	})
	got := make([]testAttrs, 0)
	pp.BottomUpWalk(func(node plan.Node) error {
		got = append(got, testAttrs{
			ID: node.ID(),
			Spec: node.ProcedureSpec(),
		})
		return nil
	})
	if !cmp.Equal(want, got, CmpOptions...) {
		t.Errorf("transformed plan not as expected, -want/+got:\n%v",
			cmp.Diff(want, got, CmpOptions...))
	}
}
package schema
import (
"github.com/elastic/beats/libbeat/common"
)
// Schema describes how a map[string]interface{} object can be parsed and converted into
// an event. The conversions can be described using an (optionally nested) common.MapStr
// that contains Conv objects. The map key is the name of the field in the
// resulting event.
type Schema map[string]Mapper
// Mapper interface represents a valid type to be used in a schema.
type Mapper interface {
	// Map applies the Mapper conversion on the data and adds the result
	// to the event on the key.
	Map(key string, event common.MapStr, data map[string]interface{}) *Errors
	// HasKey reports whether the mapper reads the given source key.
	HasKey(key string) bool
}
// A Conv object represents a conversion mechanism from the data map to the event map.
type Conv struct {
	Func Converter // Converter function applied to the value at Key
	Key string // The key in the data map
	Optional bool // Whether to suppress errors if the key is not found
}
// Converter extracts and converts the value at key from data,
// returning an error when the key is missing or the conversion fails.
type Converter func(key string, data map[string]interface{}) (interface{}, error)
// Map applies the conversion on the data and adds the result to the event
// under key. On conversion failure the event is left untouched and an Errors
// collection with a single error is returned (marked optional when the Conv
// is configured as Optional); on success it returns nil.
func (conv Conv) Map(key string, event common.MapStr, data map[string]interface{}) *Errors {
	value, err := conv.Func(conv.Key, data)
	if err != nil {
		// renamed from err to avoid shadowing the conversion error above
		convErr := NewError(key, err.Error())
		if conv.Optional {
			// optional conversions are downgraded so they are not reported as hard errors
			convErr.SetType(OptionalType)
		}
		errs := NewErrors()
		errs.AddError(convErr)
		return errs
	}
	// removed else-after-return: the happy path stays left-aligned
	event[key] = value
	return nil
}
// HasKey reports whether this conversion reads the given source key.
func (conv Conv) HasKey(key string) bool {
	return conv.Key == key
}
// Object is a nested group of Mappers; it implements Mapper for
// structured (sub-object) fields of the event.
type Object map[string]Mapper
// Map builds a nested sub-event by applying the contained schema to data,
// stores it in event under key, and returns any accumulated errors.
func (o Object) Map(key string, event common.MapStr, data map[string]interface{}) *Errors {
	subEvent := common.MapStr{}
	errs := applySchemaToEvent(subEvent, data, o)
	event[key] = subEvent
	return errs
}
// HasKey reports whether any nested Mapper reads the given source key.
func (o Object) HasKey(key string) bool {
	return hasKey(key, o)
}
// ApplyTo adds the fields extracted from data, converted using the schema, to the
// event map. Accumulated conversion errors are logged and also returned.
func (s Schema) ApplyTo(event common.MapStr, data map[string]interface{}) (common.MapStr, *Errors) {
	errors := applySchemaToEvent(event, data, s)
	errors.Log()
	return event, errors
}
// Apply converts the fields extracted from data, using the schema, into a new map and reports back the errors.
func (s Schema) Apply(data map[string]interface{}) (common.MapStr, *Errors) {
	return s.ApplyTo(common.MapStr{}, data)
}
// HasKey checks if the key is part of the schema, i.e. whether any of the
// schema's Mappers reads that source key.
func (s Schema) HasKey(key string) bool {
	return hasKey(key, s)
}
// hasKey reports whether any of the given mappers reads the source key.
func hasKey(key string, mappers map[string]Mapper) bool {
	for _, m := range mappers {
		if m.HasKey(key) {
			return true
		}
	}
	return false
}
// applySchemaToEvent runs every Mapper in conversions against data, writing
// the results into event and accumulating all errors produced along the way.
func applySchemaToEvent(event common.MapStr, data map[string]interface{}, conversions map[string]Mapper) *Errors {
	errs := NewErrors()
	for key, mapper := range conversions {
		errors := mapper.Map(key, event, data)
		errs.AddErrors(errors)
	}
	return errs
}
// SchemaOption is for adding optional parameters to the conversion
// functions
type SchemaOption func(c Conv) Conv
// Optional is a SchemaOption that suppresses the error message in case the key
// doesn't exist or results in an error.
func Optional(c Conv) Conv {
	c.Optional = true
	return c
}
// SetOptions applies the given options (e.g. Optional) to the Conv object
// in order, and returns the result.
func SetOptions(c Conv, opts []SchemaOption) Conv {
	for _, opt := range opts {
		c = opt(c)
	}
	return c
}
package schema
import (
errors_ "errors"
"fmt"
"strings"
"github.com/semi-technologies/weaviate/entities/models"
)
// schemaProperties wraps the schema for a single semantic kind.
type schemaProperties struct {
	Schema *models.Schema
}
// WeaviateSchema represents the used schema's
// (one schema per semantic kind: actions and things).
type WeaviateSchema struct {
	ActionSchema schemaProperties
	ThingSchema schemaProperties
}
// Error message templates. All but ErrorInvalidRefType are fmt format
// strings, filled in with class/property names via fmt.Errorf.
const (
	// ErrorNoSuchClass message
	ErrorNoSuchClass string = "no such class with name '%s' found in the schema. Check your schema files for which classes are available"
	// ErrorNoSuchProperty message
	ErrorNoSuchProperty string = "no such prop with name '%s' found in class '%s' in the schema. Check your schema files for which properties in this class are available"
	// ErrorNoSuchDatatype message
	ErrorNoSuchDatatype string = "given value-DataType does not exist."
	// ErrorInvalidRefType message
	ErrorInvalidRefType string = "given ref type is not valid"
)
// GetClassByName returns the class with the given name, or an error when no
// such class exists in the schema (or the schema itself is nil).
func GetClassByName(s *models.Schema, className string) (*models.Class, error) {
	if s != nil {
		// linear scan over the schema's classes for a name match
		for _, class := range s.Classes {
			if class.Class == className {
				return class, nil
			}
		}
	}
	return nil, fmt.Errorf(ErrorNoSuchClass, className)
}
// GetPropertyByName returns the property with the given name from the class,
// or an error when the class has no such property. Only the part of propName
// before the first '.' is used for matching (nested path segments are ignored).
func GetPropertyByName(c *models.Class, propName string) (*models.Property, error) {
	// hoisted out of the loop: the split is loop-invariant and was previously
	// recomputed for every property
	target := strings.Split(propName, ".")[0]
	for _, prop := range c.Properties {
		if prop.Name == target {
			return prop, nil
		}
	}
	return nil, fmt.Errorf(ErrorNoSuchProperty, propName, c.Class)
}
// GetPropertyDataType checks whether the given string is a valid data type
// and returns the property's resolved DataType.
// NOTE(review): when a property lists multiple dataTypes, each iteration
// overwrites returnDataType, so only the last entry wins -- confirm that
// multi-type properties are intended to behave this way.
func GetPropertyDataType(class *models.Class, propertyName string) (*DataType, error) {
	// Get the class-property
	prop, err := GetPropertyByName(class, propertyName)
	if err != nil {
		return nil, err
	}
	// Init the return value
	var returnDataType DataType
	// For each data type
	for _, dataType := range prop.DataType {
		if len(dataType) == 0 {
			return nil, fmt.Errorf("invalid-dataType")
		}
		// Get the first letter to see if it is a capital
		// (class names are capitalized, so a capital marks a cross-reference)
		firstLetter := string(dataType[0])
		if strings.ToUpper(firstLetter) == firstLetter {
			returnDataType = DataTypeCRef
		} else {
			// Get the value-data type (non-cref), return error if there is one, otherwise assign it to return data type
			valueDataType, err := GetValueDataTypeFromString(dataType)
			if err != nil {
				return nil, err
			}
			returnDataType = *valueDataType
		}
	}
	return &returnDataType, nil
}
// GetValueDataTypeFromString checks whether the given string is a valid
// (primitive, non-reference) data type and returns it as a DataType.
func GetValueDataTypeFromString(dt string) (*DataType, error) {
	if !IsValidValueDataType(dt) {
		return nil, errors_.New(ErrorNoSuchDatatype)
	}
	// Every primitive DataType constant is just its string name typed as
	// DataType, so a direct conversion replaces the former 13-branch
	// if/else chain; IsValidValueDataType accepts exactly the same set of
	// names the chain handled.
	returnDataType := DataType(dt)
	return &returnDataType, nil
}
// IsValidValueDataType checks whether the given string is a valid primitive
// (non-reference) data type, including the array variants.
func IsValidValueDataType(dt string) bool {
	switch dt {
	case
		string(DataTypeString),
		string(DataTypeText),
		string(DataTypeInt),
		string(DataTypeNumber),
		string(DataTypeBoolean),
		string(DataTypeDate),
		string(DataTypeGeoCoordinates),
		string(DataTypePhoneNumber),
		string(DataTypeBlob),
		string(DataTypeStringArray),
		string(DataTypeTextArray),
		string(DataTypeIntArray),
		string(DataTypeNumberArray):
		return true
	}
	return false
}
// IsRefDataType reports whether the given dataType list denotes a
// cross-reference: by convention reference targets are class names, which
// start with an upper-case letter. Returns false for an empty list or an
// empty first entry (previously this panicked with an index-out-of-range).
func IsRefDataType(dt []string) bool {
	if len(dt) == 0 || len(dt[0]) == 0 {
		return false
	}
	firstLetter := string(dt[0][0])
	return strings.ToUpper(firstLetter) == firstLetter
}
// IsBlobDataType reports whether any entry in the dataType list is "blob".
func IsBlobDataType(dt []string) bool {
	for _, name := range dt {
		if name == string(DataTypeBlob) {
			return true
		}
	}
	return false
}
func IsArrayDataType(dt []string) bool {
for i := range dt {
switch DataType(dt[i]) {
case DataTypeStringArray, DataTypeTextArray, DataTypeIntArray, DataTypeNumberArray:
return true
}
}
return false
} | entities/schema/backward_compat.go | 0.673943 | 0.416322 | backward_compat.go | starcoder |
package tsm1
import (
"github.com/influxdata/influxdb/v2/tsdb"
)
// ReadFloatBlockAt returns the float values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadFloatBlockAt(entry *IndexEntry, vals *[]FloatValue) ([]FloatValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readFloatBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}
// ReadFloatArrayBlockAt fills vals with the float values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadFloatArrayBlockAt(entry *IndexEntry, vals *tsdb.FloatArray) error {
	t.mu.RLock()
	err := t.accessor.readFloatArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}
// ReadIntegerBlockAt returns the integer values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadIntegerBlockAt(entry *IndexEntry, vals *[]IntegerValue) ([]IntegerValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readIntegerBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}
// ReadIntegerArrayBlockAt fills vals with the integer values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadIntegerArrayBlockAt(entry *IndexEntry, vals *tsdb.IntegerArray) error {
	t.mu.RLock()
	err := t.accessor.readIntegerArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}
// ReadUnsignedBlockAt returns the unsigned values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadUnsignedBlockAt(entry *IndexEntry, vals *[]UnsignedValue) ([]UnsignedValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readUnsignedBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}
// ReadUnsignedArrayBlockAt fills vals with the unsigned values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadUnsignedArrayBlockAt(entry *IndexEntry, vals *tsdb.UnsignedArray) error {
	t.mu.RLock()
	err := t.accessor.readUnsignedArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}
// ReadStringBlockAt returns the string values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadStringBlockAt(entry *IndexEntry, vals *[]StringValue) ([]StringValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readStringBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}
// ReadStringArrayBlockAt fills vals with the string values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadStringArrayBlockAt(entry *IndexEntry, vals *tsdb.StringArray) error {
	t.mu.RLock()
	err := t.accessor.readStringArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}
// ReadBooleanBlockAt returns the boolean values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadBooleanBlockAt(entry *IndexEntry, vals *[]BooleanValue) ([]BooleanValue, error) {
	t.mu.RLock()
	v, err := t.accessor.readBooleanBlock(entry, vals)
	t.mu.RUnlock()
	return v, err
}
// ReadBooleanArrayBlockAt fills vals with the boolean values corresponding to the given index entry.
// t.mu (read) is held around the accessor call.
func (t *TSMReader) ReadBooleanArrayBlockAt(entry *IndexEntry, vals *tsdb.BooleanArray) error {
	t.mu.RLock()
	err := t.accessor.readBooleanArrayBlock(entry, vals)
	t.mu.RUnlock()
	return err
}
// blockAccessor abstracts a method of accessing blocks from a
// TSM file.
type blockAccessor interface {
	init() (*indirectIndex, error)
	// point and full reads by series key
	read(key []byte, timestamp int64) ([]Value, error)
	readAll(key []byte) ([]Value, error)
	// generic and per-type block decoders, in slice and array flavors
	readBlock(entry *IndexEntry, values []Value) ([]Value, error)
	readFloatBlock(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error)
	readFloatArrayBlock(entry *IndexEntry, values *tsdb.FloatArray) error
	readIntegerBlock(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error)
	readIntegerArrayBlock(entry *IndexEntry, values *tsdb.IntegerArray) error
	readUnsignedBlock(entry *IndexEntry, values *[]UnsignedValue) ([]UnsignedValue, error)
	readUnsignedArrayBlock(entry *IndexEntry, values *tsdb.UnsignedArray) error
	readStringBlock(entry *IndexEntry, values *[]StringValue) ([]StringValue, error)
	readStringArrayBlock(entry *IndexEntry, values *tsdb.StringArray) error
	readBooleanBlock(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error)
	readBooleanArrayBlock(entry *IndexEntry, values *tsdb.BooleanArray) error
	// raw block access and file lifecycle management
	readBytes(entry *IndexEntry, buf []byte) (uint32, []byte, error)
	rename(path string) error
	path() string
	close() error
	free() error
}
// readFloatBlock decodes the float block described by entry into values and
// returns the decoded slice. It returns ErrTSMClosed when the mmapped region
// no longer covers the block (e.g. the file was closed underneath us).
func (m *mmapAccessor) readFloatBlock(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error) {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return nil, ErrTSMClosed
	}
	// The first 4 bytes of the block are skipped — presumably the block
	// checksum; confirm against the TSM block layout.
	decoded, err := DecodeFloatBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// readFloatArrayBlock decodes the float block described by entry into values.
// It returns ErrTSMClosed when the mmapped region no longer covers the block.
func (m *mmapAccessor) readFloatArrayBlock(entry *IndexEntry, values *tsdb.FloatArray) error {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return ErrTSMClosed
	}
	return DecodeFloatArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
}
// readIntegerBlock decodes the integer block described by entry into values
// and returns the decoded slice. It returns ErrTSMClosed when the mmapped
// region no longer covers the block.
func (m *mmapAccessor) readIntegerBlock(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error) {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return nil, ErrTSMClosed
	}
	decoded, err := DecodeIntegerBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// readIntegerArrayBlock decodes the integer block described by entry into
// values. It returns ErrTSMClosed when the mmapped region no longer covers
// the block.
func (m *mmapAccessor) readIntegerArrayBlock(entry *IndexEntry, values *tsdb.IntegerArray) error {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return ErrTSMClosed
	}
	return DecodeIntegerArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
}
// readUnsignedBlock decodes the unsigned-integer block described by entry
// into values and returns the decoded slice. It returns ErrTSMClosed when
// the mmapped region no longer covers the block.
func (m *mmapAccessor) readUnsignedBlock(entry *IndexEntry, values *[]UnsignedValue) ([]UnsignedValue, error) {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return nil, ErrTSMClosed
	}
	decoded, err := DecodeUnsignedBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// readUnsignedArrayBlock decodes the unsigned-integer block described by
// entry into values. It returns ErrTSMClosed when the mmapped region no
// longer covers the block.
func (m *mmapAccessor) readUnsignedArrayBlock(entry *IndexEntry, values *tsdb.UnsignedArray) error {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return ErrTSMClosed
	}
	return DecodeUnsignedArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
}
// readStringBlock decodes the string block described by entry into values
// and returns the decoded slice. It returns ErrTSMClosed when the mmapped
// region no longer covers the block.
func (m *mmapAccessor) readStringBlock(entry *IndexEntry, values *[]StringValue) ([]StringValue, error) {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return nil, ErrTSMClosed
	}
	decoded, err := DecodeStringBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// readStringArrayBlock decodes the string block described by entry into
// values. It returns ErrTSMClosed when the mmapped region no longer covers
// the block.
func (m *mmapAccessor) readStringArrayBlock(entry *IndexEntry, values *tsdb.StringArray) error {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return ErrTSMClosed
	}
	return DecodeStringArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
}
// readBooleanBlock decodes the boolean block described by entry into values
// and returns the decoded slice. It returns ErrTSMClosed when the mmapped
// region no longer covers the block.
func (m *mmapAccessor) readBooleanBlock(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error) {
	m.incAccess()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
		return nil, ErrTSMClosed
	}
	decoded, err := DecodeBooleanBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
func (m *mmapAccessor) readBooleanArrayBlock(entry *IndexEntry, values *tsdb.BooleanArray) error {
m.incAccess()
m.mu.RLock()
if int64(len(m.b)) < entry.Offset+int64(entry.Size) {
m.mu.RUnlock()
return ErrTSMClosed
}
err := DecodeBooleanArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)
m.mu.RUnlock()
return err
} | tsdb/engine/tsm1/reader.gen.go | 0.747339 | 0.477676 | reader.gen.go | starcoder |
package condition
import (
"fmt"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
jmespath "github.com/jmespath/go-jmespath"
)
//------------------------------------------------------------------------------
// init registers the jmespath condition constructor and its user-facing
// documentation in the global Constructors table.
func init() {
	Constructors[TypeJMESPath] = TypeSpec{
		constructor: NewJMESPath,
		Description: `
Parses a message part as a JSON blob and attempts to apply a JMESPath expression
to it, expecting a boolean response. If the response is true the condition
passes, otherwise it does not. Please refer to the
[JMESPath website](http://jmespath.org/) for information and tutorials regarding
the syntax of expressions.
For example, with the following config:
` + "``` yaml" + `
jmespath:
  part: 0
  query: a == 'foo'
` + "```" + `
If the initial jmespaths of part 0 were:
` + "``` json" + `
{
	"a": "foo"
}
` + "```" + `
Then the condition would pass.
JMESPath is traditionally used for mutating JSON, in order to do this please
instead use the ` + "[`jmespath`](/docs/components/processors/jmespath)" + `
processor.`,
	}
}
//------------------------------------------------------------------------------
// JMESPathConfig is a configuration struct containing fields for the jmespath
// condition.
type JMESPathConfig struct {
	// Part is the index of the message part to test; negative values count
	// backwards from the end of the message.
	Part int `json:"part" yaml:"part"`
	// Query is the JMESPath expression applied to the part; it is expected to
	// evaluate to a boolean.
	Query string `json:"query" yaml:"query"`
}
// NewJMESPathConfig returns a JMESPathConfig with default values
// (part 0, empty query).
func NewJMESPathConfig() JMESPathConfig {
	var conf JMESPathConfig
	conf.Part = 0
	conf.Query = ""
	return conf
}
//------------------------------------------------------------------------------
// JMESPath is a condition that checks message parts against a jmespath query
// which is expected to evaluate to a boolean.
type JMESPath struct {
	stats metrics.Type
	log   log.Modular
	// part is the index of the message part to check; negative values count
	// backwards from the end of the message.
	part int
	// query is the pre-compiled expression, built once in NewJMESPath.
	query *jmespath.JMESPath

	mCount    metrics.StatCounter // total checks performed
	mTrue     metrics.StatCounter // checks that passed
	mFalse    metrics.StatCounter // checks that did not pass
	mErrJSONP metrics.StatCounter // failures to parse the part as JSON
	mErrJMES  metrics.StatCounter // failures (incl. panics) while searching
	mErr      metrics.StatCounter // all errors
}
// NewJMESPath returns a JMESPath condition, compiling the configured query up
// front. An error is returned when the query fails to compile.
func NewJMESPath(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	query, err := jmespath.Compile(conf.JMESPath.Query)
	if err != nil {
		return nil, fmt.Errorf("failed to compile JMESPath query: %v", err)
	}
	return &JMESPath{
		stats: stats,
		log:   log,
		part:  conf.JMESPath.Part,
		query: query,

		// Metric counters are registered once here and incremented in Check.
		mCount:    stats.GetCounter("count"),
		mTrue:     stats.GetCounter("true"),
		mFalse:    stats.GetCounter("false"),
		mErrJSONP: stats.GetCounter("error_json_parse"),
		mErrJMES:  stats.GetCounter("error_jmespath_search"),
		mErr:      stats.GetCounter("error"),
	}, nil
}
//------------------------------------------------------------------------------
// safeSearch runs the compiled JMESPath query against part, converting any
// panic raised inside the jmespath library into an error.
func safeSearch(part interface{}, j *jmespath.JMESPath) (res interface{}, err error) {
	// recover only works from a directly deferred function; capture the panic
	// value and surface it through the named err result instead of crashing.
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("jmespath panic: %v", r)
		}
	}()
	return j.Search(part)
}
// Check attempts to check a message part against a configured condition. It
// returns false (and increments the relevant error counters) when the part
// index is out of range, the part is not valid JSON, or the query fails.
func (c *JMESPath) Check(msg types.Message) bool {
	c.mCount.Incr(1)

	i := c.part
	if i < 0 {
		i += msg.Len()
	}
	if i < 0 || i >= msg.Len() {
		c.mFalse.Incr(1)
		return false
	}

	doc, err := msg.Get(i).JSON()
	if err != nil {
		c.log.Debugf("Failed to parse part into json: %v\n", err)
		c.mErrJSONP.Incr(1)
		c.mErr.Incr(1)
		c.mFalse.Incr(1)
		return false
	}

	res, err := safeSearch(doc, c.query)
	if err != nil {
		c.log.Debugf("Failed to search json: %v\n", err)
		c.mErrJMES.Incr(1)
		c.mErr.Incr(1)
		c.mFalse.Incr(1)
		return false
	}

	// Any non-boolean query result counts as a failed check.
	if pass, _ := res.(bool); pass {
		c.mTrue.Incr(1)
		return true
	}
	c.mFalse.Incr(1)
	return false
}
//------------------------------------------------------------------------------ | lib/condition/jmespath.go | 0.66454 | 0.788787 | jmespath.go | starcoder |
package channels
import (
logging "github.com/ipfs/go-log/v2"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-statemachine/fsm"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-data-transfer/channels/internal"
)
// log is the logger used by the data-transfer channel state machine.
var log = logging.Logger("data-transfer")

// transferringStates lists the states in which data may still be flowing,
// i.e. where progress events (sent/received/queued deltas) are accepted.
var transferringStates = []fsm.StateKey{
	datatransfer.Requested,
	datatransfer.Ongoing,
	datatransfer.InitiatorPaused,
	datatransfer.ResponderPaused,
	datatransfer.BothPaused,
	datatransfer.ResponderCompleted,
	datatransfer.ResponderFinalizing,
}
// ChannelEvents describes the events that can occur on a data-transfer
// channel and the state transition each one triggers.
var ChannelEvents = fsm.Events{
	// Open a channel
	fsm.Event(datatransfer.Open).FromAny().To(datatransfer.Requested).Action(func(chst *internal.ChannelState) error {
		chst.AddLog("")
		return nil
	}),
	// Remote peer has accepted the Open channel request
	fsm.Event(datatransfer.Accept).From(datatransfer.Requested).To(datatransfer.Ongoing).Action(func(chst *internal.ChannelState) error {
		chst.AddLog("")
		return nil
	}),
	// The transfer request was queued for sending; clear any stale message.
	fsm.Event(datatransfer.TransferRequestQueued).FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error {
		chst.Message = ""
		chst.AddLog("")
		return nil
	}),
	// The channel is being restarted; clear any previous error message.
	fsm.Event(datatransfer.Restart).FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error {
		chst.Message = ""
		chst.AddLog("")
		return nil
	}),
	// Cancel the channel from any state.
	fsm.Event(datatransfer.Cancel).FromAny().To(datatransfer.Cancelling).Action(func(chst *internal.ChannelState) error {
		chst.AddLog("")
		return nil
	}),
	// When a channel is Opened, clear any previous error message.
	// (eg if the channel is opened after being restarted due to a connection
	// error)
	fsm.Event(datatransfer.Opened).FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error {
		chst.Message = ""
		chst.AddLog("")
		return nil
	}),
	// Record the highest observed total of received blocks.
	fsm.Event(datatransfer.DataReceived).FromAny().ToNoChange().
		Action(func(chst *internal.ChannelState, rcvdBlocksTotal int64) error {
			if rcvdBlocksTotal > chst.ReceivedBlocksTotal {
				chst.ReceivedBlocksTotal = rcvdBlocksTotal
			}
			chst.AddLog("")
			return nil
		}),
	// Accumulate the number of bytes received while transferring.
	fsm.Event(datatransfer.DataReceivedProgress).FromMany(transferringStates...).ToNoChange().
		Action(func(chst *internal.ChannelState, delta uint64) error {
			chst.Received += delta
			chst.AddLog("received data")
			return nil
		}),
	// Data was sent over the wire.
	fsm.Event(datatransfer.DataSent).
		FromMany(transferringStates...).ToNoChange().
		From(datatransfer.TransferFinished).ToNoChange().
		Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// Accumulate the number of bytes sent while transferring.
	fsm.Event(datatransfer.DataSentProgress).FromMany(transferringStates...).ToNoChange().
		Action(func(chst *internal.ChannelState, delta uint64) error {
			chst.Sent += delta
			chst.AddLog("sending data")
			return nil
		}),
	// Data was queued for sending.
	fsm.Event(datatransfer.DataQueued).
		FromMany(transferringStates...).ToNoChange().
		From(datatransfer.TransferFinished).ToNoChange().
		Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// Accumulate the number of bytes queued for sending.
	fsm.Event(datatransfer.DataQueuedProgress).FromMany(transferringStates...).ToNoChange().
		Action(func(chst *internal.ChannelState, delta uint64) error {
			chst.Queued += delta
			chst.AddLog("")
			return nil
		}),
	// The connection to the remote peer was lost; record why, keep the state.
	fsm.Event(datatransfer.Disconnected).FromAny().ToNoChange().Action(func(chst *internal.ChannelState, err error) error {
		chst.Message = err.Error()
		chst.AddLog("data transfer disconnected: %s", chst.Message)
		return nil
	}),
	// Sending data failed; record the error without changing state.
	fsm.Event(datatransfer.SendDataError).FromAny().ToNoChange().Action(func(chst *internal.ChannelState, err error) error {
		chst.Message = err.Error()
		chst.AddLog("data transfer send error: %s", chst.Message)
		return nil
	}),
	// Receiving data failed; record the error without changing state.
	fsm.Event(datatransfer.ReceiveDataError).FromAny().ToNoChange().Action(func(chst *internal.ChannelState, err error) error {
		chst.Message = err.Error()
		chst.AddLog("data transfer receive error: %s", chst.Message)
		return nil
	}),
	// The outstanding request was cancelled; record the reason.
	fsm.Event(datatransfer.RequestCancelled).FromAny().ToNoChange().Action(func(chst *internal.ChannelState, err error) error {
		chst.Message = err.Error()
		chst.AddLog("data transfer request cancelled: %s", chst.Message)
		return nil
	}),
	// A fatal error occurred; head towards the Failed terminal state.
	fsm.Event(datatransfer.Error).FromAny().To(datatransfer.Failing).Action(func(chst *internal.ChannelState, err error) error {
		chst.Message = err.Error()
		chst.AddLog("data transfer erred: %s", chst.Message)
		return nil
	}),
	// Record a new voucher received for this channel.
	fsm.Event(datatransfer.NewVoucher).FromAny().ToNoChange().
		Action(func(chst *internal.ChannelState, vtype datatransfer.TypeIdentifier, voucherBytes []byte) error {
			chst.Vouchers = append(chst.Vouchers, internal.EncodedVoucher{Type: vtype, Voucher: &cbg.Deferred{Raw: voucherBytes}})
			chst.AddLog("got new voucher")
			return nil
		}),
	// Record a new voucher result received for this channel.
	fsm.Event(datatransfer.NewVoucherResult).FromAny().ToNoChange().
		Action(func(chst *internal.ChannelState, vtype datatransfer.TypeIdentifier, voucherResultBytes []byte) error {
			chst.VoucherResults = append(chst.VoucherResults,
				internal.EncodedVoucherResult{Type: vtype, VoucherResult: &cbg.Deferred{Raw: voucherResultBytes}})
			chst.AddLog("got new voucher result")
			return nil
		}),
	// The initiator paused the transfer.
	fsm.Event(datatransfer.PauseInitiator).
		FromMany(datatransfer.Requested, datatransfer.Ongoing).To(datatransfer.InitiatorPaused).
		From(datatransfer.ResponderPaused).To(datatransfer.BothPaused).
		FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// The responder paused the transfer.
	fsm.Event(datatransfer.PauseResponder).
		FromMany(datatransfer.Requested, datatransfer.Ongoing).To(datatransfer.ResponderPaused).
		From(datatransfer.InitiatorPaused).To(datatransfer.BothPaused).
		FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// The initiator resumed the transfer.
	fsm.Event(datatransfer.ResumeInitiator).
		From(datatransfer.InitiatorPaused).To(datatransfer.Ongoing).
		From(datatransfer.BothPaused).To(datatransfer.ResponderPaused).
		FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// The responder resumed the transfer.
	fsm.Event(datatransfer.ResumeResponder).
		From(datatransfer.ResponderPaused).To(datatransfer.Ongoing).
		From(datatransfer.BothPaused).To(datatransfer.InitiatorPaused).
		From(datatransfer.Finalizing).To(datatransfer.Completing).
		FromAny().ToJustRecord().Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// The transfer has finished on the local node - all data was sent / received
	fsm.Event(datatransfer.FinishTransfer).
		FromAny().To(datatransfer.TransferFinished).
		FromMany(datatransfer.Failing, datatransfer.Cancelling).ToJustRecord().
		From(datatransfer.ResponderCompleted).To(datatransfer.Completing).
		From(datatransfer.ResponderFinalizing).To(datatransfer.ResponderFinalizingTransferFinished).Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// The responder has begun its finalization phase.
	fsm.Event(datatransfer.ResponderBeginsFinalization).
		FromAny().To(datatransfer.ResponderFinalizing).
		FromMany(datatransfer.Failing, datatransfer.Cancelling).ToJustRecord().
		From(datatransfer.TransferFinished).To(datatransfer.ResponderFinalizingTransferFinished).Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// The remote peer sent a Complete message, meaning it has sent / received all data
	fsm.Event(datatransfer.ResponderCompletes).
		FromAny().To(datatransfer.ResponderCompleted).
		FromMany(datatransfer.Failing, datatransfer.Cancelling).ToJustRecord().
		From(datatransfer.ResponderPaused).To(datatransfer.ResponderFinalizing).
		From(datatransfer.TransferFinished).To(datatransfer.Completing).
		From(datatransfer.ResponderFinalizing).To(datatransfer.ResponderCompleted).
		From(datatransfer.ResponderFinalizingTransferFinished).To(datatransfer.Completing).Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// Begin the finalization phase on the local node.
	fsm.Event(datatransfer.BeginFinalizing).FromAny().To(datatransfer.Finalizing).Action(func(chst *internal.ChannelState) error {
		chst.AddLog("")
		return nil
	}),
	// Both the local node and the remote peer have completed the transfer
	fsm.Event(datatransfer.Complete).FromAny().To(datatransfer.Completing).Action(func(chst *internal.ChannelState) error {
		chst.AddLog("")
		return nil
	}),
	// Cleanup finished; move to the matching terminal state.
	fsm.Event(datatransfer.CleanupComplete).
		From(datatransfer.Cancelling).To(datatransfer.Cancelled).
		From(datatransfer.Failing).To(datatransfer.Failed).
		From(datatransfer.Completing).To(datatransfer.Completed).Action(func(chst *internal.ChannelState) error {
			chst.AddLog("")
			return nil
		}),
	// will kickoff state handlers for channels that were cleaning up
	fsm.Event(datatransfer.CompleteCleanupOnRestart).FromAny().ToNoChange().Action(func(chst *internal.ChannelState) error {
		chst.AddLog("")
		return nil
	}),
}
// ChannelStateEntryFuncs are handlers called as we enter different states.
// Each of the three cleanup states tears down the channel's connection on
// entry before transitioning to the matching terminal state.
var ChannelStateEntryFuncs = fsm.StateEntryFuncs{
	datatransfer.Cancelling: cleanupConnection,
	datatransfer.Failing:    cleanupConnection,
	datatransfer.Completing: cleanupConnection,
}
// cleanupConnection releases the channel's resources and unprotects the
// connection to the counterparty, then fires CleanupComplete to move the
// channel into its terminal state.
func cleanupConnection(ctx fsm.Context, env ChannelEnvironment, channel internal.ChannelState) error {
	chid := datatransfer.ChannelID{ID: channel.TransferID, Initiator: channel.Initiator, Responder: channel.Responder}

	// The "other party" is whichever side of the channel is not us.
	otherParty := channel.Responder
	if channel.Initiator != env.ID() {
		otherParty = channel.Initiator
	}

	env.CleanupChannel(chid)
	env.Unprotect(otherParty, chid.String())
	return ctx.Trigger(datatransfer.CleanupComplete)
}
// CleanupStates are the penultimate states for a channel; each transitions to
// its corresponding finality state once cleanup completes.
var CleanupStates = []fsm.StateKey{
	datatransfer.Cancelling,
	datatransfer.Completing,
	datatransfer.Failing,
}

// ChannelFinalityStates are the final (terminal) states for a channel.
var ChannelFinalityStates = []fsm.StateKey{
	datatransfer.Cancelled,
	datatransfer.Completed,
	datatransfer.Failed,
}
// IsChannelTerminated returns true if the channel has reached one of the
// finality states (Cancelled, Completed or Failed).
func IsChannelTerminated(st datatransfer.Status) bool {
	for _, fin := range ChannelFinalityStates {
		if st == fin {
			return true
		}
	}
	return false
}
// IsChannelCleaningUp returns true if channel was being cleaned up and finished
func IsChannelCleaningUp(st datatransfer.Status) bool {
for _, s := range CleanupStates {
if s == st {
return true
}
}
return false
} | channels/channels_fsm.go | 0.546496 | 0.412471 | channels_fsm.go | starcoder |
package parser
import (
"fmt"
"regexp"
"strconv"
)
// Result is a struct which holds the result from parsing a string.
type Result struct {
	// Match is the portion of the input consumed by the parser.
	Match string
	// Rest is the remaining, unconsumed input.
	Rest string
}
// NewResult creates a new Result holding the matched text m and the
// remaining input r.
func NewResult(m, r string) *Result {
	res := new(Result)
	res.Match = m
	res.Rest = r
	return res
}
// Parser is a single function which parses input and returns a Result, or an
// error when the input does not match.
type Parser func(input string) (*Result, error)

// Combinator is a function that receives one or more Parsers and returns a
// single Parser combining them.
type Combinator func(ps ...Parser) Parser
// CharacterParser returns a Parser that matches the single character c at
// the start of the input. On success the Result carries c as Match and the
// remaining input as Rest; otherwise an error is returned.
func CharacterParser(c string) Parser {
	return func(s string) (*Result, error) {
		// Guard against empty input: the unconditional s[:1] slice used
		// previously panicked when s was "".
		if len(s) == 0 {
			return &Result{}, fmt.Errorf("no match for %s", c)
		}
		if s[:1] == c {
			return NewResult(c, s[1:]), nil
		}
		return &Result{}, fmt.Errorf("no match for %s", c)
	}
}
// IntParser returns a Parser that matches an integer of exactly i decimal
// digits at the start of the input.
func IntParser(i int) Parser {
	// Compile the pattern once per parser instead of on every invocation.
	re := regexp.MustCompile(`\d{` + strconv.Itoa(i) + `}`)
	return func(s string) (*Result, error) {
		// Guard against inputs shorter than i characters: the unconditional
		// s[:i] slice used previously panicked in that case.
		if len(s) < i {
			return &Result{}, fmt.Errorf("no match")
		}
		sl := s[:i]
		// sl has length i, so a match of length i necessarily spans all of
		// sl, i.e. the first i characters are all digits.
		match := re.FindString(sl)
		if len(match) == i {
			return NewResult(match, s[i:]), nil
		}
		return &Result{}, fmt.Errorf("no match")
	}
}
// SequenceOf receives many parsers and runs them in sequence, concatenating
// their matches; it fails with the first parser that returns an error.
var SequenceOf Combinator = func(ps ...Parser) Parser {
	return func(s string) (*Result, error) {
		r := NewResult("", s)
		// NOTE(review): this truncation assumes every sub-parser consumes
		// exactly one character, and it silently drops trailing parsers when
		// the input is shorter than the parser list — confirm this partial-
		// success behaviour is intended rather than returning an error.
		if len(ps) > len(s) {
			ps = ps[:len(s)]
		}
		for _, p := range ps {
			res, err := p(r.Rest)
			if err != nil {
				return &Result{}, err
			}
			// Accumulate the match and continue parsing from the remainder.
			r.Match += res.Match
			r.Rest = res.Rest
		}
		return r, nil
	}
}
// Or receives two parsers and returns a zero or one truthy match
var Or Combinator = func(ps ...Parser) Parser {
return func(s string) (*Result, error) {
if len(ps) > 2 {
return &Result{}, fmt.Errorf("only two parsers")
}
var c1, c2 string
c1 = s
c2 = s
res1, err1 := ps[0](c1)
res2, err2 := ps[1](c2)
if len(res1.Match) != 0 {
return res1, nil
}
if len(res2.Match) != 0 {
return res2, nil
}
if err1 != nil {
return &Result{}, err1
}
if err2 != nil {
return &Result{}, err2
}
return &Result{}, fmt.Errorf("something wrong with `Or`")
}
} | parser/parser.go | 0.691497 | 0.400661 | parser.go | starcoder |
package angular
import (
"time"
)
// Units for Velocity values. Always multiply with a unit when setting the initial value like you would for
// time.Time. This prevents you from having to worry about the internal storage format.
// Each constant is the conversion factor between one angular unit per second
// and the internal representation.
const (
	MilliradianPerSecond Velocity = Velocity(Milliradian) / Velocity(time.Second)
	RadianPerSecond      Velocity = Velocity(Radian) / Velocity(time.Second)
	DegreePerSecond      Velocity = Velocity(Degree) / Velocity(time.Second)
	GradianPerSecond     Velocity = Velocity(Gradian) / Velocity(time.Second)
)
// MilliradiansPerSecond returns v as a floating point number of milliradians per second.
func (v Velocity) MilliradiansPerSecond() float64 {
	return float64(v / MilliradianPerSecond)
}

// RadiansPerSecond returns v as a floating point number of radians per second.
func (v Velocity) RadiansPerSecond() float64 {
	return float64(v / RadianPerSecond)
}
// DegreesPerSecond returns v as a floating point number of degrees per second.
func (v Velocity) DegreesPerSecond() float64 {
	return float64(v / DegreePerSecond)
}

// DegreeesPerSecond returns v as a floating point number of degrees per second.
//
// Deprecated: the name contains a typo; use DegreesPerSecond instead. This
// method is kept for backward compatibility with existing callers.
func (v Velocity) DegreeesPerSecond() float64 {
	return v.DegreesPerSecond()
}
// GradiansPerSecond returns v as a floating point number of gradians per second.
func (v Velocity) GradiansPerSecond() float64 {
	return float64(v / GradianPerSecond)
}
// Abs returns the absolute value of v as a copy.
func (v Velocity) Abs() Velocity {
	if v >= 0 {
		return v
	}
	return -v
}
// Mul returns the product of v * x as a new Velocity.
func (v Velocity) Mul(x float64) Velocity {
	return v * Velocity(x)
}

// Div returns the quotient of v / x as a new Velocity.
func (v Velocity) Div(x float64) Velocity {
	return v / Velocity(x)
}

// DivVelocity returns the quotient of v / x as a dimensionless floating point number.
func (v Velocity) DivVelocity(x Velocity) float64 {
	return float64(v / x)
}

// DivDuration returns the quotient of v / t as an Acceleration.
func (v Velocity) DivDuration(t time.Duration) Acceleration {
	return Acceleration(float64(v) / float64(t))
}

// DivAcceleration returns the quotient of v / x as a time.Duration.
func (v Velocity) DivAcceleration(x Acceleration) time.Duration {
	return time.Duration(float64(v) / float64(x))
}

// MulDuration returns the product of v * t as an Angle.
func (v Velocity) MulDuration(t time.Duration) Angle {
	return Angle(float64(v) * float64(t))
}
package statemachine
import (
	"strconv"
	"strings"
)
/** The StateMachine allows building an LCG (Life Cycle Graph) to represent the multiple states an application
will go through.
At each state, an action can be executed through a Hook Caller.
**/
// LCGState identifies one state (node) of the life-cycle graph.
type LCGState int

// String returns the decimal representation of the state.
func (s LCGState) String() string {
	return strconv.Itoa(int(s))
}

// DefaultState is the sentinel state used both as the OFF marker and as the
// wildcard event that matches a node's default child.
const DefaultState LCGState = -1
// LCGChildren defines the children nodes ordered by priority. The first has the most important priority.
// The last is the default, the one to choose when DefaultState has been added in the event list and
// no other event matches a child.
type LCGChildren []LCGState
// Default reports whether t has a default child and returns it. By
// convention, the default is the last (lowest-priority) child in the list.
func (t LCGChildren) Default() (bool, LCGState) {
	if len(t) == 0 {
		return false, 0
	}
	return true, t[len(t)-1]
}
// IsDefault reports whether state is the default (last) child of t.
func (t LCGChildren) IsDefault(state LCGState) bool {
	ok, dflt := t.Default()
	return ok && dflt == state
}
// LCGNodeInfo describes one node of the graph: a display name and the node's
// children ordered by priority.
type LCGNodeInfo struct {
	Name     string
	Children LCGChildren
}

// LCGGraph maps each state to its node information.
type LCGGraph map[LCGState]LCGNodeInfo
// IsLeafNode reports whether state has no children in the graph.
func (g LCGGraph) IsLeafNode(state LCGState) bool {
	return len(g[state].Children) == 0
}
// StateName returns the configured display name for state, falling back to
// its decimal representation when no name was configured.
func (g LCGGraph) StateName(state LCGState) string {
	if n := g[state].Name; n != "" {
		return n
	}
	return state.String()
}
// LCGStateAction is the hook interface for state actions.
// The Enter hook is called each time the machine enters a state; this is the
// action to execute on that state.
type LCGStateAction interface {
	Enter(state LCGState) error
}
// Machine is a state machine walking an LCGGraph, optionally invoking an
// action on every entered state and recording the path taken.
type Machine struct {
	Graph   LCGGraph       // the life-cycle graph to walk
	Actions LCGStateAction // optional hook run on each entered state (may be nil)

	curState  LCGState // current state
	prevState LCGState // previously entered state; equal to curState when OFF

	pathInGraph     []string // recorded path; nil when recording is disabled
	pathLoopsCount  uint     // consecutive two-state bounces, used for path compaction
	pathLengthLimit uint     // max elements kept in pathInGraph (defaults to 512)
}
// IsOFF reports whether the machine is not currently browsing the graph:
// true for a freshly instantiated machine (both states zero) or after a leaf
// node has been traversed.
func (m Machine) IsOFF() bool {
	// Equal when the machine is instantiated without any browsing or when set to OFF after traversing a leaf node.
	return m.prevState == m.curState
}

// setOFF marks the machine as no longer browsing by collapsing both states
// onto DefaultState.
func (m *Machine) setOFF() {
	m.prevState = DefaultState
	m.curState = DefaultState
}
// SetState sets the current state for this machine and runs its action.
// Setting the same state twice in a row is a no-op unless the machine is OFF;
// the return value reports whether the state was actually entered.
// Entering a new state resets the recorded path when recording is enabled.
func (m *Machine) SetState(state LCGState) (entered bool) {
	if m.curState != state || m.IsOFF() {
		if m.IsPathInGraphEnabled() {
			m.EnablePathInGraph() // Reset path for a new browsing
		}
		// NOTE(review): the error from enterInState (the Enter hook) is
		// discarded here — confirm callers never need it.
		m.enterInState(state)
		return true
	}
	return false
}

// GetState gets the current state for this machine.
func (m *Machine) GetState() LCGState {
	return m.curState
}
// LCGEvents is the list of events supplied when asking the machine to advance.
type LCGEvents []LCGState

// Contains reports whether state is present in the event list.
func (e LCGEvents) Contains(state LCGState) bool {
	for _, elem := range e {
		if elem == state {
			return true
		}
	}
	return false
}
// IsTriggeringState reports whether state is listed in the events, or — when
// state is the default child (isDefaultState) — whether the DefaultState
// wildcard is listed.
func (e LCGEvents) IsTriggeringState(state LCGState, isDefaultState bool) bool {
	return e.Contains(state) || (isDefaultState && e.Contains(DefaultState))
}
// enterInState makes state the current state, records it on the path, and
// runs the Enter hook when actions are configured.
// The machine is set to OFF after entering a leaf node (end of browsing).
func (m *Machine) enterInState(state LCGState) (err error) {
	// loopPathLength is 2 when we just bounced back to the previous state,
	// signalling a two-node loop to the path recorder.
	var loopPathLength uint
	if m.IsOFF() {
		// Hack to ensure that a never used machine will appear ON
		// after entering in state 0 (because all is initialized at 0 at struct creation)
		m.prevState = DefaultState
	} else {
		if m.prevState == state {
			loopPathLength = 2 // The length of the path in the loop is 2 nodes
		}
		m.prevState = m.curState
	}
	m.curState = state
	m.addStateToPath(state, loopPathLength)
	if m.Actions != nil {
		err = m.Actions.Enter(m.curState)
	}
	// Browsing is at its end (in a leaf node), thus set the machine to OFF
	if m.Graph.IsLeafNode(state) {
		m.setOFF()
	}
	return err
}
// EnterNextState walks the current state's children in priority order and
// enters the first one triggered by the supplied events; the last child acts
// as the default when DefaultState is among the events.
// It returns whether a transition happened and any error raised by the
// entered state's action. With no children it returns (false, nil).
func (m *Machine) EnterNextState(events LCGEvents) (entered bool, err error) {
	children := m.Graph[m.curState].Children
	exists, defaultChild := children.Default()
	if !exists {
		return false, nil
	}
	for _, nextState := range children {
		if events.IsTriggeringState(nextState, (nextState == defaultChild)) {
			// Enter into next state !!
			err = m.enterInState(nextState)
			return true, err
		}
	}
	return false, err
}
// EnablePathInGraph turns on (or resets) path recording. The path always
// starts with the ">" marker, and a default length limit of 512 is applied
// when none was configured.
func (m *Machine) EnablePathInGraph() {
	/*
		m.pathInGraph = make([]string, 0)
		m.appendToPath(">") // Initialize with the first element
	*/
	m.pathInGraph = []string{">"}
	if m.pathLengthLimit == 0 {
		m.pathLengthLimit = 512
	}
}

// DisablePathInGraph turns off path recording and discards the current path.
func (m *Machine) DisablePathInGraph() {
	m.pathInGraph = nil
}

// IsPathInGraphEnabled reports whether path recording is currently enabled.
func (m *Machine) IsPathInGraphEnabled() bool {
	return m.pathInGraph != nil
}
// SetPathLengthLimit defines the size of the slice storing the path in graph,
// i.e. the maximum states to store. Note that loops in graph count for 2 states.
// This slice stores also the separator (">") in addition to the states.
// If not set, the max is by default limited to 512. You can define more if needed.
// Values outside the range [5, 1024] are clamped to the nearest bound.
func (m *Machine) SetPathLengthLimit(max uint) {
	if max < 5 {
		max = 5
	} else if max > 1024 {
		max = 1024
	}
	m.pathLengthLimit = max
}
// GetPathInGraph returns the recorded path through the graph as a single
// string, or the empty string when path recording is disabled.
func (m *Machine) GetPathInGraph() string {
	// strings.Join builds the result in one allocation instead of the
	// quadratic += concatenation loop; Join(nil, "") is "".
	return strings.Join(m.pathInGraph, "")
}
// appendToPath appends elements to the recorded path, dropping the two
// oldest elements when the configured length limit is exceeded.
// NOTE(review): truncation removes the leading ">" marker along with the
// oldest state — confirm this is acceptable for path rendering.
func (m *Machine) appendToPath(elem ...string) {
	m.pathInGraph = append(m.pathInGraph, elem...)
	if len(m.pathInGraph) > int(m.pathLengthLimit) {
		m.pathInGraph = m.pathInGraph[2:]
	}
}
/*
func (m *Machine) compactAndAppendToPath(elem ...string) {
newLength := len(m.pathInGraph) - int(m.pathCompactionNeeded) - int(m.pathCompactionNeeded/2)
m.pathInGraph = m.pathInGraph[:newLength]
m.pathInGraph = append(m.pathInGraph, elem...)
}
*/
// TODO manage maximum path length to limit its size ?
// addStateToPath appends state to the recorded path, compacting consecutive
// two-state bounces into a "(A>B)xN" loop notation. loopPathLength is 2 when
// the machine just bounced back to its previous state, 0 otherwise.
// NOTE(review): the pathLen-based index arithmetic below assumes the path
// already holds enough elements; verify a short path cannot underflow.
func (m *Machine) addStateToPath(state LCGState, loopPathLength uint) {
	if m.pathInGraph == nil {
		return
	}
	stateName := m.Graph.StateName(state)
	if m.Graph.IsLeafNode(state) {
		// Leaf nodes terminate the path, so no trailing ">" is added.
		m.appendToPath(stateName)
		m.pathLoopsCount = 0
	} else {
		if loopPathLength == 0 { // There's no loop
			m.pathLoopsCount = 0
			m.appendToPath(stateName, ">")
			return
		}
		m.pathLoopsCount++
		pathLen := len(m.pathInGraph)
		// Rewrite the already-recorded loop entry/body markers in place.
		m.pathInGraph[pathLen-int(loopPathLength*2+1)] = ">("
		m.pathInGraph[pathLen-3] = "<<>>"
		// Each full loop adds two bounces, so the displayed count is half the
		// bounce counter plus the initial pass.
		count := float32(m.pathLoopsCount)/2.0 + 1.0
		//loops := fmt.Sprintf("%.1f", count)
		if float32(int(count)) == count {
			m.pathInGraph[pathLen-1] = ")x" + strconv.Itoa(int(count)) + ">"
		} else {
			// Half-finished loop: also re-emit the state we just entered.
			m.pathInGraph[pathLen-1] = ")x" + strconv.Itoa(int(count)) + ">" + stateName + ">"
		}
	}
}
package cbr
/*---FILE DESCRIPTION---
The parameter file contains structs with parameters that change how the CBR AI interprets or works with the gamestate and its cases.
This file also contains the aiData struct, which is where general CBR AI data is stored.
---FILE DESCRIPTION---*/
// cbrParameters holds the tuning values used by the CBR AI's case comparison
// and selection logic. Defaults here are hand-tuned; see CBRParameters for
// the meaning of each field.
var cbrParameters = CBRParameters{
	gameStateMaxChange:       5,    //every frame during gameplay if we check the current gamestate against the currently running case and it got this much worse, we check for a new case
	betterCaseThreshold:      0.30, //when the next case in a sequence would be played, if another case exists that is this much better, switch to that case
	topSelectionThreshold:    0.00, //when a best case is found to avoid always selecting the same case in similar situation choose a random worse. This parameter determines how much worse at most.
	maxXPositionComparison:   300,  //If X position comparisons are more than this amount shifted, similarity for them is 1
	maxYPositionComparison:   200,  //If Y position comparisons are more than this amount shifted, similarity for them is 1
	maxVelocityComparison:    10,   //The max difference in velocity before the comparison function hits max dissimilarity
	curGamestateQueLength:    12,   // how many frames in a row are stored to compare against in the comparison function. Stored in aiData.curGamestate
	maxInputBufferDifference: 30,   //How many frames of input buffering can be off before reaching max dissimilarity
	maxHitstunDifference:     10,   //how many frames of difference of being in hitstun is allowed till max dissimilarity
	maxBlockstunDifference:   10,   //how many frames of difference of being in blockstun is allowed till max dissimilarity
	maxAttackStateDiff:       20,   //how many frames of being in an attack state are allowed till max dissimilarity
	nearWallDist:             0.13, //percent of how close compared to current stage size a character has to be, to be considered near the wall
	repetitionFrames:         60,   //amount of frames after which a case was used where it will be taxed for being used again. Multiplied the more a case is used.
	comboLength:              20,
	cps: comparisonParameters{
		//parameters that determine how strongly different comparison functions are evaluated
		XRelativePosition:              1.0,
		YRelativePosition:              1.0,
		xVelocityComparison:            0.25,
		yVelocityComparison:            0.25,
		inputBufferDirection:           1.0,
		inputBufferButton:              1.0,
		airborneState:                  1.0,
		lyingDownState:                 1.0,
		hitState:                       1.0,
		blockState:                     1.0,
		attackState:                    1.0,
		nearWall:                       0.3,
		unitOrder:                      0.3,
		moveID:                         0.5,
		pressureMoveID:                 0.8,
		getHit:                         1.0,
		didHit:                         1.0,
		frameAdv:                       0.3,
		frameAdvInitiator:              0.1,
		comboSimilarity:                1.0,
		objectOrder:                    0.3,
		caseReuse:                      0.5,
		roundState:                     100.0,
		helperRelativePositionX:        0.5,
		helperRelativePositionY:        0.5,
		helperXVelocityComparison:      0.25,
		helperYVelocityComparison:      0.25,
		enemyXVelocityComparison:       0.25,
		enemyYVelocityComparison:       0.25,
		enemyAirborneState:             1.0,
		enemyLyingDownState:            1.0,
		enemyHitState:                  1.0,
		enemyBlockState:                1.0,
		enemyAttackState:               1.0,
		enemyMoveID:                    0.5,
		enemyPressureMoveID:            0.8,
		enemyHelperRelativePositionX:   1.0,
		enemyHelperRelativePositionY:   1.0,
		enemyHelperXVelocityComparison: 0.25,
		enemyHelperYVelocityComparison: 0.25,
	},
}
// aiData is the global store of all data relevant for the AI to operate.
// See CBRData.proto and CBRRawFrames.proto for the structure of the data.
var aiData = AIData{
	cbrData:   &CBRData{},
	rawFrames: &CBRRawFrames{},
	recording: false,
	replaying: false,
	//replayIndex: 0,
	//rawFrameReplay: false,
	curGamestate:      &CBRRawFrames{},
	framedata:         &Framedata{},
	aiControlledIndex: -1, // -1 means no character is currently AI-controlled
}
// AIData bundles the state of the CBR AI.
type AIData struct {
	cbrData   *CBRData      // Data storage after the AI processed a replay
	rawFrames *CBRRawFrames // Data storage of a replay before processing
	recording bool          // true while a replay is being recorded
	replaying bool          // true while the AI is replaying stored cases
	//replayFrames []*CBRData_Frame //Frames that the AI is ready to send over to the game for replaying
	//replayIndex: 0
	//rawFrameReplay bool
	curGamestate      *CBRRawFrames // rolling window of recent frames compared against cases
	framedata         *Framedata
	aiControlledIndex int // index denoting the character that the CBR AI controls
}
//parameters that are relevant for the comparison function or need to be adjusted when the comparison function is adjusted
type CBRParameters struct {
gameStateMaxChange float32
betterCaseThreshold float32
topSelectionThreshold float32
maxXPositionComparison float32
maxYPositionComparison float32
maxVelocityComparison float32
cps comparisonParameters
curGamestateQueLength int
maxInputBufferDifference int32
maxHitstunDifference int32
maxBlockstunDifference int32
maxAttackStateDiff int32
nearWallDist float32
repetitionFrames int64
comboLength float32
}
//parameters that determine how important the corresponding sub comparison functions are
type comparisonParameters struct {
XRelativePosition float32
YRelativePosition float32
xVelocityComparison float32
yVelocityComparison float32
inputBufferDirection float32
inputBufferButton float32
airborneState float32
lyingDownState float32
hitState float32
blockState float32
attackState float32
nearWall float32
unitOrder float32
moveID float32
pressureMoveID float32
getHit float32
didHit float32
frameAdv float32
frameAdvInitiator float32
comboSimilarity float32
objectOrder float32
caseReuse float32
roundState float32
helperRelativePositionX float32
helperRelativePositionY float32
helperXVelocityComparison float32
helperYVelocityComparison float32
enemyXVelocityComparison float32
enemyYVelocityComparison float32
enemyAirborneState float32
enemyLyingDownState float32
enemyHitState float32
enemyBlockState float32
enemyAttackState float32
enemyMoveID float32
enemyPressureMoveID float32
enemyHelperXVelocityComparison float32
enemyHelperYVelocityComparison float32
enemyHelperRelativePositionX float32
enemyHelperRelativePositionY float32
} | src/cbr/parameters.go | 0.630344 | 0.700088 | parameters.go | starcoder |
package main
import (
"math"
"time"
)
// CompendiumFactory generates data structures for any page of the compendium.
type CompendiumFactory struct {
	NbTop    uint           // Number of most downvoted comments
	Timezone *time.Location // Timezone of the dates
}

// NewCompendiumFactory returns a new CompendiumFactory configured from conf.
func NewCompendiumFactory(conf CompendiumConf) CompendiumFactory {
	return CompendiumFactory{
		NbTop:    conf.NbTop,
		Timezone: conf.Timezone.Value,
	}
}

// Index returns the data structure that describes the compendium's index.
// It loads the registered users, the per-user statistics and the top comments
// in a single transaction, then converts each user's dates to the factory's
// timezone.
func (cf CompendiumFactory) Index(conn StorageConn) (Compendium, error) {
	ci := Compendium{
		NbTop:    cf.NbTop,
		Timezone: cf.Timezone,
		Version:  Version,
	}
	err := conn.WithTx(func() error {
		var err error
		ci.Users, err = conn.ListRegisteredUsers()
		if err != nil {
			return err
		}
		all, negative, err := conn.CompendiumPerUser()
		if err != nil {
			return err
		}
		ci.All = all.ToView(ci.Timezone)
		// Most negative first: ascending order on the karma sum.
		ci.Negative = negative.OrderBy(func(a, b Stats) bool { return a.Sum < b.Sum }).ToView(ci.Timezone)
		ci.rawComments, err = conn.Comments(Pagination{Limit: ci.NbTop})
		return err
	})
	if err != nil {
		return ci, err
	}
	nbUsers := len(ci.Users)
	for i := 0; i < nbUsers; i++ {
		ci.Users[i] = ci.Users[i].InTimezone(cf.Timezone)
	}
	return ci, nil
}
// Comments returns paginated comments of all non-hidden users.
func (cf CompendiumFactory) Comments(conn StorageConn, page Pagination) (Compendium, error) {
	c := Compendium{
		NbTop:    page.Limit,
		Offset:   page.Offset,
		Timezone: cf.Timezone,
		Version:  Version,
	}
	var err error
	c.rawComments, err = conn.Comments(page)
	return c, err
}

// User returns a data structure that describes the compendium page for a single user.
// If the user does not exist, the returned CompendiumUser has an empty Users
// slice (detectable via Exists) and a nil error.
func (cf CompendiumFactory) User(conn StorageConn, username string) (CompendiumUser, error) {
	cu := CompendiumUser{
		Compendium: Compendium{
			NbTop:    cf.NbTop,
			Timezone: cf.Timezone,
			Version:  Version,
		},
	}
	err := conn.WithTx(func() error {
		query := conn.GetUser(username)
		if query.Error != nil {
			return query.Error
		} else if !query.Exists {
			return nil
		}
		cu.Users = []User{query.User}
		all, rawNegative, err := conn.CompendiumUserPerSub(cu.User().Name)
		if err != nil {
			return err
		}
		cu.All = all.ToView(cu.Timezone)
		cu.Summary = all.Stats().ToView(0, cu.Timezone)
		// Keep only sub-statistics whose karma sum is negative, most negative first.
		negative := rawNegative.Filter(func(s Stats) bool { return s.Sum < 0 })
		cu.Negative = negative.OrderBy(func(a, b Stats) bool { return a.Sum < b.Sum }).ToView(cu.Timezone)
		cu.SummaryNegative = negative.Stats().ToView(0, cu.Timezone)
		cu.rawComments, err = conn.UserComments(cu.User().Name, Pagination{Limit: cu.NbTop})
		return err
	})
	return cu, err
}

// UserComments returns a page of comments for a user. As with User, a missing
// user yields an empty Users slice and a nil error.
func (cf CompendiumFactory) UserComments(conn StorageConn, username string, page Pagination) (CompendiumUser, error) {
	cu := CompendiumUser{
		Compendium: Compendium{
			NbTop:    page.Limit,
			Offset:   page.Offset,
			Timezone: cf.Timezone,
			Version:  Version,
		},
	}
	err := conn.WithTx(func() error {
		query := conn.GetUser(username)
		if query.Error != nil {
			return query.Error
		} else if !query.Exists {
			return nil
		}
		cu.Users = []User{query.User}
		var err error
		cu.rawComments, err = conn.UserComments(cu.User().Name, page)
		return err
	})
	return cu, err
}
// Compendium describes the basic data of a page of the compendium.
// Specific pages may use it directly or extend it.
type Compendium struct {
	All                  []StatsView    // Statistics about every comments
	NbTop                uint           // Number of most downvoted comments
	Negative             []StatsView    // Statistics about comments with a negative score
	Offset               uint           // Offset in the rank of the comments
	Timezone             *time.Location // Timezone of the dates
	Users                []User         // Users in the compendium
	Version              SemVer         // Version of the application
	rawComments          []Comment      // raw comments; rendered lazily by Comments()
	CommentBodyConverter CommentBodyConverter
}

// CommentsLen returns the number of top comments without generating them.
func (c Compendium) CommentsLen() int {
	return len(c.rawComments)
}

// Comments generates the views for the top comments.
// Ranks are 1-based and shifted by the page offset.
func (c Compendium) Comments() []CommentView {
	offset := uint64(c.Offset)
	views := make([]CommentView, 0, len(c.rawComments))
	for i, comment := range c.rawComments {
		view := comment.ToView(uint64(i+1)+offset, c.Timezone, c.CommentBodyConverter)
		views = append(views, view)
	}
	return views
}

// HiddenUsersLen returns the number of hidden users.
func (c Compendium) HiddenUsersLen() int {
	var nb int
	for _, user := range c.Users {
		if user.Hidden {
			nb++
		}
	}
	return nb
}

// NextOffset returns the offset for the next page.
func (c Compendium) NextOffset() uint {
	return c.NbTop + c.Offset
}

// UsualScanDuration returns the shortest amount of time the bot can do the
// most frequent type of scan (active users only): one Reddit API request wait
// per non-inactive user.
func (c Compendium) UsualScanDuration() time.Duration {
	var count int
	for _, user := range c.Users {
		if !user.Inactive {
			count++
		}
	}
	return time.Duration(count) * RedditAPIRequestWait
}

// CompendiumUser describes the compendium page for a single user.
type CompendiumUser struct {
	Compendium
	Summary         StatsView // Statistics summarizing the user's activity
	SummaryNegative StatsView // Statistics summarizing the user's activity based only on comments with a negative score
}

// Exists tells if the user exists.
func (cu CompendiumUser) Exists() bool {
	return len(cu.Users) > 0
}

// User returns the single User being described.
// It panics if Users is empty; callers should check Exists first.
func (cu CompendiumUser) User() User {
	return cu.Users[0]
}

// PercentageNegative returns the rounded percentage of comments in the negatives.
func (cu CompendiumUser) PercentageNegative() int64 {
	if cu.Summary.Count == 0 || cu.SummaryNegative.Count == 0 {
		return 0
	}
	return int64(math.Round(100 * float64(cu.SummaryNegative.Count) / float64(cu.Summary.Count)))
} | compendium.go | 0.692018 | 0.454593 | compendium.go | starcoder |
package main
import (
"bufio"
"fmt"
"log"
"math"
"os"
"sort"
"strings"
)
// ANSI terminal escape codes used to colorize the printed displays.
const Reset = "\033[0m"
const Red = "\033[31m"
const Green = "\033[32m"
const Yellow = "\033[33m"

// main loads the seven-segment puzzle input from input.txt and prints the
// answers to both parts.
func main() {
	patterns, outputs, err := Loader("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	_ = patterns
	_ = outputs
	fmt.Printf("Part 1: digits 1, 4, 7, or 8 appeared %d times\n", Part1(outputs))
	fmt.Printf("Part 2: Sum: %d\n", Part2(patterns, outputs))
}

// Part2 decodes each display: it deduces the wire-to-segment mapping from the
// ten observed patterns, renders the four output digits, combines them into a
// four-digit number, and returns the sum of those numbers over all displays.
func Part2(patterns [][]string, outputs [][]string) (count int) {
	for i, pattern := range patterns {
		segments := SecondPass(FirstPass(pattern))
		output := outputs[i]
		numericalValue := 0
		for pow, digit := range output {
			outInt := render(digit, segments)
			fmt.Printf("%v ", outInt)
			// Digits arrive most-significant first: position pow contributes
			// its value times 10^(3-pow).
			numericalValue += outInt * int(math.Pow(10, float64(3-pow)))
		}
		fmt.Printf("N=%d\n", numericalValue)
		count += numericalValue
	}
	return count
}
// FirstPass sorts the observed patterns by segment count: lengths 2, 4, 3 and
// 7 uniquely identify digits 1, 4, 7 and 8 and are stored in knownDigits,
// while the ambiguous five-segment patterns (2, 3 or 5) are collected in
// unkLength5 for later disambiguation. Six-segment patterns are ignored.
func FirstPass(patternsOberved []string) (knownDigits map[int]string, unkLength5 []string) {
	digitForLength := map[int]int{2: 1, 4: 4, 3: 7, 7: 8}
	knownDigits = make(map[int]string)
	for _, pattern := range patternsOberved {
		if digit, ok := digitForLength[len(pattern)]; ok {
			knownDigits[digit] = pattern
		} else if len(pattern) == 5 {
			unkLength5 = append(unkLength5, pattern)
		}
	}
	return knownDigits, unkLength5
}
// SecondPass deduces the full wire-to-segment mapping from the uniquely-sized
// digits (1, 4, 7, 8) and the three five-segment patterns. It works with
// "uncertainty regions" — pairs of wires known to map to one of two segments —
// and disambiguates them via the five-segment digits. It mutates knownDigits
// by adding entries for digits 2, 3 and 5. The returned map translates an
// observed wire letter to the real segment letter (a-g).
func SecondPass(knownDigits map[int]string, unkLength5 []string) map[string]string {
	segment := make(map[string]string)

	// Region I: the two wires of digit 1 (resolved to segments c/f below).
	regionI := UncertaintyRegion{}
	regionI.add(string(knownDigits[1][0]))
	regionI.add(string(knownDigits[1][1]))
	// fmt.Printf("regionI: %v ", regionI)

	// Region II: the wires of digit 4 not used by digit 1 (resolved to b/d).
	regionII := UncertaintyRegion{}
	for _, letter := range knownDigits[4] {
		if !regionI.contains(string(letter)) {
			regionII.add(string(letter))
		}
	}
	// fmt.Printf("regionII: %v ", regionII)

	// Segment a is the one wire of digit 7 that digit 1 does not use.
	for _, letter := range knownDigits[7] {
		if !regionI.contains(string(letter)) && !regionII.contains(string(letter)) {
			segment["a"] = string(letter)
			break
		}
	}
	// fmt.Printf("Mapped Segment A: %v ", segment["a"])

	// Region III: the remaining wires of digit 8 (resolved to e/g below).
	regionIII := UncertaintyRegion{}
	for _, letter := range knownDigits[8] {
		if !regionI.contains(string(letter)) && !regionII.contains(string(letter)) && string(letter) != segment["a"] {
			regionIII.add(string(letter))
		}
	}
	// fmt.Printf("regionIII: %v ", regionIII)

	// Classify the five-segment patterns: 3 is the one using both region-I
	// wires, 2 the one using both region-III wires, 5 the remaining one.
	for _, pattern := range unkLength5 {
		regionCount := make([]int, 3)
		for _, letter := range pattern {
			if regionI.contains(string(letter)) {
				regionCount[0]++
			} else if regionII.contains(string(letter)) {
				regionCount[1]++
			} else if regionIII.contains(string(letter)) {
				regionCount[2]++
			}
		}
		if regionCount[0] == 2 {
			knownDigits[3] = pattern
		} else if regionCount[2] == 2 {
			knownDigits[2] = pattern
		} else {
			knownDigits[5] = pattern
		}
	}

	// Disambiguate Regions II and III using 3
	for _, letter := range knownDigits[3] {
		if regionII.contains(string(letter)) {
			segment["d"] = string(letter)
			segment["b"] = regionII.getOther(string(letter))
		}
		if regionIII.contains(string(letter)) {
			segment["g"] = string(letter)
			segment["e"] = regionIII.getOther(string(letter))
		}
	}

	// Disambiguate Region I using 2
	for _, letter := range knownDigits[2] {
		if regionI.contains(string(letter)) {
			segment["c"] = string(letter)
			segment["f"] = regionI.getOther(string(letter))
		}
	}

	// Invert: segment was built as real-segment -> wire, but render needs
	// wire -> real-segment.
	segmentInverted := make(map[string]string)
	for k, v := range segment {
		segmentInverted[v] = k
	}
	// fmt.Printf("\nsegment: %v \n inverted: %v", segment, segmentInverted)
	return segmentInverted
}
// render translates each observed wire letter through segmentMap, sorts the
// resulting segment letters, and looks up which digit (0-9) lights exactly
// that set of segments. It returns -1 when the set matches no digit.
func render(observedPattern string, segmentMap map[string]string) int {
	segments := make([]string, 0, len(observedPattern))
	for _, wire := range observedPattern {
		segments = append(segments, segmentMap[string(wire)])
	}
	sort.Strings(segments)

	digitBySegments := map[string]int{
		"abcefg":  0,
		"cf":      1,
		"acdeg":   2,
		"acdfg":   3,
		"bcdf":    4,
		"abdfg":   5,
		"abdefg":  6,
		"acf":     7,
		"abcdefg": 8,
		"abcdfg":  9,
	}
	if digit, ok := digitBySegments[strings.Join(segments, "")]; ok {
		return digit
	}
	return -1
}
// Part1 prints every output digit, highlighting in green those whose segment
// count uniquely identifies a digit (1, 4, 7 or 8), and returns how many such
// digits appeared across all displays.
func Part1(outputs [][]string) (count int) {
	for _, display := range outputs {
		for _, digit := range display {
			if isUnique(digit) > 0 {
				fmt.Printf("%s%v%s ", Green, digit, Reset)
				count++
			} else {
				fmt.Printf("%v ", digit)
			}
		}
		fmt.Printf("\n")
	}
	return count
}
// isUnique reports which digit a pattern must represent based purely on its
// segment count: lengths 2, 4, 3 and 7 uniquely identify digits 1, 4, 7 and 8.
// Any other length is ambiguous and yields -1.
func isUnique(s string) int {
	digitForLength := map[int]int{
		2: 1,
		4: 4,
		3: 7,
		7: 8,
	}
	if digit, ok := digitForLength[len(s)]; ok {
		return digit
	}
	return -1
}
// Loader reads the puzzle input: one display per line, with the ten observed
// patterns and the four output digits separated by '|'. It returns the
// whitespace-split pattern and output fields for each line.
//
// Fixes over the previous version: scanner.Err() is now checked (read errors
// were silently ignored), and a line without '|' returns an error instead of
// panicking on an out-of-range index.
func Loader(filename string) (patterns [][]string, outputs [][]string, err error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, nil, err
	}
	defer f.Close()

	patterns = make([][]string, 0)
	outputs = make([][]string, 0)
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		parts := strings.SplitN(line, "|", 2)
		if len(parts) != 2 {
			return nil, nil, fmt.Errorf("malformed input line (missing '|'): %q", line)
		}
		patterns = append(patterns, strings.Fields(parts[0]))
		outputs = append(outputs, strings.Fields(parts[1]))
	}
	if err = scanner.Err(); err != nil {
		return nil, nil, err
	}
	return patterns, outputs, nil
}
// UncertaintyRegion holds a pair of candidate wire letters that are known to
// map to one of two segments but have not yet been disambiguated.
type UncertaintyRegion struct {
	item1 string
	item2 string
}

// add stores s in the first free slot: item1 if it is still empty, item2
// otherwise. Adding a third item silently overwrites item2.
func (r *UncertaintyRegion) add(s string) {
	if len(r.item1) == 0 {
		r.item1 = s
	} else {
		r.item2 = s
	}
}
// contains reports whether s matches either of the region's two items.
// Idiom fix: the boolean result is returned directly instead of via an
// if/return-true/return-false chain.
func (r UncertaintyRegion) contains(s string) bool {
	return r.item1 == s || r.item2 == s
}
// getOther returns the region's other item: item2 if s equals item1, and
// item1 otherwise (including when s matches neither item).
func (r UncertaintyRegion) getOther(s string) string {
	if r.item1 == s {
		return r.item2
	}
	return r.item1
} | day8/day8.go | 0.522446 | 0.420778 | day8.go | starcoder |
package sql
import (
"fmt"
"strings"
)
// Data pairs a SQL text fragment with its bound values. Implementations are
// treated as immutable: every operation returns a new Data.
type Data interface {
	Append(other Data) Data
	AppendWithSpace(other Data) Data
	SurroundAppend(l, r string, other Data) Data
	String() string
	Values() []interface{}
}

// Empty can be used before a for loop to initialize your Data.
func Empty() Data { return empty }

// empty is the shared zero Data instance returned by Empty.
var empty = stringData("")
// New builds a Data from a SQL string and its bound values, collapsing to the
// cheapest representation: Empty when both parts are empty, a values-only or
// text-only Data when one part is empty, and a full pair otherwise.
func New(s string, values []interface{}) Data {
	switch {
	case len(s) == 0 && len(values) == 0:
		return Empty()
	case len(s) == 0:
		return Values(values)
	case len(values) == 0:
		return String(s)
	default:
		return data{
			str:    s,
			values: values,
		}
	}
}
// Newf is a convenience wrapper around New that builds the SQL text with
// fmt.Sprintf.
func Newf(values []interface{}, format string, a ...interface{}) Data {
	return New(fmt.Sprintf(format, a...), values)
}

// String returns a Data carrying only SQL text and no bound values.
func String(s string) Data {
	if s == "" {
		return Empty()
	}
	return stringData(s)
}

// Format builds a text-only Data with fmt.Sprintf.
func Format(format string, a ...interface{}) Data {
	return String(fmt.Sprintf(format, a...))
}

// Values returns a Data carrying only bound values and no SQL text.
func Values(values []interface{}) Data {
	if len(values) == 0 {
		return Empty()
	}
	return valuesData(values)
}

// Surround wraps d's SQL text with l and r, keeping its values.
func Surround(l, r string, d Data) Data {
	// Point to the same array. Data should be immutable - minus random slice shenanigans.
	return New(l+d.String()+r, d.Values())
}

// Join will combine the Data with each other with sep in between each String value.
// It will not append empty String values, but will append the Values, if any.
func Join(sep string, d []Data) Data {
	// Determine final lengths first
	var sLen int
	var vLen int
	for idx := range d {
		sLen += len(d[idx].String())
		vLen += len(d[idx].Values())
	}

	// Setup result buffers
	var v = make([]interface{}, 0, vLen)
	var s strings.Builder
	// Grow to the combined text length, plus the required number of separators.
	// Note that this can be more than what needs to be allocated due to possible empty strings
	sepCount := sLen + len(sep)*(len(d)-1)
	if sepCount > 0 {
		s.Grow(sepCount)
	}

	// Build result buffers
	for idx := range d {
		str := d[idx].String()
		// Only add if we have data, if not, its just empty, might have values still though.
		if len(str) > 0 {
			// Only add the separator if the str is non-empty & the result buffer already has other data.
			// We do not want to put the separator at the beginning of it.
			if s.Len() > 0 {
				_, _ = s.WriteString(sep)
			}
			_, _ = s.WriteString(str)
		}
		// Ensure we do not append nils
		if values := d[idx].Values(); len(values) > 0 {
			// Ensure to expand both of these! Without, it will put all the Data
			// values (for one Data) into a single top level entry.
			// Example if you dont expand:
			// []interface{}{ []interface{}{data1for1, data2for1}, []interface{}{data1for2} }
			v = append(v, values...)
		}
	}

	return data{
		values: v,
		str:    s.String(),
	}
}
const (
	// space separates the two SQL fragments in AppendWithSpace.
	space = " "
)

// TODO - Data might be able to be replaced with some sort of linked
// list type construct. Could be more useful than copying strings
// and backed value slices.

// data is the general Data implementation carrying both SQL text and values.
type data struct {
	str    string
	values []interface{}
}

func (d data) String() string {
	return d.str
}

func (d data) Values() []interface{} {
	return d.values
}

// Append concatenates the SQL text and bound values of both Data.
func (d data) Append(other Data) Data {
	return New(
		d.str+other.String(),
		copyValues(d.values, other.Values()),
	)
}

// AppendWithSpace concatenates with a single space between the two SQL texts.
func (d data) AppendWithSpace(other Data) Data {
	return New(
		d.str+space+other.String(),
		copyValues(d.values, other.Values()),
	)
}

// SurroundAppend appends other's SQL text wrapped in l and r.
func (d data) SurroundAppend(l, r string, other Data) Data {
	return New(
		d.str+l+other.String()+r,
		copyValues(d.values, other.Values()),
	)
}

// stringData is a Data that carries SQL text only.
type stringData string

func (d stringData) Append(other Data) Data {
	s := string(d) + other.String()
	return New(s, other.Values())
}

func (d stringData) AppendWithSpace(other Data) Data {
	s := string(d) + space + other.String()
	return New(s, other.Values())
}

func (d stringData) SurroundAppend(l, r string, other Data) Data {
	s := string(d) + l + other.String() + r
	return New(s, other.Values())
}

func (d stringData) String() string      { return string(d) }
func (stringData) Values() []interface{} { return nil }

// valuesData is a Data that carries bound values only.
type valuesData []interface{}

func (v valuesData) Append(other Data) Data {
	return New(other.String(), copyValues(v, other.Values()))
}

func (v valuesData) AppendWithSpace(other Data) Data {
	return New(space+other.String(), copyValues(v, other.Values()))
}

func (v valuesData) SurroundAppend(l, r string, other Data) Data {
	return New(l+other.String()+r, copyValues(v, other.Values()))
}

func (valuesData) String() string          { return "" }
func (v valuesData) Values() []interface{} { return v }

// copyValues creates a new backing slice and copies the values of both slices into it.
func copyValues(v1, v2 []interface{}) []interface{} {
	lv1 := len(v1)
	v := make([]interface{}, lv1+len(v2))
	copy(v, v1)
	copy(v[lv1:], v2)
	return v
} | sql/data.go | 0.609873 | 0.599866 | data.go | starcoder |
package tiff
import (
"encoding/json"
"fmt"
)
/*
Entry structure
For IFD/Entry:
Each 12-byte IFD entry has the following format:
Bytes 0-1: The Tag that identifies the entry.
Bytes 2-3: The entry Type.
Bytes 4-7: The number of values, Count of the indicated Type.
Bytes 8-11: The Value Offset, the file offset (in bytes) of the Value
for the entry. The Value is expected to begin on a word
boundary; the corresponding Value Offset will thus be an
even number. This file offset may point anywhere in the
file, even after the image data.
*/
// Entry represents a single entry in an IFD in a TIFF file. This is the mostly
// uninterpreted core 12 byte data structure only.
type Entry interface {
	TagID() uint16
	TypeID() uint16
	Count() uint32
	ValueOffset() [4]byte
}

// entry represents the data structure of an IFD entry.
type entry struct {
	tagID       uint16  // Bytes 0-1
	typeID      uint16  // Bytes 2-3
	count       uint32  // Bytes 4-7
	valueOffset [4]byte // Bytes 8-11
}

// TagID returns the tag that identifies the entry.
func (e *entry) TagID() uint16 {
	return e.tagID
}

// TypeID returns the entry's type.
func (e *entry) TypeID() uint16 {
	return e.typeID
}

// Count returns the number of values of the indicated type.
func (e *entry) Count() uint32 {
	return e.count
}

// ValueOffset returns the raw 4-byte value/offset field, uninterpreted.
func (e *entry) ValueOffset() [4]byte {
	return e.valueOffset
}

// String implements fmt.Stringer for debug output.
func (e *entry) String() string {
	return fmt.Sprintf("<TagID: %5d, TypeID: %5d, Count: %d, ValueOffset: %v>", e.tagID, e.typeID, e.count, e.valueOffset)
}

// MarshalJSON exposes the unexported fields through a temporary tagged struct.
func (e *entry) MarshalJSON() ([]byte, error) {
	tmp := struct {
		Tag         uint16  `json:"tagID"`
		Type        uint16  `json:"typeID"`
		Count       uint32  `json:"count"`
		ValueOffset [4]byte `json:"valueOffset"`
	}{
		Tag:         e.tagID,
		Type:        e.typeID,
		Count:       e.count,
		ValueOffset: e.valueOffset,
	}
	return json.Marshal(tmp)
}

// ParseEntry reads the four fields of a 12-byte IFD entry from br in order.
// On a read error the bare return yields a nil Entry and that error.
func ParseEntry(br BReader) (out Entry, err error) {
	e := new(entry)
	if err = br.BRead(&e.tagID); err != nil {
		return
	}
	if err = br.BRead(&e.typeID); err != nil {
		return
	}
	if err = br.BRead(&e.count); err != nil {
		return
	}
	if err = br.BRead(&e.valueOffset); err != nil {
		return
	}
	return e, nil
} | entry.go | 0.578686 | 0.460592 | entry.go | starcoder |
package pipe
import (
"fmt"
"time"
"github.com/dudk/phono"
)
type (
	// measurable identifies an entity with measurable metrics.
	// Each measurable can have multiple counters.
	// If custom metrics will be needed, it can be exposed in the future.
	measurable interface {
		Reset()
		Measure() Measure
		FinishMeasure()
		Counter(string) *Counter
		Latency()
	}

	// metric represents measures of pipe components. It is the mutable,
	// internal counterpart of the read-only Measure snapshot.
	metric struct {
		ID string
		phono.SampleRate
		Counters       map[string]*Counter
		start          time.Time     // set by Reset
		elapsed        time.Duration // frozen by FinishMeasure
		latencyMeasure time.Time     // reference point for the next Latency call
		latency        time.Duration // time between the two most recent Latency calls
	}

	// Measure represents metric values at certain moment of time.
	// It should be used to transfer metrics values through pipe and avoid data races with counters.
	Measure struct {
		ID string
		phono.SampleRate
		Counters map[string]Counter
		Start    time.Time
		Elapsed  time.Duration
		Latency  time.Duration
	}

	// Counter counts messages and samples.
	// Duration is not zero only in context of measure.
	Counter struct {
		messages int64
		samples  int64
		duration time.Duration
	}
)

// newMetric creates a new metric for the component identified by id, with one
// fresh Counter per requested key.
func newMetric(id string, sampleRate phono.SampleRate, keys ...string) *metric {
	m := &metric{
		ID:         id,
		SampleRate: sampleRate,
		Counters:   make(map[string]*Counter),
	}
	m.AddCounters(keys...)
	return m
}

// Reset restarts the measurement clocks and zeroes all counters.
func (m *metric) Reset() {
	m.start = time.Now()
	m.latencyMeasure = time.Now()
	for key := range m.Counters {
		m.Counters[key].Reset()
	}
}

// FinishMeasure finalizes metric values by freezing the elapsed time.
func (m *metric) FinishMeasure() {
	m.elapsed = time.Since(m.start)
}

// Counter returns the counter for the specified key, or nil if it was never added.
func (m *metric) Counter(key string) *Counter {
	return m.Counters[key]
}

// AddCounters adds fresh counters to the metric under the given keys.
func (m *metric) AddCounters(keys ...string) {
	for _, measure := range keys {
		m.Counters[measure] = &Counter{}
	}
}

// Latency records the time since the previous Latency (or Reset) call and
// restarts the latency clock.
func (m *metric) Latency() {
	m.latency = time.Since(m.latencyMeasure)
	m.latencyMeasure = time.Now()
}

// String returns string representation of Metrics.
func (m Measure) String() string {
	return fmt.Sprintf("SampleRate: %v Started: %v Elapsed: %v Latency: %v Counters: %v", m.SampleRate, m.Start, m.Elapsed, m.Latency, m.Counters)
}

// Measure returns a snapshot of the metric. A nil metric yields the zero
// Measure. While the measurement is still running (FinishMeasure not yet
// called), Elapsed reports the time elapsed so far.
func (m *metric) Measure() Measure {
	if m == nil {
		return Measure{}
	}
	elapsed := m.elapsed
	if !m.start.IsZero() && elapsed == 0 {
		elapsed = time.Since(m.start)
	}
	measure := Measure{
		ID:         m.ID,
		SampleRate: m.SampleRate,
		Start:      m.start,
		Elapsed:    elapsed,
		Latency:    m.latency,
		Counters:   make(map[string]Counter),
	}
	for key, counter := range m.Counters {
		// Copy the counter and derive its duration from the sample count.
		c := *counter
		c.duration = m.SampleRate.DurationOf(counter.samples)
		measure.Counters[key] = c
	}
	return measure
}

// Advance increments the message count and adds the buffer size to the
// sample count.
func (c *Counter) Advance(buf phono.Buffer) {
	c.messages++
	c.samples = c.samples + int64(buf.Size())
}

// Reset resets counter's metrics.
func (c *Counter) Reset() {
	c.messages, c.samples = 0, 0
}

// Count returns messages and samples metrics.
func (c *Counter) Count() (int64, int64) {
	return c.messages, c.samples
}

// Messages returns messages metrics.
func (c *Counter) Messages() int64 {
	return c.messages
}

// Samples returns samples metrics.
func (c *Counter) Samples() int64 {
	return c.samples
}

// Duration returns duration of counted samples (only set on snapshots
// produced by metric.Measure).
func (c *Counter) Duration() time.Duration {
	return c.duration
}

// String representation of Counter.
func (c Counter) String() string {
	return fmt.Sprintf("Messages: %v Samples: %v Duration: %v", c.messages, c.samples, c.duration)
} | pipe/metric.go | 0.794664 | 0.412885 | metric.go | starcoder |
package cmd
import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
	// queryCreateCmd represents the lql create command
	queryCreateCmd = &cobra.Command{
		Use:   "create",
		Short: "Create a query",
		Long: `
There are multiple ways you can create a query:
* Typing the query into your default editor (via $EDITOR)
* Piping a query to the Lacework CLI command (via $STDIN)
* From a local file on disk using the flag '--file'
* From a URL using the flag '--url'
There are also multiple formats you can use to define a query:
* Javascript Object Notation (JSON)
* YAML Ain't Markup Language (YAML)
To launch your default editor and create a new query.
lacework lql create
The following example comes from Lacework's implementation of a policy query:
---
evaluatorId: Cloudtrail
queryId: LW_Global_AWS_CTA_AccessKeyDeleted
queryText: |-
LW_Global_AWS_CTA_AccessKeyDeleted {
source {
CloudTrailRawEvents
}
filter {
EVENT_SOURCE = 'iam.amazonaws.com'
and EVENT_NAME = 'DeleteAccessKey'
and ERROR_CODE is null
}
return distinct {
INSERT_ID,
INSERT_TIME,
EVENT_TIME,
EVENT
}
}
Identifier of the query that executes while running the policy
This query specifies an identifier named 'LW_Global_AWS_CTA_AccessKeyDeleted'.
Policy evaluation uses this dataset (along with the filters) to identify AWS
CloudTrail events that signify that an IAM access key was deleted. The query
is delimited by '{ }' and contains three sections:
* Source data is specified in the 'source' clause. The source of data is the
'CloudTrailRawEvents' dataset. LQL queries generally refer to other datasets,
and customizable policies always target a suitable dataset.
* Records of interest are specified by the 'filter' clause. In the example, the
records available in 'CloudTrailRawEvents' are filtered for those whose source
is 'iam.amazonaws.com', whose event name is 'DeleteAccessKey', and that do not
have any error code. The syntax for this filtering expression strongly resembles SQL.
* The fields this query exposes are listed in the 'return' clause. Because there
may be unwanted duplicates among result records when Lacework composes them from
just these four columns, the distinct modifier is added. This behaves like a SQL
'SELECT DISTINCT'. Each returned column in this case is just a field that is present
in 'CloudTrailRawEvents', but we can compose results by manipulating strings, dates,
JSON and numbers as well.
The resulting dataset is shaped like a table. The table's columns are named with the
names of the columns selected. If desired, you could alias them to other names as well.
For more information about LQL, visit:
https://docs.lacework.com/lql-overview
`,
		Args: cobra.NoArgs,
		RunE: createQuery,
	}
)

// init registers the create sub-command under the query command and attaches
// the shared query-source flags (file/URL input).
func init() {
	// add sub-commands to the lql command
	queryCmd.AddCommand(queryCreateCmd)
	setQuerySourceFlags(queryCreateCmd)
}

// createQuery reads a query definition (editor, stdin, file or URL), parses
// it, creates it through the v2 API, and reports the new query ID (or the raw
// API response when JSON output is enabled).
func createQuery(cmd *cobra.Command, args []string) error {
	msg := "unable to create query"

	// input query
	queryString, err := inputQuery(cmd)
	if err != nil {
		return errors.Wrap(err, msg)
	}

	// parse query
	newQuery, err := parseQuery(queryString)
	if err != nil {
		return errors.Wrap(err, msg)
	}

	cli.Log.Debugw("creating query", "query", queryString)
	cli.StartProgress(" Creating query...")
	create, err := cli.LwApi.V2.Query.Create(newQuery)
	cli.StopProgress()
	if err != nil {
		return errors.Wrap(err, msg)
	}

	if cli.JSONOutput() {
		return cli.OutputJSON(create.Data)
	}

	cli.OutputHuman("The query %s was created.\n", create.Data.QueryID)
	return nil
} | cli/cmd/lql_create.go | 0.827654 | 0.40987 | lql_create.go | starcoder |
package keras
import (
"fmt"
"time"
)
// Model is the architecture used to implement a neural network: the ordered
// layer stack plus everything configured by Compile and produced by Train.
type Model struct {
	ConvLayers []Layer // ordered layer stack
	Name       string
	Optimizer  Optimizer                          // set by Compile
	LossFunc   func([]float64, []float64) float64 // loss function, set by Compile
	LossValues []float64                          // loss recorded for every training step
	Duration   time.Duration                      // wall-clock duration of the last Train call
	Settings   []Metrics                          // metrics tracked during training, set by Compile
	TrainDataX []float64
	TrainDataY []float64
	Callbacks  []Callback
	Training   bool
	LearningRate float64
	TrainingLog  TrainingLog
}

// TrainingLog holds the model's training log lines.
type TrainingLog []string

// Metrics is implemented by anything that can score predictions against
// targets (Measure) and name itself.
type Metrics interface {
	Measure([]float64, []float64) float64
	Name() string
}

// Optimizer interface requires an ApplyGradients function. Pass it to the model compilation.
type Optimizer interface {
	ApplyGradients()
}

// Sequential returns a model with the given layers and name; everything else
// is configured later via Compile.
func Sequential(layers []Layer, name string) *Model {
	return &Model{ConvLayers: layers, Name: name}
}
// Add appends a layer to the end of the model architecture and returns the
// model so calls can be chained.
//
// Bug fix: the previous implementation assigned to
// m.ConvLayers[len(m.ConvLayers)], which always indexes one past the end of
// the slice and panics; append is the correct way to grow the slice.
func (m *Model) Add(layer Layer) *Model {
	m.ConvLayers = append(m.ConvLayers, layer)
	return m
}
// GetLayerByIndex returns the ith layer. The index is not bounds-checked.
func (m *Model) GetLayerByIndex(index int) Layer {
	return m.ConvLayers[index]
}

// GetMetricsByIndex returns the metric at the given index. Not bounds-checked.
func (m *Model) GetMetricsByIndex(index int) Metrics {
	return m.Settings[index]
}
// GetLayerByName returns the layer with the given name, or nil if no layer
// matches.
//
// Fix: the previous implementation fell back to m.ConvLayers[0] on a miss,
// silently returning the wrong layer (and panicking on an empty model);
// returning the interface zero value makes a miss detectable by the caller.
func (m *Model) GetLayerByName(name string) Layer {
	for i := range m.ConvLayers {
		if m.ConvLayers[i].Name() == name {
			return m.ConvLayers[i]
		}
	}
	return nil
}
// Compile wires the model for training by setting the optimizer, the loss
// function and the metrics to track.
func (m *Model) Compile(optimizer Optimizer, loss func([]float64, []float64) float64, ms []Metrics) {
	m.Optimizer = optimizer
	m.LossFunc = loss
	m.Settings = ms
}
// Predict runs a forward pass through the layer stack and returns the output
// of the final layer (nil when the model has no layers).
//
// Bug fix: the previous implementation also called m.ConvLayers[i+1].Call()
// inside the loop, which indexed one past the end of the slice on the last
// iteration and panicked before the early return could fire.
//
// NOTE(review): the values argument is currently unused because Layer.Call()
// takes no input — confirm how inputs are meant to be fed to the first layer.
func (m *Model) Predict(values []float64) []float64 {
	var outputs []float64
	for i := range m.ConvLayers {
		outputs = m.ConvLayers[i].Call()
	}
	return outputs
}
// Train trains the model on trainX/trainY for the given number of epochs,
// printing the running average loss each epoch and the total training
// duration at the end. It returns a map from each configured metric's name to
// its value measured after the final update.
//
// Bug fix: the loop previously ran `for i := 1; i < epochs`, training one
// epoch fewer than requested; it now runs exactly `epochs` epochs.
//
// NOTE(review): the printed average is taken over all loss values accumulated
// since the model was created, not just the current epoch — confirm intent.
func (m *Model) Train(trainX, trainY []float64, epochs int) map[string]float64 {
	startTime := time.Now()
	metricsValues := make(map[string]float64, len(m.Settings))
	for i := 1; i <= epochs; i++ {
		for j := 0; j < len(trainX); j++ {
			lossValue := m.LossFunc(m.Predict(trainX), trainY)
			m.LossValues = append(m.LossValues, lossValue)
			m.Optimizer.ApplyGradients()
		}
		avg := meanValue(m.LossValues)
		for _, met := range m.Settings {
			metricsValues[met.Name()] = met.Measure(m.Predict(trainX), trainY)
		}
		fmt.Printf("Epoch: %d Loss:%.4f\n", i, avg)
	}
	m.Duration = time.Since(startTime)
	fmt.Printf("Training duration: %s\n", m.Duration.String())
	return metricsValues
}
// Summary prints each layer's name and trainable-parameter count, followed by
// the total number of trainable parameters.
func (m *Model) Summary() {
	var sum int
	for i := range m.ConvLayers {
		tp := m.ConvLayers[i].TrainableParameters()
		sum += tp
		fmt.Printf("name: %s trainable parameters: %d\n", m.ConvLayers[i].Name(), tp)
	}
	fmt.Println("Trainable parameters: ", sum)
} | keras/keras.go | 0.809728 | 0.408808 | keras.go | starcoder |
package domain
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// ShortDate represents a date without a time, i.e. the time is always zero.
type ShortDate struct {
	time.Time
}

// NewShortDate creates a ShortDate from a given time.
func NewShortDate(date time.Time) ShortDate {
	return Date(date.Year(), date.Month(), date.Day(), date.Location())
}

// Date returns a new ShortDate for the given year, month and day.
//
// NOTE(review): the location parameter is accepted but ignored — the date is
// always constructed in time.UTC. Confirm whether this UTC normalization is
// intentional before relying on the parameter.
func Date(year int, month time.Month, day int, location *time.Location) ShortDate {
	return ShortDate{time.Date(year, month, day, 0, 0, 0, 0, time.UTC)}
}

// MarshalJSON marshals the date into a JSON string: "YYYY-MM-DD", or "" for
// the zero date.
func (s *ShortDate) MarshalJSON() ([]byte, error) {
	if s.IsZero() {
		return json.Marshal("")
	}
	return json.Marshal(s.Format("2006-01-02"))
}
// UnmarshalJSON unmarshals the JSON representation into a date.
//
// Fix: MarshalJSON encodes the zero date as "", but the previous
// implementation returned a parse error for that value, so a marshalled zero
// date could not be round-tripped. An empty (or unquotable) value now yields
// the zero date and no error.
func (s *ShortDate) UnmarshalJSON(data []byte) error {
	unquotedData, _ := strconv.Unquote(string(data))
	if unquotedData == "" {
		s.Time = time.Time{}
		return nil
	}
	parsed, err := time.Parse("2006-01-02", unquotedData)
	s.Time = parsed
	return err
}
// String formats the date as YYYY-MM-DD.
func (s *ShortDate) String() string {
	return s.Format("2006-01-02")
}

// MarshalText marshals the date into its YYYY-MM-DD byte representation.
func (s *ShortDate) MarshalText() ([]byte, error) {
	return []byte(s.Format("2006-01-02")), nil
}

// UnmarshalText parses a YYYY-MM-DD byte representation into a ShortDate.
func (s *ShortDate) UnmarshalText(text []byte) error {
	time, err := time.Parse("2006-01-02", string(text))
	if err != nil {
		return err
	}
	*s = ShortDate{time}
	return nil
}

// Timeframe represents a date range.
type Timeframe struct {
	StartDate ShortDate
	EndDate   ShortDate
}

// TimeframeFromDate returns a Timeframe with the StartDate set to date and the EndDate set to today.
// The EndDate will use the same timezone location as provided in StartDate.
func TimeframeFromDate(date ShortDate) Timeframe {
	endDate := NewShortDate(time.Now().In(date.Location()))
	return Timeframe{date, endDate}
}
// TimeframeFromQuery parses a timeframe from URL query parameters. The param
// keys are expected to be `from` for the StartDate and `to` for the EndDate,
// both in YYYYMMDD format.
//
// Fixes: error strings follow Go convention (lowercase), and a parse failure
// now reports which value was malformed instead of a generic message.
func TimeframeFromQuery(params url.Values) (Timeframe, error) {
	from := params.Get("from")
	to := params.Get("to")
	if from == "" || to == "" {
		return Timeframe{}, fmt.Errorf("'from' and/or 'to' must be set")
	}
	startTime, err := time.Parse("20060102", from)
	if err != nil {
		return Timeframe{}, fmt.Errorf("malformed 'from' parameter %q: %v", from, err)
	}
	endTime, err := time.Parse("20060102", to)
	if err != nil {
		return Timeframe{}, fmt.Errorf("malformed 'to' parameter %q: %v", to, err)
	}
	return Timeframe{StartDate: ShortDate{startTime}, EndDate: ShortDate{endTime}}, nil
}
// ToQuery transforms a timeframe to a query. The param keys are
// `from` for the StartDate and `to` for the EndDate, both rendered in
// compact YYYYMMDD form (the inverse of TimeframeFromQuery).
func (tf *Timeframe) ToQuery() url.Values {
	return url.Values{
		"from": []string{tf.StartDate.Format("20060102")},
		"to":   []string{tf.EndDate.Format("20060102")},
	}
}
// MarshalJSON marshals the timeframe into a JSON string of the form
// "YYYY-MM-DD,YYYY-MM-DD". If either bound is zero, the empty string is
// emitted instead.
func (tf *Timeframe) MarshalJSON() ([]byte, error) {
	if tf.StartDate.IsZero() || tf.EndDate.IsZero() {
		return json.Marshal("")
	}
	return json.Marshal(fmt.Sprintf("%s,%s", tf.StartDate.Format("2006-01-02"), tf.EndDate.Format("2006-01-02")))
}
// UnmarshalJSON unmarshals data into a timeframe. The expected payload is the
// string produced by MarshalJSON: "YYYY-MM-DD,YYYY-MM-DD".
//
// NOTE(review): malformed input (wrong number of comma-separated parts, or
// unparseable dates) silently resets the receiver to the zero Timeframe and
// returns nil instead of an error — presumably deliberate best-effort
// decoding; confirm with callers before tightening.
func (tf *Timeframe) UnmarshalJSON(data []byte) error {
	unquotedData, _ := strconv.Unquote(string(data))
	dates := strings.Split(unquotedData, ",")
	if len(dates) != 2 {
		*tf = Timeframe{}
		return nil
	}
	startTime, err1 := time.Parse("2006-01-02", dates[0])
	startDate := ShortDate{startTime}
	endTime, err2 := time.Parse("2006-01-02", dates[1])
	endDate := ShortDate{endTime}
	if err1 != nil || err2 != nil {
		*tf = Timeframe{}
		return nil
	}
	*tf = Timeframe{StartDate: startDate, EndDate: endDate}
	return nil
}
// IsZero returns true when StartDate and EndDate are both zero, i.e. when the
// Timeframe is uninitialized.
func (tf *Timeframe) IsZero() bool {
	return tf.StartDate.IsZero() && tf.EndDate.IsZero()
}

// String renders the timeframe as "{start-end}" with YYYY-MM-DD dates.
func (tf *Timeframe) String() string {
	return fmt.Sprintf("{%s-%s}", tf.StartDate, tf.EndDate)
}
package deepcopy
import ()
// DeepCopyJsonData returns a deep copy of JSON-decoded data. Maps and slices
// are copied recursively; any other value (string, number, bool, nil, ...) is
// returned as-is, since such values are immutable once decoded.
func DeepCopyJsonData(dataFrom interface{}) interface{} {
	// Bind the asserted value in the type switch instead of re-asserting it
	// inside each case.
	switch from := dataFrom.(type) {
	case map[string]interface{}:
		dataTo := make(map[string]interface{})
		DeepOverwriteJsonMap(from, dataTo)
		return dataTo
	case []interface{}:
		toJsonSlice := make([]interface{}, 0)
		DeepOverwriteJsonSlice(&from, &toJsonSlice)
		return toJsonSlice
	default:
		return dataFrom
	}
}
// DeepOverwriteJsonMap deep-merges mapFrom into mapTo. Nested maps and slices
// are merged recursively; when the destination value for a key has a different
// JSON kind than the source, the old value is abandoned and replaced by an
// empty container of the source's kind. Scalar values simply overwrite.
func DeepOverwriteJsonMap(mapFrom map[string]interface{}, mapTo map[string]interface{}) {
	for key, value := range mapFrom {
		switch from := value.(type) {
		case map[string]interface{}:
			if _, ok := mapTo[key].(map[string]interface{}); !ok {
				// Not a JSON map: abandon the old value and start from an empty map.
				mapTo[key] = make(map[string]interface{})
			}
			DeepOverwriteJsonMap(from, mapTo[key].(map[string]interface{}))
		case []interface{}:
			toJsonSlice, ok := mapTo[key].([]interface{})
			if !ok {
				// Not a JSON slice: abandon the old value and start from an empty slice.
				toJsonSlice = make([]interface{}, 0)
			}
			DeepOverwriteJsonSlice(&from, &toJsonSlice)
			// Reassign because appends during the overwrite may have replaced
			// the slice's backing array.
			mapTo[key] = toJsonSlice
		default:
			mapTo[key] = value
		}
	}
}
// DeepOverwriteJsonSlice deep-merges the elements of *sliceFromPointer into
// *sliceToPointer, matching elements by index. Existing destination elements
// are merged in place (or replaced when their JSON kind differs); source
// elements beyond the destination's length are deep-copied and appended.
// The destination pointer is updated because append may reallocate.
func DeepOverwriteJsonSlice(sliceFromPointer *[]interface{}, sliceToPointer *[]interface{}) {
	sliceFrom := *sliceFromPointer
	for index, value := range sliceFrom {
		// Re-read the destination every iteration: an append in a previous
		// iteration may have replaced the slice header.
		sliceTo := *sliceToPointer
		sliceToLength := len(sliceTo)
		switch value.(type) {
		case map[string]interface{}:
			if index < sliceToLength {
				// Index exists: merge into the element in place.
				switch sliceTo[index].(type) {
				case map[string]interface{}:
				default:
					// Not a map, so create a new one.
					sliceTo[index] = make(map[string]interface{})
				}
				DeepOverwriteJsonMap(value.(map[string]interface{}), sliceTo[index].(map[string]interface{}))
			} else {
				// Index does not exist: deep-copy into a fresh map and append.
				newMap := make(map[string]interface{})
				DeepOverwriteJsonMap(value.(map[string]interface{}), newMap)
				*sliceToPointer = append(sliceTo, newMap)
			}
		case []interface{}:
			if index < sliceToLength {
				// Index exists: merge into the element in place.
				switch sliceTo[index].(type) {
				case []interface{}:
				default:
					// Not a slice, so create a new one.
					sliceTo[index] = make([]interface{}, 0)
				}
				fromJsonSlice := value.([]interface{})
				toJsonSlice := sliceTo[index].([]interface{})
				DeepOverwriteJsonSlice(&fromJsonSlice, &toJsonSlice)
				// Reassign again because the recursive merge may have appended,
				// replacing the backing array.
				sliceTo[index] = toJsonSlice
			} else {
				// Index does not exist: deep-copy into a fresh slice and append.
				newSlice := make([]interface{}, 0)
				fromJsonSlice := value.([]interface{})
				toJsonSlice := newSlice
				DeepOverwriteJsonSlice(&fromJsonSlice, &toJsonSlice)
				*sliceToPointer = append(sliceTo, newSlice)
			}
		default:
			// Scalar (not map or slice): overwrite or append directly.
			if index < sliceToLength {
				// Index exists, replace the old value.
				sliceTo[index] = value
			} else {
				// Index does not exist, append a new slot.
				*sliceToPointer = append(sliceTo, value)
			}
		}
	}
}
package impl
import (
"errors"
"github.com/xichen2020/eventdb/document/field"
"github.com/xichen2020/eventdb/filter"
"github.com/xichen2020/eventdb/values"
"github.com/xichen2020/eventdb/values/iterator"
iterimpl "github.com/xichen2020/eventdb/values/iterator/impl"
"github.com/xichen2020/eventdb/x/pool"
)
var (
	// errDoubleValuesBuilderAlreadyClosed is returned by Add after Close/Seal.
	errDoubleValuesBuilderAlreadyClosed = errors.New("double values builder is already closed")
)

// ArrayBasedDoubleValues is a double values collection backed by an in-memory array.
// TODO(xichen): Investigate more compact encoding of the values for memory efficiency.
type ArrayBasedDoubleValues struct {
	closed   bool // set once by Close; further Adds fail
	initDone bool // true after the first Add seeds min/max
	min      float64
	max      float64
	vals     *pool.RefCountedPooledFloat64Array
}
// NewArrayBasedDoubleValues creates a new array based double values whose
// backing storage is drawn from (and returned to) the given bucketized pool.
func NewArrayBasedDoubleValues(p *pool.BucketizedFloat64ArrayPool) *ArrayBasedDoubleValues {
	rawArr := p.Get(defaultInitialFieldValuesCapacity)
	refCountedArr := pool.NewRefCountedPooledFloat64Array(rawArr, p, nil)
	return &ArrayBasedDoubleValues{
		vals: refCountedArr,
	}
}
// Metadata returns the values metadata (running min/max and current size).
// NOTE(review): min/max are the zero value until the first Add — callers
// should check Size before trusting them.
func (b *ArrayBasedDoubleValues) Metadata() values.DoubleValuesMetadata {
	return values.DoubleValuesMetadata{
		Min:  b.min,
		Max:  b.max,
		Size: len(b.vals.Get()),
	}
}

// Iter returns a forward iterator over the values in insertion order.
func (b *ArrayBasedDoubleValues) Iter() (iterator.ForwardDoubleIterator, error) {
	return iterimpl.NewArrayBasedDoubleIterator(b.vals.Get()), nil
}

// Filter applies the given filter against the values, returning an iterator
// identifying the positions of values matching the filter.
func (b *ArrayBasedDoubleValues) Filter(
	op filter.Op,
	filterValue *field.ValueUnion,
) (iterator.PositionIterator, error) {
	return defaultFilteredArrayBasedDoubleValueIterator(b, op, filterValue)
}
// Add appends a new double value, maintaining the running min/max.
// It fails once the collection has been closed or sealed.
func (b *ArrayBasedDoubleValues) Add(v float64) error {
	if b.closed {
		return errDoubleValuesBuilderAlreadyClosed
	}
	switch {
	case !b.initDone:
		// First value seeds both bounds.
		b.min, b.max = v, v
		b.initDone = true
	case v < b.min:
		b.min = v
	case v > b.max:
		b.max = v
	}
	b.vals.Append(v)
	return nil
}
// Snapshot takes a snapshot of the double values, sharing the ref-counted
// backing storage via vals.Snapshot().
// NOTE(review): initDone is not copied, so a snapshot is only safe as a
// read-only view — confirm snapshots are never written to.
func (b *ArrayBasedDoubleValues) Snapshot() values.CloseableDoubleValues {
	return &ArrayBasedDoubleValues{
		min:  b.min,
		max:  b.max,
		vals: b.vals.Snapshot(),
	}
}

// Seal seals the double values builder, transferring ownership of the backing
// array to the returned read-only value set and closing the builder so that
// further Adds fail.
func (b *ArrayBasedDoubleValues) Seal() values.CloseableDoubleValues {
	sealed := &ArrayBasedDoubleValues{
		min:  b.min,
		max:  b.max,
		vals: b.vals,
	}
	// Close the current values so it's no longer writable. Zeroing first
	// detaches b from the backing array so Close does not release it.
	*b = ArrayBasedDoubleValues{}
	b.Close()
	return sealed
}

// Close closes the double values, releasing the backing array reference.
// Closing an already-closed value is a no-op.
func (b *ArrayBasedDoubleValues) Close() {
	if b.closed {
		return
	}
	b.closed = true
	if b.vals != nil {
		b.vals.Close()
		b.vals = nil
	}
}
package flatmap
import (
"fmt"
"reflect"
)
// Flatten takes an object and returns a flat map of the object. The keys of the
// map are the paths of the field names until a primitive field is reached and the
// value is a string representation of the terminal field. Keys listed in filter
// are removed from the result. Returns nil for an invalid (untyped nil) input.
func Flatten(obj interface{}, filter []string, primitiveOnly bool) map[string]string {
	flat := make(map[string]string)
	v := reflect.ValueOf(obj)
	if !v.IsValid() {
		return nil
	}
	flatten("", v, primitiveOnly, false, flat)
	for _, f := range filter {
		// delete is a no-op for missing keys, so no existence check is needed.
		delete(flat, f)
	}
	return flat
}
// flatten recursively calls itself to create a flatmap representation of the
// passed value. The results are stored into the output map and the keys are
// the fields prepended with the passed prefix. When primitiveOnly is set,
// recursion stops at the first nested struct/slice/interface once a struct
// has been entered (enteredStruct tracks that).
// XXX: A current restriction is that maps only support string keys.
func flatten(prefix string, v reflect.Value, primitiveOnly, enteredStruct bool, output map[string]string) {
	switch v.Kind() {
	case reflect.Bool:
		output[prefix] = fmt.Sprintf("%v", v.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		output[prefix] = fmt.Sprintf("%v", v.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		output[prefix] = fmt.Sprintf("%v", v.Uint())
	case reflect.Float32, reflect.Float64:
		output[prefix] = fmt.Sprintf("%v", v.Float())
	case reflect.Complex64, reflect.Complex128:
		output[prefix] = fmt.Sprintf("%v", v.Complex())
	case reflect.String:
		output[prefix] = fmt.Sprintf("%v", v.String())
	case reflect.Invalid:
		output[prefix] = "nil"
	case reflect.Ptr:
		if primitiveOnly && enteredStruct {
			return
		}
		e := v.Elem()
		if !e.IsValid() {
			output[prefix] = "nil"
		}
		// Recurse into the pointee with the same prefix (pointers are transparent).
		flatten(prefix, e, primitiveOnly, enteredStruct, output)
	case reflect.Map:
		for _, k := range v.MapKeys() {
			if k.Kind() == reflect.Interface {
				k = k.Elem()
			}
			if k.Kind() != reflect.String {
				panic(fmt.Sprintf("%q: map key is not string: %s", prefix, k))
			}
			flatten(getSubKeyPrefix(prefix, k.String()), v.MapIndex(k), primitiveOnly, enteredStruct, output)
		}
	case reflect.Struct:
		if primitiveOnly && enteredStruct {
			return
		}
		enteredStruct = true
		t := v.Type()
		for i := 0; i < v.NumField(); i++ {
			name := t.Field(i).Name
			val := v.Field(i)
			// Unwrap non-nil interface fields so their dynamic value is flattened.
			if val.Kind() == reflect.Interface && !val.IsNil() {
				val = val.Elem()
			}
			flatten(getSubPrefix(prefix, name), val, primitiveOnly, enteredStruct, output)
		}
	case reflect.Interface:
		if primitiveOnly {
			return
		}
		e := v.Elem()
		if !e.IsValid() {
			output[prefix] = "nil"
			return
		}
		flatten(prefix, e, primitiveOnly, enteredStruct, output)
	case reflect.Array, reflect.Slice:
		if primitiveOnly {
			return
		}
		if v.Kind() == reflect.Slice && v.IsNil() {
			output[prefix] = "nil"
			return
		}
		for i := 0; i < v.Len(); i++ {
			flatten(fmt.Sprintf("%s[%d]", prefix, i), v.Index(i), primitiveOnly, enteredStruct, output)
		}
	default:
		panic(fmt.Sprintf("prefix %q; unsupported type %v", prefix, v.Kind()))
	}
}
// getSubPrefix takes the current prefix and the next subfield and returns an
// appropriate dotted prefix ("cur.sub", or just "sub" at the top level).
func getSubPrefix(curPrefix, subField string) string {
	if curPrefix == "" {
		// fmt.Sprintf("%s", subField) was a needless allocation/format call.
		return subField
	}
	return curPrefix + "." + subField
}
// getSubKeyPrefix takes the current prefix and the next subfield and returns an
// appropriate prefix for a map field ("cur[sub]", or just "sub" at the top level).
func getSubKeyPrefix(curPrefix, subField string) string {
	if curPrefix == "" {
		// fmt.Sprintf("%s", subField) was a needless allocation/format call.
		return subField
	}
	return curPrefix + "[" + subField + "]"
}
package wire
import (
"fmt"
"io"
)
// defaultInvListAlloc is the default size used for the backing array for an inventory list. The array will dynamically grow as needed, but this figure is intended to provide enough space for the max number of inventory vectors in a *typical* inventory message without needing to grow the backing array multiple times. Technically, the list can grow to MaxInvPerMsg, but rather than using that large figure, this figure more accurately reflects the typical case.
const defaultInvListAlloc = 1000

// MsgInv implements the Message interface and represents a bitcoin inv message. It is used to advertise a peer's known data such as blocks and transactions through inventory vectors. It may be sent unsolicited to inform other peers of the data or in response to a getblocks message (MsgGetBlocks). Each message is limited to a maximum number of inventory vectors, which is currently 50,000. Use the AddInvVect function to build up the list of inventory vectors when sending an inv message to another peer.
type MsgInv struct {
	// InvList holds the advertised inventory vectors, capped at MaxInvPerMsg.
	InvList []*InvVect
}
// AddInvVect adds an inventory vector to the message, failing once the
// per-message limit (MaxInvPerMsg) would be exceeded.
func (msg *MsgInv) AddInvVect(iv *InvVect) error {
	if len(msg.InvList)+1 > MaxInvPerMsg {
		return messageError("MsgInv.AddInvVect",
			fmt.Sprintf("too many invvect in message [max %v]", MaxInvPerMsg))
	}
	msg.InvList = append(msg.InvList, iv)
	return nil
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. This is part of the Message interface implementation.
func (msg *MsgInv) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
	count, err := ReadVarInt(r, pver)
	if err != nil {
		return err
	}
	// Limit to max inventory vectors per message.
	if count > MaxInvPerMsg {
		str := fmt.Sprintf("too many invvect in message [%v]", count)
		return messageError("MsgInv.BtcDecode", str)
	}
	// Create a contiguous slice of inventory vectors to deserialize into in
	// order to reduce the number of allocations.
	invList := make([]InvVect, count)
	msg.InvList = make([]*InvVect, 0, count)
	for i := uint64(0); i < count; i++ {
		iv := &invList[i]
		if err := readInvVect(r, pver, iv); err != nil {
			return err
		}
		// Propagate the error instead of printing it to stdout and carrying
		// on, which silently dropped the failure (previous behavior).
		if err := msg.AddInvVect(iv); err != nil {
			return err
		}
	}
	return nil
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. This is part of the Message interface implementation.
func (msg *MsgInv) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
	// Limit to max inventory vectors per message.
	count := len(msg.InvList)
	if count > MaxInvPerMsg {
		str := fmt.Sprintf("too many invvect in message [%v]", count)
		return messageError("MsgInv.BtcEncode", str)
	}
	// Write the vector count first, then each inventory vector in order.
	err := WriteVarInt(w, pver, uint64(count))
	if err != nil {
		return err
	}
	for _, iv := range msg.InvList {
		err := writeInvVect(w, pver, iv)
		if err != nil {
			return err
		}
	}
	return nil
}
// Command returns the protocol command string for the message. This is part of the Message interface implementation.
func (msg *MsgInv) Command() string {
	return CmdInv
}

// MaxPayloadLength returns the maximum length the payload can be for the receiver. This is part of the Message interface implementation.
func (msg *MsgInv) MaxPayloadLength(pver uint32) uint32 {
	// Num inventory vectors (varInt) + max allowed inventory vectors.
	return MaxVarIntPayload + (MaxInvPerMsg * maxInvVectPayload)
}
// NewMsgInv returns a new bitcoin inv message that conforms to the Message interface. See MsgInv for details.
// The list is pre-sized for the typical case (defaultInvListAlloc).
func NewMsgInv() *MsgInv {
	return &MsgInv{
		InvList: make([]*InvVect, 0, defaultInvListAlloc),
	}
}
// NewMsgInvSizeHint returns a new bitcoin inv message that conforms to the Message interface. See MsgInv for details. This function differs from NewMsgInv in that it allows a default allocation size for the backing array which houses the inventory vector list. This allows callers who know in advance how large the inventory list will grow to avoid the overhead of growing the internal backing array several times when appending large amounts of inventory vectors with AddInvVect. Note that the specified hint is just that - a hint that is used for the default allocation size. Adding more (or less) inventory vectors will still work properly. The size hint is limited to MaxInvPerMsg.
func NewMsgInvSizeHint(sizeHint uint) *MsgInv {
// Limit the specified hint to the maximum allow per message.
if sizeHint > MaxInvPerMsg {
sizeHint = MaxInvPerMsg
}
return &MsgInv{
InvList: make([]*InvVect, 0, sizeHint),
}
} | pkg/chain/wire/msginv.go | 0.672977 | 0.451931 | msginv.go | starcoder |
package vert
import (
"fmt"
"reflect"
"syscall/js"
)
// zero is the invalid reflect.Value used as the "no value / skip" sentinel
// throughout the assignment helpers.
var zero = reflect.ValueOf(nil)

// AssignTo assigns a JS value to a Go pointer.
// i must be a non-nil pointer; anything else yields an InvalidAssignmentError.
// Returns an error on invalid assignments.
func (v Value) AssignTo(i interface{}) error {
	rv := reflect.ValueOf(i)
	if k := rv.Kind(); k != reflect.Ptr || rv.IsNil() {
		return &InvalidAssignmentError{Kind: k}
	}
	return recoverAssignTo(rv, v.JSValue())
}
// recoverAssignTo runs the recursive assignment while recovering unexpected
// assignment panics, converting them to InvalidAssignmentError.
// Please report unexpected panics.
func recoverAssignTo(rv reflect.Value, jv js.Value) (err error) {
	defer func() {
		if rec := recover(); rec != nil {
			err = &InvalidAssignmentError{rec: rec}
		}
	}()
	_, err = assignTo(rv, jv)
	return
}
// assignTo recursively assigns a JS value to the Go value rv, dispatching on
// both the Go kind and the JS type. It returns the (possibly newly allocated)
// Go value, or the `zero` sentinel when there is nothing to assign
// (JS null/undefined).
func assignTo(rv reflect.Value, jv js.Value) (reflect.Value, error) {
	if jv.Equal(js.Null()) || jv.Equal(js.Undefined()) {
		return zero, nil
	}
	k := rv.Kind()
	switch k {
	case reflect.Ptr:
		return assignToPointer(rv, jv)
	case reflect.Interface:
		// Only descend when the interface already holds a concrete value.
		if e := rv.Elem(); e != zero {
			return assignToInterface(rv, e, jv)
		}
	}
	switch t := jv.Type(); t {
	case js.TypeBoolean:
		return assignToBasic(rv, jv.Bool(), t)
	case js.TypeNumber:
		return assignToBasic(rv, jv.Float(), t)
	case js.TypeString:
		return assignToBasic(rv, jv.String(), t)
	case js.TypeObject:
		return assignToValue(rv, jv)
	default:
		return zero, &InvalidAssignmentError{Type: t, Kind: k}
	}
}
// assignToPointer assigns a JS value through a pointer, allocating the
// pointee when the pointer is nil.
func assignToPointer(p reflect.Value, jv js.Value) (reflect.Value, error) {
	if p.IsNil() {
		p = reflect.New(p.Type().Elem())
	}
	v, err := assignTo(p.Elem(), jv)
	if err != nil {
		return zero, err
	}
	if v != zero {
		p.Elem().Set(v)
	}
	return p, nil
}

// assignToInterface assigns a JS value to an interface i whose current
// concrete value is e; the result is stored back into the interface.
func assignToInterface(i, e reflect.Value, jv js.Value) (reflect.Value, error) {
	v, err := assignTo(e, jv)
	if err != nil {
		return zero, err
	}
	if v != zero {
		i.Set(v)
	}
	return i, nil
}
// assignToBasic assigns a primitive value i (bool/float64/string) to a basic
// Go value b via reflect conversion. An incompatible conversion panics inside
// reflect; the deferred recover turns that into an InvalidAssignmentError.
func assignToBasic(b reflect.Value, i interface{}, t js.Type) (val reflect.Value, err error) {
	defer func() {
		if rec := recover(); rec != nil {
			err = &InvalidAssignmentError{Type: t, Kind: b.Kind()}
		}
	}()
	v := reflect.ValueOf(i)
	val = v.Convert(b.Type())
	return
}

// assignToValue assigns a JS object to a composite Go value
// (struct, map or slice).
func assignToValue(rv reflect.Value, jv js.Value) (reflect.Value, error) {
	switch k := rv.Kind(); k {
	case reflect.Struct:
		return assignToStruct(rv, jv)
	case reflect.Map:
		return assignToMap(rv, jv)
	case reflect.Slice:
		return assignToSlice(rv, jv)
	default:
		return zero, &InvalidAssignmentError{Type: jv.Type(), Kind: k}
	}
}
// assignToStruct assigns a JS object to a struct, field by field. Only
// exported fields (CanInterface) are considered; the JS property name for a
// field is resolved by nameOf (presumably honoring struct tags — defined
// elsewhere in this package).
func assignToStruct(s reflect.Value, val js.Value) (reflect.Value, error) {
	t := s.Type()
	// Work on a fresh instance so a failed assignment does not leave a
	// half-filled struct behind.
	s = reflect.New(t).Elem()
	n := s.NumField()
	for i := 0; i < n; i++ {
		if f := s.Field(i); f.CanInterface() {
			k := nameOf(t.Field(i))
			jf := val.Get(k)
			v, err := assignTo(f, jf)
			if err != nil {
				return zero, err
			}
			if v == zero {
				// Missing/null JS property: leave the field at its zero value.
				continue
			}
			f.Set(v)
		}
	}
	return s, nil
}
// assignToMap assigns a JS object to a map, iterating the object's own keys
// via Object.keys (the package-level `object` handle, defined elsewhere).
// Map keys must be of type string (or convertible from string).
func assignToMap(m reflect.Value, jv js.Value) (reflect.Value, error) {
	t := m.Type()
	keys := object.Call("keys", jv)
	n := keys.Length()
	if m.IsNil() {
		m = reflect.MakeMapWithSize(t, n)
	}
	kt := t.Key()
	vt := t.Elem()
	for i := 0; i < n; i++ {
		jk := keys.Index(i)
		k := reflect.New(kt).Elem()
		k, err := assignTo(k, jk)
		if err != nil {
			return zero, err
		}
		if k == zero {
			continue
		}
		jv := jv.Get(jk.String())
		v := reflect.New(vt).Elem()
		v, err = assignTo(v, jv)
		if err != nil {
			return zero, err
		}
		if v == zero {
			// Null/undefined property values are skipped, not stored.
			continue
		}
		m.SetMapIndex(k, v)
	}
	return m, nil
}
// assignToSlice assigns a JS array object to a slice, element by element.
// Elements that resolve to null/undefined are skipped, so the resulting slice
// can be shorter than the JS array.
func assignToSlice(s reflect.Value, jv js.Value) (reflect.Value, error) {
	t := s.Type()
	n := jv.Length()
	if s.IsNil() {
		s = reflect.MakeSlice(t, 0, n)
	}
	et := t.Elem()
	for i := 0; i < n; i++ {
		e := reflect.New(et).Elem()
		je := jv.Index(i)
		e, err := assignTo(e, je)
		if err != nil {
			return zero, err
		}
		if e == zero {
			continue
		}
		s = reflect.Append(s, e)
	}
	return s, nil
}
// InvalidAssignmentError reports a JS-to-Go assignment that could not be
// performed, carrying the offending JS type and Go kind, or (rec) the value
// recovered from an unexpected panic.
type InvalidAssignmentError struct {
	Type js.Type
	Kind reflect.Kind

	rec interface{}
}
func (e *InvalidAssignmentError) Error() string {
if e.rec != nil {
return fmt.Sprintf("unexpected panic: %+v", e.rec)
}
if e.Type == js.TypeUndefined {
return fmt.Sprintf("invalid assignment to Go kind: %v must be a non-nil pointer", e.Kind)
}
return fmt.Sprintf("invalid assignment from JS type: %v to Go kind: %v", e.Type, e.Kind)
} | assign.go | 0.6488 | 0.427456 | assign.go | starcoder |
package base
import (
"github.com/scylladb/go-set/iset"
"math/rand"
)
// RandomGenerator is the random generator for gorse. It embeds *rand.Rand,
// so all of its methods (Float32, Intn, NormFloat64, ...) are available
// directly on RandomGenerator.
type RandomGenerator struct {
	*rand.Rand
}
// NewRandomGenerator creates a RandomGenerator seeded with the given value.
func NewRandomGenerator(seed int64) RandomGenerator {
	// seed is already an int64, so the previous int64(seed) conversion was redundant.
	return RandomGenerator{rand.New(rand.NewSource(seed))}
}
// UniformVector makes a vector filled with uniform random floats spanning
// [low, high).
func (rng RandomGenerator) UniformVector(size int, low, high float32) []float32 {
	result := make([]float32, size)
	span := high - low
	for i := range result {
		result[i] = rng.Float32()*span + low
	}
	return result
}
// NewNormalVector makes a vector filled with normally distributed random
// floats with the given mean and standard deviation.
func (rng RandomGenerator) NewNormalVector(size int, mean, stdDev float32) []float32 {
	ret := make([]float32, size)
	for i := 0; i < len(ret); i++ {
		ret[i] = float32(rng.NormFloat64())*stdDev + mean
	}
	return ret
}

// NormalMatrix makes a row×col matrix filled with normally distributed
// random floats.
func (rng RandomGenerator) NormalMatrix(row, col int, mean, stdDev float32) [][]float32 {
	ret := make([][]float32, row)
	for i := range ret {
		ret[i] = rng.NewNormalVector(col, mean, stdDev)
	}
	return ret
}

// UniformMatrix makes a row×col matrix filled with uniform random floats
// spanning [low, high).
func (rng RandomGenerator) UniformMatrix(row, col int, low, high float32) [][]float32 {
	ret := make([][]float32, row)
	for i := range ret {
		ret[i] = rng.UniformVector(col, low, high)
	}
	return ret
}
// NormalVector64 makes a vector filled with normally distributed random
// float64s with the given mean and standard deviation. (The previous comment
// referred to the wrong function name, NewNormalVector.)
func (rng RandomGenerator) NormalVector64(size int, mean, stdDev float64) []float64 {
	ret := make([]float64, size)
	for i := 0; i < len(ret); i++ {
		ret[i] = rng.NormFloat64()*stdDev + mean
	}
	return ret
}
// NormalMatrix64 makes a row×col matrix filled with normally distributed
// random float64s.
func (rng RandomGenerator) NormalMatrix64(row, col int, mean, stdDev float64) [][]float64 {
	ret := make([][]float64, row)
	for i := range ret {
		ret[i] = rng.NormalVector64(col, mean, stdDev)
	}
	return ret
}
// Sample draws up to n distinct integers from [low, high) that are not in any
// of the exclude sets. When fewer than n candidates remain after exclusion,
// every remaining candidate is returned (in ascending order); otherwise n
// values are drawn by rejection sampling, in random order.
func (rng RandomGenerator) Sample(low, high, n int, exclude ...*iset.Set) []int {
	intervalLength := high - low
	// Union copies the exclude sets, so mutating excludeSet below does not
	// touch the callers' sets.
	excludeSet := iset.Union(exclude...)
	sampled := make([]int, 0, n)
	if n >= intervalLength-excludeSet.Size() {
		// Not enough candidates: return everything that is not excluded.
		for i := low; i < high; i++ {
			if !excludeSet.Has(i) {
				sampled = append(sampled, i)
				excludeSet.Add(i)
			}
		}
	} else {
		// Rejection-sample until n distinct, non-excluded values are found.
		for len(sampled) < n {
			v := rng.Intn(intervalLength) + low
			if !excludeSet.Has(v) {
				sampled = append(sampled, v)
				excludeSet.Add(v)
			}
		}
	}
	return sampled
}
package eval
import (
"bytes"
"fmt"
"io"
"reflect"
)
type (
	// RDetect tracks already-visited values, used to detect recursion when
	// rendering nested structures with ToString.
	RDetect map[interface{}]bool

	// Value is the common interface implemented by every runtime value.
	Value interface {
		fmt.Stringer
		Equality

		ToString(bld io.Writer, format FormatContext, g RDetect)

		PType() Type
	}

	// Comparator returns true when a is less than b.
	Comparator func(a, b Value) bool

	// Object is a Value that can be (re-)initialized from positional
	// arguments or from a hash of attribute values.
	Object interface {
		Value

		Initialize(c Context, arguments []Value)

		InitFromHash(c Context, hash OrderedMap)
	}

	// ReadableObject exposes attribute lookup by name.
	ReadableObject interface {
		Get(key string) (value Value, ok bool)
	}

	// CallableObject is implemented by PuppetObjects that have functions
	CallableObject interface {
		Call(c Context, method ObjFunc, args []Value, block Lambda) (result Value, ok bool)
	}

	// PuppetObject is a readable object that can report its full attribute
	// hash.
	PuppetObject interface {
		Value
		ReadableObject

		InitHash() OrderedMap
	}

	ErrorObject interface {
		PuppetObject

		// Kind returns the error kind
		Kind() string

		// Message returns the error message
		Message() string

		// IssueCode returns the issue code
		IssueCode() string

		// PartialResult returns the optional partial result. It returns
		// eval.UNDEF if no partial result exists
		PartialResult() Value

		// Details returns the optional details. It returns
		// an empty map when o details exist
		Details() OrderedMap
	}

	// DetailedTypeValue can report a more precise type than PType.
	DetailedTypeValue interface {
		Value

		DetailedType() Type
	}

	// SizedValue is a value with a length.
	SizedValue interface {
		Value

		Len() int

		IsEmpty() bool
	}

	// InterfaceValue wraps a native Go value.
	InterfaceValue interface {
		Value

		Interface() interface{}
	}

	// IterableValue can produce an iterator over its elements.
	IterableValue interface {
		Iterator() Iterator

		ElementType() Type

		IsHashStyle() bool
	}

	// IteratorValue is a value wrapping an iteration that can be reified
	// into an Array.
	IteratorValue interface {
		Value

		AsArray() List
	}

	// List represents an Array. The iterative methods will not catch break exceptions. If
	// that is desired, then use an Iterator instead.
	List interface {
		SizedValue
		IterableValue

		Add(Value) List

		AddAll(List) List

		All(predicate Predicate) bool

		Any(predicate Predicate) bool

		AppendTo(slice []Value) []Value

		At(index int) Value

		Delete(Value) List

		DeleteAll(List) List

		Each(Consumer)

		EachSlice(int, SliceConsumer)

		EachWithIndex(consumer IndexedConsumer)

		Find(predicate Predicate) (Value, bool)

		Flatten() List

		Map(mapper Mapper) List

		Select(predicate Predicate) List

		Slice(i int, j int) List

		Reduce(redactor BiMapper) Value

		Reduce2(initialValue Value, redactor BiMapper) Value

		Reject(predicate Predicate) List

		Unique() List
	}

	// SortableList is a List that can produce a sorted copy of itself.
	SortableList interface {
		List

		Sort(comparator Comparator) List
	}

	// HashKey is the string form of a value used as a Go map key.
	HashKey string

	HashKeyValue interface {
		ToKey() HashKey
	}

	StreamHashKeyValue interface {
		ToKey(b *bytes.Buffer)
	}

	// MapEntry is a single key/value pair of an OrderedMap.
	MapEntry interface {
		Value

		Key() Value

		Value() Value
	}

	// OrderedMap represents a Hash. The iterative methods will not catch break exceptions. If
	// that is desired, then use an Iterator instead.
	OrderedMap interface {
		List

		AllPairs(BiPredicate) bool

		AnyPair(BiPredicate) bool

		AllKeysAreStrings() bool

		Entries() List

		EachKey(Consumer)

		EachPair(BiConsumer)

		EachValue(Consumer)

		Get(key Value) (Value, bool)

		Get2(key Value, dflt Value) Value

		Get3(key Value, dflt Producer) Value

		Get4(key string) (Value, bool)

		Get5(key string, dflt Value) Value

		Get6(key string, dflt Producer) Value

		// GetEntry returns the entry that represents the mapping between
		// the given key and its value
		GetEntry(key string) (MapEntry, bool)

		IncludesKey(o Value) bool

		IncludesKey2(o string) bool

		Keys() List

		// MapEntries returns a new OrderedMap with both keys and values
		// converted using the given mapper function
		MapEntries(mapper EntryMapper) OrderedMap

		// MapValues returns a new OrderedMap with the exact same keys as
		// before but where each value has been converted using the given
		// mapper function
		MapValues(mapper Mapper) OrderedMap

		Merge(OrderedMap) OrderedMap

		Values() List

		SelectPairs(BiPredicate) OrderedMap

		RejectPairs(BiPredicate) OrderedMap
	}

	// NumericValue is a Value backed by a number.
	NumericValue interface {
		Value

		Int() int64

		Float() float64

		Abs() NumericValue
	}
)
// Shared singleton values, assigned elsewhere in the package.
var EMPTY_ARRAY List
var EMPTY_MAP OrderedMap
var EMPTY_STRING Value
var EMPTY_VALUES []Value
var UNDEF Value

// Function hooks assigned by the implementing package — presumably to break
// import cycles; confirm against the package that wires them up.
var DetailedValueType func(value Value) Type
var GenericValueType func(value Value) Type
var ToKey func(value Value) HashKey
var IsTruthy func(tv Value) bool

var ToInt func(v Value) (int64, bool)
var ToFloat func(v Value) (float64, bool)
var Wrap func(c Context, v interface{}) Value
var WrapReflected func(c Context, v reflect.Value) Value
// StringElements returns a slice containing each element in the given list as a string
func StringElements(l List) []string {
	ss := make([]string, l.Len())
	l.EachWithIndex(func(e Value, i int) {
		ss[i] = e.String()
	})
	return ss
}
// ToString renders t using the package default format context.
func ToString(t Value) string {
	return ToString2(t, DEFAULT_FORMAT_CONTEXT)
}

// ToPrettyString renders t using the PRETTY format context.
func ToPrettyString(t Value) string {
	return ToString2(t, PRETTY)
}
// ToString2 renders t using the given format context and returns the result
// as a string.
func ToString2(t Value, format FormatContext) string {
	// A value bytes.Buffer avoids the pointless empty-string copy that
	// bytes.NewBufferString(``) performed.
	var bld bytes.Buffer
	t.ToString(&bld, format, nil)
	return bld.String()
}
// ToString3 writes t to writer using the package default format context.
func ToString3(t Value, writer io.Writer) {
	ToString4(t, DEFAULT_FORMAT_CONTEXT, writer)
}

// ToString4 writes t to writer using the given format context.
func ToString4(t Value, format FormatContext, writer io.Writer) {
	t.ToString(writer, format, nil)
}
func CopyValues(src []Value) []Value {
dst := make([]Value, len(src))
for i, v := range src {
dst[i] = v
}
return dst
} | eval/values.go | 0.74512 | 0.412767 | values.go | starcoder |
package decimal
import (
"math/big"
)
// BigDecimal real value is
// d = (value + numerator / denominator) * 10 ^ (-scale)
// We have:
// denominator = 0 as initial => numerator / 0 = 0
// it means numerator / denominator only valid if denominator != 0
// Simple type of BigDecimal:
// - The numerator shouble be less than denominator (or both are zeros)
type BigDecimal struct {
	// value is the integral (scaled) part; lazily allocated, see ensureInitialized.
	value *big.Int
	// scale is the power-of-ten exponent applied as 10^(-scale).
	scale int32
	// numerator/denominator carry the fractional remainder; the fraction is
	// only meaningful when denominator != 0.
	numerator   uint64
	denominator uint64
	// strCache presumably caches the rendered string form — confirm against
	// the String method (not visible here).
	strCache string
}
// ensureInitialized lazily allocates the big.Int backing the integral part so
// a zero-valued BigDecimal can be used directly.
func (d *BigDecimal) ensureInitialized() {
	if d.value == nil {
		d.value = new(big.Int)
	}
}

// toFractionIgnoreScale returns the decimal as the exact fraction
// (value*denominator + numerator) / denominator, disregarding the scale.
// When no fraction is present it returns value / 1. Fresh big.Ints are
// returned, so callers may mutate them freely.
func (d BigDecimal) toFractionIgnoreScale() (*big.Int, *big.Int) {
	if d.denominator == 0 {
		return new(big.Int).Set(d.value), new(big.Int).Set(oneInt)
	}
	num, dem := new(big.Int).SetUint64(d.numerator), new(big.Int).SetUint64(d.denominator)
	vMulD := new(big.Int).Set(d.value)
	vMulD = vMulD.Mul(vMulD, dem)
	num.Add(num, vMulD)
	return num, dem
}
// optimize normalizes the fraction so that numerator < denominator, folding
// any whole part into value, and clears the denominator when the fraction
// vanishes. Panics if the invariant "denominator == 0 implies numerator == 0"
// is violated.
func (d *BigDecimal) optimize() {
	if d.denominator == 0 {
		if d.numerator != 0 {
			panic("denominator is zero but numerator not")
		}
		return
	}
	if d.numerator >= d.denominator {
		// Fold the whole part of the fraction into value.
		d.value = d.value.Add(d.value, new(big.Int).SetUint64(d.numerator/d.denominator))
		d.numerator %= d.denominator
	}
	if d.numerator == 0 {
		d.denominator = 0
	}
}

// reduce puts the fraction into lowest terms by dividing numerator and
// denominator by their greatest common divisor.
func (d *BigDecimal) reduce() {
	if d.denominator == 0 {
		if d.numerator != 0 {
			panic("denominator is zero but numerator not")
		}
		return
	}
	if d.numerator == 0 {
		d.denominator = 0
		return
	}
	gcdOfND := gcd(d.denominator, d.numerator)
	d.denominator /= gcdOfND
	d.numerator /= gcdOfND
}
// rescale helps to change the scale value but keep the real decimal value.
// rescale supports some operators; basically, the sum/add methods need two numbers
// to have the same scale. Scaling down (smaller scale) moves the lost digits
// into the fractional numerator/denominator pair so no precision is dropped;
// scaling up multiplies both the value and the fraction by the matching
// power of ten.
func (d BigDecimal) rescale(scale int32) BigDecimal {
	bigDec := BigDecimal{}
	d.ensureInitialized()
	bigDec.ensureInitialized()
	if d.scale == scale {
		// Same scale: return a defensive copy (big.Int is mutable).
		bigDec = BigDecimal{
			value:       new(big.Int).Set(d.value),
			scale:       d.scale,
			numerator:   d.numerator,
			denominator: d.denominator,
		}
		bigDec.optimize()
		return bigDec
	}
	diffScale := scale - d.scale
	value := new(big.Int).Set(d.value)
	bigDec = BigDecimal{value: value, scale: scale}
	if diffScale < 0 {
		// Scaling down: divide value by 10^|diff| and fold the remainder into
		// the fraction via addFraction (defined elsewhere in the package).
		expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(-diffScale)), nil)
		rem := new(big.Int)
		value, rem = value.DivMod(value, expScale, rem)
		r := rem.Uint64()
		bigDec.numerator, bigDec.denominator = addFraction(
			r, expScale.Uint64(),
			d.numerator, d.denominator*expScale.Uint64(),
		)
	} else {
		// Scaling up: multiply value and the fraction's numerator by 10^diff.
		expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diffScale)), nil)
		value = value.Mul(value, expScale)
		bigDec.numerator = d.numerator * expScale.Uint64()
		bigDec.denominator = d.denominator
	}
	bigDec.optimize()
	return bigDec
}
// rescalePair rescales two decimals to a common scale — the larger of the two
// scales (the previous comment incorrectly said "minimal exp"), as selected by
// maxInt32. The decimal already at the common scale is returned unchanged.
func rescalePair(d1 BigDecimal, d2 BigDecimal) (BigDecimal, BigDecimal) {
	d1.ensureInitialized()
	d2.ensureInitialized()
	if d1.scale == d2.scale {
		return d1, d2
	}
	baseScale := maxInt32(d1.scale, d2.scale)
	if baseScale != d1.scale {
		return d1.rescale(baseScale), d2
	}
	return d1, d2.rescale(baseScale)
}
package g2d
import (
"fmt"
"math"
"github.com/angelsolaorbaiceta/inkgeom/nums"
)
var (
	// IVersor is the unit vector along the X axis.
	IVersor = MakeVersor(1, 0)
	// JVersor is the unit vector along the Y axis.
	JVersor = MakeVersor(0, 1)
)

/*
Vector is an entity with projections both in the X and Y axis.
Used to represent both points and vectors in two dimensions.
*/
type Vector struct {
	x, y float64
}
// MakeVector creates a new vector.
func MakeVector(x, y float64) *Vector {
	return &Vector{x, y}
}

// MakeVersor creates a vector with unitary norm following the direction of the given projections.
// NOTE(review): a (0, 0) input divides by a zero length and yields NaN
// components — confirm callers never pass the zero vector.
func MakeVersor(x, y float64) *Vector {
	length := computeLength(x, y)
	return &Vector{x / length, y / length}
}
func (v *Vector) X() float64 {
return v.x
}
func (v *Vector) Y() float64 {
return v.y
}
// Length returns the magnitude (Euclidean norm) of the vector.
func (v *Vector) Length() float64 {
	return computeLength(v.x, v.y)
}

// IsVersor returns true if the vector has a length of 1, within the
// tolerance used by nums.IsCloseToOne.
func (v *Vector) IsVersor() bool {
	return nums.IsCloseToOne(v.Length())
}

// Equals returns true if the projections of this and other projectable are
// equal, compared with the tolerance used by nums.FloatsEqual.
func (v *Vector) Equals(other *Vector) bool {
	return nums.FloatsEqual(v.x, other.x) &&
		nums.FloatsEqual(v.y, other.y)
}
// ToVersor returns a versor with the same direction as this vector.
// Vectors that are already unit length are returned unchanged.
func (v *Vector) ToVersor() *Vector {
	if !v.IsVersor() {
		return MakeVersor(v.x, v.y)
	}
	return v
}
// Perpendicular returns the vector result of rotating PI/2 radians this one
// (counter-clockwise: (x, y) -> (-y, x)).
func (v *Vector) Perpendicular() *Vector {
	return MakeVector(-v.y, v.x)
}

// Scaled creates a new vector with the projections scaled the given factor.
func (v *Vector) Scaled(factor float64) *Vector {
	return MakeVector(v.x*factor, v.y*factor)
}
// Plus creates a new projectable adding this with other, component-wise.
func (v *Vector) Plus(other *Vector) *Vector {
	return MakeVector(v.x+other.x, v.y+other.y)
}

// Minus creates a new projectable subtracting other from this, component-wise.
func (v *Vector) Minus(other *Vector) *Vector {
	return MakeVector(v.x-other.x, v.y-other.y)
}
// DotTimes computes the dot (scalar) product of this vector with other.
func (v *Vector) DotTimes(other *Vector) float64 {
	return (v.x * other.x) + (v.y * other.y)
}

// CrossTimes computes the cross product of this vector with other
// (the z-component of the 3D cross product; positive when other is
// counter-clockwise from v).
func (v *Vector) CrossTimes(other *Vector) float64 {
	return (v.x * other.y) - (v.y * other.x)
}
// computeLength returns the Euclidean norm of the projection pair (x, y).
func computeLength(x, y float64) float64 {
	squaredNorm := x*x + y*y
	return math.Sqrt(squaredNorm)
}
// String returns a human-readable "{x, y}" representation with the default
// %f precision (six decimal places). The value receiver lets both Vector and
// *Vector satisfy fmt.Stringer.
func (v Vector) String() string {
	return fmt.Sprintf("{%f, %f}", v.x, v.y)
}
package s2
import (
"github.com/golang/geo/r2"
)
// Cell is an S2 region object that represents a cell. Unlike CellIDs,
// it supports efficient containment and intersection tests. However, it is
// also a more expensive representation.
type Cell struct {
	face        int8    // cube face, as returned by faceIJOrientation
	level       int8    // subdivision level, from CellID.Level
	orientation int8    // orientation, as returned by faceIJOrientation
	id          CellID
	uv          r2.Rect // cell bounds in (u,v)-space on the face
}
// CellFromCellID constructs a Cell corresponding to the given CellID.
func CellFromCellID(id CellID) Cell {
	face, i, j, orientation := id.faceIJOrientation()
	level := id.Level()
	return Cell{
		face:        int8(face),
		level:       int8(level),
		orientation: int8(orientation),
		id:          id,
		uv:          ijLevelToBoundUV(i, j, level),
	}
}
// CellFromPoint constructs the leaf cell containing the given Point.
func CellFromPoint(p Point) Cell {
	return CellFromCellID(cellIDFromPoint(p))
}

// CellFromLatLng constructs the leaf cell containing the given LatLng.
func CellFromLatLng(ll LatLng) Cell {
	return CellFromCellID(CellIDFromLatLng(ll))
}
// IsLeaf returns whether this Cell is a leaf or not (i.e. at the maximum
// subdivision level).
func (c Cell) IsLeaf() bool {
	return c.level == maxLevel
}

// SizeIJ returns the edge length of this cell in (i,j)-coordinate space, as
// computed by sizeIJ for the cell's level.
func (c Cell) SizeIJ() int {
	return sizeIJ(int(c.level))
}
// Vertex returns the k-th vertex of the cell (k = [0,3]) in CCW order
// (lower left, lower right, upper right, upper left in the UV plane).
func (c Cell) Vertex(k int) Point {
	// Fetch the UV-rect corner once instead of recomputing Vertices() for
	// each coordinate.
	v := c.uv.Vertices()[k]
	return Point{faceUVToXYZ(int(c.face), v.X, v.Y).Normalize()}
}
// Edge returns the inward-facing normal of the great circle passing through
// the CCW ordered edge from vertex k to vertex k+1 (mod 4).
func (c Cell) Edge(k int) Point {
	switch k {
	case 0:
		return Point{vNorm(int(c.face), c.uv.Y.Lo).Normalize()} // Bottom
	case 1:
		return Point{uNorm(int(c.face), c.uv.X.Hi).Normalize()} // Right
	case 2:
		// Top and left edges negate the u/v normal so the result still
		// faces inward.
		return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0).Normalize()} // Top
	default:
		return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0).Normalize()} // Left
	}
}
// ExactArea returns the area of this cell as accurately as possible, by
// summing the areas of the two triangles the cell's quad splits into.
func (c Cell) ExactArea() float64 {
	v0, v2 := c.Vertex(0), c.Vertex(2)
	lower := PointArea(v0, c.Vertex(1), v2)
	upper := PointArea(v0, v2, c.Vertex(3))
	return lower + upper
}
// TODO(roberts, or $SOMEONE): Port the remaining functionality from the C++
// implementation; almost everything else is still missing. In particular,
// implement the accessor methods on the internal fields.
package costmodel
import (
"fmt"
"math"
"strconv"
costAnalyzerCloud "github.com/kubecost/cost-model/cloud"
)
// NetworkUsageData contains the zone, region, and internet egress network
// usage series for a single namespace+pod.
type NetworkUsageData struct {
	PodName               string
	Namespace             string
	NetworkZoneEgress     []*Vector // egress within the same zone
	NetworkRegionEgress   []*Vector // egress across zones within the region
	NetworkInternetEgress []*Vector // egress to the internet
}
// NetworkUsageVector contains one egress network usage series for a single
// namespace+pod, as parsed from a Prometheus query response.
type NetworkUsageVector struct {
	PodName   string
	Namespace string
	Values    []*Vector
}
// GetNetworkUsageData joins the results of the zone, region, and internet
// egress queries into a single map keyed by "namespace,pod".
//
// isRange selects between instant-query (single value) and range-query
// (multi value) response parsing.
func GetNetworkUsageData(zr interface{}, rr interface{}, ir interface{}, isRange bool) (map[string]*NetworkUsageData, error) {
	vectorFn := getNetworkUsageVector
	if isRange {
		vectorFn = getNetworkUsageVectors
	}
	zoneNetworkMap, err := vectorFn(zr)
	if err != nil {
		return nil, err
	}
	regionNetworkMap, err := vectorFn(rr)
	if err != nil {
		return nil, err
	}
	internetNetworkMap, err := vectorFn(ir)
	if err != nil {
		return nil, err
	}
	usageData := make(map[string]*NetworkUsageData)
	// merge folds one query result into usageData: it creates an entry the
	// first time a key is seen and stores the series via set. Replaces three
	// previously duplicated loops.
	merge := func(src map[string]*NetworkUsageVector, set func(*NetworkUsageData, []*Vector)) {
		for k, v := range src {
			entry, ok := usageData[k]
			if !ok {
				entry = &NetworkUsageData{
					PodName:   v.PodName,
					Namespace: v.Namespace,
				}
				usageData[k] = entry
			}
			set(entry, v.Values)
		}
	}
	merge(zoneNetworkMap, func(d *NetworkUsageData, vs []*Vector) { d.NetworkZoneEgress = vs })
	merge(regionNetworkMap, func(d *NetworkUsageData, vs []*Vector) { d.NetworkRegionEgress = vs })
	merge(internetNetworkMap, func(d *NetworkUsageData, vs []*Vector) { d.NetworkInternetEgress = vs })
	return usageData, nil
}
// GetNetworkCost computes the actual cost for NetworkUsageData based on the
// egress pricing supplied by the Provider. The three usage series are walked
// in parallel by index; each output point sums the costs of whichever series
// have a value at that index. The timestamp is taken from the series with
// precedence internet > region > zone.
func GetNetworkCost(usage *NetworkUsageData, cloud costAnalyzerCloud.Provider) ([]*Vector, error) {
	pricing, err := cloud.NetworkPricing()
	if err != nil {
		return nil, err
	}
	zone := usage.NetworkZoneEgress
	region := usage.NetworkRegionEgress
	internet := usage.NetworkInternetEgress
	n := max(len(zone), len(region), len(internet))
	var costs []*Vector
	for i := 0; i < n; i++ {
		var total, ts float64
		if i < len(zone) {
			total += zone[i].Value * pricing.ZoneNetworkEgressCost
			ts = zone[i].Timestamp
		}
		if i < len(region) {
			total += region[i].Value * pricing.RegionNetworkEgressCost
			ts = region[i].Timestamp
		}
		if i < len(internet) {
			total += internet[i].Value * pricing.InternetNetworkEgressCost
			ts = internet[i].Timestamp
		}
		costs = append(costs, &Vector{
			Value:     total,
			Timestamp: ts,
		})
	}
	return costs, nil
}
// getNetworkUsageVector parses an instant-query Prometheus response into a
// map keyed by "namespace,pod" of single-valued NetworkUsageVectors.
func getNetworkUsageVector(qr interface{}) (map[string]*NetworkUsageVector, error) {
	ncdmap := make(map[string]*NetworkUsageVector)
	data, ok := qr.(map[string]interface{})["data"]
	if !ok {
		e, err := wrapPrometheusError(qr)
		if err != nil {
			return nil, err
		}
		// Use an explicit verb: fmt.Errorf(e) would misinterpret any '%'
		// in the Prometheus error message as a format directive.
		return nil, fmt.Errorf("%s", e)
	}
	d, ok := data.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("Data field improperly formatted in prometheus response")
	}
	result, ok := d["result"]
	if !ok {
		return nil, fmt.Errorf("Result field not present in prometheus response")
	}
	results, ok := result.([]interface{})
	if !ok {
		return nil, fmt.Errorf("Result field improperly formatted in prometheus response")
	}
	for _, val := range results {
		metricInterface, ok := val.(map[string]interface{})["metric"]
		if !ok {
			return nil, fmt.Errorf("Metric field does not exist in data result vector")
		}
		metricMap, ok := metricInterface.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("Metric field is improperly formatted")
		}
		podName, ok := metricMap["pod_name"]
		if !ok {
			return nil, fmt.Errorf("Pod Name does not exist in data result vector")
		}
		podNameStr, ok := podName.(string)
		if !ok {
			return nil, fmt.Errorf("Pod Name field improperly formatted")
		}
		namespace, ok := metricMap["namespace"]
		if !ok {
			return nil, fmt.Errorf("Namespace field does not exist in data result vector")
		}
		namespaceStr, ok := namespace.(string)
		if !ok {
			return nil, fmt.Errorf("Namespace field improperly formatted")
		}
		dataPoint, ok := val.(map[string]interface{})["value"]
		if !ok {
			return nil, fmt.Errorf("Value field does not exist in data result vector")
		}
		value, ok := dataPoint.([]interface{})
		if !ok || len(value) != 2 {
			return nil, fmt.Errorf("Improperly formatted datapoint from Prometheus")
		}
		// Guard the element type assertions: a malformed payload must
		// surface as an error, not a panic.
		timestamp, ok := value[0].(float64)
		if !ok {
			return nil, fmt.Errorf("Improperly formatted datapoint from Prometheus")
		}
		strVal, ok := value[1].(string)
		if !ok {
			return nil, fmt.Errorf("Improperly formatted datapoint from Prometheus")
		}
		v, err := strconv.ParseFloat(strVal, 64)
		if err != nil {
			return nil, err
		}
		var vectors []*Vector
		vectors = append(vectors, &Vector{
			Timestamp: timestamp,
			Value:     v,
		})
		key := namespaceStr + "," + podNameStr
		ncdmap[key] = &NetworkUsageVector{
			Namespace: namespaceStr,
			PodName:   podNameStr,
			Values:    vectors,
		}
	}
	return ncdmap, nil
}
// getNetworkUsageVectors parses a range-query Prometheus response into a map
// keyed by "namespace,pod" of multi-valued NetworkUsageVectors. Timestamps
// are rounded to 10s resolution so parallel series align by index.
func getNetworkUsageVectors(qr interface{}) (map[string]*NetworkUsageVector, error) {
	ncdmap := make(map[string]*NetworkUsageVector)
	data, ok := qr.(map[string]interface{})["data"]
	if !ok {
		e, err := wrapPrometheusError(qr)
		if err != nil {
			return nil, err
		}
		// Use an explicit verb: fmt.Errorf(e) would misinterpret any '%'
		// in the Prometheus error message as a format directive.
		return nil, fmt.Errorf("%s", e)
	}
	d, ok := data.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("Data field improperly formatted in prometheus response")
	}
	result, ok := d["result"]
	if !ok {
		return nil, fmt.Errorf("Result field not present in prometheus response")
	}
	results, ok := result.([]interface{})
	if !ok {
		return nil, fmt.Errorf("Result field improperly formatted in prometheus response")
	}
	for _, val := range results {
		metricInterface, ok := val.(map[string]interface{})["metric"]
		if !ok {
			return nil, fmt.Errorf("Metric field does not exist in data result vector")
		}
		metricMap, ok := metricInterface.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("Metric field is improperly formatted")
		}
		podName, ok := metricMap["pod_name"]
		if !ok {
			return nil, fmt.Errorf("Pod Name does not exist in data result vector")
		}
		podNameStr, ok := podName.(string)
		if !ok {
			return nil, fmt.Errorf("Pod Name field improperly formatted")
		}
		namespace, ok := metricMap["namespace"]
		if !ok {
			return nil, fmt.Errorf("Namespace field does not exist in data result vector")
		}
		namespaceStr, ok := namespace.(string)
		if !ok {
			return nil, fmt.Errorf("Namespace field improperly formatted")
		}
		values, ok := val.(map[string]interface{})["values"].([]interface{})
		if !ok {
			return nil, fmt.Errorf("Values field is improperly formatted")
		}
		var vectors []*Vector
		for _, value := range values {
			dataPoint, ok := value.([]interface{})
			if !ok || len(dataPoint) != 2 {
				return nil, fmt.Errorf("Improperly formatted datapoint from Prometheus")
			}
			// Guard the element type assertions: a malformed payload must
			// surface as an error, not a panic.
			ts, ok := dataPoint[0].(float64)
			if !ok {
				return nil, fmt.Errorf("Improperly formatted datapoint from Prometheus")
			}
			strVal, ok := dataPoint[1].(string)
			if !ok {
				return nil, fmt.Errorf("Improperly formatted datapoint from Prometheus")
			}
			// Previously the parse error was silently discarded; report it
			// like the instant-query parser does.
			v, err := strconv.ParseFloat(strVal, 64)
			if err != nil {
				return nil, err
			}
			vectors = append(vectors, &Vector{
				Timestamp: math.Round(ts/10) * 10,
				Value:     v,
			})
		}
		key := namespaceStr + "," + podNameStr
		ncdmap[key] = &NetworkUsageVector{
			Namespace: namespaceStr,
			PodName:   podNameStr,
			Values:    vectors,
		}
	}
	return ncdmap, nil
}
// max returns the largest of x and any additional values; with no extra
// arguments it returns x itself.
func max(x int, rest ...int) int {
	best := x
	for _, candidate := range rest {
		if candidate > best {
			best = candidate
		}
	}
	return best
}
package levels
import (
"image"
"image/color"
"github.com/Nyarum/img/channel"
"github.com/Nyarum/img/utils"
)
// linearScale maps value from the range [min, max] onto [0, 1]; values
// outside the input range extrapolate linearly (e.g. value < min yields a
// negative result).
//
// Divides directly instead of multiplying by a reciprocal, which avoids the
// extra floating-point rounding step of computing 1/(max-min) first.
func linearScale(value, min, max float64) float64 {
	return (value - min) / (max - min)
}
// Equalise is intended to perform histogram equalisation
// (http://en.wikipedia.org/wiki/Histogram_equalization).
//
// NOTE(review): not implemented — it currently returns the image unchanged.
func Equalise(img image.Image) image.Image {
	return img
}
// Auto linearly stretches the given channel of the image so that its darkest
// value maps to 0 and its lightest to 1 (auto contrast for one channel).
//
// NOTE(review): lightest/darkest are written from the callback passed to
// utils.PEachColor; if the "P" prefix means parallel iteration, this is a
// data race — confirm PEachColor's semantics.
func Auto(img image.Image, ch channel.Channel) image.Image {
	var lightest, darkest float64
	lightest = 0.0
	darkest = 1.0
	utils.PEachColor(img, func(c color.Color) {
		v := ch.Get(c)
		if v > lightest {
			lightest = v
		}
		if v < darkest {
			darkest = v
		}
	})
	// Use linear stretching algorithm
	// v = (v - inLow) * ((outUp - outLow) / (inUp - inLow)) + outLow
	return utils.MapColor(img, func(c color.Color) color.Color {
		v := ch.Get(c)
		v = linearScale(v, darkest, lightest)
		return ch.Set(c, v)
	})
}
// AutoWhite finds the lightest value of the channel in the image and makes it
// white (1), rescaling all other values accordingly via SetWhite.
//
// NOTE(review): same possible PEachColor race as in Auto — confirm whether
// the iteration is parallel.
func AutoWhite(img image.Image, ch channel.Channel) image.Image {
	lightest := 0.0
	utils.PEachColor(img, func(c color.Color) {
		v := ch.Get(c)
		if v > lightest {
			lightest = v
		}
	})
	return SetWhite(img, ch, lightest)
}
// AutoBlack finds the darkest colour in the image and makes it black (0),
// adjusting the colours of every other point to achieve the same
// distribution via SetBlack.
//
// NOTE(review): same possible PEachColor race as in Auto — confirm whether
// the iteration is parallel.
func AutoBlack(img image.Image, ch channel.Channel) image.Image {
	darkest := 1.0
	utils.PEachColor(img, func(c color.Color) {
		v := ch.Get(c)
		if v < darkest {
			darkest = v
		}
	})
	return SetBlack(img, ch, darkest)
}
// SetBlack makes darkest the black point of the given channel across the
// whole image, linearly rescaling values above it.
func SetBlack(img image.Image, ch channel.Channel, darkest float64) image.Image {
	return utils.MapColor(img, SetBlackC(ch, darkest))
}
// SetBlackC returns a Composable that remaps the channel so that darkest
// becomes the new black point, stretching linearly up to 1.
func SetBlackC(ch channel.Channel, darkest float64) utils.Composable {
	return func(c color.Color) color.Color {
		return ch.Set(c, linearScale(ch.Get(c), darkest, 1))
	}
}
// SetWhite makes lightest the white point of the given channel across the
// whole image, linearly rescaling values below it.
func SetWhite(img image.Image, ch channel.Channel, lightest float64) image.Image {
	return utils.MapColor(img, SetWhiteC(ch, lightest))
}
// SetWhiteC returns a Composable that remaps the channel so that lightest
// becomes the new white point, stretching linearly from 0.
func SetWhiteC(ch channel.Channel, lightest float64) utils.Composable {
	return func(c color.Color) color.Color {
		return ch.Set(c, linearScale(ch.Get(c), 0, lightest))
	}
}
// SetCurve applies the tone curve to the given channel of every pixel in img.
func SetCurve(img image.Image, ch channel.Channel, curve *Curve) image.Image {
	return utils.MapColor(img, SetCurveC(ch, curve))
}
func SetCurveC(ch channel.Channel, curve *Curve) utils.Composable {
return func(c color.Color) color.Color {
v := ch.Get(c)
v = curve.Value(v)
return ch.Set(c, v)
}
} | levels/levels.go | 0.868004 | 0.401834 | levels.go | starcoder |
package gen
import (
"fmt"
"math/big"
"reflect"
"sort"
"strings"
"unicode"
"github.com/gocql/gocql"
"gopkg.in/inf.v0"
)
// getDecoderName builds the name of the generated unmarshal function for t
// by prefixing "decode" to the type-derived function name.
func (g *Generator) getDecoderName(t reflect.Type) string {
	return g.functionName("decode", t)
}
// decoderGen is a function that generates unmarshaler code.
// It unmarshals t from bytes stored in the variable named by `in` and stores the result into `out`.
// The gocql.TypeInfo is stored in the variable named by info.
// tags describe the field tags for the field being unmarshaled and indent specifies how much to indent the output.
// A non-nil error aborts generation for the current type.
type decoderGen func(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error
// Cached reflect.Types for the Go target types that get special-cased
// decoder sets in decodersByType.
var (
	stringType = reflect.TypeOf((*string)(nil)).Elem()
	byteSliceType = reflect.TypeOf((*[]byte)(nil)).Elem()
	bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem()
	infDecType = reflect.TypeOf((*inf.Dec)(nil)).Elem()
)
var decodersByKind = map[reflect.Kind]decoderMeta{
reflect.String: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeVarchar: varcharToStringDecoder,
gocql.TypeAscii: varcharToStringDecoder,
gocql.TypeBlob: varcharToStringDecoder,
gocql.TypeText: varcharToStringDecoder,
},
preferredType: gocql.TypeVarchar,
complete: true,
},
reflect.Int: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeInt,
},
reflect.Int8: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeTinyInt,
},
reflect.Int16: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeSmallInt,
},
reflect.Int32: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeInt,
},
reflect.Int64: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeBigInt,
},
reflect.Uint: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeInt,
},
reflect.Uint8: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeTinyInt,
},
reflect.Uint16: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeSmallInt,
},
reflect.Uint32: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeInt,
},
reflect.Uint64: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeTinyInt: intLikeToIntDecoder(gocql.TypeTinyInt),
gocql.TypeSmallInt: intLikeToIntDecoder(gocql.TypeSmallInt),
gocql.TypeInt: intLikeToIntDecoder(gocql.TypeInt),
gocql.TypeBigInt: intLikeToIntDecoder(gocql.TypeBigInt),
},
preferredType: gocql.TypeBigInt,
},
reflect.Bool: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeBoolean: booleanToBoolDecoder,
},
preferredType: gocql.TypeBoolean,
complete: true,
},
reflect.Float32: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeFloat: floatToFloat32Decoder,
},
preferredType: gocql.TypeFloat,
complete: true,
},
reflect.Float64: {
cqlTypes: map[gocql.Type]decoderGen{
gocql.TypeDouble: doubleToFloat64Decoder,
},
preferredType: gocql.TypeDouble,
complete: true,
},
}
// decoderMeta describes how values of one Go type (or kind) can be decoded
// from the various CQL column types.
type decoderMeta struct {
	// map of implemented generators per gocql type
	cqlTypes map[gocql.Type]decoderGen
	// default preferred gocql type.
	// preferred type will be put first in the switch statement.
	preferredType gocql.Type
	// complete indicates whether we have decoder for all CQL types supported by gocql for this Go type.
	// When false, genCQLTypeSwitch emits a gocql.Unmarshal fallback branch.
	complete bool
}
// decodersByType maps specific Go types (checked before kinds) to their
// decoder metadata. These entries support wider CQL type sets than the
// kind-based table (e.g. strings can be decoded from ints, UUIDs, and inet).
var decodersByType = map[reflect.Type]decoderMeta{
	stringType: {
		cqlTypes: map[gocql.Type]decoderGen{
			gocql.TypeVarchar: varcharToStringDecoder,
			gocql.TypeAscii: varcharToStringDecoder,
			gocql.TypeBlob: varcharToStringDecoder,
			gocql.TypeText: varcharToStringDecoder,
			gocql.TypeTinyInt: intLikeToStringDecoder("DecTiny"),
			gocql.TypeSmallInt: intLikeToStringDecoder("DecShort"),
			gocql.TypeInt: intLikeToStringDecoder("DecInt"),
			gocql.TypeBigInt: intLikeToStringDecoder("DecBigInt"),
			gocql.TypeVarint: varIntToStringDecoder,
			gocql.TypeUUID: uuidToStringDecoder,
			gocql.TypeTimeUUID: uuidToStringDecoder,
			gocql.TypeInet: inetToStringDecoder,
		},
		preferredType: gocql.TypeVarchar,
		complete: true,
	},
	byteSliceType: {
		// Not marked complete: other CQL types fall back to gocql.Unmarshal.
		cqlTypes: map[gocql.Type]decoderGen{
			gocql.TypeVarchar: varcharToBytesDecoder,
			gocql.TypeAscii: varcharToBytesDecoder,
			gocql.TypeBlob: varcharToBytesDecoder,
			gocql.TypeText: varcharToBytesDecoder,
			gocql.TypeUUID: uuidToBytesDecoder,
			gocql.TypeTimeUUID: uuidToBytesDecoder,
		},
		preferredType: gocql.TypeVarchar,
	},
	bigIntType: {
		cqlTypes: map[gocql.Type]decoderGen{
			gocql.TypeTinyInt: bigIntDecoder,
			gocql.TypeSmallInt: bigIntDecoder,
			gocql.TypeInt: bigIntDecoder,
			gocql.TypeBigInt: bigIntDecoder,
			gocql.TypeCounter: bigIntDecoder,
			gocql.TypeVarint: bigIntDecoder,
		},
		preferredType: gocql.TypeVarint,
		complete: true,
	},
	infDecType: {
		cqlTypes: map[gocql.Type]decoderGen{
			gocql.TypeDecimal: decimalDecoder,
		},
		preferredType: gocql.TypeDecimal,
		complete: true,
	},
}
// gocqlTypes maps each gocql.Type constant to its identifier name, used when
// emitting `gocql.TypeXxx` literals into generated code (and as the sort key
// in sortTypes).
var gocqlTypes = map[gocql.Type]string{
	gocql.TypeCustom: "TypeCustom",
	gocql.TypeAscii: "TypeAscii",
	gocql.TypeBigInt: "TypeBigInt",
	gocql.TypeBlob: "TypeBlob",
	gocql.TypeBoolean: "TypeBoolean",
	gocql.TypeCounter: "TypeCounter",
	gocql.TypeDecimal: "TypeDecimal",
	gocql.TypeDouble: "TypeDouble",
	gocql.TypeFloat: "TypeFloat",
	gocql.TypeInt: "TypeInt",
	gocql.TypeText: "TypeText",
	gocql.TypeTimestamp: "TypeTimestamp",
	gocql.TypeUUID: "TypeUUID",
	gocql.TypeVarchar: "TypeVarchar",
	gocql.TypeVarint: "TypeVarint",
	gocql.TypeTimeUUID: "TypeTimeUUID",
	gocql.TypeInet: "TypeInet",
	gocql.TypeDate: "TypeDate",
	gocql.TypeTime: "TypeTime",
	gocql.TypeSmallInt: "TypeSmallInt",
	gocql.TypeTinyInt: "TypeTinyInt",
	gocql.TypeDuration: "TypeDuration",
	gocql.TypeList: "TypeList",
	gocql.TypeMap: "TypeMap",
	gocql.TypeSet: "TypeSet",
	gocql.TypeUDT: "TypeUDT",
	gocql.TypeTuple: "TypeTuple",
}

// gocqlIntType pairs a CQL integer type with the Go type its marshal helper
// decodes to, plus the name of that helper (used when emitting marshal.DecXxx
// calls).
type gocqlIntType struct {
	goType reflect.Type
	decodeHelper string
}

// gocqlIntTypes describes the four CQL integer column widths supported by
// intLikeToIntDecoder.
var gocqlIntTypes = map[gocql.Type]gocqlIntType{
	gocql.TypeTinyInt: {
		goType: reflect.TypeOf((*int8)(nil)).Elem(),
		decodeHelper: "DecTiny",
	},
	gocql.TypeSmallInt: {
		goType: reflect.TypeOf((*int16)(nil)).Elem(),
		decodeHelper: "DecShort",
	},
	gocql.TypeInt: {
		goType: reflect.TypeOf((*int32)(nil)).Elem(),
		decodeHelper: "DecInt",
	},
	gocql.TypeBigInt: {
		goType: reflect.TypeOf((*int64)(nil)).Elem(),
		decodeHelper: "DecBigInt",
	},
}
// varcharToStringDecoder emits code that converts raw varchar/ascii/blob/text
// bytes directly into the target string-kinded type.
func varcharToStringDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	fmt.Fprintf(g.out, "%s%s = %s(%s)\n", ws, out, g.getType(t), in)
	return nil
}
// intLikeToStringDecoder returns a generator that emits code decoding a CQL
// integer column via the named marshal helper (e.g. "DecInt") and formatting
// the result base-10 into the target string type.
func intLikeToStringDecoder(decodeHelper string) decoderGen {
	return func(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
		ws := strings.Repeat(" ", indent)
		fmt.Fprintf(g.out, "%s%s = %s(strconv.FormatInt(int64(marshal.%s(%s)), 10))\n",
			ws, out, g.getType(t), decodeHelper, in)
		return nil
	}
}
// intLikeToIntDecoder returns a generator that emits code decoding the given
// CQL integer column into any Go integer kind. When the column is wider than
// the target, a runtime range check is emitted; for unsigned targets narrower
// than the column, the value is masked to the column's bit width (preserving
// the source bit pattern for negative inputs rather than sign-extending).
func intLikeToIntDecoder(gocqlType gocql.Type) decoderGen {
	return func(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
		ws := strings.Repeat(" ", indent)
		gocqlTypeMeta := gocqlIntTypes[gocqlType]
		nativeVal := g.uniqueVarName()
		switch t.Kind() {
		// Fixed-width signed targets: range-check only if the column is wider.
		case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			fmt.Fprintf(g.out, "%s%s := marshal.%s(%s)\n", ws, nativeVal, gocqlTypeMeta.decodeHelper, in)
			if gocqlTypeMeta.goType.Bits() > t.Bits() {
				fmt.Fprintf(g.out, "%sif %s < math.MinInt%d || %s > math.MaxInt%d {\n", ws, nativeVal, t.Bits(),
					nativeVal, t.Bits())
				fmt.Fprintf(g.out, "%s return fmt.Errorf(\"unmarshal int: value %%d out of range for %s\", %s)\n",
					ws, t.Name(), nativeVal)
				fmt.Fprintf(g.out, "%s}\n", ws)
			}
			fmt.Fprintf(g.out, "%s%s = %s(%s)\n",
				ws, out, g.getType(t), nativeVal)
		// Platform-sized int: the 32-bit check is performed at runtime of the
		// generated code (^uint(0) == math.MaxUint32 detects 32-bit platforms).
		case reflect.Int:
			fmt.Fprintf(g.out, "%s%s := marshal.%s(%s)\n", ws, nativeVal, gocqlTypeMeta.decodeHelper, in)
			if gocqlTypeMeta.goType.Bits() > 32 {
				fmt.Fprintf(g.out, "%sif ^uint(0) == math.MaxUint32 && (%s < math.MinInt32 || %s > math.MaxInt32) {\n",
					ws, nativeVal, nativeVal)
				fmt.Fprintf(g.out, "%s return fmt.Errorf(\"unmarshal int: value %%d out of range for %s\", %s)\n",
					ws, t.Name(), nativeVal)
				fmt.Fprintf(g.out, "%s}\n", ws)
			}
			fmt.Fprintf(g.out, "%s%s = %s(%s)\n",
				ws, out, g.getType(t), nativeVal)
		// Fixed-width unsigned targets: range-check against [0, MaxUintN] and
		// mask narrower conversions to the source column's bit width.
		case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			fmt.Fprintf(g.out, "%s%s := marshal.%s(%s)\n", ws, nativeVal, gocqlTypeMeta.decodeHelper, in)
			if gocqlTypeMeta.goType.Bits() > t.Bits() {
				fmt.Fprintf(g.out, "%sif %s < 0 || %s > math.MaxUint%d {\n", ws, nativeVal, nativeVal, t.Bits())
				fmt.Fprintf(g.out, "%s return fmt.Errorf(\"unmarshal int: value %%d out of range for %s\", %s)\n",
					ws, t.Name(), nativeVal)
				fmt.Fprintf(g.out, "%s}\n", ws)
			}
			if gocqlTypeMeta.goType.Bits() < t.Bits() {
				fmt.Fprintf(g.out, "%s%s = %s(%s) & 0x%x\n",
					ws, out, g.getType(t), nativeVal, (uint64(1)<<uint(gocqlTypeMeta.goType.Bits()))-1)
			} else {
				fmt.Fprintf(g.out, "%s%s = %s(%s)\n",
					ws, out, g.getType(t), nativeVal)
			}
		// Platform-sized uint: same 32-bit runtime check, and a 32-bit mask
		// since uint may be 32 or 64 bits wide.
		case reflect.Uint:
			fmt.Fprintf(g.out, "%s%s := marshal.%s(%s)\n", ws, nativeVal, gocqlTypeMeta.decodeHelper, in)
			if gocqlTypeMeta.goType.Bits() > 32 {
				fmt.Fprintf(g.out, "%sif ^uint(0) == math.MaxUint32 && (%s < 0 || %s > math.MaxUint32) {\n",
					ws, nativeVal, nativeVal)
				fmt.Fprintf(g.out, "%s return fmt.Errorf(\"unmarshal int: value %%d out of range for %s\", %s)\n",
					ws, t.Name(), nativeVal)
				fmt.Fprintf(g.out, "%s}\n", ws)
			}
			if gocqlTypeMeta.goType.Bits() < 32 {
				fmt.Fprintf(g.out, "%s%s = %s(%s) & 0x%x\n",
					ws, out, g.getType(t), nativeVal, (uint64(1)<<uint(gocqlTypeMeta.goType.Bits()))-1)
			} else {
				fmt.Fprintf(g.out, "%s%s = %s(%s) & 0xffffffff\n",
					ws, out, g.getType(t), nativeVal)
			}
		default:
			return fmt.Errorf("cannot unmarshal %s into %s", gocqlType, t.Name())
		}
		return nil
	}
}
// varIntToStringDecoder emits code that decodes a CQL varint into an int64
// (erroring if it does not fit) and formats it base-10 into the target
// string type.
func varIntToStringDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	value := g.uniqueVarName()
	ok := g.uniqueVarName()
	fmt.Fprintf(g.out, "%s%s, %s := marshal.VarIntToInt64("+in+")\n", ws, value, ok)
	fmt.Fprintf(g.out, "%sif !%s {\n", ws, ok)
	fmt.Fprintf(g.out, "%s return fmt.Errorf(\"unmarshal int: varint value %%v out of range for int64\", "+in+
		")\n", ws)
	fmt.Fprintf(g.out, "%s}\n", ws)
	fmt.Fprintf(g.out, "%s%s = %s(strconv.FormatInt(%s, 10))\n", ws, out, g.getType(t), value)
	return nil
}

// uuidDecoder emits code that parses raw bytes into a gocql.UUID stored in
// the generated variable named `out`, returning a descriptive error from the
// generated code on parse failure. Shared by the UUID string/bytes decoders.
func uuidDecoder(g *Generator, in, out string, indent int) {
	ws := strings.Repeat(" ", indent)
	err := g.uniqueVarName()
	fmt.Fprintf(g.out, "%svar %s error\n", ws, err)
	fmt.Fprintf(g.out, "%s%s, %s = gocql.UUIDFromBytes(%s)\n", ws, out, err, in)
	fmt.Fprintf(g.out, "%sif %s != nil {\n", ws, err)
	fmt.Fprintf(g.out, "%s return fmt.Errorf(\"unable to parse UUID: %%s\", %s)\n", ws, err)
	fmt.Fprintf(g.out, "%s}\n", ws)
}

// uuidToStringDecoder emits code that renders a UUID/TimeUUID column into the
// target string type; empty input yields the empty string.
func uuidToStringDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	uuid := g.uniqueVarName()
	fmt.Fprintf(g.out, "%sif len("+in+") == 0 {\n", ws)
	fmt.Fprintf(g.out, "%s %s = %s(\"\")\n", ws, out, g.getType(t))
	fmt.Fprintf(g.out, "%s} else {\n", ws)
	fmt.Fprintf(g.out, "%s var %s gocql.UUID\n", ws, uuid)
	uuidDecoder(g, in, uuid, indent+1)
	fmt.Fprintf(g.out, "%s %s = %s(%s.String())\n", ws, out, g.getType(t), uuid)
	fmt.Fprintf(g.out, "%s}\n", ws)
	return nil
}

// inetToStringDecoder emits code that renders an inet column into the target
// string type, preferring the 4-byte form for IPv4 addresses; empty input
// yields the empty string.
func inetToStringDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	ip := g.uniqueVarName()
	ip4 := g.uniqueVarName()
	fmt.Fprintf(g.out, "%sif len("+in+") == 0 {\n", ws)
	fmt.Fprintf(g.out, "%s %s = %s(\"\")\n", ws, out, g.getType(t))
	fmt.Fprintf(g.out, "%s} else {\n", ws)
	fmt.Fprintf(g.out, "%s %s := net.IP("+in+")\n", ws, ip)
	fmt.Fprintf(g.out, "%s if %s := %s.To4(); %s != nil {\n", ws, ip4, ip, ip4)
	fmt.Fprintf(g.out, "%s %s = %s(%s.String())\n", ws, out, g.getType(t), ip4)
	fmt.Fprintf(g.out, "%s } else {\n", ws)
	fmt.Fprintf(g.out, "%s %s = %s(%s.String())\n", ws, out, g.getType(t), ip)
	fmt.Fprintf(g.out, "%s }\n", ws)
	fmt.Fprintf(g.out, "%s}\n", ws)
	return nil
}
// varcharToBytesDecoder emits code copying raw column bytes into the target
// byte slice (reusing its backing array via append to [:0]); nil input
// resets the target to nil.
func varcharToBytesDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	fmt.Fprintf(g.out, "%sif "+in+" != nil {\n", ws)
	fmt.Fprintf(g.out, "%s %s = append((%s)[:0], "+in+"...)\n", ws, out, out)
	fmt.Fprintf(g.out, "%s} else {\n", ws)
	fmt.Fprintf(g.out, "%s %s = nil\n", ws, out)
	fmt.Fprintf(g.out, "%s}\n", ws)
	return nil
}

// uuidToBytesDecoder emits code that validates a UUID/TimeUUID column and
// stores its 16 raw bytes in the target slice; empty input yields nil.
func uuidToBytesDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	uuid := g.uniqueVarName()
	fmt.Fprintf(g.out, "%sif len("+in+") == 0 {\n", ws)
	fmt.Fprintf(g.out, "%s %s = %s(nil)\n", ws, out, g.getType(t))
	fmt.Fprintf(g.out, "%s} else {\n", ws)
	fmt.Fprintf(g.out, "%svar %s gocql.UUID\n", ws, uuid)
	uuidDecoder(g, in, uuid, indent+1)
	fmt.Fprintf(g.out, "%s %s = %s[:]\n", ws, out, uuid)
	fmt.Fprintf(g.out, "%s}\n", ws)
	return nil
}

// bigIntDecoder emits code decoding a two's-complement CQL integer/varint
// column into a *big.Int via the marshal helper.
func bigIntDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	fmt.Fprintf(g.out, "%smarshal.DecBigInt2C(%s, %s)\n", ws, in, reference(out))
	return nil
}

// booleanToBoolDecoder emits code decoding a CQL boolean into the target
// bool-kinded type.
func booleanToBoolDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	fmt.Fprintf(g.out, "%s%s = %s(marshal.DecBool(%s))\n",
		ws, out, g.getType(t), in)
	return nil
}

// decimalDecoder emits code decoding a CQL decimal column (4-byte scale
// prefix followed by a two's-complement unscaled value) into an inf.Dec.
func decimalDecoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	scale := g.uniqueVarName()
	unscaled := g.uniqueVarName()
	fmt.Fprintf(g.out, "%sif len(%s) < 4 {\n", ws, in)
	fmt.Fprintf(g.out, "%s return fmt.Errorf(\"malformed decimal value\")\n", ws)
	fmt.Fprintf(g.out, "%s}\n", ws)
	fmt.Fprintf(g.out, "%s%s := marshal.DecInt(%s[0:4])\n", ws, scale, in)
	fmt.Fprintf(g.out, "%svar %s big.Int\n", ws, unscaled)
	fmt.Fprintf(g.out, "%smarshal.DecBigInt2C(%s[4:], %s)\n", ws, in, reference(unscaled))
	fmt.Fprintf(g.out, "%s(%s).SetUnscaledBig(%s)\n", ws, out, reference(unscaled))
	fmt.Fprintf(g.out, "%s(%s).SetScale(inf.Scale(%s))\n", ws, out, scale)
	return nil
}

// floatToFloat32Decoder emits code reinterpreting a 4-byte CQL float's bits
// as a float32 for the target type.
func floatToFloat32Decoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	fmt.Fprintf(g.out, "%s%s = %s(math.Float32frombits(uint32(marshal.DecInt(%s))))\n",
		ws, out, g.getType(t), in)
	return nil
}

// doubleToFloat64Decoder emits code reinterpreting an 8-byte CQL double's
// bits as a float64 for the target type.
func doubleToFloat64Decoder(g *Generator, t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	fmt.Fprintf(g.out, "%s%s = %s(math.Float64frombits(uint64(marshal.DecBigInt(%s))))\n",
		ws, out, g.getType(t), in)
	return nil
}
// genTypeDecoder generates decoding code for the type t. If *t implements
// gocql.Unmarshaler, it emits a call to the type's own UnmarshalCQL instead
// of generating a decoder body.
func (g *Generator) genTypeDecoder(t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
	ws := strings.Repeat(" ", indent)
	unmarshalerIface := reflect.TypeOf((*gocql.Unmarshaler)(nil)).Elem()
	if reflect.PtrTo(t).Implements(unmarshalerIface) {
		fallbackErr := g.uniqueVarName()
		fmt.Fprintln(g.out, ws+"if "+fallbackErr+" := ("+out+").UnmarshalCQL("+info+", "+in+"); "+
			fallbackErr+" != nil {")
		fmt.Fprintln(g.out, ws+" return "+fallbackErr)
		fmt.Fprintln(g.out, ws+"}")
		return nil
	}
	// Return directly instead of routing through a redundant temporary.
	return g.genTypeDecoderNoCheck(t, info, in, out, tags, indent)
}
// sortTypes orders types by their gocql identifier name, but pins the
// preferred type (when present) to index 0 so the generated switch tries it
// first.
func sortTypes(types []gocql.Type, preferred gocql.Type) {
	if len(types) == 0 {
		return
	}
	start := 0
	for i := range types {
		if types[i] == preferred {
			types[0], types[i] = types[i], types[0]
			start = 1
		}
	}
	rest := types[start:]
	sort.Slice(rest, func(a, b int) bool {
		return gocqlTypes[rest[a]] < gocqlTypes[rest[b]]
	})
}
// decoderTypeKeys returns the CQL types present in m, in (unspecified)
// map-iteration order; callers order them deterministically via sortTypes.
func decoderTypeKeys(m map[gocql.Type]decoderGen) []gocql.Type {
	keys := make([]gocql.Type, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	return keys
}
func (g *Generator) genCQLTypeSwitch(t reflect.Type, info, in, out string, tags fieldTags, indent int, dm decoderMeta) error {
ws := strings.Repeat(" ", indent)
if g.conservative {
fallbackErr := g.uniqueVarName()
fmt.Fprintln(g.out, ws+" if "+fallbackErr+" := gocql.Unmarshal("+info+", "+in+", "+reference(out)+
"); "+fallbackErr+" != nil {")
fmt.Fprintln(g.out, ws+" return "+fallbackErr)
fmt.Fprintln(g.out, ws+" }")
return nil
}
fmt.Fprintln(g.out, ws+"switch "+info+".Type() {")
sortedTypes := decoderTypeKeys(dm.cqlTypes)
preferredType := dm.preferredType
if tags.cqlTypeSet {
preferredType = tags.cqlType
}
sortTypes(sortedTypes, preferredType)
for _, cqlType := range sortedTypes {
gen := dm.cqlTypes[cqlType]
fmt.Fprintln(g.out, ws+" case gocql."+gocqlTypes[cqlType]+":")
err := gen(g, t, info, in, out, tags, indent+1)
if err != nil {
return err
}
}
fmt.Fprintln(g.out, ws+" default:")
if dm.complete {
fmt.Fprintln(g.out, ws+" return fmt.Errorf(\"cannot decode %s\", "+info+".Type())")
} else {
fallbackErr := g.uniqueVarName()
fmt.Fprintln(g.out, ws+" if "+fallbackErr+" := gocql.Unmarshal("+info+", "+in+", "+
reference(out)+"); "+fallbackErr+" != nil {")
fmt.Fprintln(g.out, ws+" return "+fallbackErr)
fmt.Fprintln(g.out, ws+" }")
}
fmt.Fprintln(g.out, ws+"}")
return nil
}
// genTypeDecoderNoCheck generates decoding code for the type t.
func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, info, in, out string, tags fieldTags, indent int) error {
ws := strings.Repeat(" ", indent)
if decoderMeta, ok := decodersByType[t]; ok {
return g.genCQLTypeSwitch(t, info, in, out, tags, indent, decoderMeta)
}
if decoderMeta, ok := decodersByKind[t.Kind()]; ok {
return g.genCQLTypeSwitch(t, info, in, out, tags, indent, decoderMeta)
}
if t.Kind() == reflect.Ptr {
fmt.Fprintln(g.out, ws+"if "+in+" == nil {")
fmt.Fprintln(g.out, ws+" "+out+" = nil")
fmt.Fprintln(g.out, ws+"} else {")
fmt.Fprintln(g.out, ws+" "+out+" = new("+g.getType(t.Elem())+")")
if err := g.genTypeDecoder(t.Elem(), info, in, "*"+out, tags, indent+1); err != nil {
return err
}
fmt.Fprintln(g.out, ws+"}")
return nil
}
fallbackErr := g.uniqueVarName()
fmt.Fprintln(g.out, ws+"// fallback to gocql for "+t.String())
fmt.Fprintln(g.out, ws+"if "+fallbackErr+" := gocql.Unmarshal("+info+", "+in+", "+reference(out)+"); "+
fallbackErr+" != nil {")
fmt.Fprintln(g.out, ws+" return "+fallbackErr)
fmt.Fprintln(g.out, ws+"}")
return nil
}
func reference(out string) string {
if len(out) > 0 && out[0] == '*' {
// NOTE: In order to remove an extra reference to a pointer
return out[1:]
}
return "&" + out
}
//nolint:gocritic // parameter f is huge
func (g *Generator) genStructFieldDecoder(t reflect.Type, f reflect.StructField) error {
tags, err := parseFieldTags(f)
if err != nil {
return err
}
if tags.omit {
return nil
}
cqlName := g.getFieldName(t, f, tags)
fmt.Fprintf(g.out, " case %q:\n", cqlName)
if err := g.genTypeDecoder(f.Type, "udtElement.Type", "elementData", "out."+f.Name, tags, 3); err != nil {
return err
}
if tags.required {
fmt.Fprintf(g.out, "%sSet = true\n", f.Name)
}
return nil
}
//nolint:gocritic // parameter f is huge
func (g *Generator) genRequiredFieldSet(_ reflect.Type, f reflect.StructField) error {
tags, err := parseFieldTags(f)
if err != nil {
return err
}
if !tags.required {
return nil
}
fmt.Fprintf(g.out, "var %sSet bool\n", f.Name)
return nil
}
//nolint:gocritic // parameter f is huge
func (g *Generator) genRequiredFieldCheck(t reflect.Type, f reflect.StructField) error {
tags, err := parseFieldTags(f)
if err != nil {
return err
}
if !tags.required {
return nil
}
cqlName := g.getFieldName(t, f, tags)
g.imports["fmt"] = "fmt"
fmt.Fprintf(g.out, "if !%sSet {\n", f.Name)
fmt.Fprintf(g.out, " return fmt.Errorf(\"key '%s' is required\")\n", cqlName)
fmt.Fprintf(g.out, "}\n")
return nil
}
func mergeStructFields(fields1, fields2 []reflect.StructField) (fields []reflect.StructField) {
used := map[string]bool{}
for _, f := range fields2 {
used[f.Name] = true
fields = append(fields, f)
}
for _, f := range fields1 {
if !used[f.Name] {
fields = append(fields, f)
}
}
return
}
func getStructFields(t reflect.Type) ([]reflect.StructField, error) {
if t.Kind() != reflect.Struct {
return nil, fmt.Errorf("got %v; expected a struct", t)
}
var efields []reflect.StructField
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
tags, err := parseFieldTags(f)
if err != nil {
return nil, err
}
if !f.Anonymous || tags.name != "" {
continue
}
t1 := f.Type
if t1.Kind() == reflect.Ptr {
t1 = t1.Elem()
}
fs, err := getStructFields(t1)
if err != nil {
return nil, fmt.Errorf("error processing embedded field: %v", err)
}
efields = mergeStructFields(efields, fs)
}
var fields []reflect.StructField
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
tags, err := parseFieldTags(f)
if err != nil {
return nil, err
}
if f.Anonymous && tags.name == "" {
continue
}
c := []rune(f.Name)[0]
if unicode.IsUpper(c) {
fields = append(fields, f)
}
}
return mergeStructFields(efields, fields), nil
}
func (g *Generator) genDecoder(t reflect.Type) error {
switch t.Kind() {
case reflect.Slice, reflect.Array, reflect.Map:
return g.genSliceArrayDecoder(t)
default:
return g.genStructDecoder(t)
}
}
func (g *Generator) genSliceArrayDecoder(t reflect.Type) error {
switch t.Kind() {
case reflect.Slice, reflect.Array, reflect.Map:
default:
return fmt.Errorf("cannot generate encoder/decoder for %v, not a slice/array/map type", t)
}
fname := g.getDecoderName(t)
typ := g.getType(t)
fmt.Fprintln(g.out, "func "+fname+"(info gocql.TypeInfo, data []byte, out *"+typ+") error {")
err := g.genTypeDecoderNoCheck(t, "info", "data", "*out", fieldTags{}, 1)
if err != nil {
return err
}
fmt.Fprintln(g.out, " return nil")
fmt.Fprintln(g.out, "}")
return nil
}
func (g *Generator) genStructDecoder(t reflect.Type) error {
if t.Kind() != reflect.Struct {
return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct type", t)
}
fname := g.getDecoderName(t)
typ := g.getType(t)
fmt.Fprintln(g.out, "func "+fname+"(info gocql.TypeInfo, data []byte, out *"+typ+") error {")
fmt.Fprintln(g.out, " if data == nil {")
fmt.Fprintln(g.out, " return nil")
fmt.Fprintln(g.out, " }")
fmt.Fprintln(g.out, " udt, ok := info.(gocql.UDTTypeInfo)")
fmt.Fprintln(g.out, " if !ok {")
fmt.Fprintf(g.out, " return fmt.Errorf(\"cannot unmarshal non-udt type %%s to %%T\", info, out)")
fmt.Fprintln(g.out, " }")
// Init embedded pointer fields.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if !f.Anonymous || f.Type.Kind() != reflect.Ptr {
continue
}
fmt.Fprintln(g.out, " out."+f.Name+" = new("+g.getType(f.Type.Elem())+")")
}
fs, err := getStructFields(t)
if err != nil {
return fmt.Errorf("cannot generate decoder for %v: %v", t, err)
}
for _, f := range fs {
err := g.genRequiredFieldSet(t, f)
if err != nil {
return err
}
}
fmt.Fprintln(g.out, " for _, udtElement := range udt.Elements {")
fmt.Fprintln(g.out, " if len(data) == 0 {")
fmt.Fprintln(g.out, " return nil")
fmt.Fprintln(g.out, " }")
fmt.Fprintln(g.out, " var elementData []byte")
fmt.Fprintln(g.out, " var readBytesErr error")
fmt.Fprintln(g.out, " elementData, data, readBytesErr = marshal.ReadBytes2(data)")
fmt.Fprintln(g.out, " if readBytesErr != nil {")
fmt.Fprintf(g.out, " return fmt.Errorf(\"%%s.%%s UDT unmarshal: %%v\", udt.Name, udtElement.Name, readBytesErr)\n")
fmt.Fprintln(g.out, " }")
fmt.Fprintln(g.out, " switch udtElement.Name {")
for _, f := range fs {
if err := g.genStructFieldDecoder(t, f); err != nil {
return err
}
}
if g.disallowUnknownFields {
fmt.Fprintln(g.out, " default:")
fmt.Fprintf(g.out, " return fmt.Errorf(\"unknown field: %%s\", udtElement.Name)")
}
fmt.Fprintln(g.out, " }")
fmt.Fprintln(g.out, " }")
for _, f := range fs {
err := g.genRequiredFieldCheck(t, f)
if err != nil {
return err
}
}
fmt.Fprintln(g.out, " return nil")
fmt.Fprintln(g.out, "}")
return nil
}
//nolint:dupl // this function is very similar to genStructMarshaler but does the opposite
func (g *Generator) genStructUnmarshaler(t reflect.Type) error {
switch t.Kind() {
case reflect.Slice, reflect.Array, reflect.Map, reflect.Struct:
default:
return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct/slice/array/map type", t)
}
fname := g.getDecoderName(t)
typ := g.getType(t)
fmt.Fprintln(g.out, "// UnmarshalCQL implements custom unmarshaler as gocql.UnmarshalCQL")
fmt.Fprintln(g.out, "func (v *"+typ+") UnmarshalCQL(info gocql.TypeInfo, data []byte) error {")
fmt.Fprintln(g.out, " return "+fname+"(info, data, v)")
fmt.Fprintln(g.out, "}")
return nil
} | gen/decoder.go | 0.500732 | 0.507812 | decoder.go | starcoder |
package tensor
import "github.com/pkg/errors"
func (e StdEng) Argmax(t Tensor, axis int) (retVal Tensor, err error) {
switch tt := t.(type) {
case DenseTensor:
return e.argmaxDenseTensor(tt, axis)
default:
return nil, errors.Errorf(typeNYI, "StdEng.Argmax", t)
}
}
func (e StdEng) argmaxDenseTensor(t DenseTensor, axis int) (retVal *Dense, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, opFail, "Argmax")
}
if axis >= len(t.Shape()) {
return nil, errors.Errorf(dimMismatch, len(t.Shape()), axis)
}
dataA := t.hdr()
typ := t.rtype()
// SPECIAL CASE: FLAT ARGMAX
if axis == AllAxes {
var index int
if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() {
if index = e.E.ArgmaxFlatMasked(typ, dataA, mt.Mask()); index == -1 {
return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype())
}
} else {
if index = e.E.ArgmaxFlat(typ, dataA); index == -1 {
return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype())
}
}
return New(FromScalar(index)), nil
}
// ARGMAX ALONG AXIS
var indices []int
axes := make([]int, len(t.Shape()))
for i := range t.Shape() {
switch {
case i < axis:
axes[i] = i
case i == axis:
axes[len(axes)-1] = i
case i > axis:
axes[i-1] = i
}
}
// be a good citizen - borrow and return, since we're only using this AP to figure out the moves
newAP, _, err := t.Info().T(axes...)
if _, ok := err.(NoOpError); !ok && err != nil {
return
} else if ok {
newAP = t.Info().Clone()
}
defer ReturnAP(newAP)
it := IteratorFromDense(t)
iteratorLoadAP(it, newAP)
lastSize := it.Shape()[len(it.Shape())-1]
newShape := it.Shape().Clone()
newShape = newShape[:len(newShape)-1]
defer ReturnInts(newShape)
if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() {
mask := mt.Mask()
if indices, err = e.E.ArgmaxIterMasked(typ, dataA, mask, it, lastSize); err != nil {
return
}
} else {
if indices, err = e.E.ArgmaxIter(typ, dataA, it, lastSize); err != nil {
return
}
}
return New(WithShape(newShape...), WithBacking(indices)), nil
}
func (e StdEng) Argmin(t Tensor, axis int) (retVal Tensor, err error) {
switch tt := t.(type) {
case DenseTensor:
return e.argminDenseTensor(tt, axis)
default:
return nil, errors.Errorf(typeNYI, "StdEng.Argmin", t)
}
}
func (e StdEng) argminDenseTensor(t DenseTensor, axis int) (retVal *Dense, err error) {
if err = unaryCheck(t, ordTypes); err != nil {
return nil, errors.Wrapf(err, opFail, "Argmin")
}
if axis >= len(t.Shape()) {
return nil, errors.Errorf(dimMismatch, len(t.Shape()), axis)
}
dataA := t.hdr()
typ := t.rtype()
// SPECIAL CASE: FLAT ARGMAX
if axis == AllAxes {
var index int
if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() {
if index = e.E.ArgminFlatMasked(typ, dataA, mt.Mask()); index == -1 {
return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype())
}
} else {
if index = e.E.ArgminFlat(typ, dataA); index == -1 {
return nil, errors.Errorf("t is not supported - %T of %v", t, t.Dtype())
}
}
return New(FromScalar(index)), nil
}
// ARGMAX ALONG AXIS
var indices []int
axes := make([]int, len(t.Shape()))
for i := range t.Shape() {
switch {
case i < axis:
axes[i] = i
case i == axis:
axes[len(axes)-1] = i
case i > axis:
axes[i-1] = i
}
}
// be a good citizen - borrow and return, since we're only using this AP to figure out the moves
newAP, _, err := t.Info().T(axes...)
if _, ok := err.(NoOpError); !ok && err != nil {
return
} else if ok {
newAP = t.Info().Clone()
}
defer ReturnAP(newAP)
it := IteratorFromDense(t)
iteratorLoadAP(it, newAP)
lastSize := it.Shape()[len(it.Shape())-1]
newShape := it.Shape().Clone()
newShape = newShape[:len(newShape)-1]
defer ReturnInts(newShape)
if mt, ok := t.(MaskedTensor); ok && mt.IsMasked() {
mask := mt.Mask()
if indices, err = e.E.ArgminIterMasked(typ, dataA, mask, it, lastSize); err != nil {
return
}
} else {
if indices, err = e.E.ArgminIter(typ, dataA, it, lastSize); err != nil {
return
}
}
return New(WithShape(newShape...), WithBacking(indices)), nil
} | vendor/gorgonia.org/tensor/defaultengine_argmethods.go | 0.603815 | 0.459986 | defaultengine_argmethods.go | starcoder |
package pqt
const (
// FunctionBehaviourVolatile indicates that the function value can change even within a single table scan,
// so no optimizations can be made.
// Relatively few database functions are volatile in this sense; some examples are random(), currval(), timeofday().
// But note that any function that has side-effects must be classified volatile, even if its result is quite predictable,
// to prevent calls from being optimized away; an example is setval().
FunctionBehaviourVolatile FunctionBehaviour = iota
// FunctionBehaviourImmutable indicates that the function cannot modify the database and always returns the same result when given the same argument values;
// that is, it does not do database lookups or otherwise use information not directly present in its argument list.
// If this option is given, any call of the function with all-constant arguments can be immediately replaced with the function value.
FunctionBehaviourImmutable
// FunctionBehaviourStable indicates that the function cannot modify the database,
// and that within a single table scan it will consistently return the same result for the same argument values,
// but that its result could change across SQL statements.
// This is the appropriate selection for functions whose results depend on database lookups,
// parameter variables (such as the current time zone), etc.
// (It is inappropriate for AFTER triggers that wish to query rows modified by the current command.)
// Also note that the current_timestamp family of functions qualify as stable, since their values do not change within a transaction.
FunctionBehaviourStable
)
type FunctionBehaviour int
// Function ...
type Function struct {
Name string
BuiltIn bool
Type Type
Body string
Behaviour FunctionBehaviour
Args []*FunctionArg
}
// FunctionArg ...
type FunctionArg struct {
Name string
Type Type
}
// FunctionNow ...
func FunctionNow() *Function {
return &Function{
Name: "now",
BuiltIn: true,
Type: TypeTimestampTZ(),
}
} | function.go | 0.577972 | 0.575051 | function.go | starcoder |
package cios
import (
"fmt"
"math"
"reflect"
"sort"
)
type NullableMultipleSeriesImageStream []NullableMultipleSeriesImage
func NullableMultipleSeriesImageStreamOf(arg ...NullableMultipleSeriesImage) NullableMultipleSeriesImageStream {
return arg
}
func NullableMultipleSeriesImageStreamFrom(arg []NullableMultipleSeriesImage) NullableMultipleSeriesImageStream {
return arg
}
func CreateNullableMultipleSeriesImageStream(arg ...NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
tmp := NullableMultipleSeriesImageStreamOf(arg...)
return &tmp
}
func GenerateNullableMultipleSeriesImageStream(arg []NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
tmp := NullableMultipleSeriesImageStreamFrom(arg)
return &tmp
}
func (self *NullableMultipleSeriesImageStream) Add(arg NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
return self.AddAll(arg)
}
func (self *NullableMultipleSeriesImageStream) AddAll(arg ...NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
*self = append(*self, arg...)
return self
}
func (self *NullableMultipleSeriesImageStream) AddSafe(arg *NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
if arg != nil {
self.Add(*arg)
}
return self
}
func (self *NullableMultipleSeriesImageStream) Aggregate(fn func(NullableMultipleSeriesImage, NullableMultipleSeriesImage) NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
result := NullableMultipleSeriesImageStreamOf()
self.ForEach(func(v NullableMultipleSeriesImage, i int) {
if i == 0 {
result.Add(fn(NullableMultipleSeriesImage{}, v))
} else {
result.Add(fn(result[i-1], v))
}
})
*self = result
return self
}
func (self *NullableMultipleSeriesImageStream) AllMatch(fn func(NullableMultipleSeriesImage, int) bool) bool {
for i, v := range *self {
if !fn(v, i) {
return false
}
}
return true
}
func (self *NullableMultipleSeriesImageStream) AnyMatch(fn func(NullableMultipleSeriesImage, int) bool) bool {
for i, v := range *self {
if fn(v, i) {
return true
}
}
return false
}
func (self *NullableMultipleSeriesImageStream) Clone() *NullableMultipleSeriesImageStream {
temp := make([]NullableMultipleSeriesImage, self.Len())
copy(temp, *self)
return (*NullableMultipleSeriesImageStream)(&temp)
}
func (self *NullableMultipleSeriesImageStream) Copy() *NullableMultipleSeriesImageStream {
return self.Clone()
}
func (self *NullableMultipleSeriesImageStream) Concat(arg []NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
return self.AddAll(arg...)
}
func (self *NullableMultipleSeriesImageStream) Contains(arg NullableMultipleSeriesImage) bool {
return self.FindIndex(func(_arg NullableMultipleSeriesImage, index int) bool { return reflect.DeepEqual(_arg, arg) }) != -1
}
func (self *NullableMultipleSeriesImageStream) Clean() *NullableMultipleSeriesImageStream {
*self = NullableMultipleSeriesImageStreamOf()
return self
}
func (self *NullableMultipleSeriesImageStream) Delete(index int) *NullableMultipleSeriesImageStream {
return self.DeleteRange(index, index)
}
func (self *NullableMultipleSeriesImageStream) DeleteRange(startIndex, endIndex int) *NullableMultipleSeriesImageStream {
*self = append((*self)[:startIndex], (*self)[endIndex+1:]...)
return self
}
func (self *NullableMultipleSeriesImageStream) Distinct() *NullableMultipleSeriesImageStream {
caches := map[string]bool{}
result := NullableMultipleSeriesImageStreamOf()
for _, v := range *self {
key := fmt.Sprintf("%+v", v)
if f, ok := caches[key]; ok {
if !f {
result = append(result, v)
}
} else if caches[key] = true; !f {
result = append(result, v)
}
}
*self = result
return self
}
func (self *NullableMultipleSeriesImageStream) Each(fn func(NullableMultipleSeriesImage)) *NullableMultipleSeriesImageStream {
for _, v := range *self {
fn(v)
}
return self
}
func (self *NullableMultipleSeriesImageStream) EachRight(fn func(NullableMultipleSeriesImage)) *NullableMultipleSeriesImageStream {
for i := self.Len() - 1; i >= 0; i-- {
fn(*self.Get(i))
}
return self
}
func (self *NullableMultipleSeriesImageStream) Equals(arr []NullableMultipleSeriesImage) bool {
if (*self == nil) != (arr == nil) || len(*self) != len(arr) {
return false
}
for i := range *self {
if !reflect.DeepEqual((*self)[i], arr[i]) {
return false
}
}
return true
}
func (self *NullableMultipleSeriesImageStream) Filter(fn func(NullableMultipleSeriesImage, int) bool) *NullableMultipleSeriesImageStream {
result := NullableMultipleSeriesImageStreamOf()
for i, v := range *self {
if fn(v, i) {
result.Add(v)
}
}
*self = result
return self
}
func (self *NullableMultipleSeriesImageStream) FilterSlim(fn func(NullableMultipleSeriesImage, int) bool) *NullableMultipleSeriesImageStream {
result := NullableMultipleSeriesImageStreamOf()
caches := map[string]bool{}
for i, v := range *self {
key := fmt.Sprintf("%+v", v)
if f, ok := caches[key]; ok {
if f {
result.Add(v)
}
} else if caches[key] = fn(v, i); caches[key] {
result.Add(v)
}
}
*self = result
return self
}
func (self *NullableMultipleSeriesImageStream) Find(fn func(NullableMultipleSeriesImage, int) bool) *NullableMultipleSeriesImage {
if i := self.FindIndex(fn); -1 != i {
tmp := (*self)[i]
return &tmp
}
return nil
}
func (self *NullableMultipleSeriesImageStream) FindOr(fn func(NullableMultipleSeriesImage, int) bool, or NullableMultipleSeriesImage) NullableMultipleSeriesImage {
if v := self.Find(fn); v != nil {
return *v
}
return or
}
func (self *NullableMultipleSeriesImageStream) FindIndex(fn func(NullableMultipleSeriesImage, int) bool) int {
if self == nil {
return -1
}
for i, v := range *self {
if fn(v, i) {
return i
}
}
return -1
}
func (self *NullableMultipleSeriesImageStream) First() *NullableMultipleSeriesImage {
return self.Get(0)
}
func (self *NullableMultipleSeriesImageStream) FirstOr(arg NullableMultipleSeriesImage) NullableMultipleSeriesImage {
if v := self.Get(0); v != nil {
return *v
}
return arg
}
func (self *NullableMultipleSeriesImageStream) ForEach(fn func(NullableMultipleSeriesImage, int)) *NullableMultipleSeriesImageStream {
for i, v := range *self {
fn(v, i)
}
return self
}
func (self *NullableMultipleSeriesImageStream) ForEachRight(fn func(NullableMultipleSeriesImage, int)) *NullableMultipleSeriesImageStream {
for i := self.Len() - 1; i >= 0; i-- {
fn(*self.Get(i), i)
}
return self
}
func (self *NullableMultipleSeriesImageStream) GroupBy(fn func(NullableMultipleSeriesImage, int) string) map[string][]NullableMultipleSeriesImage {
m := map[string][]NullableMultipleSeriesImage{}
for i, v := range self.Val() {
key := fn(v, i)
m[key] = append(m[key], v)
}
return m
}
func (self *NullableMultipleSeriesImageStream) GroupByValues(fn func(NullableMultipleSeriesImage, int) string) [][]NullableMultipleSeriesImage {
var tmp [][]NullableMultipleSeriesImage
for _, v := range self.GroupBy(fn) {
tmp = append(tmp, v)
}
return tmp
}
func (self *NullableMultipleSeriesImageStream) IndexOf(arg NullableMultipleSeriesImage) int {
for index, _arg := range *self {
if reflect.DeepEqual(_arg, arg) {
return index
}
}
return -1
}
func (self *NullableMultipleSeriesImageStream) IsEmpty() bool {
return self.Len() == 0
}
func (self *NullableMultipleSeriesImageStream) IsPreset() bool {
return !self.IsEmpty()
}
func (self *NullableMultipleSeriesImageStream) Last() *NullableMultipleSeriesImage {
return self.Get(self.Len() - 1)
}
func (self *NullableMultipleSeriesImageStream) LastOr(arg NullableMultipleSeriesImage) NullableMultipleSeriesImage {
if v := self.Last(); v != nil {
return *v
}
return arg
}
func (self *NullableMultipleSeriesImageStream) Len() int {
if self == nil {
return 0
}
return len(*self)
}
func (self *NullableMultipleSeriesImageStream) Limit(limit int) *NullableMultipleSeriesImageStream {
self.Slice(0, limit)
return self
}
func (self *NullableMultipleSeriesImageStream) Map(fn func(NullableMultipleSeriesImage, int) interface{}) interface{} {
_array := make([]interface{}, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Map2Int(fn func(NullableMultipleSeriesImage, int) int) []int {
_array := make([]int, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Map2Int32(fn func(NullableMultipleSeriesImage, int) int32) []int32 {
_array := make([]int32, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Map2Int64(fn func(NullableMultipleSeriesImage, int) int64) []int64 {
_array := make([]int64, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Map2Float32(fn func(NullableMultipleSeriesImage, int) float32) []float32 {
_array := make([]float32, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Map2Float64(fn func(NullableMultipleSeriesImage, int) float64) []float64 {
_array := make([]float64, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Map2Bool(fn func(NullableMultipleSeriesImage, int) bool) []bool {
_array := make([]bool, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Map2Bytes(fn func(NullableMultipleSeriesImage, int) []byte) [][]byte {
_array := make([][]byte, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Map2String(fn func(NullableMultipleSeriesImage, int) string) []string {
_array := make([]string, 0, len(*self))
for i, v := range *self {
_array = append(_array, fn(v, i))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Max(fn func(NullableMultipleSeriesImage, int) float64) *NullableMultipleSeriesImage {
f := self.Get(0)
if f == nil {
return nil
}
m := fn(*f, 0)
index := 0
for i := 1; i < self.Len(); i++ {
v := fn(*self.Get(i), i)
m = math.Max(m, v)
if m == v {
index = i
}
}
return self.Get(index)
}
func (self *NullableMultipleSeriesImageStream) Min(fn func(NullableMultipleSeriesImage, int) float64) *NullableMultipleSeriesImage {
f := self.Get(0)
if f == nil {
return nil
}
m := fn(*f, 0)
index := 0
for i := 1; i < self.Len(); i++ {
v := fn(*self.Get(i), i)
m = math.Min(m, v)
if m == v {
index = i
}
}
return self.Get(index)
}
func (self *NullableMultipleSeriesImageStream) NoneMatch(fn func(NullableMultipleSeriesImage, int) bool) bool {
return !self.AnyMatch(fn)
}
func (self *NullableMultipleSeriesImageStream) Get(index int) *NullableMultipleSeriesImage {
if self.Len() > index && index >= 0 {
tmp := (*self)[index]
return &tmp
}
return nil
}
func (self *NullableMultipleSeriesImageStream) GetOr(index int, arg NullableMultipleSeriesImage) NullableMultipleSeriesImage {
if v := self.Get(index); v != nil {
return *v
}
return arg
}
func (self *NullableMultipleSeriesImageStream) Peek(fn func(*NullableMultipleSeriesImage, int)) *NullableMultipleSeriesImageStream {
for i, v := range *self {
fn(&v, i)
self.Set(i, v)
}
return self
}
func (self *NullableMultipleSeriesImageStream) Reduce(fn func(NullableMultipleSeriesImage, NullableMultipleSeriesImage, int) NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
return self.ReduceInit(fn, NullableMultipleSeriesImage{})
}
func (self *NullableMultipleSeriesImageStream) ReduceInit(fn func(NullableMultipleSeriesImage, NullableMultipleSeriesImage, int) NullableMultipleSeriesImage, initialValue NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
result := NullableMultipleSeriesImageStreamOf()
self.ForEach(func(v NullableMultipleSeriesImage, i int) {
if i == 0 {
result.Add(fn(initialValue, v, i))
} else {
result.Add(fn(result[i-1], v, i))
}
})
*self = result
return self
}
func (self *NullableMultipleSeriesImageStream) ReduceInterface(fn func(interface{}, NullableMultipleSeriesImage, int) interface{}) []interface{} {
result := []interface{}{}
for i, v := range *self {
if i == 0 {
result = append(result, fn(NullableMultipleSeriesImage{}, v, i))
} else {
result = append(result, fn(result[i-1], v, i))
}
}
return result
}
func (self *NullableMultipleSeriesImageStream) ReduceString(fn func(string, NullableMultipleSeriesImage, int) string) []string {
result := []string{}
for i, v := range *self {
if i == 0 {
result = append(result, fn("", v, i))
} else {
result = append(result, fn(result[i-1], v, i))
}
}
return result
}
func (self *NullableMultipleSeriesImageStream) ReduceInt(fn func(int, NullableMultipleSeriesImage, int) int) []int {
result := []int{}
for i, v := range *self {
if i == 0 {
result = append(result, fn(0, v, i))
} else {
result = append(result, fn(result[i-1], v, i))
}
}
return result
}
func (self *NullableMultipleSeriesImageStream) ReduceInt32(fn func(int32, NullableMultipleSeriesImage, int) int32) []int32 {
result := []int32{}
for i, v := range *self {
if i == 0 {
result = append(result, fn(0, v, i))
} else {
result = append(result, fn(result[i-1], v, i))
}
}
return result
}
func (self *NullableMultipleSeriesImageStream) ReduceInt64(fn func(int64, NullableMultipleSeriesImage, int) int64) []int64 {
result := []int64{}
for i, v := range *self {
if i == 0 {
result = append(result, fn(0, v, i))
} else {
result = append(result, fn(result[i-1], v, i))
}
}
return result
}
func (self *NullableMultipleSeriesImageStream) ReduceFloat32(fn func(float32, NullableMultipleSeriesImage, int) float32) []float32 {
result := []float32{}
for i, v := range *self {
if i == 0 {
result = append(result, fn(0.0, v, i))
} else {
result = append(result, fn(result[i-1], v, i))
}
}
return result
}
func (self *NullableMultipleSeriesImageStream) ReduceFloat64(fn func(float64, NullableMultipleSeriesImage, int) float64) []float64 {
result := []float64{}
for i, v := range *self {
if i == 0 {
result = append(result, fn(0.0, v, i))
} else {
result = append(result, fn(result[i-1], v, i))
}
}
return result
}
func (self *NullableMultipleSeriesImageStream) ReduceBool(fn func(bool, NullableMultipleSeriesImage, int) bool) []bool {
result := []bool{}
for i, v := range *self {
if i == 0 {
result = append(result, fn(false, v, i))
} else {
result = append(result, fn(result[i-1], v, i))
}
}
return result
}
func (self *NullableMultipleSeriesImageStream) Reverse() *NullableMultipleSeriesImageStream {
for i, j := 0, self.Len()-1; i < j; i, j = i+1, j-1 {
(*self)[i], (*self)[j] = (*self)[j], (*self)[i]
}
return self
}
func (self *NullableMultipleSeriesImageStream) Replace(fn func(NullableMultipleSeriesImage, int) NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
return self.ForEach(func(v NullableMultipleSeriesImage, i int) { self.Set(i, fn(v, i)) })
}
func (self *NullableMultipleSeriesImageStream) Select(fn func(NullableMultipleSeriesImage) interface{}) interface{} {
_array := make([]interface{}, 0, len(*self))
for _, v := range *self {
_array = append(_array, fn(v))
}
return _array
}
func (self *NullableMultipleSeriesImageStream) Set(index int, val NullableMultipleSeriesImage) *NullableMultipleSeriesImageStream {
if len(*self) > index && index >= 0 {
(*self)[index] = val
}
return self
}
func (self *NullableMultipleSeriesImageStream) Skip(skip int) *NullableMultipleSeriesImageStream {
return self.Slice(skip, self.Len()-skip)
}
func (self *NullableMultipleSeriesImageStream) SkippingEach(fn func(NullableMultipleSeriesImage, int) int) *NullableMultipleSeriesImageStream {
for i := 0; i < self.Len(); i++ {
skip := fn(*self.Get(i), i)
i += skip
}
return self
}
func (self *NullableMultipleSeriesImageStream) Slice(startIndex, n int) *NullableMultipleSeriesImageStream {
if last := startIndex + n; len(*self)-1 < startIndex || last < 0 || startIndex < 0 {
*self = []NullableMultipleSeriesImage{}
} else if len(*self) < last {
*self = (*self)[startIndex:len(*self)]
} else {
*self = (*self)[startIndex:last]
}
return self
}
func (self *NullableMultipleSeriesImageStream) Sort(fn func(i, j int) bool) *NullableMultipleSeriesImageStream {
sort.SliceStable(*self, fn)
return self
}
func (self *NullableMultipleSeriesImageStream) Tail() *NullableMultipleSeriesImage {
return self.Last()
}
func (self *NullableMultipleSeriesImageStream) TailOr(arg NullableMultipleSeriesImage) NullableMultipleSeriesImage {
return self.LastOr(arg)
}
func (self *NullableMultipleSeriesImageStream) ToList() []NullableMultipleSeriesImage {
return self.Val()
}
func (self *NullableMultipleSeriesImageStream) Unique() *NullableMultipleSeriesImageStream {
return self.Distinct()
}
func (self *NullableMultipleSeriesImageStream) Val() []NullableMultipleSeriesImage {
if self == nil {
return []NullableMultipleSeriesImage{}
}
return *self.Copy()
}
func (self *NullableMultipleSeriesImageStream) While(fn func(NullableMultipleSeriesImage, int) bool) *NullableMultipleSeriesImageStream {
for i, v := range self.Val() {
if !fn(v, i) {
break
}
}
return self
}
func (self *NullableMultipleSeriesImageStream) Where(fn func(NullableMultipleSeriesImage) bool) *NullableMultipleSeriesImageStream {
result := NullableMultipleSeriesImageStreamOf()
for _, v := range *self {
if fn(v) {
result.Add(v)
}
}
*self = result
return self
}
// WhereSlim filters the stream in place like Where, but memoises fn's verdict
// per element, keyed by the element's fmt "%+v" rendering, so fn runs at most
// once for each distinct-looking value.
// NOTE(review): two different values with identical string renderings share a
// cached verdict — confirm that is acceptable for this element type.
func (self *NullableMultipleSeriesImageStream) WhereSlim(fn func(NullableMultipleSeriesImage) bool) *NullableMultipleSeriesImageStream {
	result := NullableMultipleSeriesImageStreamOf()
	caches := map[string]bool{}
	for _, v := range *self {
		key := fmt.Sprintf("%+v", v)
		if f, ok := caches[key]; ok {
			if f {
				result.Add(v)
			}
		} else if caches[key] = fn(v); caches[key] {
			result.Add(v)
		}
	}
	*self = result
	return self
}
package gafit
import (
"math"
"sort"
"gonum.org/v1/gonum/mat"
)
// OrthogonalMatchingPursuit greedily selects, one at a time, the feature most
// correlated with the current residual (up to maxFeatures features), tracking
// the cost of each incremental model. The lowest-cost selection seen along
// the way is refitted against the unnormalized matrix and returned.
func OrthogonalMatchingPursuit(dataset Dataset, cost CostFunction, maxFeatures int) OptimizeResult {
	X := dataset.X
	y := dataset.Y
	// Work with unit-length columns so projection magnitudes are comparable.
	Xnorm := mat.DenseCopyOf(X)
	normalize(Xnorm)
	_, cols := Xnorm.Dims()
	residuals := mat.VecDenseCopyOf(y)
	proj := mat.NewVecDense(cols, nil)
	selected := []int{}
	names := []string{}
	bestScore := math.Inf(1)
	bestSelection := make([]int, 0, cols)
	// Never select more features than there are columns.
	end := maxFeatures
	if cols < end {
		end = cols
	}
	for i := 0; i < end; i++ {
		// Correlate every normalized column with the residual and take the
		// strongest (largest absolute projection).
		proj.MulVec(Xnorm.T(), residuals)
		best := argAbsMax(proj)
		selected = append(selected, best)
		names = append(names, dataset.ColNames[best])
		sub := subMatrix(X, selected)
		tempCoeff := Fit(sub, y)
		score := cost(sub, y, tempCoeff, names)
		if score < bestScore {
			bestScore = score
			// Snapshot the current selection as the best seen so far.
			bestSelection = bestSelection[:0]
			for _, v := range selected {
				bestSelection = append(bestSelection, v)
			}
		}
		// Residual becomes the part of y the current model fails to explain.
		pred := Pred(sub, tempCoeff)
		residuals.SubVec(y, pred)
	}
	// Perform a fit with the unnormalized matrix
	sort.Ints(bestSelection)
	sub := subMatrix(X, bestSelection)
	coeff := Fit(sub, y)
	// Convert selection to an include-bit string
	return OptimizeResult{
		Score:   bestScore,
		Coeff:   coeff,
		Include: selection2bitstring(bestSelection, cols),
	}
}
// normalize scales every column of X to unit Euclidean length, in place.
// Columns whose norm is at or below a tiny tolerance are left untouched to
// avoid dividing by (near) zero.
func normalize(X *mat.Dense) {
	const tol = 1e-16
	rows, cols := X.Dims()
	for col := 0; col < cols; col++ {
		v := X.ColView(col)
		norm := math.Sqrt(mat.Dot(v, v))
		if norm <= tol {
			continue
		}
		for row := 0; row < rows; row++ {
			X.Set(row, col, X.At(row, col)/norm)
		}
	}
}
// allConstant reports whether every entry of v equals the first entry to
// within tol. An empty vector is trivially constant (the loop never runs,
// so AtVec is never called).
func allConstant(v mat.Vector, tol float64) bool {
	for i := 0; i < v.Len(); i++ {
		if math.Abs(v.AtVec(i)-v.AtVec(0)) > tol {
			return false
		}
	}
	return true
}
// standardize centres each non-constant column of X on zero mean, then scales
// all columns to unit length via normalize, in place. Constant columns are
// left uncentred — presumably so an intercept column of ones survives; TODO
// confirm against callers.
func standardize(X *mat.Dense) {
	rows, cols := X.Dims()
	// Subtract mean
	for i := 0; i < cols; i++ {
		if !allConstant(X.ColView(i), 1e-6) {
			mean := mat.Sum(X.ColView(i)) / float64(rows)
			for j := 0; j < rows; j++ {
				X.Set(j, i, X.At(j, i)-mean)
			}
		}
	}
	normalize(X)
}
// argAbsMax returns the index of the element of v with the largest absolute
// value; ties keep the earliest index.
func argAbsMax(v *mat.VecDense) int {
	best := 0
	bestVal := math.Abs(v.AtVec(0))
	for i := 1; i < v.Len(); i++ {
		if av := math.Abs(v.AtVec(i)); av > bestVal {
			bestVal = av
			best = i
		}
	}
	return best
}
// selection2bitstring converts a list of selected column indices into an
// include bit string of the given length: 1 marks a selected index, 0 an
// unselected one.
func selection2bitstring(selected []int, length int) []int {
	bits := make([]int, length)
	for _, idx := range selected {
		bits[idx] = 1
	}
	return bits
}
// subtractMean subtracts the mean of v from every element, in place, leaving
// the vector with zero mean.
func subtractMean(v *mat.VecDense) {
	mean := mat.Sum(v) / float64(v.Len())
	for i := 0; i < v.Len(); i++ {
		v.SetVec(i, v.AtVec(i)-mean)
	}
}
package chart
const (
	// DefaultChartHeight is the default chart height.
	DefaultChartHeight = 400
	// DefaultChartWidth is the default chart width.
	DefaultChartWidth = 1024
	// DefaultStrokeWidth is the default chart stroke width.
	DefaultStrokeWidth = 0.0
	// DefaultDotWidth is the default chart dot width.
	DefaultDotWidth = 0.0
	// DefaultSeriesLineWidth is the default line width.
	DefaultSeriesLineWidth = 1.0
	// DefaultAxisLineWidth is the line width of the axis lines.
	DefaultAxisLineWidth = 1.0
	// DefaultDPI is the default dots per inch for the chart.
	DefaultDPI = 92.0
	// DefaultMinimumFontSize is the default minimum font size.
	DefaultMinimumFontSize = 8.0
	// DefaultFontSize is the default font size.
	DefaultFontSize = 10.0
	// DefaultTitleFontSize is the default title font size.
	DefaultTitleFontSize = 18.0
	// DefaultAnnotationDeltaWidth is the width of the left triangle out of annotations.
	DefaultAnnotationDeltaWidth = 10
	// DefaultAnnotationFontSize is the font size of annotations.
	DefaultAnnotationFontSize = 10.0
	// DefaultAxisFontSize is the font size of the axis labels.
	DefaultAxisFontSize = 10.0
	// DefaultTitleTop is the default distance from the top of the chart to put the title.
	DefaultTitleTop = 10
	// DefaultBackgroundStrokeWidth is the default stroke on the chart background.
	DefaultBackgroundStrokeWidth = 0.0
	// DefaultCanvasStrokeWidth is the default stroke on the chart canvas.
	DefaultCanvasStrokeWidth = 0.0
	// DefaultLineSpacing is the default vertical distance between lines of text.
	DefaultLineSpacing = 5
	// DefaultYAxisMargin is the default distance from the right of the canvas to the y axis labels.
	DefaultYAxisMargin = 10
	// DefaultXAxisMargin is the default distance from bottom of the canvas to the x axis labels.
	DefaultXAxisMargin = 10
	// DefaultVerticalTickHeight is half the margin.
	DefaultVerticalTickHeight = DefaultXAxisMargin >> 1
	// DefaultHorizontalTickWidth is half the margin.
	DefaultHorizontalTickWidth = DefaultYAxisMargin >> 1
	// DefaultTickCount is the default number of ticks to show
	DefaultTickCount = 10
	// DefaultTickCountSanityCheck is a hard limit on number of ticks to prevent infinite loops.
	DefaultTickCountSanityCheck = 1 << 10 //1024
	// DefaultMinimumTickHorizontalSpacing is the minimum distance between horizontal ticks.
	DefaultMinimumTickHorizontalSpacing = 0
	// DefaultMinimumTickVerticalSpacing is the minimum distance between vertical ticks.
	DefaultMinimumTickVerticalSpacing = 20
	// DefaultDateFormat is the default date format (Go reference-time layout).
	DefaultDateFormat = "2006-01-02"
	// DefaultDateHourFormat is the date format for hour timestamp formats.
	DefaultDateHourFormat = "01-02 3PM"
	// DefaultDateMinuteFormat is the date format for minute range timestamp formats.
	DefaultDateMinuteFormat = "01-02 3:04PM"
	// DefaultFloatFormat is the default float format.
	DefaultFloatFormat = "%.2f"
	// DefaultPercentValueFormat is the default percent format.
	DefaultPercentValueFormat = "%0.2f%%"
	// DefaultBarSpacing is the default pixel spacing between bars.
	DefaultBarSpacing = 100
	// DefaultBarWidth is the default pixel width of bars in a bar chart.
	DefaultBarWidth = 50
)

var (
	// DashArrayDots is a dash array that represents '....' style stroke dashes.
	DashArrayDots = []int{1, 1}
	// DashArrayDashesSmall is a dash array that represents '- - -' style stroke dashes.
	DashArrayDashesSmall = []int{3, 3}
	// DashArrayDashesMedium is a dash array that represents '-- -- --' style stroke dashes.
	DashArrayDashesMedium = []int{5, 5}
	// DashArrayDashesLarge is a dash array that represents '----- ----- -----' style stroke dashes.
	DashArrayDashesLarge = []int{10, 10}
)

var (
	// DefaultAnnotationPadding is the padding around an annotation.
	DefaultAnnotationPadding = Box{Top: 5, Left: 5, Right: 5, Bottom: 5}
	// DefaultBackgroundPadding is the default canvas padding config.
	DefaultBackgroundPadding = Box{Top: 5, Left: 5, Right: 5, Bottom: 5}
)

const (
	// ContentTypePNG is the png mime type.
	ContentTypePNG = "image/png"
	// ContentTypeSVG is the svg mime type.
	ContentTypeSVG = "image/svg+xml"
)
package forGraphBLASGo
// ScalarApply sets t to the (lazily computed) result of applying unary op to
// scalar f, merged with t's current value through accum: t = accum(t, op(f)).
// It returns UninitializedObject when t or f (or their references) are nil.
func ScalarApply[Dt, Df any](t *Scalar[Dt], accum BinaryOp[Dt, Dt, Dt], op UnaryOp[Dt, Df], f *Scalar[Df], _ Descriptor) error {
	if t == nil || t.ref == nil || f == nil || f.ref == nil {
		return UninitializedObject
	}
	t.ref = newScalarReference[Dt](newComputedScalar[Dt](t.ref, accum, newScalarApply(op, f.ref)))
	return nil
}

// ScalarApplyBinaryOp1st is ScalarApply with a binary op whose first operand
// is the fixed value: t = accum(t, op(value, f)).
func ScalarApplyBinaryOp1st[Dt, Ds, Df any](t *Scalar[Dt], accum BinaryOp[Dt, Dt, Dt], op BinaryOp[Dt, Ds, Df], value Ds, f *Scalar[Df], _ Descriptor) error {
	if t == nil || t.ref == nil || f == nil || f.ref == nil {
		return UninitializedObject
	}
	t.ref = newScalarReference[Dt](newComputedScalar[Dt](t.ref, accum, newScalarApplyBinaryOp1st(op, value, f.ref)))
	return nil
}

// ScalarApplyBinaryOp2nd is ScalarApply with a binary op whose second operand
// is the fixed value: t = accum(t, op(f, value)).
func ScalarApplyBinaryOp2nd[Dt, Df, Ds any](t *Scalar[Dt], accum BinaryOp[Dt, Dt, Dt], op BinaryOp[Dt, Df, Ds], f *Scalar[Df], value Ds, _ Descriptor) error {
	if t == nil || t.ref == nil || f == nil || f.ref == nil {
		return UninitializedObject
	}
	t.ref = newScalarReference[Dt](newComputedScalar[Dt](t.ref, accum, newScalarApplyBinaryOp2nd(op, f.ref, value)))
	return nil
}

// ScalarApplyBinary applies a binary op to two scalars:
// t = accum(t, op(f1, f2)).
func ScalarApplyBinary[Dt, Df1, Df2 any](t *Scalar[Dt], accum BinaryOp[Dt, Dt, Dt], op BinaryOp[Dt, Df1, Df2], f1 *Scalar[Df1], f2 *Scalar[Df2], _ Descriptor) error {
	if t == nil || t.ref == nil || f1 == nil || f1.ref == nil || f2 == nil || f2.ref == nil {
		return UninitializedObject
	}
	t.ref = newScalarReference[Dt](newComputedScalar[Dt](t.ref, accum, newScalarApplyBinary(op, f1.ref, f2.ref)))
	return nil
}
// VectorApply computes w<mask> = accum(w, op(u)) element-wise; u must have
// w's size. Size and initialization errors from the operands are returned
// unchanged.
func VectorApply[Dw, Du any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op UnaryOp[Dw, Du], u *Vector[Du], desc Descriptor) error {
	size, err := w.Size()
	if err != nil {
		return err
	}
	if err = u.expectSize(size); err != nil {
		return err
	}
	maskAsStructure, err := vectorMask(mask, size)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		size, w.ref, maskAsStructure, accum,
		newVectorApply(op, u.ref),
		desc,
	), -1)
	return nil
}

// MatrixApply computes C<mask> = accum(C, op(A)) element-wise. A may be
// transposed via the Inp0 setting of desc, in which case its transposed
// dimensions must match C's.
func MatrixApply[DC, DA any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op UnaryOp[DC, DA], A *Matrix[DA], desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	isTran, err := A.expectSizeTran(nrows, ncols, desc, Inp0)
	if err != nil {
		return err
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols, C.ref,
		maskAsStructure, accum,
		newMatrixApply(op, maybeTran(A.ref, isTran)),
		desc,
	), -1)
	return nil
}
// VectorApplyBinaryOp1st computes w<mask> = accum(w, op(value, u)), binding
// the plain value as the first operand of op.
func VectorApplyBinaryOp1st[Dw, Ds, Du any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op BinaryOp[Dw, Ds, Du], value Ds, u *Vector[Du], desc Descriptor) error {
	size, err := w.Size()
	if err != nil {
		return err
	}
	if err = u.expectSize(size); err != nil {
		return err
	}
	maskAsStructure, err := vectorMask(mask, size)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		size, w.ref,
		maskAsStructure, accum,
		newVectorApplyBinaryOp1st(op, value, u.ref),
		desc,
	), -1)
	return nil
}

// VectorApplyBinaryOp1stScalar is VectorApplyBinaryOp1st with the bound first
// operand supplied as a Scalar; it returns UninitializedObject when value (or
// its reference) is nil.
func VectorApplyBinaryOp1stScalar[Dw, Ds, Du any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op BinaryOp[Dw, Ds, Du], value *Scalar[Ds], u *Vector[Du], desc Descriptor) error {
	size, err := w.Size()
	if err != nil {
		return err
	}
	if value == nil || value.ref == nil {
		return UninitializedObject
	}
	if err = u.expectSize(size); err != nil {
		return err
	}
	maskAsStructure, err := vectorMask(mask, size)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		size, w.ref,
		maskAsStructure, accum,
		newVectorApplyBinaryOp1stScalar(op, value.ref, u.ref),
		desc,
	), -1)
	return nil
}

// VectorApplyBinaryOp2nd computes w<mask> = accum(w, op(u, value)), binding
// the plain value as the second operand of op.
func VectorApplyBinaryOp2nd[Dw, Du, Ds any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op BinaryOp[Dw, Du, Ds], u *Vector[Du], value Ds, desc Descriptor) error {
	size, err := w.Size()
	if err != nil {
		return err
	}
	if err = u.expectSize(size); err != nil {
		return err
	}
	maskAsStructure, err := vectorMask(mask, size)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		size, w.ref,
		maskAsStructure, accum,
		newVectorApplyBinaryOp2nd(op, u.ref, value),
		desc,
	), -1)
	return nil
}

// VectorApplyBinaryOp2ndScalar is VectorApplyBinaryOp2nd with the bound
// second operand supplied as a Scalar; it returns UninitializedObject when
// value (or its reference) is nil.
func VectorApplyBinaryOp2ndScalar[Dw, Du, Ds any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op BinaryOp[Dw, Du, Ds], u *Vector[Du], value *Scalar[Ds], desc Descriptor) error {
	size, err := w.Size()
	if err != nil {
		return err
	}
	if err = u.expectSize(size); err != nil {
		return err
	}
	if value == nil || value.ref == nil {
		return UninitializedObject
	}
	maskAsStructure, err := vectorMask(mask, size)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		size, w.ref,
		maskAsStructure, accum,
		newVectorApplyBinaryOp2ndScalar(op, u.ref, value.ref),
		desc,
	), -1)
	return nil
}
// MatrixApplyBinaryOp1st computes C<mask> = accum(C, op(value, A)), binding
// the plain value as the first operand; A may be transposed via desc (Inp0).
func MatrixApplyBinaryOp1st[DC, Ds, DA any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op BinaryOp[DC, Ds, DA], value Ds, A *Matrix[DA], desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	isTran, err := A.expectSizeTran(nrows, ncols, desc, Inp0)
	if err != nil {
		return err
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols,
		C.ref, maskAsStructure, accum,
		newMatrixApplyBinaryOp1st(op, value, maybeTran(A.ref, isTran)),
		desc,
	), -1)
	return nil
}

// MatrixApplyBinaryOp1stScalar is MatrixApplyBinaryOp1st with the bound first
// operand supplied as a Scalar; it returns UninitializedObject when value (or
// its reference) is nil.
func MatrixApplyBinaryOp1stScalar[DC, Ds, DA any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op BinaryOp[DC, Ds, DA], value *Scalar[Ds], A *Matrix[DA], desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	if value == nil || value.ref == nil {
		return UninitializedObject
	}
	isTran, err := A.expectSizeTran(nrows, ncols, desc, Inp0)
	if err != nil {
		return err
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols,
		C.ref, maskAsStructure, accum,
		newMatrixApplyBinaryOp1stScalar(op, value.ref, maybeTran(A.ref, isTran)),
		desc,
	), -1)
	return nil
}

// MatrixApplyBinaryOp2nd computes C<mask> = accum(C, op(A, value)), binding
// the plain value as the second operand; A may be transposed via desc (Inp0).
func MatrixApplyBinaryOp2nd[DC, DA, Ds any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op BinaryOp[DC, DA, Ds], A *Matrix[DA], value Ds, desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	isTran, err := A.expectSizeTran(nrows, ncols, desc, Inp0)
	if err != nil {
		return err
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols,
		C.ref, maskAsStructure, accum,
		newMatrixApplyBinaryOp2nd(op, maybeTran(A.ref, isTran), value),
		desc,
	), -1)
	return nil
}

// MatrixApplyBinaryOp2ndScalar is MatrixApplyBinaryOp2nd with the bound
// second operand supplied as a Scalar; it returns UninitializedObject when
// value (or its reference) is nil.
func MatrixApplyBinaryOp2ndScalar[DC, DA, Ds any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op BinaryOp[DC, DA, Ds], A *Matrix[DA], value *Scalar[Ds], desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	isTran, err := A.expectSizeTran(nrows, ncols, desc, Inp0)
	if err != nil {
		return err
	}
	if value == nil || value.ref == nil {
		return UninitializedObject
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols,
		C.ref, maskAsStructure, accum,
		newMatrixApplyBinaryOp2ndScalar(op, maybeTran(A.ref, isTran), value.ref),
		desc,
	), -1)
	return nil
}
// VectorApplyIndexOp computes w<mask> = accum(w, op(u, value)) where op is an
// index unary op (it also receives each element's index).
func VectorApplyIndexOp[Dw, Du, Ds any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op IndexUnaryOp[Dw, Du, Ds], u *Vector[Du], value Ds, desc Descriptor) error {
	size, err := w.Size()
	if err != nil {
		return err
	}
	if err = u.expectSize(size); err != nil {
		return err
	}
	maskAsStructure, err := vectorMask(mask, size)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		size, w.ref,
		maskAsStructure, accum,
		newVectorApplyIndexOp(op, u.ref, value),
		desc,
	), -1)
	return nil
}

// VectorApplyIndexOpScalar is VectorApplyIndexOp with the auxiliary value
// supplied as a Scalar; it returns UninitializedObject when value (or its
// reference) is nil.
func VectorApplyIndexOpScalar[Dw, Du, Ds any](w *Vector[Dw], mask *Vector[bool], accum BinaryOp[Dw, Dw, Dw], op IndexUnaryOp[Dw, Du, Ds], u *Vector[Du], value *Scalar[Ds], desc Descriptor) error {
	size, err := w.Size()
	if err != nil {
		return err
	}
	if err = u.expectSize(size); err != nil {
		return err
	}
	if value == nil || value.ref == nil {
		return UninitializedObject
	}
	maskAsStructure, err := vectorMask(mask, size)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[Dw](newComputedVector[Dw](
		size, w.ref,
		maskAsStructure, accum,
		newVectorApplyIndexOpScalar(op, u.ref, value.ref),
		desc,
	), -1)
	return nil
}

// MatrixApplyIndexOp computes C<mask> = accum(C, op(A, value)) where op is an
// index unary op; A may be transposed via desc (Inp0).
func MatrixApplyIndexOp[DC, DA, Ds any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op IndexUnaryOp[DC, DA, Ds], A *Matrix[DA], value Ds, desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	isTran, err := A.expectSizeTran(nrows, ncols, desc, Inp0)
	if err != nil {
		return err
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols,
		C.ref, maskAsStructure, accum,
		newMatrixApplyIndexOp(op, maybeTran(A.ref, isTran), value),
		desc,
	), -1)
	return nil
}

// MatrixApplyIndexOpScalar is MatrixApplyIndexOp with the auxiliary value
// supplied as a Scalar; it returns UninitializedObject when value (or its
// reference) is nil.
func MatrixApplyIndexOpScalar[DC, DA, Ds any](C *Matrix[DC], mask *Matrix[bool], accum BinaryOp[DC, DC, DC], op IndexUnaryOp[DC, DA, Ds], A *Matrix[DA], value *Scalar[Ds], desc Descriptor) error {
	nrows, ncols, err := C.Size()
	if err != nil {
		return err
	}
	isTran, err := A.expectSizeTran(nrows, ncols, desc, Inp0)
	if err != nil {
		return err
	}
	if value == nil || value.ref == nil {
		return UninitializedObject
	}
	maskAsStructure, err := matrixMask(mask, nrows, ncols)
	if err != nil {
		return err
	}
	C.ref = newMatrixReference[DC](newComputedMatrix[DC](
		nrows, ncols,
		C.ref, maskAsStructure, accum,
		newMatrixApplyIndexOpScalar(op, maybeTran(A.ref, isTran), value.ref),
		desc,
	), -1)
	return nil
}
package ui
import (
"math"
"github.com/thinkofdeath/steven/render"
)
// Text is a drawable that draws a string.
type Text struct {
	baseElement
	x, y           float64 // offset from the attachment point
	r, g, b, a     int     // text colour and alpha
	value          string  // the string being drawn
	Width          float64 // unscaled width of value, from render.SizeOfString
	scaleX, scaleY float64 // scale factors applied on top of the region scale
	rotation       float64 // rotation in radians (fed to math.Cos/Sin in Draw)
}
// NewText creates a new Text drawable with the given value, offset and RGB
// colour. Alpha defaults to 255, both scales to 1, and the width is computed
// from the value.
func NewText(val string, x, y float64, r, g, b int) *Text {
	return &Text{
		value: val,
		Width: render.SizeOfString(val),
		r: r, g: g, b: b, a: 255,
		x: x, y: y,
		scaleX: 1, scaleY: 1,
		baseElement: baseElement{
			visible: true,
			isNew:   true,
		},
	}
}
// Attach changes the location where this is attached to and returns the text
// for chaining.
func (t *Text) Attach(vAttach, hAttach AttachPoint) *Text {
	t.vAttach, t.hAttach = vAttach, hAttach
	return t
}
// Value returns the string currently drawn.
func (t *Text) Value() string { return t.value }

// X returns the x offset from the attachment point.
func (t *Text) X() float64 { return t.x }

// SetX sets the x offset, marking the element dirty if it changed.
func (t *Text) SetX(x float64) {
	if t.x != x {
		t.x = x
		t.dirty = true
	}
}

// Y returns the y offset from the attachment point.
func (t *Text) Y() float64 { return t.y }

// SetY sets the y offset, marking the element dirty if it changed.
func (t *Text) SetY(y float64) {
	if t.y != y {
		t.y = y
		t.dirty = true
	}
}

// R returns the red colour component.
func (t *Text) R() int { return t.r }

// SetR sets the red colour component, marking the element dirty if it changed.
func (t *Text) SetR(r int) {
	if t.r != r {
		t.r = r
		t.dirty = true
	}
}

// G returns the green colour component.
func (t *Text) G() int { return t.g }

// SetG sets the green colour component, marking the element dirty if it changed.
func (t *Text) SetG(g int) {
	if t.g != g {
		t.g = g
		t.dirty = true
	}
}

// B returns the blue colour component.
func (t *Text) B() int { return t.b }

// SetB sets the blue colour component, marking the element dirty if it changed.
func (t *Text) SetB(b int) {
	if t.b != b {
		t.b = b
		t.dirty = true
	}
}

// A returns the alpha component.
func (t *Text) A() int { return t.a }

// SetA sets the alpha component, clamped to [0, 255], marking the element
// dirty if it changed.
func (t *Text) SetA(a int) {
	if a > 255 {
		a = 255
	}
	if a < 0 {
		a = 0
	}
	if t.a != a {
		t.a = a
		t.dirty = true
	}
}

// ScaleX returns the horizontal scale factor.
func (t *Text) ScaleX() float64 { return t.scaleX }

// SetScaleX sets the horizontal scale factor, marking the element dirty if it changed.
func (t *Text) SetScaleX(s float64) {
	if t.scaleX != s {
		t.scaleX = s
		t.dirty = true
	}
}

// ScaleY returns the vertical scale factor.
func (t *Text) ScaleY() float64 { return t.scaleY }

// SetScaleY sets the vertical scale factor, marking the element dirty if it changed.
func (t *Text) SetScaleY(s float64) {
	if t.scaleY != s {
		t.scaleY = s
		t.dirty = true
	}
}

// Rotation returns the rotation in radians.
func (t *Text) Rotation() float64 { return t.rotation }

// SetRotation sets the rotation in radians, marking the element dirty if it changed.
func (t *Text) SetRotation(r float64) {
	if t.rotation != r {
		t.rotation = r
		t.dirty = true
	}
}
// Update updates the string drawn by this drawable, recomputing its width
// and marking it dirty. It is a no-op when the value is unchanged.
func (t *Text) Update(val string) {
	if t.value == val {
		return
	}
	t.value = val
	t.Width = render.SizeOfString(val)
	t.dirty = true
}
// Draw draws this to the target region. The vertex data is rebuilt only when
// the element is new or dirty (or a global redraw is forced); otherwise the
// cached bytes are re-submitted.
func (t *Text) Draw(r Region, delta float64) {
	if t.isNew || t.isDirty() || forceDirty {
		t.isNew = false
		// Scale from the element's logical size to the target region.
		cw, ch := t.Size()
		sx, sy := r.W/cw, r.H/ch
		var text render.UIText
		if t.rotation == 0 {
			text = render.NewUITextScaled(t.value, r.X, r.Y, sx*t.scaleX, sy*t.scaleY, t.r, t.g, t.b)
		} else {
			c := math.Cos(t.rotation)
			s := math.Sin(t.rotation)
			// Shift the draw position by the rotated half-extents so the text
			// stays positioned within the region — NOTE(review): assumes
			// render.NewUITextRotated rotates about the text origin; confirm.
			tmpx := r.W / 2
			tmpy := r.H / 2
			w := math.Abs(tmpx*c - tmpy*s)
			h := math.Abs(tmpy*c + tmpx*s)
			text = render.NewUITextRotated(t.value, r.X+w-(r.W/2), r.Y+h-(r.H/2), sx*t.scaleX, sy*t.scaleY, t.rotation, t.r, t.g, t.b)
		}
		text.Alpha(t.a)
		for _, txt := range text.Elements {
			txt.Layer = t.Layer()
		}
		t.data = text.Bytes()
	}
	render.UIAddBytes(t.data)
}
// Offset returns the offset of this drawable from the attachment
// point.
func (t *Text) Offset() (float64, float64) {
	return t.x, t.y
}

// Size returns the size of this drawable: the text width plus 2 units of
// padding by a fixed line height of 18, both multiplied by the scale factors.
func (t *Text) Size() (float64, float64) {
	w, h := (t.Width + 2), 18.0
	return w * t.scaleX, h * t.scaleY
}

// Remove removes the text element from the draw list.
func (t *Text) Remove() {
	Remove(t)
}
package intersection_of_two_arrays
import "sort"
/*
给定两个数组,编写一个函数来计算它们的交集。
示例 1:
输入: nums1 = [1,2,2,1], nums2 = [2,2]
输出: [2]
示例 2:
输入: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
输出: [9,4]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/intersection-of-two-arrays
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
// intersection1 computes the set intersection of two arrays using a hash set.
//
// With arrays of length m and n: time O(m+n); space O(min(m, n)) for the set
// plus the result slice.
func intersection1(nums1 []int, nums2 []int) []int {
	if len(nums1) == 0 || len(nums2) == 0 {
		return nil
	}
	// Keep nums1 as the shorter array so the set and result stay small.
	if len(nums1) > len(nums2) {
		nums1, nums2 = nums2, nums1
	}
	seen := make(map[int]struct{}, len(nums1))
	for _, n := range nums1 {
		seen[n] = struct{}{}
	}
	out := make([]int, 0, len(nums1))
	for _, n := range nums2 {
		if _, found := seen[n]; found {
			out = append(out, n)
			delete(seen, n) // report each common value only once
		}
	}
	return out
}
// intersection2 computes the set intersection with minimal extra space.
//
// Both arrays are sorted: nums1 so it can be binary-searched, nums2 so
// duplicates can be skipped by comparing each element with the previous
// candidate. Results are written back into nums2, so no new slice is
// allocated. Time O(n log n + m log m); extra space O(1).
func intersection2(nums1 []int, nums2 []int) []int {
	if len(nums1) == 0 || len(nums2) == 0 {
		return nil
	}
	sort.Ints(nums1)
	sort.Ints(nums2)
	write := 0
	prev := nums2[0]
	for i, v := range nums2 {
		if i > 0 && v == prev {
			continue // duplicate of the previous candidate
		}
		if j := sort.SearchInts(nums1, v); j >= len(nums1) || nums1[j] != v {
			continue // not present in nums1
		}
		nums2[write] = v
		write++
		prev = v
	}
	return nums2[:write]
}
// has reports whether val occurs in the sorted slice arr (binary search).
func has(arr []int, val int) bool {
	if i := sort.SearchInts(arr, val); i < len(arr) {
		return arr[i] == val
	}
	return false
}
// isSameAsLast reports whether current equals prev for any position after the
// first; index 0 is never treated as a duplicate. (Parameter names fixed: the
// call site passes the current element first, but the old signature named the
// first parameter "last" — harmless only because equality is symmetric.)
func isSameAsLast(current, prev, index int) bool {
	return index > 0 && current == prev
}
// intersection computes the set intersection in a single merge-style pass
// over the two sorted arrays, advancing whichever pointer holds the smaller
// value. When the values match, the value is written into nums1 (reused as
// the result buffer) and both pointers skip past all copies of it.
func intersection(nums1 []int, nums2 []int) []int {
	sort.Ints(nums1)
	sort.Ints(nums2)
	i, j, out := 0, 0, 0
	for i < len(nums1) && j < len(nums2) {
		a, b := nums1[i], nums2[j]
		if a < b {
			i++
			continue
		}
		if a > b {
			j++
			continue
		}
		nums1[out] = a
		out++
		for i < len(nums1) && nums1[i] == a {
			i++
		}
		for j < len(nums2) && nums2[j] == a {
			j++
		}
	}
	return nums1[:out]
}
package kneedle
import (
"math"
"github.com/pkg/errors"
)
// gaussian evaluates a Gaussian curve at x, where height is the peak value
// (sometimes called 'a'), center is the location of the peak (sometimes
// called 'b'), and width is the standard deviation, i.e. ~68% of the data is
// contained within center ± width.
func gaussian(x float64, height float64, center float64, width float64) float64 {
	d := x - center
	return height * math.Exp(-d * d / (2.0 * width * width))
}

// gaussianSmooth2d smooths the data using a gaussian kernel, where w is the
// size of the sliding window (number of indices sampled either side of each
// point, expected >= 1). Every data point must have the same, non-zero,
// number of dimensions.
func gaussianSmooth2d(data [][]float64, w int) (smoothed [][]float64, err error) {
	dataSize := len(data)
	if dataSize == 0 {
		err = errors.New("Cannot smooth empty data.")
		return
	}
	nDims := len(data[0])
	if nDims == 0 {
		// Fix: previously the error was assigned here but the function kept
		// running and computed on zero-width points; return immediately.
		err = errors.New("Cannot smooth a data point with no values. Uniformly populate every entry in your data with 1 or more dimensions.")
		return
	}
	smoothed = make([][]float64, dataSize)
	for i := 0; i < dataSize; i++ {
		// Clamp the window [i-w, i+w] to the valid index range.
		startIdx := 0
		if i-w > 0 {
			startIdx = i - w
		}
		endIdx := i + w
		if endIdx > dataSize-1 {
			endIdx = dataSize - 1
		}
		sumWeights := make([]float64, nDims)
		var sumIndexWeight float64
		for j := startIdx; j <= endIdx; j++ {
			// Weight each neighbour by its distance from i, normalised by w.
			indexScore := math.Abs(float64(j-i)) / float64(w)
			indexWeight := gaussian(indexScore, 1, 0, 1)
			for n := 0; n < nDims; n++ {
				sumWeights[n] += indexWeight * data[j][n]
			}
			sumIndexWeight += indexWeight
		}
		// The smoothed point is the weighted average over the window.
		point := make([]float64, nDims)
		for n := 0; n < nDims; n++ {
			point[n] = sumWeights[n] / sumIndexWeight
		}
		smoothed[i] = point
	}
	return
}
// minmaxNormalise performs min-max normalisation on n-dimensional data (as
// long as the dimensionality is uniform and non-zero), rescaling each
// dimension independently to [0, 1]. A dimension whose values are all equal
// has zero range and normalises to NaN (0/0), as before.
// For reference, see the Wikipedia article about feature re-scaling:
// https://en.wikipedia.org/wiki/Feature_scaling#Rescaling
func minmaxNormalise(data [][]float64) (outputNormalised [][]float64, err error) {
	dataSize := len(data)
	if dataSize == 0 {
		err = errors.New("Cannot smooth empty data.")
		return
	}
	nDims := len(data[0])
	if nDims == 0 {
		err = errors.New("Cannot smooth a data point with no values. " +
			"Uniformly populate every entry in your data with 1 or more dimensions.")
		return
	}
	// 1) get min and max for each dimension of the data.
	// Fix: seed with ±Inf. The previous max seed, math.SmallestNonzeroFloat64,
	// is a tiny POSITIVE number, so any dimension containing only negative
	// values got a wrong maximum (≈0) and a wrong normalisation.
	minEachDim := make([]float64, nDims)
	maxEachDim := make([]float64, nDims)
	for n := 0; n < nDims; n++ {
		minEachDim[n] = math.Inf(1)
		maxEachDim[n] = math.Inf(-1)
	}
	for _, coords := range data {
		for n := 0; n < nDims; n++ {
			v := coords[n]
			if v < minEachDim[n] {
				minEachDim[n] = v
			}
			if v > maxEachDim[n] {
				maxEachDim[n] = v
			}
		}
	}
	// 2) normalise the data using the min and max of each dimension.
	rangeEachDim := make([]float64, nDims)
	for n := 0; n < nDims; n++ {
		rangeEachDim[n] = maxEachDim[n] - minEachDim[n]
	}
	outputNormalised = make([][]float64, dataSize)
	for i := 0; i < dataSize; i++ {
		point := make([]float64, nDims)
		for n := 0; n < nDims; n++ {
			// normalising step
			point[n] = (data[i][n] - minEachDim[n]) / rangeEachDim[n]
		}
		outputNormalised[i] = point
	}
	return
}
// Package index provides constants and functions for reading
// a spreadsheet that lists other spreadsheets: each one a budget
// covering a particular date range. The app uses these functions
// to look up the budget spreadsheets and determine which one(s)
// a transaction should be added to.
package index
import (
"fmt"
"github.com/araddon/dateparse"
"google.golang.org/api/sheets/v4"
"log"
"time"
)
// Range is a constant that indicates where in the index spreadsheet to find the
// list of budget spreadsheets.
const Range = "Index!A2:E"

// Record holds one index entry, representing one budget spreadsheet.
// It identifies the sheet ID, its start and end dates (inclusive), and the
// last date and time that sheet was updated.
type Record struct {
	Index         int       // 1-based row number within the index sheet
	Filename      string    // display name of the budget spreadsheet
	Start         time.Time // first date covered (inclusive)
	End           time.Time // last date covered (inclusive)
	LastUpdated   time.Time // zero value when the sheet has never been updated
	SpreadsheetID string    // Google Sheets ID of the budget spreadsheet
	IndexID       string    // Google Sheets ID of the index it was read from
}
// getDate accepts a Time and returns a new Time object containing only the
// date -- i.e., with the time elements zeroed out.
func getDate(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.Local)
}
// getActiveRecordTester returns a closure that reports whether a record is
// active for the given date range: an active record overlaps [start, end]
// AND was not last updated after its own end date (a record updated after
// its period ended is treated as finalized).
func getActiveRecordTester(start time.Time, end time.Time) func(Record) bool {
	start = getDate(start)
	// Push end to 23:59:59 of its day so the entire end day is included.
	end = getDate(end).Add(24*time.Hour - 1*time.Second) // Not leap-second proof
	return func(record Record) bool {
		a := getDate(record.Start)
		// b is midnight AFTER the record's last day, making End inclusive.
		b := getDate(record.End).Add(24 * time.Hour)
		// This record ends before the time interval starts
		if start.After(b) {
			return false
		}
		// This record starts after the time interval ends
		if end.Before(a) {
			return false
		}
		// This record has never been updated
		if record.LastUpdated.IsZero() {
			return true
		}
		// This record was last updated after its period ended
		x := getDate(record.LastUpdated)
		if x.After(b) {
			return false
		}
		return true
	}
}
// Filter accepts an array of Records and a test function; it returns, in
// their original order, those records for which test returned true.
func Filter(history []Record, test func(Record) bool) (ret []Record) {
	for _, rec := range history {
		if !test(rec) {
			continue
		}
		ret = append(ret, rec)
	}
	return ret
}
// FilterActiveRecords accepts an array of Records and a start and end date.
// It returns the records whose coverage overlaps [start, end] and that are
// still considered active (see getActiveRecordTester).
func FilterActiveRecords(history []Record, start time.Time, end time.Time) []Record {
	test := getActiveRecordTester(start, end)
	return Filter(history, test)
}
// FromGoogleSheet reads all the index Records from Range of the specified
// spreadsheet via the Sheets service and returns them as an array. An empty
// index is logged but is not an error (nil slice, nil error); a row that
// fails to parse aborts the read, returning the records parsed so far along
// with the error. Each record's IndexID is set to the source spreadsheet ID.
func FromGoogleSheet(srv *sheets.Service, spreadsheetID string) ([]Record, error) {
	var history []Record
	// Open the spreadsheet
	response, err := srv.Spreadsheets.Values.Get(spreadsheetID, Range).Do()
	if err != nil {
		log.Printf("Unable to retrieve index from sheet ID %s: %v", spreadsheetID, err)
		return history, err
	}
	// It's technically OK for there to be no index data, but we go
	// ahead and log it
	if len(response.Values) == 0 {
		log.Printf("No index data found in sheet ID %s", spreadsheetID)
		return history, nil
	}
	// OK, parse it. Rows are numbered from 1 for the Record.Index field.
	for i, row := range response.Values {
		record, err := FromSpreadsheetRow(i+1, row)
		if err != nil {
			return history, err
		}
		record.IndexID = spreadsheetID
		history = append(history, record)
	}
	return history, nil
}
// FromSpreadsheetRow reads a row of spreadsheet data to initialize
// a Record. It return the Record and/or an error. If an optional field
// fails to parse, it will return both an error and a record.
func FromSpreadsheetRow(index int, row []interface{}) (Record, error) {
record := Record{}
var err error
// Strings are easy
record.Index = index
record.Filename = fmt.Sprintf("%s", row[0])
record.SpreadsheetID = fmt.Sprintf("%s", row[4])
// Dates need parsed, and error-checked
record.Start, err = dateparse.ParseLocal(fmt.Sprintf("%s", row[1]))
if err != nil {
log.Printf("Failed to parse date \"%s\": %v", row[1], err)
return record, err
}
record.End, err = dateparse.ParseLocal(fmt.Sprintf("%s", row[2]))
if err != nil {
log.Printf("Failed to parse date \"%s\": %v", row[2], err)
return record, err
}
// LastUpdated is optional
if fmt.Sprintf("%s", row[3]) != "" {
record.LastUpdated, err = dateparse.ParseLocal(fmt.Sprintf("%s", row[3]))
if err != nil {
log.Printf("Failed to parse date \"%s\": %v", row[3], err)
return record, err
}
}
return record, nil
} | index/index.go | 0.745584 | 0.699126 | index.go | starcoder |
package types
import "github.com/attic-labs/noms/go/hash"
// SkipValueCallback is invoked for each logical value encountered during a
// walk; returning true skips traversal of that value's children.
type SkipValueCallback func(v Value) bool

// valueRec pairs a value on the walk stack with whether the callback should
// be invoked on it. cb is false for internal prolly-tree sub-trees: they are
// valid values but not logical values in the graph, so the walk descends
// through them without reporting them.
type valueRec struct {
	v  Value
	cb bool
}

// maxRefCount bounds how many refs are fetched per batch (~16MB of data).
const maxRefCount = 1 << 12
// WalkValues recursively walks over all types.Values reachable from target
// and calls cb on them. Blob prolly-trees are not descended into, and
// internal collection sub-trees are traversed without invoking cb. Refs are
// resolved through vr in batches of up to maxRefCount, each hash at most
// once.
func WalkValues(target Value, vr ValueReader, cb SkipValueCallback) {
	visited := hash.HashSet{}
	// refs maps a pending target hash to whether cb should run on the value
	// once it is loaded (false for internal prolly-tree chunks).
	refs := map[hash.Hash]bool{}
	values := []valueRec{{target, true}}
	for len(values) > 0 || len(refs) > 0 {
		// Drain the in-memory stack before touching the ValueReader.
		for len(values) > 0 {
			rec := values[len(values)-1]
			values = values[:len(values)-1]
			v := rec.v
			// cb returning true skips this value's subtree.
			if rec.cb && cb(v) {
				continue
			}
			if _, ok := v.(Blob); ok {
				continue // don't traverse into blob ptrees
			}
			if r, ok := v.(Ref); ok {
				refs[r.TargetHash()] = true
				continue
			}
			// Non-leaf collections: walk their children directly when already
			// in memory, otherwise queue the chunk ref; either way cb is not
			// invoked on the internal sub-trees themselves.
			if col, ok := v.(Collection); ok && !col.sequence().isLeaf() {
				ms := col.sequence().(metaSequence)
				for _, mt := range ms.tuples {
					if mt.child != nil {
						values = append(values, valueRec{mt.child, false})
					} else {
						refs[mt.ref.TargetHash()] = false
					}
				}
				continue
			}
			v.WalkValues(func(sv Value) {
				values = append(values, valueRec{sv, true})
			})
		}
		if len(refs) == 0 {
			continue
		}
		// Move up to maxRefCount unvisited hashes into this batch; the
		// overflow stays in refs for the next round.
		hs := hash.HashSet{}
		oldRefs := refs
		refs = map[hash.Hash]bool{}
		for h := range oldRefs {
			if _, ok := visited[h]; ok {
				continue
			}
			if len(hs) >= maxRefCount {
				refs[h] = oldRefs[h]
				continue
			}
			hs.Insert(h)
			visited.Insert(h)
		}
		if len(hs) > 0 {
			valueChan := make(chan Value, len(hs))
			vr.ReadManyValues(hs, valueChan)
			close(valueChan)
			for sv := range valueChan {
				values = append(values, valueRec{sv, oldRefs[sv.Hash()]})
			}
		}
	}
}
// mightContainStructs reports whether a value of type t could transitively
// contain a struct: true when t is StructKind, or ValueKind (which can hold
// anything), or nests such a type anywhere inside it.
func mightContainStructs(t *Type) (mightHaveStructs bool) {
	if t.TargetKind() == StructKind || t.TargetKind() == ValueKind {
		mightHaveStructs = true
		return
	}
	// Recurse into the component values of t; each walked value is itself a
	// *Type here (enforced by the assertion below).
	t.WalkValues(func(v Value) {
		mightHaveStructs = mightHaveStructs || mightContainStructs(v.(*Type))
	})
	return
} | go/types/walk.go | 0.563378 | 0.412234 | walk.go | starcoder
package util
import (
"context"
"fmt"
"time"
)
// Unit is a type alias representing the standard functional programming Unit type.
type Unit = struct{}

// ErrorOf is a type that wraps an arbitrary value to turn the value into an error instance.
type ErrorOf struct {
	Value interface{}
}

// Error implements the error interface by rendering the wrapped value with %v.
func (err ErrorOf) Error() string {
	return fmt.Sprintf("%v", err.Value)
}

// ToError transforms an arbitrary value x into an error. If x is already an
// error it is returned unchanged; otherwise (including nil) it is wrapped in
// an ErrorOf.
func ToError(x interface{}) error {
	// Bind the asserted value directly instead of the original
	// `switch x.(type)` followed by a second assertion x.(error)
	// (the pattern flagged by staticcheck S1034).
	if err, ok := x.(error); ok {
		return err
	}
	return ErrorOf{x}
}
// Tuple2 is a tuple with 2 elements (fields X1, X2).
type Tuple2[T1, T2 any] struct {
	X1 T1
	X2 T2
}
// Tuple3 is a tuple with 3 elements (fields X1..X3).
type Tuple3[T1, T2, T3 any] struct {
	X1 T1
	X2 T2
	X3 T3
}
// Tuple4 is a tuple with 4 elements (fields X1..X4).
type Tuple4[T1, T2, T3, T4 any] struct {
	X1 T1
	X2 T2
	X3 T3
	X4 T4
}
// SafeFunc0E wraps f in a function that never panics: a panic raised by f is
// recovered and surfaced as the error result instead.
func SafeFunc0E[U any](f func() (U, error)) func() (U, error) {
	return func() (res U, err error) {
		defer func() {
			if r := recover(); r != nil {
				err = ToError(r)
			}
		}()
		res, err = f()
		return
	}
}

// SafeFunc0VE wraps f in a function that never panics.
// It returns f's error when f completes, or the recovered panic as an error.
func SafeFunc0VE(f func() error) func() error {
	safe := SafeFunc0E(func() (Unit, error) { return Unit{}, f() })
	return func() error {
		_, err := safe()
		return err
	}
}

// SafeFunc0 wraps f in a function that never panics.
// It returns f's value, or the recovered panic as an error.
func SafeFunc0[U any](f func() U) func() (U, error) {
	return SafeFunc0E(func() (U, error) { return f(), nil })
}

// SafeFunc0V wraps f in a function that never panics.
// It returns nil when f completes, or the recovered panic as an error.
func SafeFunc0V(f func()) func() error {
	return SafeFunc0VE(func() error { f(); return nil })
}
// SafeFunc1E wraps the one-argument f in a function that never panics: a
// panic raised by f is recovered and surfaced as the error result instead.
func SafeFunc1E[T1, U any](f func(T1) (U, error)) func(T1) (U, error) {
	return func(t1 T1) (res U, err error) {
		defer func() {
			if r := recover(); r != nil {
				err = ToError(r)
			}
		}()
		res, err = f(t1)
		return
	}
}

// SafeFunc1VE wraps the one-argument f in a function that never panics.
// It returns f's error when f completes, or the recovered panic as an error.
func SafeFunc1VE[T1 any](f func(T1) error) func(T1) error {
	safe := SafeFunc1E(func(t1 T1) (Unit, error) { return Unit{}, f(t1) })
	return func(t1 T1) error {
		_, err := safe(t1)
		return err
	}
}

// SafeFunc1 wraps the one-argument f in a function that never panics.
// It returns f's value, or the recovered panic as an error.
func SafeFunc1[T1, U any](f func(T1) U) func(T1) (U, error) {
	return SafeFunc1E(func(t1 T1) (U, error) { return f(t1), nil })
}

// SafeFunc1V wraps the one-argument f in a function that never panics.
// It returns nil when f completes, or the recovered panic as an error.
func SafeFunc1V[T1 any](f func(T1)) func(T1) error {
	return SafeFunc1VE(func(t1 T1) error { f(t1); return nil })
}
// SafeFunc2E wraps the two-argument f in a function that never panics: a
// panic raised by f is recovered and surfaced as the error result instead.
func SafeFunc2E[T1, T2, U any](f func(T1, T2) (U, error)) func(T1, T2) (U, error) {
	return func(t1 T1, t2 T2) (res U, err error) {
		defer func() {
			if r := recover(); r != nil {
				err = ToError(r)
			}
		}()
		res, err = f(t1, t2)
		return
	}
}

// SafeFunc2VE wraps the two-argument f in a function that never panics.
// It returns f's error when f completes, or the recovered panic as an error.
func SafeFunc2VE[T1, T2 any](f func(T1, T2) error) func(T1, T2) error {
	safe := SafeFunc2E(func(t1 T1, t2 T2) (Unit, error) { return Unit{}, f(t1, t2) })
	return func(t1 T1, t2 T2) error {
		_, err := safe(t1, t2)
		return err
	}
}

// SafeFunc2 wraps the two-argument f in a function that never panics.
// It returns f's value, or the recovered panic as an error.
func SafeFunc2[T1, T2, U any](f func(T1, T2) U) func(T1, T2) (U, error) {
	return SafeFunc2E(func(t1 T1, t2 T2) (U, error) { return f(t1, t2), nil })
}

// SafeFunc2V wraps the two-argument f in a function that never panics.
// It returns nil when f completes, or the recovered panic as an error.
func SafeFunc2V[T1, T2 any](f func(T1, T2)) func(T1, T2) error {
	return SafeFunc2VE(func(t1 T1, t2 T2) error { f(t1, t2); return nil })
}
// RunWithTimeout executes function f with a context constructed from ctx with the addition
// of timeout. The derived context is always cancelled before returning, so
// timer resources are released even when f finishes before the deadline.
func RunWithTimeout[T any](
	ctx context.Context,
	timeout time.Duration,
	f func(context.Context) (T, error),
) (T, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return f(ctx)
} | util/util.go | 0.768863 | 0.420243 | util.go | starcoder
package canvas
// Polyline defines a list of points in 2D space that form a polyline. If the last coordinate equals the first coordinate, we assume the polyline to close itself.
type Polyline struct {
	// coords holds the points in insertion/traversal order.
	coords []Point
}
// PolylineFromPath returns a polyline from the given path by approximating it by linear line segments (ie. flattening curves first).
func PolylineFromPath(p *Path) *Polyline {
	return &Polyline{p.Flatten().Coords()}
}
// PolylineFromPathCoords returns a polyline from the given path from each of the start/end coordinates of the segments, ie. converting all non-linear segments to linear ones without flattening.
func PolylineFromPathCoords(p *Path) *Polyline {
	return &Polyline{p.Coords()}
}
// Add appends the point (x,y) to the polyline and returns the polyline so
// calls can be chained.
func (p *Polyline) Add(x, y float64) *Polyline {
	p.coords = append(p.coords, Point{X: x, Y: y})
	return p
}

// Coords exposes the polyline's points in insertion order.
func (p *Polyline) Coords() []Point {
	return p.coords
}

// ToPath converts the polyline to a path. When the first and last coordinates
// coincide the resulting path is closed.
func (p *Polyline) ToPath() *Path {
	q := &Path{}
	n := len(p.coords)
	if n < 2 {
		return q
	}
	first, last := p.coords[0], p.coords[n-1]
	q.MoveTo(first.X, first.Y)
	for i := 1; i < n-1; i++ {
		q.LineTo(p.coords[i].X, p.coords[i].Y)
	}
	if first.Equals(last) {
		q.Close()
	} else {
		q.LineTo(last.X, last.Y)
	}
	return q
}
// FillCount returns the number of times the test point is enclosed by the polyline.
// Counter clockwise enclosures are counted positively and clockwise enclosures negatively.
// Implementation: a crossing test along the horizontal ray extending to the
// right of (x,y); each edge whose Y-span straddles the ray adjusts the count
// according to the edge's vertical direction. Requires at least one
// coordinate (coords[0] is indexed unconditionally).
func (p *Polyline) FillCount(x, y float64) int {
	test := Point{x, y}
	count := 0
	prevCoord := p.coords[0]
	for _, coord := range p.coords[1:] {
		// see https://wrf.ecse.rpi.edu//Research/Short_Notes/pnpoly.html
		// The straddle test on the left also guarantees prevCoord.Y != coord.Y,
		// so the division below cannot be by zero.
		if (test.Y < coord.Y) != (test.Y < prevCoord.Y) &&
			test.X < (prevCoord.X-coord.X)*(test.Y-coord.Y)/(prevCoord.Y-coord.Y)+coord.X {
			if prevCoord.Y < coord.Y {
				count--
			} else {
				count++
			}
		}
		prevCoord = coord
	}
	return count
}
// Interior reports whether the point (x,y) lies in the interior of the path
// (ie. gets filled). The interpretation of the winding count depends on the
// given FillRule: NonZero fills for any non-zero count, otherwise the even-odd
// rule applies.
func (p *Polyline) Interior(x, y float64, fillRule FillRule) bool {
	n := p.FillCount(x, y)
	switch fillRule {
	case NonZero:
		return n != 0
	default:
		return n%2 != 0
	}
}
// Smoothen returns a new path that smoothens out a path using cubic Béziers between all the path points. It makes sure that the curvature is smooth along the whole path. If the path is closed, it will be smooth between start and end segment too.
//
// The first (p1) and second (p2) Bézier control points for each segment are
// obtained by solving a tridiagonal linear system (cyclic when the polyline
// is closed). NOTE(review): the closed branch indexes b[n-3]; a closed
// polyline with fewer than 4 coordinates (n < 3) would index out of range —
// confirm callers guarantee this.
func (p *Polyline) Smoothen() *Path {
	K := p.coords
	if len(K) < 2 {
		return &Path{}
	} else if len(K) == 2 { // there are only two coordinates, that's a straight line
		q := &Path{}
		q.MoveTo(K[0].X, K[0].Y)
		q.LineTo(K[1].X, K[1].Y)
		return q
	}
	var p1, p2 []Point
	closed := K[0].Equals(K[len(K)-1])
	if closed {
		// Closed path: cyclic tridiagonal system (corner entries handled via
		// the extra column lc and row value lr).
		// see http://www.jacos.nl/jacos_html/spline/circular/index.html
		n := len(K) - 1
		p1 = make([]Point, n+1)
		p2 = make([]Point, n)
		a := make([]float64, n)
		b := make([]float64, n)
		c := make([]float64, n)
		d := make([]Point, n)
		// Uniform smoothness equations: p1[i-1] + 4*p1[i] + p1[i+1] = 4*K[i] + 2*K[i+1].
		for i := 0; i < n; i++ {
			a[i] = 1.0
			b[i] = 4.0
			c[i] = 1.0
			d[i] = K[i].Mul(4.0).Add(K[i+1].Mul(2.0))
		}
		// Forward elimination, simultaneously eliminating the wrap-around
		// column (lc) and the wrap-around last row (lr).
		lc := make([]float64, n)
		lc[0] = a[0]
		lr := c[n-1]
		for i := 0; i < n-3; i++ {
			m := a[i+1] / b[i]
			b[i+1] -= m * c[i]
			d[i+1] = d[i+1].Sub(d[i].Mul(m))
			lc[i+1] = -m * lc[i]
			m = lr / b[i]
			b[n-1] -= m * lc[i]
			lr = -m * c[i]
			d[n-1] = d[n-1].Sub(d[i].Mul(m))
		}
		// Last two elimination steps are unrolled because the wrap-around
		// column merges into the ordinary band near the corner.
		i := n - 3
		m := a[i+1] / b[i]
		b[i+1] -= m * c[i]
		d[i+1] = d[i+1].Sub(d[i].Mul(m))
		c[i+1] -= m * lc[i]
		m = lr / b[i]
		b[n-1] -= m * lc[i]
		a[n-1] -= m * c[i]
		d[n-1] = d[n-1].Sub(d[i].Mul(m))
		i = n - 2
		m = a[i+1] / b[i]
		b[i+1] -= m * c[i]
		d[i+1] = d[i+1].Sub(d[i].Mul(m))
		// Back substitution (including the wrap-around column contribution).
		p1[n-1] = d[n-1].Div(b[n-1])
		lc[n-2] = 0.0
		for i := n - 2; i >= 0; i-- {
			p1[i] = d[i].Sub(p1[i+1].Mul(c[i])).Sub(p1[n-1].Mul(lc[i])).Div(b[i])
		}
		p1[n] = p1[0]
		// Second control point follows from C1 continuity at each knot.
		for i := 0; i < n; i++ {
			p2[i] = K[i+1].Mul(2.0).Sub(p1[i+1])
		}
	} else {
		// Open path: standard tridiagonal system with natural end conditions.
		// see https://www.particleincell.com/2012/bezier-splines/
		n := len(K) - 1
		p1 = make([]Point, n)
		p2 = make([]Point, n)
		a := make([]float64, n)
		b := make([]float64, n)
		c := make([]float64, n)
		d := make([]Point, n)
		b[0] = 2.0
		c[0] = 1.0
		d[0] = K[0].Add(K[1].Mul(2.0))
		for i := 1; i < n-1; i++ {
			a[i] = 1.0
			b[i] = 4.0
			c[i] = 1.0
			d[i] = K[i].Mul(4.0).Add(K[i+1].Mul(2.0))
		}
		a[n-1] = 2.0
		b[n-1] = 7.0
		d[n-1] = K[n].Add(K[n-1].Mul(8.0))
		// solve with tridiagonal matrix algorithm (Thomas algorithm)
		for i := 1; i < n; i++ {
			w := a[i] / b[i-1]
			b[i] -= w * c[i-1]
			d[i] = d[i].Sub(d[i-1].Mul(w))
		}
		p1[n-1] = d[n-1].Div(b[n-1])
		for i := n - 2; i >= 0; i-- {
			p1[i] = d[i].Sub(p1[i+1].Mul(c[i])).Div(b[i])
		}
		// Second control points from C1 continuity; the last one from the
		// end condition.
		for i := 0; i < n-1; i++ {
			p2[i] = K[i+1].Mul(2.0).Sub(p1[i+1])
		}
		p2[n-1] = K[n].Add(p1[n-1]).Mul(0.5)
	}
	// Assemble the Bézier path from the computed control points.
	q := &Path{}
	q.MoveTo(K[0].X, K[0].Y)
	for i := 0; i < len(K)-1; i++ {
		q.CubeTo(p1[i].X, p1[i].Y, p2[i].X, p2[i].Y, K[i+1].X, K[i+1].Y)
	}
	if closed {
		q.Close()
	}
	return q
} | polyline.go | 0.84572 | 0.833528 | polyline.go | starcoder
package pdfjet
import (
"fmt"
"io"
"strconv"
)
// insertStringAt returns a copy of a1 with s1 inserted at the specified index.
// The input slice is never mutated. index must be within [0, len(a1)] or the
// slicing below panics.
func insertStringAt(a1 []string, s1 string, index int) []string {
	// Pre-size to the final length so a single allocation suffices
	// (the original grew the slice repeatedly).
	a2 := make([]string, 0, len(a1)+1)
	a2 = append(a2, a1[:index]...)
	a2 = append(a2, s1)
	a2 = append(a2, a1[index:]...)
	return a2
}

// insertArrayAt returns a copy of a1 with all elements of a2 inserted at the
// specified index. Neither input slice is mutated.
func insertArrayAt(a1, a2 []string, index int) []string {
	// Pre-size to the final length so a single allocation suffices.
	a3 := make([]string, 0, len(a1)+len(a2))
	a3 = append(a3, a1[:index]...)
	a3 = append(a3, a2...)
	a3 = append(a3, a1[index:]...)
	return a3
}
// appendInteger appends the decimal representation of value to *a1.
func appendInteger(a1 *[]byte, value int) {
	*a1 = strconv.AppendInt(*a1, int64(value), 10)
}

// appendFloat32 appends value formatted with two decimal places to *a1.
func appendFloat32(a1 *[]byte, value float32) {
	*a1 = strconv.AppendFloat(*a1, float64(value), 'f', 2, 32)
}

// appendString appends the bytes of s1 to *a1.
func appendString(a1 *[]byte, s1 string) {
	*a1 = append(*a1, s1...)
}

// appendByte appends the single byte b1 to *a1.
func appendByte(a1 *[]byte, b1 byte) {
	*a1 = append(*a1, b1)
}

// appendByteArray appends all of a2 to *a1.
func appendByteArray(a1 *[]byte, a2 []byte) {
	*a1 = append(*a1, a2...)
}

// appendByteArraySlice appends length bytes of a2 starting at offset to *a1.
func appendByteArraySlice(a1 *[]byte, a2 []byte, offset, length int) {
	*a1 = append(*a1, a2[offset:offset+length]...)
}
func getUint8(r io.Reader) uint8 {
buf := make([]byte, 1)
io.ReadFull(r, buf)
return buf[0]
}
func getUint16(r io.Reader) uint16 {
buf := make([]byte, 2)
io.ReadFull(r, buf)
return uint16(buf[0])<<8 | uint16(buf[1])
}
func getUint24(r io.Reader) uint32 {
buf := make([]byte, 3)
io.ReadFull(r, buf)
return uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])
}
func getUint32(r io.Reader) uint32 {
buf := make([]byte, 4)
io.ReadFull(r, buf)
return uint32(buf[0])<<24 | uint32(buf[1])<<16 | uint32(buf[2])<<8 | uint32(buf[3])
}
func getInt16(r io.Reader) int16 {
buf := make([]byte, 2)
io.ReadFull(r, buf)
return int16(buf[0])<<8 | int16(buf[1])
}
func getInt32(r io.Reader) int32 {
buf := make([]byte, 4)
io.ReadFull(r, buf)
return int32(buf[0])<<24 | int32(buf[1])<<16 | int32(buf[2])<<8 | int32(buf[3])
}
// toHexString formats code as an upper-case hexadecimal string, zero-padded
// to at least four digits (e.g. 255 -> "00FF").
func toHexString(code int) string {
	s := fmt.Sprintf("%04X", code)
	return s
}
// skipNBytes discards the next n bytes from reader by reading them into a
// throw-away buffer. Read errors are silently ignored.
func skipNBytes(reader io.Reader, n int) {
	getNBytes(reader, n)
}
// getNBytes reads exactly n bytes from r. On a read error or short read the
// error is discarded and the unread tail of the returned buffer stays zero.
func getNBytes(r io.Reader, n int) []byte {
	buf := make([]byte, n)
	io.ReadFull(r, buf)
	return buf
} | src/pdfjet/helperfunctions.go | 0.60778 | 0.497559 | helperfunctions.go | starcoder
package dfl
import (
"fmt"
"reflect"
"github.com/pkg/errors"
)
// numericAsInt64 returns the value of x as an int64 for the supported
// integral types (int, int64, uint8); the bool reports whether x was integral.
func numericAsInt64(x interface{}) (int64, bool) {
	switch v := x.(type) {
	case int:
		return int64(v), true
	case int64:
		return v, true
	case uint8:
		return int64(v), true
	}
	return 0, false
}

// numericAsFloat64 returns the value of x as a float64 for all supported
// numeric types (int, int64, uint8, float64); the bool reports success.
func numericAsFloat64(x interface{}) (float64, bool) {
	switch v := x.(type) {
	case int:
		return float64(v), true
	case int64:
		return float64(v), true
	case uint8:
		return float64(v), true
	case float64:
		return v, true
	}
	return 0, false
}

// CompareNumbers compares parameter a and parameter b.
// The parameters may be of type uint8, int, int64, or float64.
// If a > b, then returns 1. If a < b, then returns -1. If a == b, then return 0.
//
// Pairs of integral values are compared exactly in int64; any pairing that
// involves a float64 is compared in float64 — exactly the per-type behavior
// of the original 4x4 type switch, but without the 16 duplicated branches.
func CompareNumbers(a interface{}, b interface{}) (int, error) {
	// Exact path: both operands integral.
	if ai, aok := numericAsInt64(a); aok {
		if bi, bok := numericAsInt64(b); bok {
			switch {
			case ai > bi:
				return 1, nil
			case ai < bi:
				return -1, nil
			}
			return 0, nil
		}
	}
	// Mixed or floating path.
	af, aok := numericAsFloat64(a)
	bf, bok := numericAsFloat64(b)
	if aok && bok {
		switch {
		case af > bf:
			return 1, nil
		case af < bf:
			return -1, nil
		}
		return 0, nil
	}
	// Unsupported operand type(s). NOTE: as in the original, a nil operand
	// panics in reflect.TypeOf(...).String().
	return 0, errors.New(fmt.Sprintf("Error comparing numbers %#v (%v) and %#v (%v)", a, reflect.TypeOf(a).String(), b, reflect.TypeOf(b).String()))
}
package common
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"math/big"
"github.com/33cn/chain33/common/crypto/sha3"
"golang.org/x/crypto/ripemd160"
)
const (
	// hashLength is the byte length of a Hash (SHA-256 sized).
	hashLength = 32
)
//Hash is a fixed 32-byte digest value
type Hash [hashLength]byte
//BytesToHash converts []byte -> Hash; input longer than 32 bytes is cropped from the left
func BytesToHash(b []byte) Hash {
	var h Hash
	h.SetBytes(b)
	return h
}
//StringToHash converts the raw bytes of string s -> Hash
func StringToHash(s string) Hash { return BytesToHash([]byte(s)) }
//BigToHash converts *big.Int -> Hash using its big-endian bytes (left-cropped if too long)
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
//HexToHash converts hex -> Hash. Decode errors are silently discarded:
//whatever bytes decoded successfully (possibly none) are used, so invalid
//input yields a partially-set or zero hash.
func HexToHash(s string) Hash {
	b, _ := FromHex(s)
	return BytesToHash(b)
}
//Str Get the raw-byte string representation of the underlying hash (not hex)
func (h Hash) Str() string { return string(h[:]) }
//Bytes Get the []byte representation of the underlying hash
func (h Hash) Bytes() []byte { return h[:] }
//Hex Get the 0x-prefixed hex representation of the underlying hash
func (h Hash) Hex() string { return hexEncode(h[:]) }
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging. Only the first 3 and last 3 bytes are shown,
// separated by an ellipsis.
func (h Hash) TerminalString() string {
	return fmt.Sprintf("%x…%x", h[:3], h[29:])
}
// String implements the stringer interface and is used also by the logger when
// doing full logging into a file.
func (h Hash) String() string {
	return h.Hex()
}
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
// without going through the stringer interface used for logging.
func (h Hash) Format(s fmt.State, c rune) {
	fmt.Fprintf(s, "%"+string(c), h[:])
}
//SetBytes Sets the hash to the value of b. If b is larger than len(h), b is
//cropped from the left (only the last hashLength bytes are kept). Shorter
//input is right-aligned; bytes of h before the copied region stay unchanged.
func (h *Hash) SetBytes(b []byte) {
	if len(b) > len(h) {
		b = b[len(b)-hashLength:]
	}
	copy(h[hashLength-len(b):], b)
}
//SetString Set string `s` to h. If s is larger than len(h) s will be cropped (from left) to fit.
func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) }
//Set Sets h to other
func (h *Hash) Set(other Hash) {
	// Built-in copy replaces the original element-by-element loop.
	copy(h[:], other[:])
}
//EmptyHash reports whether h is the all-zero hash
func EmptyHash(h Hash) bool {
	return h == Hash{}
}
// hexEncode renders b as a lower-case hex string with a "0x" prefix.
// An empty input yields just "0x".
func hexEncode(b []byte) string {
	return "0x" + hex.EncodeToString(b)
}
// ToHex renders b as a 0x-prefixed hex string. An empty or nil input yields
// the empty string (no "0x" prefix is emitted for it).
func ToHex(b []byte) string {
	h := Bytes2Hex(b)
	if h == "" {
		return ""
	}
	return "0x" + h
}

// HashHex hex-encodes d into a fixed 64-character string. If d is shorter
// than 32 bytes the tail of the result keeps zero bytes (it is not padded
// with '0' characters).
func HashHex(d []byte) string {
	var buf [64]byte
	hex.Encode(buf[:], d)
	return string(buf[:])
}

// FromHex decodes a hex string that may carry a "0x"/"0X" prefix. Odd-length
// input is zero-padded on the left before decoding; input of length <= 1 is
// treated as empty.
func FromHex(s string) ([]byte, error) {
	if len(s) <= 1 {
		return []byte{}, nil
	}
	if s[0] == '0' && (s[1] == 'x' || s[1] == 'X') {
		s = s[2:]
	}
	if len(s)%2 == 1 {
		s = "0" + s
	}
	return Hex2Bytes(s)
}

// CopyBytes returns an independent copy of the provided bytes; a nil input
// stays nil.
func CopyBytes(b []byte) (copiedBytes []byte) {
	if b == nil {
		return nil
	}
	copiedBytes = make([]byte, len(b))
	copy(copiedBytes, b)
	return
}

// HasHexPrefix reports whether str starts with "0x" (lower-case x only).
func HasHexPrefix(str string) bool {
	return len(str) >= 2 && str[0] == '0' && str[1] == 'x'
}

// IsHex reports whether str looks like 0x-prefixed hex data: at least four
// characters, even length, and a "0x" prefix. The digits themselves are not
// validated.
func IsHex(str string) bool {
	l := len(str)
	if l < 4 || l%2 != 0 {
		return false
	}
	return str[0:2] == "0x"
}
// Bytes2Hex encodes d as a lower-case hexadecimal string.
func Bytes2Hex(d []byte) string {
	out := make([]byte, hex.EncodedLen(len(d)))
	hex.Encode(out, d)
	return string(out)
}
//Sha256 returns the SHA-256 digest of b as a 32-byte slice
func Sha256(b []byte) []byte {
	data := sha256.Sum256(b)
	return data[:]
}
//ShaKeccak256 returns the (legacy) Keccak-256 digest of b as a 32-byte slice
func ShaKeccak256(b []byte) []byte {
	data := sha3.KeccakSum256(b)
	return data[:]
}
// Hex2Bytes decodes the hexadecimal string str into bytes; the error comes
// straight from hex.DecodeString for malformed input.
func Hex2Bytes(str string) ([]byte, error) {
	b, err := hex.DecodeString(str)
	return b, err
}
func sha2Hash(b []byte, out []byte) {
s := sha256.New()
s.Write(b[:])
tmp := s.Sum(nil)
s.Reset()
s.Write(tmp)
copy(out[:], s.Sum(nil))
}
// Sha2Sum Returns hash: SHA256( SHA256( data ) )
// Where possible, using ShaHash() should be a bit faster
func Sha2Sum(b []byte) (out [32]byte) {
sha2Hash(b, out[:])
return
}
// rimpHash writes RIPEMD160(SHA256(in)) into out; out must hold at least
// 20 bytes.
func rimpHash(in []byte, out []byte) {
	sum := sha256.Sum256(in)
	rim := ripemd160.New()
	rim.Write(sum[:])
	copy(out, rim.Sum(nil))
}

// Rimp160AfterSha256 Returns hash: RIMP160( SHA256( data ) )
// Where possible, using RimpHash() should be a bit faster
func Rimp160AfterSha256(b []byte) (out [20]byte) {
	rimpHash(b, out[:])
	return
}
//RandKey returns 32 cryptographically random bytes; it panics if the system
//entropy source cannot be read.
func RandKey() (ret [32]byte) {
	_, err := io.ReadFull(rand.Reader, ret[:])
	if err != nil {
		panic(err)
	}
	return
} | vendor/github.com/33cn/chain33/common/hash.go | 0.634543 | 0.431464 | hash.go | starcoder
package syntaxtree
import (
"bytes"
"strings"
"github.com/manishmeganathan/tunalang/lexer"
)
// A structure that represents an Identifier literal (a bare name expression)
type Identifier struct {
	// Represents the lexological token 'IDENT'
	Token lexer.Token
	// Represents the identifier name
	Value string
}
// A method of Identifier to satisfy the Expression interface (marker only)
func (i *Identifier) expressionNode() {}
// A method of Identifier that returns its token literal value
func (i *Identifier) TokenLiteral() string { return i.Token.Literal }
// A method of Identifier that returns its string representation (the name itself)
func (i *Identifier) String() string { return i.Value }
// A structure that represents an Integer literal
type IntegerLiteral struct {
	// Represents the lexological token 'INT'
	Token lexer.Token
	// Represents the parsed integer value
	Value int64
}
// A method of IntegerLiteral to satisfy the Expression interface (marker only)
func (il *IntegerLiteral) expressionNode() {}
// A method of IntegerLiteral that returns its token literal value
func (il *IntegerLiteral) TokenLiteral() string { return il.Token.Literal }
// A method of IntegerLiteral that returns its string representation (the literal as written in source)
func (il *IntegerLiteral) String() string { return il.Token.Literal }
// A structure that represents a Boolean literal
type BooleanLiteral struct {
	// Represents the lexological token 'TRUE'/'FALSE'
	Token lexer.Token
	// Represents the parsed boolean value
	Value bool
}
// A method of BooleanLiteral to satisfy the Expression interface (marker only)
func (b *BooleanLiteral) expressionNode() {}
// A method of BooleanLiteral that returns its token literal value
func (b *BooleanLiteral) TokenLiteral() string { return b.Token.Literal }
// A method of BooleanLiteral that returns its string representation (the literal as written in source)
func (b *BooleanLiteral) String() string { return b.Token.Literal }
// A structure that represents a String literal
type StringLiteral struct {
	// Represents the lexological token 'STRING'
	Token lexer.Token
	// Represents the string value
	Value string
}
// A method of StringLiteral to satisfy the Expression interface (marker only).
// Receiver renamed il -> sl: the original name was copy-pasted from
// IntegerLiteral; each node type here uses its own initials (i, il, b, ...).
func (sl *StringLiteral) expressionNode() {}
// A method of StringLiteral that returns its token literal value
func (sl *StringLiteral) TokenLiteral() string { return sl.Token.Literal }
// A method of StringLiteral that returns its string representation
func (sl *StringLiteral) String() string { return sl.Token.Literal }
// A structure that represents a Function literal
type FunctionLiteral struct {
	// Represents the lexological token 'FN'
	Token lexer.Token
	// Represent the list of function parameters
	Parameters []*Identifier
	// Represents the block of statements in the function
	Body *BlockStatement
}

// A method of FunctionLiteral to satisfy the Expression interface (marker only)
func (fl *FunctionLiteral) expressionNode() {}

// A method of FunctionLiteral that returns its token literal value
func (fl *FunctionLiteral) TokenLiteral() string { return fl.Token.Literal }

// A method of FunctionLiteral that renders the literal in the form
// "fn(p1, p2) <body>".
func (fl *FunctionLiteral) String() string {
	// Collect the rendered parameters.
	params := make([]string, 0, len(fl.Parameters))
	for _, p := range fl.Parameters {
		params = append(params, p.String())
	}
	// Assemble: token, parenthesized parameter list, then the body.
	var out bytes.Buffer
	out.WriteString(fl.TokenLiteral())
	out.WriteString("(")
	out.WriteString(strings.Join(params, ", "))
	out.WriteString(") ")
	out.WriteString(fl.Body.String())
	return out.String()
}
// A structure that represents a List literal
type ListLiteral struct {
	// Represents the lexological token '['
	Token lexer.Token
	// Represents the slice of list elements
	Elements []Expression
}

// A method of ListLiteral to satisfy the Expression interface (marker only)
func (ll *ListLiteral) expressionNode() {}

// A method of ListLiteral that returns its token literal value
func (ll *ListLiteral) TokenLiteral() string { return ll.Token.Literal }

// A method of ListLiteral that renders the literal as "[e1, e2, ...]".
func (ll *ListLiteral) String() string {
	// Render each element, then join inside brackets.
	elems := make([]string, 0, len(ll.Elements))
	for _, el := range ll.Elements {
		elems = append(elems, el.String())
	}
	return "[" + strings.Join(elems, ", ") + "]"
}
// A structure that represents a Map literal
type MapLiteral struct {
	// Represents the lexological token '{'
	Token lexer.Token
	// Represents the key-value pairs of the mapping
	Pairs map[Expression]Expression
}
// A method of MapLiteral to satisfy the Expression interface (marker only)
func (ml *MapLiteral) expressionNode() {}
// A method of MapLiteral that returns its token literal value
func (ml *MapLiteral) TokenLiteral() string { return ml.Token.Literal }
// A method of MapLiteral that returns its string representation.
// NOTE(review): Go map iteration order is randomized, so the order of the
// rendered pairs is nondeterministic across calls.
func (ml *MapLiteral) String() string {
	// Declare a bytes buffer
	var out bytes.Buffer
	// Declare an empty slice
	pairs := []string{}
	// Iterate over the key-value pairs of the map literal
	for key, value := range ml.Pairs {
		// Render each pair as "key:value"
		pairs = append(pairs, key.String()+":"+value.String())
	}
	// Wrap the comma-separated pairs in braces
	out.WriteString("{")
	out.WriteString(strings.Join(pairs, ", "))
	out.WriteString("}")
	// Return the string from the buffer
	return out.String()
} | syntaxtree/literals.go | 0.832373 | 0.428054 | literals.go | starcoder
package openapi
import (
"time"
"encoding/json"
)
// NullableClass struct for NullableClass
type NullableClass struct {
IntegerProp *int32 `json:"integer_prop,omitempty"`
isExplicitNullIntegerProp bool `json:"-"`
NumberProp *float32 `json:"number_prop,omitempty"`
isExplicitNullNumberProp bool `json:"-"`
BooleanProp *bool `json:"boolean_prop,omitempty"`
isExplicitNullBooleanProp bool `json:"-"`
StringProp *string `json:"string_prop,omitempty"`
isExplicitNullStringProp bool `json:"-"`
DateProp *string `json:"date_prop,omitempty"`
isExplicitNullDateProp bool `json:"-"`
DatetimeProp *time.Time `json:"datetime_prop,omitempty"`
isExplicitNullDatetimeProp bool `json:"-"`
ArrayNullableProp *[]map[string]interface{} `json:"array_nullable_prop,omitempty"`
isExplicitNullArrayNullableProp bool `json:"-"`
ArrayAndItemsNullableProp *[]map[string]interface{} `json:"array_and_items_nullable_prop,omitempty"`
isExplicitNullArrayAndItemsNullableProp bool `json:"-"`
ArrayItemsNullable *[]map[string]interface{} `json:"array_items_nullable,omitempty"`
ObjectNullableProp *map[string]map[string]interface{} `json:"object_nullable_prop,omitempty"`
isExplicitNullObjectNullableProp bool `json:"-"`
ObjectAndItemsNullableProp *map[string]map[string]interface{} `json:"object_and_items_nullable_prop,omitempty"`
isExplicitNullObjectAndItemsNullableProp bool `json:"-"`
ObjectItemsNullable *map[string]map[string]interface{} `json:"object_items_nullable,omitempty"`
}
// GetIntegerProp returns the IntegerProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetIntegerProp() int32 {
if o == nil || o.IntegerProp == nil {
var ret int32
return ret
}
return *o.IntegerProp
}
// GetIntegerPropOk returns a tuple with the IntegerProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetIntegerPropOk() (int32, bool) {
if o == nil || o.IntegerProp == nil {
var ret int32
return ret, false
}
return *o.IntegerProp, true
}
// HasIntegerProp returns a boolean if a field has been set.
func (o *NullableClass) HasIntegerProp() bool {
if o != nil && o.IntegerProp != nil {
return true
}
return false
}
// SetIntegerProp gets a reference to the given int32 and assigns it to the IntegerProp field.
func (o *NullableClass) SetIntegerProp(v int32) {
o.IntegerProp = &v
}
// SetIntegerPropExplicitNull (un)sets IntegerProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset)
// The IntegerProp value is set to nil even if false is passed
func (o *NullableClass) SetIntegerPropExplicitNull(b bool) {
o.IntegerProp = nil
o.isExplicitNullIntegerProp = b
}
// GetNumberProp returns the NumberProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetNumberProp() float32 {
if o == nil || o.NumberProp == nil {
var ret float32
return ret
}
return *o.NumberProp
}
// GetNumberPropOk returns a tuple with the NumberProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetNumberPropOk() (float32, bool) {
if o == nil || o.NumberProp == nil {
var ret float32
return ret, false
}
return *o.NumberProp, true
}
// HasNumberProp returns a boolean if a field has been set.
func (o *NullableClass) HasNumberProp() bool {
if o != nil && o.NumberProp != nil {
return true
}
return false
}
// SetNumberProp gets a reference to the given float32 and assigns it to the NumberProp field.
func (o *NullableClass) SetNumberProp(v float32) {
o.NumberProp = &v
}
// SetNumberPropExplicitNull (un)sets NumberProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset)
// The NumberProp value is set to nil even if false is passed
func (o *NullableClass) SetNumberPropExplicitNull(b bool) {
o.NumberProp = nil
o.isExplicitNullNumberProp = b
}
// GetBooleanProp returns the BooleanProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetBooleanProp() bool {
if o == nil || o.BooleanProp == nil {
var ret bool
return ret
}
return *o.BooleanProp
}
// GetBooleanPropOk returns a tuple with the BooleanProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetBooleanPropOk() (bool, bool) {
if o == nil || o.BooleanProp == nil {
var ret bool
return ret, false
}
return *o.BooleanProp, true
}
// HasBooleanProp returns a boolean if a field has been set.
func (o *NullableClass) HasBooleanProp() bool {
if o != nil && o.BooleanProp != nil {
return true
}
return false
}
// SetBooleanProp gets a reference to the given bool and assigns it to the BooleanProp field.
func (o *NullableClass) SetBooleanProp(v bool) {
o.BooleanProp = &v
}
// SetBooleanPropExplicitNull (un)sets BooleanProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset)
// The BooleanProp value is set to nil even if false is passed
func (o *NullableClass) SetBooleanPropExplicitNull(b bool) {
o.BooleanProp = nil
o.isExplicitNullBooleanProp = b
}
// GetStringProp returns the StringProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetStringProp() string {
if o == nil || o.StringProp == nil {
var ret string
return ret
}
return *o.StringProp
}
// GetStringPropOk returns a tuple with the StringProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetStringPropOk() (string, bool) {
if o == nil || o.StringProp == nil {
var ret string
return ret, false
}
return *o.StringProp, true
}
// HasStringProp returns a boolean if a field has been set.
func (o *NullableClass) HasStringProp() bool {
if o != nil && o.StringProp != nil {
return true
}
return false
}
// SetStringProp gets a reference to the given string and assigns it to the StringProp field.
func (o *NullableClass) SetStringProp(v string) {
o.StringProp = &v
}
// SetStringPropExplicitNull (un)sets StringProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset)
// The StringProp value is set to nil even if false is passed
func (o *NullableClass) SetStringPropExplicitNull(b bool) {
o.StringProp = nil
o.isExplicitNullStringProp = b
}
// GetDateProp returns the DateProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetDateProp() string {
    if o == nil || o.DateProp == nil {
        var ret string
        return ret
    }
    return *o.DateProp
}

// GetDatePropOk returns a tuple with the DateProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetDatePropOk() (string, bool) {
    if o == nil || o.DateProp == nil {
        var ret string
        return ret, false
    }
    return *o.DateProp, true
}

// HasDateProp reports whether the DateProp field has been set.
func (o *NullableClass) HasDateProp() bool {
    if o != nil && o.DateProp != nil {
        return true
    }
    return false
}

// SetDateProp gets a reference to the given string and assigns it to the DateProp field.
func (o *NullableClass) SetDateProp(v string) {
    o.DateProp = &v
}

// SetDatePropExplicitNull (un)sets DateProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset).
// The DateProp value is set to nil even if false is passed.
func (o *NullableClass) SetDatePropExplicitNull(b bool) {
    o.DateProp = nil
    o.isExplicitNullDateProp = b
}
// GetDatetimeProp returns the DatetimeProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetDatetimeProp() time.Time {
    if o == nil || o.DatetimeProp == nil {
        var ret time.Time
        return ret
    }
    return *o.DatetimeProp
}

// GetDatetimePropOk returns a tuple with the DatetimeProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetDatetimePropOk() (time.Time, bool) {
    if o == nil || o.DatetimeProp == nil {
        var ret time.Time
        return ret, false
    }
    return *o.DatetimeProp, true
}

// HasDatetimeProp reports whether the DatetimeProp field has been set.
func (o *NullableClass) HasDatetimeProp() bool {
    if o != nil && o.DatetimeProp != nil {
        return true
    }
    return false
}

// SetDatetimeProp gets a reference to the given time.Time and assigns it to the DatetimeProp field.
func (o *NullableClass) SetDatetimeProp(v time.Time) {
    o.DatetimeProp = &v
}

// SetDatetimePropExplicitNull (un)sets DatetimeProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset).
// The DatetimeProp value is set to nil even if false is passed.
func (o *NullableClass) SetDatetimePropExplicitNull(b bool) {
    o.DatetimeProp = nil
    o.isExplicitNullDatetimeProp = b
}
// GetArrayNullableProp returns the ArrayNullableProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetArrayNullableProp() []map[string]interface{} {
    if o == nil || o.ArrayNullableProp == nil {
        var ret []map[string]interface{}
        return ret
    }
    return *o.ArrayNullableProp
}

// GetArrayNullablePropOk returns a tuple with the ArrayNullableProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetArrayNullablePropOk() ([]map[string]interface{}, bool) {
    if o == nil || o.ArrayNullableProp == nil {
        var ret []map[string]interface{}
        return ret, false
    }
    return *o.ArrayNullableProp, true
}

// HasArrayNullableProp reports whether the ArrayNullableProp field has been set.
func (o *NullableClass) HasArrayNullableProp() bool {
    if o != nil && o.ArrayNullableProp != nil {
        return true
    }
    return false
}

// SetArrayNullableProp gets a reference to the given []map[string]interface{} and assigns it to the ArrayNullableProp field.
func (o *NullableClass) SetArrayNullableProp(v []map[string]interface{}) {
    o.ArrayNullableProp = &v
}

// SetArrayNullablePropExplicitNull (un)sets ArrayNullableProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset).
// The ArrayNullableProp value is set to nil even if false is passed.
func (o *NullableClass) SetArrayNullablePropExplicitNull(b bool) {
    o.ArrayNullableProp = nil
    o.isExplicitNullArrayNullableProp = b
}
// GetArrayAndItemsNullableProp returns the ArrayAndItemsNullableProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetArrayAndItemsNullableProp() []map[string]interface{} {
    if o == nil || o.ArrayAndItemsNullableProp == nil {
        var ret []map[string]interface{}
        return ret
    }
    return *o.ArrayAndItemsNullableProp
}

// GetArrayAndItemsNullablePropOk returns a tuple with the ArrayAndItemsNullableProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetArrayAndItemsNullablePropOk() ([]map[string]interface{}, bool) {
    if o == nil || o.ArrayAndItemsNullableProp == nil {
        var ret []map[string]interface{}
        return ret, false
    }
    return *o.ArrayAndItemsNullableProp, true
}

// HasArrayAndItemsNullableProp reports whether the ArrayAndItemsNullableProp field has been set.
func (o *NullableClass) HasArrayAndItemsNullableProp() bool {
    if o != nil && o.ArrayAndItemsNullableProp != nil {
        return true
    }
    return false
}

// SetArrayAndItemsNullableProp gets a reference to the given []map[string]interface{} and assigns it to the ArrayAndItemsNullableProp field.
func (o *NullableClass) SetArrayAndItemsNullableProp(v []map[string]interface{}) {
    o.ArrayAndItemsNullableProp = &v
}

// SetArrayAndItemsNullablePropExplicitNull (un)sets ArrayAndItemsNullableProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset).
// The ArrayAndItemsNullableProp value is set to nil even if false is passed.
func (o *NullableClass) SetArrayAndItemsNullablePropExplicitNull(b bool) {
    o.ArrayAndItemsNullableProp = nil
    o.isExplicitNullArrayAndItemsNullableProp = b
}
// GetArrayItemsNullable returns the ArrayItemsNullable field if non-nil, zero value otherwise.
// Note: only the array items are nullable, so this field has no explicit-null setter.
func (o *NullableClass) GetArrayItemsNullable() []map[string]interface{} {
    if o == nil || o.ArrayItemsNullable == nil {
        var ret []map[string]interface{}
        return ret
    }
    return *o.ArrayItemsNullable
}

// GetArrayItemsNullableOk returns a tuple with the ArrayItemsNullable field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetArrayItemsNullableOk() ([]map[string]interface{}, bool) {
    if o == nil || o.ArrayItemsNullable == nil {
        var ret []map[string]interface{}
        return ret, false
    }
    return *o.ArrayItemsNullable, true
}

// HasArrayItemsNullable reports whether the ArrayItemsNullable field has been set.
func (o *NullableClass) HasArrayItemsNullable() bool {
    if o != nil && o.ArrayItemsNullable != nil {
        return true
    }
    return false
}

// SetArrayItemsNullable gets a reference to the given []map[string]interface{} and assigns it to the ArrayItemsNullable field.
func (o *NullableClass) SetArrayItemsNullable(v []map[string]interface{}) {
    o.ArrayItemsNullable = &v
}
// GetObjectNullableProp returns the ObjectNullableProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetObjectNullableProp() map[string]map[string]interface{} {
    if o == nil || o.ObjectNullableProp == nil {
        var ret map[string]map[string]interface{}
        return ret
    }
    return *o.ObjectNullableProp
}

// GetObjectNullablePropOk returns a tuple with the ObjectNullableProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetObjectNullablePropOk() (map[string]map[string]interface{}, bool) {
    if o == nil || o.ObjectNullableProp == nil {
        var ret map[string]map[string]interface{}
        return ret, false
    }
    return *o.ObjectNullableProp, true
}

// HasObjectNullableProp reports whether the ObjectNullableProp field has been set.
func (o *NullableClass) HasObjectNullableProp() bool {
    if o != nil && o.ObjectNullableProp != nil {
        return true
    }
    return false
}

// SetObjectNullableProp gets a reference to the given map[string]map[string]interface{} and assigns it to the ObjectNullableProp field.
func (o *NullableClass) SetObjectNullableProp(v map[string]map[string]interface{}) {
    o.ObjectNullableProp = &v
}

// SetObjectNullablePropExplicitNull (un)sets ObjectNullableProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset).
// The ObjectNullableProp value is set to nil even if false is passed.
func (o *NullableClass) SetObjectNullablePropExplicitNull(b bool) {
    o.ObjectNullableProp = nil
    o.isExplicitNullObjectNullableProp = b
}
// GetObjectAndItemsNullableProp returns the ObjectAndItemsNullableProp field if non-nil, zero value otherwise.
func (o *NullableClass) GetObjectAndItemsNullableProp() map[string]map[string]interface{} {
    if o == nil || o.ObjectAndItemsNullableProp == nil {
        var ret map[string]map[string]interface{}
        return ret
    }
    return *o.ObjectAndItemsNullableProp
}

// GetObjectAndItemsNullablePropOk returns a tuple with the ObjectAndItemsNullableProp field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetObjectAndItemsNullablePropOk() (map[string]map[string]interface{}, bool) {
    if o == nil || o.ObjectAndItemsNullableProp == nil {
        var ret map[string]map[string]interface{}
        return ret, false
    }
    return *o.ObjectAndItemsNullableProp, true
}

// HasObjectAndItemsNullableProp reports whether the ObjectAndItemsNullableProp field has been set.
func (o *NullableClass) HasObjectAndItemsNullableProp() bool {
    if o != nil && o.ObjectAndItemsNullableProp != nil {
        return true
    }
    return false
}

// SetObjectAndItemsNullableProp gets a reference to the given map[string]map[string]interface{} and assigns it to the ObjectAndItemsNullableProp field.
func (o *NullableClass) SetObjectAndItemsNullableProp(v map[string]map[string]interface{}) {
    o.ObjectAndItemsNullableProp = &v
}

// SetObjectAndItemsNullablePropExplicitNull (un)sets ObjectAndItemsNullableProp to be considered as explicit "null" value
// when serializing to JSON (pass true as argument to set this, false to unset).
// The ObjectAndItemsNullableProp value is set to nil even if false is passed.
func (o *NullableClass) SetObjectAndItemsNullablePropExplicitNull(b bool) {
    o.ObjectAndItemsNullableProp = nil
    o.isExplicitNullObjectAndItemsNullableProp = b
}
// GetObjectItemsNullable returns the ObjectItemsNullable field if non-nil, zero value otherwise.
// Note: only the map values are nullable, so this field has no explicit-null setter.
func (o *NullableClass) GetObjectItemsNullable() map[string]map[string]interface{} {
    if o == nil || o.ObjectItemsNullable == nil {
        var ret map[string]map[string]interface{}
        return ret
    }
    return *o.ObjectItemsNullable
}

// GetObjectItemsNullableOk returns a tuple with the ObjectItemsNullable field if it's non-nil, zero value otherwise
// and a boolean to check if the value has been set.
func (o *NullableClass) GetObjectItemsNullableOk() (map[string]map[string]interface{}, bool) {
    if o == nil || o.ObjectItemsNullable == nil {
        var ret map[string]map[string]interface{}
        return ret, false
    }
    return *o.ObjectItemsNullable, true
}

// HasObjectItemsNullable reports whether the ObjectItemsNullable field has been set.
func (o *NullableClass) HasObjectItemsNullable() bool {
    if o != nil && o.ObjectItemsNullable != nil {
        return true
    }
    return false
}

// SetObjectItemsNullable gets a reference to the given map[string]map[string]interface{} and assigns it to the ObjectItemsNullable field.
func (o *NullableClass) SetObjectItemsNullable(v map[string]map[string]interface{}) {
    o.ObjectItemsNullable = &v
}
// MarshalJSON returns the JSON representation of the model.
// Each nullable field is serialized with tri-state semantics:
//   - pointer non-nil: the value is emitted;
//   - pointer nil and the corresponding isExplicitNull* flag true: an explicit
//     JSON null is emitted (the nil pointer marshals to null);
//   - pointer nil and flag false: the key is omitted entirely.
// ArrayItemsNullable and ObjectItemsNullable carry no explicit-null flag and
// are simply omitted when nil.
func (o NullableClass) MarshalJSON() ([]byte, error) {
    toSerialize := map[string]interface{}{}
    if o.IntegerProp == nil {
        if o.isExplicitNullIntegerProp {
            toSerialize["integer_prop"] = o.IntegerProp
        }
    } else {
        toSerialize["integer_prop"] = o.IntegerProp
    }
    if o.NumberProp == nil {
        if o.isExplicitNullNumberProp {
            toSerialize["number_prop"] = o.NumberProp
        }
    } else {
        toSerialize["number_prop"] = o.NumberProp
    }
    if o.BooleanProp == nil {
        if o.isExplicitNullBooleanProp {
            toSerialize["boolean_prop"] = o.BooleanProp
        }
    } else {
        toSerialize["boolean_prop"] = o.BooleanProp
    }
    if o.StringProp == nil {
        if o.isExplicitNullStringProp {
            toSerialize["string_prop"] = o.StringProp
        }
    } else {
        toSerialize["string_prop"] = o.StringProp
    }
    if o.DateProp == nil {
        if o.isExplicitNullDateProp {
            toSerialize["date_prop"] = o.DateProp
        }
    } else {
        toSerialize["date_prop"] = o.DateProp
    }
    if o.DatetimeProp == nil {
        if o.isExplicitNullDatetimeProp {
            toSerialize["datetime_prop"] = o.DatetimeProp
        }
    } else {
        toSerialize["datetime_prop"] = o.DatetimeProp
    }
    if o.ArrayNullableProp == nil {
        if o.isExplicitNullArrayNullableProp {
            toSerialize["array_nullable_prop"] = o.ArrayNullableProp
        }
    } else {
        toSerialize["array_nullable_prop"] = o.ArrayNullableProp
    }
    if o.ArrayAndItemsNullableProp == nil {
        if o.isExplicitNullArrayAndItemsNullableProp {
            toSerialize["array_and_items_nullable_prop"] = o.ArrayAndItemsNullableProp
        }
    } else {
        toSerialize["array_and_items_nullable_prop"] = o.ArrayAndItemsNullableProp
    }
    // No explicit-null flag for this field: present-or-omitted only.
    if o.ArrayItemsNullable != nil {
        toSerialize["array_items_nullable"] = o.ArrayItemsNullable
    }
    if o.ObjectNullableProp == nil {
        if o.isExplicitNullObjectNullableProp {
            toSerialize["object_nullable_prop"] = o.ObjectNullableProp
        }
    } else {
        toSerialize["object_nullable_prop"] = o.ObjectNullableProp
    }
    if o.ObjectAndItemsNullableProp == nil {
        if o.isExplicitNullObjectAndItemsNullableProp {
            toSerialize["object_and_items_nullable_prop"] = o.ObjectAndItemsNullableProp
        }
    } else {
        toSerialize["object_and_items_nullable_prop"] = o.ObjectAndItemsNullableProp
    }
    // No explicit-null flag for this field: present-or-omitted only.
    if o.ObjectItemsNullable != nil {
        toSerialize["object_items_nullable"] = o.ObjectItemsNullable
    }
    return json.Marshal(toSerialize)
}
package helper
import (
"encoding/json"
"fmt"
"math"
"math/rand"
"strings"
"time"
)
// init seeds the package-level math/rand source once at import time so the
// package-level Random* helpers do not produce the same sequence on every run.
// NOTE(review): rand.Seed is deprecated since Go 1.20; the global source is
// auto-seeded there — consider removing this once the toolchain is updated.
func init() {
    rand.Seed(time.Now().UnixNano())
}

// RandomGenerator wraps a dedicated *rand.Rand so callers can draw random
// values without re-seeding and without contending on the global source.
type RandomGenerator struct {
    randomizer *rand.Rand // private source, seeded once in InitRandomizer
}

// InitRandomizer initializes a new RandomGenerator whose source is seeded
// with the current UNIX-nanosecond time.
func InitRandomizer() RandomGenerator {
    var random RandomGenerator
    random.randomizer = rand.New(rand.NewSource(time.Now().UnixNano()))
    return random
}
// RandomInt returns an integer in the half-open interval [min, max), drawn
// from the generator's own source (no re-seeding occurs per call).
// Panics if max <= min (rand.Intn requires a positive argument).
func (rander RandomGenerator) RandomInt(min, max int) int {
    return rander.randomizer.Intn(max-min) + min
}

// RandomInt32 returns an int32 in the half-open interval [min, max).
// Panics if max <= min.
func (rander RandomGenerator) RandomInt32(min, max int32) int32 {
    return rander.randomizer.Int31n(max-min) + min
}

// RandomInt64 returns an int64 in the half-open interval [min, max).
// Panics if max <= min.
func (rander RandomGenerator) RandomInt64(min, max int64) int64 {
    return rander.randomizer.Int63n(max-min) + min
}

// RandomFloat32 returns a float32 in the half-open interval [min, max).
func (rander RandomGenerator) RandomFloat32(min, max float32) float32 {
    return min + rander.randomizer.Float32()*(max-min)
}

// RandomFloat64 returns a float64 in the half-open interval [min, max).
func (rander RandomGenerator) RandomFloat64(min, max float64) float64 {
    return min + rander.randomizer.Float64()*(max-min)
}
// RandomIntArray returns a new slice of the given length filled with random
// ints in [min, max).
func (rander RandomGenerator) RandomIntArray(min, max, length int) []int {
    array := make([]int, length)
    for i := 0; i < length; i++ {
        array[i] = rander.RandomInt(min, max)
    }
    return array
}

// RandomInt32Array returns a new slice of the given length filled with random
// int32 values in [min, max).
func (rander RandomGenerator) RandomInt32Array(min, max int32, length int) []int32 {
    array := make([]int32, length)
    for i := 0; i < length; i++ {
        array[i] = rander.RandomInt32(min, max)
    }
    return array
}

// RandomInt64Array returns a new slice of the given length filled with random
// int64 values in [min, max).
func (rander RandomGenerator) RandomInt64Array(min, max int64, length int) []int64 {
    array := make([]int64, length)
    for i := 0; i < length; i++ {
        array[i] = rander.RandomInt64(min, max)
    }
    return array
}

// RandomFloat32Array returns a new slice of the given length filled with
// random float32 values in [min, max).
func (rander RandomGenerator) RandomFloat32Array(min, max float32, length int) []float32 {
    array := make([]float32, length)
    for i := 0; i < length; i++ {
        array[i] = rander.RandomFloat32(min, max)
    }
    return array
}

// RandomFloat64Array returns a new slice of the given length filled with
// random float64 values in [min, max).
func (rander RandomGenerator) RandomFloat64Array(min, max float64, length int) []float64 {
    array := make([]float64, length)
    for i := 0; i < length; i++ {
        array[i] = rander.RandomFloat64(min, max)
    }
    return array
}
// RandomByte returns a freshly allocated byte slice of the requested length
// filled with pseudo-random bytes from the package-level source.
func RandomByte(length int) []byte {
    buf := make([]byte, length)
    // math/rand's Read never returns an error, so it is safe to ignore.
    rand.Read(buf)
    return buf
}
// RandomInt returns an integer in the half-open interval [min, max), drawn
// from the package-level (seeded) source. Panics if max <= min.
func RandomInt(min, max int) int {
    return min + rand.Intn(max-min)
}

// RandomInt32 returns an int32 in the half-open interval [min, max).
// Panics if max <= min.
func RandomInt32(min, max int32) int32 {
    return min + rand.Int31n(max-min)
}

// RandomInt64 returns an int64 in the half-open interval [min, max).
// Panics if max <= min.
func RandomInt64(min, max int64) int64 {
    return min + rand.Int63n(max-min)
}

// RandomFloat64 returns a float64 in the half-open interval [min, max).
func RandomFloat64(min, max float64) float64 {
    spread := max - min
    return min + rand.Float64()*spread
}

// RandomFloat32 returns a float32 in the half-open interval [min, max).
func RandomFloat32(min, max float32) float32 {
    spread := max - min
    return min + rand.Float32()*spread
}
// GenerateSequentialIntArray returns a slice of the given length containing
// the values 0, 1, ..., length-1 in order.
func GenerateSequentialIntArray(length int) []int {
    out := make([]int, length)
    for idx := range out {
        out[idx] = idx
    }
    return out
}
// GenerateSequentialFloat32Array returns a slice of the given length
// containing the values 0.0, 1.0, ..., float32(length-1) in order.
func GenerateSequentialFloat32Array(length int) []float32 {
    out := make([]float32, length)
    for idx := range out {
        out[idx] = float32(idx)
    }
    return out
}
// ByteCountSI renders a byte count using SI (decimal, powers of 1000) units,
// e.g. 999 -> "999 B", 1000 -> "1.0 kB", 2500000 -> "2.5 MB".
func ByteCountSI(b int64) string {
    const unit = 1000
    if b < unit {
        return fmt.Sprintf("%d B", b)
    }
    div := int64(unit)
    exp := 0
    // Find the largest power of 1000 that fits, tracking the unit index.
    for rest := b / unit; rest >= unit; rest /= unit {
        div *= unit
        exp++
    }
    return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp])
}
// ByteCountIEC renders a byte count using IEC (binary, powers of 1024) units,
// e.g. 1023 -> "1023 B", 1024 -> "1.0 KiB".
func ByteCountIEC(b int64) string {
    const unit = 1024
    if b < unit {
        return fmt.Sprintf("%d B", b)
    }
    div := int64(unit)
    exp := 0
    // Find the largest power of 1024 that fits, tracking the unit index.
    for rest := b / unit; rest >= unit; rest /= unit {
        div *= unit
        exp++
    }
    return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
}
// ConvertSize converts a byte count to the requested decimal (SI) dimension.
// The dimension is case-insensitive; both short ("MB") and long ("MEGABYTE")
// forms are accepted. "EB" is the standard abbreviation for exabyte and is
// accepted alongside the legacy "XB" spelling; "B"/"BYTE" return the input
// unchanged. Unknown dimensions return 0 (the previous behavior).
func ConvertSize(bytes float64, dimension string) float64 {
    var value float64
    dimension = strings.ToUpper(dimension)
    switch dimension {
    case "B", "BYTE":
        value = bytes
    case "KB", "KILOBYTE":
        value = bytes / 1000
    case "MB", "MEGABYTE":
        value = bytes / math.Pow(1000, 2)
    case "GB", "GIGABYTE":
        value = bytes / math.Pow(1000, 3)
    case "TB", "TERABYTE":
        value = bytes / math.Pow(1000, 4)
    case "PB", "PETABYTE":
        value = bytes / math.Pow(1000, 5)
    case "EB", "XB", "EXABYTE":
        value = bytes / math.Pow(1000, 6)
    case "ZB", "ZETTABYTE":
        value = bytes / math.Pow(1000, 7)
    }
    return value
}
// Marshal renders data as compact JSON. If marshalling fails, the value is
// rendered with fmt's "%+v" verb (plus a trailing newline) instead.
func Marshal(data interface{}) string {
    raw, err := json.Marshal(data)
    if err != nil {
        return fmt.Sprintf("%+v\n", data)
    }
    return string(raw)
}
// MarshalIndent renders data as indented JSON (prefix and indent are both a
// single space). If marshalling fails, the value is rendered with "%+v".
func MarshalIndent(data interface{}) string {
    raw, err := json.MarshalIndent(data, " ", " ")
    if err != nil {
        return fmt.Sprintf("%+v\n", data)
    }
    return string(raw)
}
package lib
import (
"gopkg.in/dedis/crypto.v0/abstract"
"gopkg.in/dedis/crypto.v0/cipher"
"gopkg.in/dedis/onet.v1/network"
"strconv"
"strings"
"sync"
)
// Objects
//______________________________________________________________________________________________________________________
// GroupingKey is a string tag uniquely identifying one combination of grouping attributes.
type GroupingKey string

// TempID is a unique ID linking the entries of a map that has been split into two associated maps.
type TempID uint64

// CipherVectorScalar holds precomputed shuffling material: a CipherVector and
// the scalar used to build each of its elements.
type CipherVectorScalar struct {
    CipherV CipherVector
    S       []abstract.Scalar
}

// CipherVectorScalarBytes is the serialized (byte) form of a CipherVectorScalar.
type CipherVectorScalarBytes struct {
    CipherV [][][]byte
    S       [][]byte
}

// DpClearResponse represents a DP response when data is stored in clear at each server/hospital.
// The *Clear maps hold plaintext values; the *Enc maps hold values that will be encrypted before sending.
type DpClearResponse struct {
    WhereClear                 map[string]int64
    WhereEnc                   map[string]int64
    GroupByClear               map[string]int64
    GroupByEnc                 map[string]int64
    AggregatingAttributesClear map[string]int64
    AggregatingAttributesEnc   map[string]int64
}

// DpResponse represents an encrypted DP response (as it is sent to a server).
type DpResponse struct {
    WhereClear                 map[string]int64
    WhereEnc                   map[string]CipherText
    GroupByClear               map[string]int64
    GroupByEnc                 map[string]CipherText
    AggregatingAttributesClear map[string]int64
    AggregatingAttributesEnc   map[string]CipherText
}

// DpResponseToSend is a DpResponse with ciphertexts flattened to bytes so it can be sent with protobuf.
type DpResponseToSend struct {
    WhereClear                 map[string]int64
    WhereEnc                   map[string][]byte
    GroupByClear               map[string]int64
    GroupByEnc                 map[string][]byte
    AggregatingAttributesClear map[string]int64
    AggregatingAttributesEnc   map[string][]byte
}

// ProcessResponse is a response in the format used during the shuffling and deterministic-tagging steps.
type ProcessResponse struct {
    WhereEnc              CipherVector
    GroupByEnc            CipherVector
    AggregatingAttributes CipherVector
}

// WhereQueryAttribute is the name and encrypted value of a where attribute in the query.
type WhereQueryAttribute struct {
    Name  string
    Value CipherText
}

// WhereQueryAttributeTagged is a WhereQueryAttribute whose value has been deterministically tagged.
type WhereQueryAttributeTagged struct {
    Name  string
    Value GroupingKey
}

// ProcessResponseDet is a ProcessResponse together with its deterministic tags
// (one for the group-by attributes, one per where attribute).
type ProcessResponseDet struct {
    PR            ProcessResponse
    DetTagGroupBy GroupingKey
    DetTagWhere   []GroupingKey
}

// FilteredResponseDet is a FilteredResponse with its deterministic group-by tag.
type FilteredResponseDet struct {
    DetTagGroupBy GroupingKey
    Fr            FilteredResponse
}

// FilteredResponse is a response after the filtering step of the protocol (used until the end).
type FilteredResponse struct {
    GroupByEnc            CipherVector
    AggregatingAttributes CipherVector
}
// Functions
//______________________________________________________________________________________________________________________
// NewFilteredResponse creates a new FilteredResponse with the chosen number of
// (encrypted) grouping and aggregating attributes, all initialized to fresh
// zero-value ciphertexts.
func NewFilteredResponse(grpEncSize, attrSize int) FilteredResponse {
    return FilteredResponse{*NewCipherVector(grpEncSize), *NewCipherVector(attrSize)}
}
// GroupingKey
//______________________________________________________________________________________________________________________
// Key builds a GroupingKey tag from clear grouping attributes.
// Each attribute is rendered in decimal followed by a comma, e.g. [1, 2] -> "1,2,".
func Key(ga []int64) GroupingKey {
    parts := make([]string, 0, 2*len(ga))
    for _, attr := range ga {
        parts = append(parts, strconv.Itoa(int(attr)), ",")
    }
    return GroupingKey(strings.Join(parts, ""))
}
// UnKey parses a GroupingKey tag back into its list of grouping attributes;
// it is the inverse of Key (every comma terminates one decimal number).
// Fixes a bug in the previous char-by-char implementation, which kept only
// the LAST digit of each number (e.g. the tag for [12, 34] decoded as [2, 4])
// and could not handle negative values.
func UnKey(gk GroupingKey) []int64 {
    tab := make([]int64, 0)
    for _, tok := range strings.Split(string(gk), ",") {
        if tok == "" {
            // Key emits a trailing comma, which yields one empty token here.
            continue
        }
        // Parse errors are ignored, matching the original's behavior of
        // silently treating malformed tokens as 0.
        nbr, _ := strconv.Atoi(tok)
        tab = append(tab, int64(nbr))
    }
    return tab
}
// ClientResponse
//______________________________________________________________________________________________________________________
// Add sums the aggregating attributes of cv1 and cv2 into cv, keeping cv1's
// grouping attributes (the two responses are assumed to share the same group).
// NOTE(review): cv.GroupByEnc aliases cv1.GroupByEnc (slice header copy, not a
// deep copy) — later mutation of either would affect both; confirm callers
// treat the result as read-only.
func (cv *FilteredResponse) Add(cv1, cv2 FilteredResponse) *FilteredResponse {
    cv.GroupByEnc = cv1.GroupByEnc
    cv.AggregatingAttributes.Add(cv1.AggregatingAttributes, cv2.AggregatingAttributes)
    return cv
}
// CipherVectorTag computes one scalar e per ciphertext component of the
// process response (aggregating, then group-by, then where attributes), each
// derived deterministically from the seed point h via ComputeE.
// When PARALLELIZE is set, the scalars are computed in chunks of VPARALLELIZE,
// one goroutine per chunk; writes to es are disjoint per goroutine, so no
// locking is needed.
func (cv *ProcessResponse) CipherVectorTag(h abstract.Point) []abstract.Scalar {
    aggrAttrLen := len((*cv).AggregatingAttributes)
    grpAttrLen := len((*cv).GroupByEnc)
    whereAttrLen := len((*cv).WhereEnc)
    es := make([]abstract.Scalar, aggrAttrLen+grpAttrLen+whereAttrLen)

    // The seed is the binary form of h; every scalar is derived from it.
    seed, _ := h.MarshalBinary()

    var wg sync.WaitGroup
    if PARALLELIZE {
        for i := 0; i < aggrAttrLen+grpAttrLen+whereAttrLen; i = i + VPARALLELIZE {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                for j := 0; j < VPARALLELIZE && (j+i < aggrAttrLen+grpAttrLen+whereAttrLen); j++ {
                    es[i+j] = ComputeE(i+j, (*cv), seed, aggrAttrLen, grpAttrLen)
                }
            }(i)
        }
        wg.Wait()
    } else {
        for i := 0; i < aggrAttrLen+grpAttrLen+whereAttrLen; i++ {
            //+detAttrLen
            es[i] = ComputeE(i, (*cv), seed, aggrAttrLen, grpAttrLen)
        }
    }
    return es
}
// ComputeE computes the e scalar used in a shuffle proof for the component at
// the given global index, deterministically from the public seed. The index
// space is laid out as [aggregating | group-by | where]: indices below
// aggrAttrLen select an aggregating attribute, the next grpAttrLen a group-by
// attribute, and the rest a where attribute. The chosen ciphertext's C and K
// points are absorbed into a seeded stream cipher, from which the scalar is
// then drawn.
func ComputeE(index int, cv ProcessResponse, seed []byte, aggrAttrLen, grpAttrLen int) abstract.Scalar {
    var dataC []byte
    var dataK []byte

    randomCipher := network.Suite.Cipher(seed)
    if index < aggrAttrLen {
        dataC, _ = cv.AggregatingAttributes[index].C.MarshalBinary()
        dataK, _ = cv.AggregatingAttributes[index].K.MarshalBinary()
    } else if index < aggrAttrLen+grpAttrLen {
        dataC, _ = cv.GroupByEnc[index-aggrAttrLen].C.MarshalBinary()
        dataK, _ = cv.GroupByEnc[index-aggrAttrLen].K.MarshalBinary()
    } else {
        dataC, _ = cv.WhereEnc[index-aggrAttrLen-grpAttrLen].C.MarshalBinary()
        dataK, _ = cv.WhereEnc[index-aggrAttrLen-grpAttrLen].K.MarshalBinary()
    }
    // Absorb both points into the cipher state before drawing the scalar.
    randomCipher.Message(nil, nil, dataC)
    randomCipher.Message(nil, nil, dataK)

    return network.Suite.Scalar().Pick(randomCipher)
}
// DpClearResponse
//______________________________________________________________________________________________________________________
// EncryptDpClearResponse encrypts the *Enc maps of a clear DP response under
// the given key, producing a protobuf-ready DpResponseToSend (ciphertexts as
// bytes). The *Clear maps are passed through unencrypted. When count is true,
// an extra encrypted 1 is added under the "count" aggregating attribute so the
// servers can tally the number of responses.
func EncryptDpClearResponse(ccr DpClearResponse, encryptionKey abstract.Point, count bool) DpResponseToSend {
    cr := DpResponseToSend{}
    cr.GroupByClear = ccr.GroupByClear
    cr.GroupByEnc = make(map[string][]byte, len(ccr.GroupByEnc))
    for i, v := range ccr.GroupByEnc {
        cr.GroupByEnc[i] = ((*EncryptInt(encryptionKey, v)).ToBytes())
    }
    //cr.GroupByEnc = *EncryptIntVector(encryptionKey, ccr.GroupByEnc)
    cr.WhereClear = ccr.WhereClear
    cr.WhereEnc = make(map[string][]byte, len(ccr.WhereEnc))
    for i, v := range ccr.WhereEnc {
        cr.WhereEnc[i] = ((*EncryptInt(encryptionKey, v)).ToBytes())
    }
    //cr.WhereEnc = *EncryptIntVector(encryptionKey, ccr.WhereEnc)
    cr.AggregatingAttributesClear = ccr.AggregatingAttributesClear
    cr.AggregatingAttributesEnc = make(map[string][]byte, len(ccr.AggregatingAttributesEnc))
    for i, v := range ccr.AggregatingAttributesEnc {
        cr.AggregatingAttributesEnc[i] = ((*EncryptInt(encryptionKey, v)).ToBytes())
    }
    if count {
        cr.AggregatingAttributesEnc["count"] = (*EncryptInt(encryptionKey, int64(1))).ToBytes()
    }

    return cr
}
// Other random stuff!! :P
//______________________________________________________________________________________________________________________
// CreatePrecomputedRandomize precomputes nbrLines rows of shuffling material,
// each holding lineSize (K, C) El-Gamal pairs K = g^s, C = h^s together with
// their scalars s. In the parallel branch, the shared rand stream is the only
// shared state, so only the Pick call is guarded by the mutex; each goroutine
// writes to its own row of result.
// NOTE(review): the sequential branch still consumes wg decrements via
// EndParallelize even though no goroutines run — relies on StartParallelize /
// EndParallelize tolerating that; confirm against their implementation.
func CreatePrecomputedRandomize(g, h abstract.Point, rand cipher.Stream, lineSize, nbrLines int) []CipherVectorScalar {
    result := make([]CipherVectorScalar, nbrLines)
    wg := StartParallelize(len(result))
    var mutex sync.Mutex
    for i := range result {
        result[i].CipherV = make(CipherVector, lineSize)
        result[i].S = make([]abstract.Scalar, lineSize)

        if PARALLELIZE {
            go func(i int) {
                defer (*wg).Done()

                for w := range result[i].CipherV {
                    // rand is a shared stream: drawing from it must be serialized.
                    mutex.Lock()
                    tmp := network.Suite.Scalar().Pick(rand)
                    mutex.Unlock()

                    result[i].S[w] = tmp
                    result[i].CipherV[w].K = network.Suite.Point().Mul(g, tmp)
                    result[i].CipherV[w].C = network.Suite.Point().Mul(h, tmp)
                }

            }(i)
        } else {
            for w := range result[i].CipherV {
                tmp := network.Suite.Scalar().Pick(rand)

                result[i].S[w] = tmp
                result[i].CipherV[w].K = network.Suite.Point().Mul(g, tmp)
                result[i].CipherV[w].C = network.Suite.Point().Mul(h, tmp)
            }
        }
    }
    EndParallelize(wg)
    return result
}
// Conversion
//______________________________________________________________________________________________________________________
// ToBytes converts a FilteredResponse to a byte array laid out as
// [aggregating attributes | group-by attributes]. It returns the bytes
// followed by the group-by length and the aggregating length (in ciphertext
// elements, not bytes) needed by FromBytes.
func (cv *FilteredResponse) ToBytes() ([]byte, int, int) {
    b := make([]byte, 0)
    pgaeb := make([]byte, 0)
    pgaebLength := 0

    aab, aabLength := (*cv).AggregatingAttributes.ToBytes()
    if (*cv).GroupByEnc != nil {
        pgaeb, pgaebLength = (*cv).GroupByEnc.ToBytes()
    }

    b = append(b, aab...)
    b = append(b, pgaeb...)

    return b, pgaebLength, aabLength
}
// FromBytes converts a byte array (as produced by ToBytes) back into a
// FilteredResponse. The lengths are in ciphertext elements; each element is
// 64 bytes on the wire. Note that you need to create the (empty) object
// beforehand.
func (cv *FilteredResponse) FromBytes(data []byte, aabLength, pgaebLength int) {
    (*cv).AggregatingAttributes = make(CipherVector, aabLength)
    (*cv).GroupByEnc = make(CipherVector, pgaebLength)

    aabByteLength := (aabLength * 64) //CAREFUL: hardcoded 64 (size of el-gamal element C,K)
    pgaebByteLength := (pgaebLength * 64)

    aab := data[:aabByteLength]
    pgaeb := data[aabByteLength : aabByteLength+pgaebByteLength]

    (*cv).AggregatingAttributes.FromBytes(aab, aabLength)
    (*cv).GroupByEnc.FromBytes(pgaeb, pgaebLength)
}
// ToBytes converts a FilteredResponseDet to a byte array: the serialized
// FilteredResponse followed by the raw bytes of the group-by tag. Returns the
// element lengths of the response parts plus the byte length of the tag.
func (crd *FilteredResponseDet) ToBytes() ([]byte, int, int, int) {
    b, gacbLength, aabLength := (*crd).Fr.ToBytes()

    dtbgb := []byte((*crd).DetTagGroupBy)
    dtbgbLength := len(dtbgb)

    b = append(b, dtbgb...)

    return b, gacbLength, aabLength, dtbgbLength
}
// FromBytes converts a byte array (as produced by ToBytes) back into a
// FilteredResponseDet. The layout is [aggregating | group-by | tag bytes];
// lengths are in ciphertext elements (64 bytes each) except dtbgbLength,
// which is in bytes. Note that you need to create the (empty) object
// beforehand.
func (crd *FilteredResponseDet) FromBytes(data []byte, gacbLength, aabLength, dtbgbLength int) {
    (*crd).Fr.AggregatingAttributes = make(CipherVector, aabLength)
    (*crd).Fr.GroupByEnc = make(CipherVector, gacbLength)

    aabByteLength := (aabLength * 64) //CAREFUL: hardcoded 64 (size of el-gamal element C,K)
    gacbByteLength := (gacbLength * 64)

    aab := data[:aabByteLength]
    gacb := data[aabByteLength : gacbByteLength+aabByteLength]
    dtbgb := data[gacbByteLength+aabByteLength : gacbByteLength+aabByteLength+dtbgbLength]

    (*crd).DetTagGroupBy = GroupingKey(string(dtbgb))
    (*crd).Fr.AggregatingAttributes.FromBytes(aab, aabLength)
    (*crd).Fr.GroupByEnc.FromBytes(gacb, gacbLength)
}
// ToBytes converts a ProcessResponse to a byte array laid out as
// [group-by | aggregating | where]. It returns the bytes followed by the
// three section lengths (in ciphertext elements) needed by FromBytes.
func (cv *ProcessResponse) ToBytes() ([]byte, int, int, int) {
    b := make([]byte, 0)
    pgaeb := make([]byte, 0)
    pgaebLength := 0

    gacb, gacbLength := (*cv).GroupByEnc.ToBytes()
    aab, aabLength := (*cv).AggregatingAttributes.ToBytes()
    if (*cv).WhereEnc != nil {
        pgaeb, pgaebLength = (*cv).WhereEnc.ToBytes()
    }

    b = append(b, gacb...)
    b = append(b, aab...)
    b = append(b, pgaeb...)

    return b, gacbLength, aabLength, pgaebLength
}
// FromBytes converts a byte array (as produced by ToBytes) back into a
// ProcessResponse. Lengths are in ciphertext elements; each element is 64
// bytes on the wire. Note that you need to create the (empty) object
// beforehand.
func (cv *ProcessResponse) FromBytes(data []byte, gacbLength, aabLength, pgaebLength int) {
    (*cv).AggregatingAttributes = make(CipherVector, aabLength)
    (*cv).WhereEnc = make(CipherVector, pgaebLength)
    (*cv).GroupByEnc = make(CipherVector, gacbLength)

    gacbByteLength := (gacbLength * 64)
    aabByteLength := (aabLength * 64) //CAREFUL: hardcoded 64 (size of el-gamal element C,K)
    pgaebByteLength := (pgaebLength * 64)

    gacb := data[:gacbByteLength]
    aab := data[gacbByteLength : gacbByteLength+aabByteLength]
    pgaeb := data[gacbByteLength+aabByteLength : gacbByteLength+aabByteLength+pgaebByteLength]

    (*cv).GroupByEnc.FromBytes(gacb, gacbLength)
    (*cv).AggregatingAttributes.FromBytes(aab, aabLength)
    (*cv).WhereEnc.FromBytes(pgaeb, pgaebLength)
}
// ToBytes converts a ProcessResponseDet to a byte array: the serialized
// ProcessResponse, then the group-by tag bytes, then the where tag bytes.
// Fixes a copy-paste bug in the previous version, which serialized
// DetTagGroupBy twice and never wrote DetTagWhere at all.
// The where tags are concatenated into one opaque segment (their individual
// boundaries are not encoded) — TODO(review): add per-tag lengths if callers
// need to recover the individual where tags after a round-trip.
func (crd *ProcessResponseDet) ToBytes() ([]byte, int, int, int, int, int) {
    b, gacbLength, aabLength, pgaebLength := (*crd).PR.ToBytes()

    dtbgb := []byte((*crd).DetTagGroupBy)
    dtbgbLength := len(dtbgb)

    dtbw := make([]byte, 0)
    for _, w := range (*crd).DetTagWhere {
        dtbw = append(dtbw, []byte(w)...)
    }
    dtbwLength := len(dtbw)

    b = append(b, dtbgb...)
    b = append(b, dtbw...)

    return b, gacbLength, aabLength, pgaebLength, dtbgbLength, dtbwLength
}
// FromBytes converts a byte array to a ProcessResponseDet. Note that you need to create the (empty) object beforehand.
// The buffer layout mirrors ToBytes: group-by, aggregating, where, then the two deterministic tags.
func (crd *ProcessResponseDet) FromBytes(data []byte, gacbLength, aabLength, pgaebLength, dtbgbLength, dtbwLength int) {
	(*crd).PR.AggregatingAttributes = make(CipherVector, aabLength)
	(*crd).PR.WhereEnc = make(CipherVector, pgaebLength)
	(*crd).PR.GroupByEnc = make(CipherVector, gacbLength)
	aabByteLength := (aabLength * 64) //CAREFUL: hardcoded 64 (size of el-gamal element C,K)
	pgaebByteLength := (pgaebLength * 64)
	gacbByteLength := (gacbLength * 64)
	gacb := data[:gacbByteLength]
	aab := data[gacbByteLength : gacbByteLength+aabByteLength]
	pgaeb := data[gacbByteLength+aabByteLength : gacbByteLength+aabByteLength+pgaebByteLength]
	tagsStart := gacbByteLength + aabByteLength + pgaebByteLength
	dtbgb := data[tagsStart : tagsStart+dtbgbLength]
	// BUG FIX: the upper bound previously added dtbgbLength twice
	// (tagsStart+dtbgbLength+dtbgbLength+dtbwLength), reading past the where
	// tag or out of bounds whenever dtbgbLength > 0.
	dtbw := data[tagsStart+dtbgbLength : tagsStart+dtbgbLength+dtbwLength]
	(*crd).DetTagGroupBy = GroupingKey(string(dtbgb))
	// BUG FIX: the where tag was previously assigned to DetTagGroupBy as well,
	// clobbering the group-by tag set on the line above.
	(*crd).DetTagWhere = GroupingKey(string(dtbw)) // TODO(review): confirm DetTagWhere's declared type
	(*crd).PR.AggregatingAttributes.FromBytes(aab, aabLength)
	(*crd).PR.WhereEnc.FromBytes(pgaeb, pgaebLength)
	(*crd).PR.GroupByEnc.FromBytes(gacb, gacbLength)
}
// FromDpResponseToSend converts a DpResponseToSend to a DpResponse by copying
// the clear maps and decoding every encrypted byte map into a CipherText map.
func (dr *DpResponse) FromDpResponseToSend(dprts DpResponseToSend) {
	dr.GroupByClear = dprts.GroupByClear
	if len(dprts.GroupByEnc) != 0 {
		dr.GroupByEnc = MapBytesToMapCipherText(dprts.GroupByEnc)
	}
	dr.WhereClear = dprts.WhereClear
	// Reuse MapBytesToMapCipherText instead of duplicating its decode loop
	// inline (the original hand-rolled the same loop for the two fields below).
	// The len != 0 guards preserve the original behavior of leaving the target
	// field untouched when the source map is empty.
	if len(dprts.WhereEnc) != 0 {
		dr.WhereEnc = MapBytesToMapCipherText(dprts.WhereEnc)
	}
	dr.AggregatingAttributesClear = dprts.AggregatingAttributesClear
	if len(dprts.AggregatingAttributesEnc) != 0 {
		dr.AggregatingAttributesEnc = MapBytesToMapCipherText(dprts.AggregatingAttributesEnc)
	}
}
// joinAttributes joins clear and encrypted attributes into one encrypted container (CipherVector).
// The clear values come first, followed by the enc values (offset by len(clear)
// when converting), and every entry is encrypted under encryptionKey.
func joinAttributes(clear, enc map[string]int64, identifier string, encryptionKey abstract.Point) CipherVector {
	clearValues := ConvertMapToData(clear, identifier, 0)
	encValues := ConvertMapToData(enc, identifier, len(clear))
	joined := make(CipherVector, 0, len(clearValues)+len(encValues))
	for _, v := range clearValues {
		joined = append(joined, *EncryptInt(encryptionKey, int64(v)))
	}
	for _, v := range encValues {
		joined = append(joined, *EncryptInt(encryptionKey, int64(v)))
	}
	return joined
}
// FromDpClearResponseToProcess converts a DpClearResponse struct to a ProcessResponse struct,
// encrypting each joined attribute group under the supplied key. The "s"/"w"/"g"
// identifiers tag the aggregating, where and group-by groups respectively.
func (dcr *DpClearResponse) FromDpClearResponseToProcess(encryptionKey abstract.Point) ProcessResponse {
	return ProcessResponse{
		AggregatingAttributes: joinAttributes(dcr.AggregatingAttributesClear, dcr.AggregatingAttributesEnc, "s", encryptionKey),
		WhereEnc:              joinAttributes(dcr.WhereClear, dcr.WhereEnc, "w", encryptionKey),
		GroupByEnc:            joinAttributes(dcr.GroupByClear, dcr.GroupByEnc, "g", encryptionKey),
	}
}
// MapBytesToMapCipherText transform objects in a map from bytes to ciphertexts.
// Returns nil (not an empty map) when the input map is empty, so callers can
// distinguish "nothing to decode" from a decoded result.
func MapBytesToMapCipherText(mapBytes map[string][]byte) map[string]CipherText {
	result := make(map[string]CipherText)
	if len(mapBytes) != 0 {
		for i, v := range mapBytes {
			ct := CipherText{}
			ct.FromBytes(v)
			result[i] = ct
		}
		return result
	}
	// Empty input: discard the map allocated above and signal absence with nil.
	return nil
} | lib/structs.go | 0.518546 | 0.413832 | structs.go | starcoder |
package v1alpha1
// NOTE(review): these empty interfaces look machine-generated (Kubernetes
// client-gen style lister expansions); prefer regenerating over hand-editing.

// AuthorizationRuleListerExpansion allows custom methods to be added to
// AuthorizationRuleLister.
type AuthorizationRuleListerExpansion interface{}

// AuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// AuthorizationRuleNamespaceLister.
type AuthorizationRuleNamespaceListerExpansion interface{}

// ClusterListerExpansion allows custom methods to be added to
// ClusterLister.
type ClusterListerExpansion interface{}

// ClusterNamespaceListerExpansion allows custom methods to be added to
// ClusterNamespaceLister.
type ClusterNamespaceListerExpansion interface{}

// ConsumerGroupListerExpansion allows custom methods to be added to
// ConsumerGroupLister.
type ConsumerGroupListerExpansion interface{}

// ConsumerGroupNamespaceListerExpansion allows custom methods to be added to
// ConsumerGroupNamespaceLister.
type ConsumerGroupNamespaceListerExpansion interface{}

// EventhubListerExpansion allows custom methods to be added to
// EventhubLister.
type EventhubListerExpansion interface{}

// EventhubNamespaceListerExpansion allows custom methods to be added to
// EventhubNamespaceLister.
type EventhubNamespaceListerExpansion interface{}

// NamespaceListerExpansion allows custom methods to be added to
// NamespaceLister.
type NamespaceListerExpansion interface{}

// NamespaceNamespaceListerExpansion allows custom methods to be added to
// NamespaceNamespaceLister.
type NamespaceNamespaceListerExpansion interface{}

// NamespaceAuthorizationRuleListerExpansion allows custom methods to be added to
// NamespaceAuthorizationRuleLister.
type NamespaceAuthorizationRuleListerExpansion interface{}

// NamespaceAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// NamespaceAuthorizationRuleNamespaceLister.
type NamespaceAuthorizationRuleNamespaceListerExpansion interface{}

// NamespaceCustomerManagedKeyListerExpansion allows custom methods to be added to
// NamespaceCustomerManagedKeyLister.
type NamespaceCustomerManagedKeyListerExpansion interface{}

// NamespaceCustomerManagedKeyNamespaceListerExpansion allows custom methods to be added to
// NamespaceCustomerManagedKeyNamespaceLister.
type NamespaceCustomerManagedKeyNamespaceListerExpansion interface{}

// NamespaceDisasterRecoveryConfigListerExpansion allows custom methods to be added to
// NamespaceDisasterRecoveryConfigLister.
type NamespaceDisasterRecoveryConfigListerExpansion interface{}

// NamespaceDisasterRecoveryConfigNamespaceListerExpansion allows custom methods to be added to
// NamespaceDisasterRecoveryConfigNamespaceLister.
type NamespaceDisasterRecoveryConfigNamespaceListerExpansion interface{} | client/listers/eventhub/v1alpha1/expansion_generated.go | 0.596786 | 0.439807 | expansion_generated.go | starcoder |
package iso20022
// PaymentTerms6 specifies the payment terms of the underlying transaction
// (ISO 20022 message component).
// NOTE(review): field order is significant for encoding/xml output; do not reorder.
type PaymentTerms6 struct {
	// Due date specified for the payment terms.
	DueDate *ISODate `xml:"DueDt,omitempty"`
	// Payment period specified for these payment terms.
	PaymentPeriod *PaymentPeriod1 `xml:"PmtPrd,omitempty"`
	// Textual description of these payment terms.
	Description []*Max140Text `xml:"Desc,omitempty"`
	// Partial payment, expressed as a percentage, for the payment terms.
	PartialPaymentPercent *PercentageRate `xml:"PrtlPmtPct,omitempty"`
	// Direct debit mandate identification specified for these payment terms.
	DirectDebitMandateIdentification []*Max35Text `xml:"DrctDbtMndtId,omitempty"`
	// Amount used as a basis to calculate the discount amount for these payment terms.
	BasisAmount *CurrencyAndAmount `xml:"BsisAmt,omitempty"`
	// Amount of money that results from the application of an agreed discount percentage to the basis amount and payable to the creditor.
	DiscountAmount *CurrencyAndAmount `xml:"DscntAmt,omitempty"`
	// Percent rate used to calculate the discount for these payment terms.
	DiscountPercentRate *PercentageRate `xml:"DscntPctRate,omitempty"`
	// Amount of money that results from the application of an agreed penalty percentage to the basis amount and payable by the creditor.
	PenaltyAmount *CurrencyAndAmount `xml:"PnltyAmt,omitempty"`
	// Percent rate used to calculate the penalty for these payment terms.
	PenaltyPercentRate *PercentageRate `xml:"PnltyPctRate,omitempty"`
}
// SetDueDate stores the due date as an ISODate.
func (p *PaymentTerms6) SetDueDate(value string) {
	d := ISODate(value)
	p.DueDate = &d
}

// AddPaymentPeriod allocates a fresh payment period and returns it for further population.
func (p *PaymentTerms6) AddPaymentPeriod() *PaymentPeriod1 {
	p.PaymentPeriod = &PaymentPeriod1{}
	return p.PaymentPeriod
}

// AddDescription appends one description line.
func (p *PaymentTerms6) AddDescription(value string) {
	d := Max140Text(value)
	p.Description = append(p.Description, &d)
}

// SetPartialPaymentPercent stores the partial-payment percentage.
func (p *PaymentTerms6) SetPartialPaymentPercent(value string) {
	r := PercentageRate(value)
	p.PartialPaymentPercent = &r
}

// AddDirectDebitMandateIdentification appends one direct debit mandate identification.
func (p *PaymentTerms6) AddDirectDebitMandateIdentification(value string) {
	id := Max35Text(value)
	p.DirectDebitMandateIdentification = append(p.DirectDebitMandateIdentification, &id)
}

// SetBasisAmount stores the amount used as the discount/penalty basis.
func (p *PaymentTerms6) SetBasisAmount(value, currency string) {
	p.BasisAmount = NewCurrencyAndAmount(value, currency)
}

// SetDiscountAmount stores the discount amount.
func (p *PaymentTerms6) SetDiscountAmount(value, currency string) {
	p.DiscountAmount = NewCurrencyAndAmount(value, currency)
}

// SetDiscountPercentRate stores the discount percentage rate.
func (p *PaymentTerms6) SetDiscountPercentRate(value string) {
	r := PercentageRate(value)
	p.DiscountPercentRate = &r
}

// SetPenaltyAmount stores the penalty amount.
func (p *PaymentTerms6) SetPenaltyAmount(value, currency string) {
	p.PenaltyAmount = NewCurrencyAndAmount(value, currency)
}
func (p *PaymentTerms6) SetPenaltyPercentRate(value string) {
p.PenaltyPercentRate = (*PercentageRate)(&value)
} | PaymentTerms6.go | 0.854111 | 0.436622 | PaymentTerms6.go | starcoder |
package enigma
import (
"fmt"
"strings"
)
// ReflectorConfig contains full configuration of a reflector.
type ReflectorConfig struct {
	Model         ReflectorModel
	WheelPosition byte   // position letter (see setWheelPosition); 0 means unset
	Wiring        string // custom wiring pairs (rewirable models only); empty means default
}

// isEmpty reports whether the config carries no explicit settings (zero value).
func (r ReflectorConfig) isEmpty() bool {
	return r.Model == "" && r.WheelPosition == 0 && r.Wiring == ""
}
// reflector is the runtime state of a reflector: its model, the resolved
// symmetric letter mapping, and the current wheel offset.
type reflector struct {
	model         ReflectorModel
	letterMap     map[int]int // symmetric: letterMap[a] == b implies letterMap[b] == a
	wheelPosition int         // 0-based offset derived from the position letter
}
// newReflector builds a reflector from the model's default wiring, precomputing
// the symmetric index map. It panics on an invalid wiring string, which would
// indicate a broken model definition rather than a runtime condition.
func newReflector(model ReflectorModel) reflector {
	wiring := model.getWiring()
	if !Alphabet.isValidWiring(wiring) {
		panic(fmt.Errorf("invalid reflector wiring %s", wiring))
	}
	letterMap := make(map[int]int, Alphabet.getSize())
	for i, letter := range wiring {
		letterIndex, ok := Alphabet.charToInt(byte(letter))
		if !ok {
			panic(fmt.Errorf("unsupported wiring letter %s", string(letter))) // should not happen, we already checked the wiring validity
		}
		// Position i connects to letterIndex in both directions (reflectors are symmetric).
		letterMap[i] = letterIndex
		letterMap[letterIndex] = i
	}
	return reflector{
		model:         model,
		letterMap:     letterMap,
		wheelPosition: 0,
	}
}
// setWheelPosition rotates a movable reflector to the given position letter.
// Fixed reflectors reject the call, as do letters outside the alphabet.
func (r *reflector) setWheelPosition(letter byte) error {
	if !r.model.IsMovable() {
		return fmt.Errorf("reflector %s is fixed, cannot change position", r.model)
	}
	position, valid := Alphabet.charToInt(letter)
	if !valid {
		return fmt.Errorf("unsupported reflector position %s", string(letter))
	}
	r.wheelPosition = position
	return nil
}
// setWiring applies a custom wiring (UKW-D style) given as space-separated
// letter pairs, e.g. "AB CD ...". Exactly size/2-1 pairs are required because
// J and Y are hard-wired; every other letter must appear exactly once.
// On success r.letterMap is replaced wholesale; on any error it is untouched.
func (r *reflector) setWiring(wiring string) error {
	if !r.model.IsRewirable() {
		return fmt.Errorf("reflector %s is not rewirable, cannot change wiring", r.model)
	}
	// UKW-D rewirable reflectors had different letter order (JY were always connected, the rest 12 pairs were configurable)
	ukwdOrder := "AJZXWVUTSRQPONYMLKIHGFEDCB"
	wiringMap := getDefaultLetterMap()
	// Pre-wire the fixed J-Y connection before processing user pairs.
	wiringMap[strings.IndexByte(ukwdOrder, 'J')] = strings.IndexByte(ukwdOrder, 'Y')
	wiringMap[strings.IndexByte(ukwdOrder, 'Y')] = strings.IndexByte(ukwdOrder, 'J')
	// rewire the reflector
	pairs := strings.Split(wiring, " ")
	expectedSize := Alphabet.getSize()/2 - 1
	if len(pairs) != expectedSize {
		return fmt.Errorf("incomplete wiring of the reflector, must include %d distinct pairs to cover the whole alphabet", expectedSize)
	}
	for _, pair := range pairs {
		// validate the pair
		if len(pair) != 2 {
			return fmt.Errorf("invalid pair %s, must be a pair of letters", pair)
		}
		if pair[0] == pair[1] {
			return fmt.Errorf("invalid pair %s, cannot connect reflector letter to itself", pair)
		}
		var letters [2]int
		for i := 0; i < 2; i++ {
			index := strings.IndexByte(ukwdOrder, pair[i])
			if index == -1 {
				return fmt.Errorf("invalid pair %s, unsupported letter %s", pair, string(pair[i]))
			}
			letters[i] = index
			// A letter mapped to something other than itself was already wired
			// (assumes getDefaultLetterMap maps each index to itself — TODO confirm).
			if mapped, ok := wiringMap[letters[i]]; ok && mapped != letters[i] {
				if pair[i] == 'Y' || pair[i] == 'J' {
					return fmt.Errorf("invalid pair %s, letters Y and J are hard-wired in UKW-D reflectors and cannot be changed", pair)
				}
				return fmt.Errorf("invalid pair %s, letter %s already wired", pair, string(pair[i]))
			}
		}
		// set to map
		wiringMap[letters[0]] = letters[1]
		wiringMap[letters[1]] = letters[0]
	}
	r.letterMap = wiringMap
	return nil
}
// translate maps an input letter index through the reflector wiring, applying
// the wheel offset before the lookup and removing it afterwards.
func (r *reflector) translate(input int) int {
	rotatedOutput := r.letterMap[shift(input, r.wheelPosition)]
	return shift(rotatedOutput, -r.wheelPosition) // don't forget to rotate back...
} | reflector.go | 0.649245 | 0.458046 | reflector.go | starcoder |
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
// pos is a 2-D grid coordinate (x = column, y = row).
type pos struct {
	x, y int
}

// add returns the component-wise sum of the two positions.
func (p pos) add(o pos) pos {
	return pos{p.x + o.x, p.y + o.y}
}

// maze is the parsed grid: one single-character string per cell, plus the
// dimensions recorded while loading (see loadLine for their exact semantics).
type maze struct {
	grid map[pos]string
	w, h int
}
// print dumps the grid to stdout, row by row.
func (m *maze) print() {
	for row := 0; row < m.h; row++ {
		for col := 0; col <= m.w; col++ {
			fmt.Print(m.grid[pos{col, row}])
		}
		fmt.Println()
	}
}
// isDoor reports whether the cell at p holds a door (uppercase letter).
func (m *maze) isDoor(p pos) bool {
	return isDoor(m.grid[p])
}

// isKey reports whether the cell at p holds a key (lowercase letter).
func (m *maze) isKey(p pos) bool {
	return isKey(m.grid[p])
}
// isDoor reports whether s denotes a door: its first byte is an uppercase
// ASCII letter. An empty string is not a door (previously this indexed s[0]
// unconditionally and panicked on "", e.g. for a grid lookup that missed).
func isDoor(s string) bool {
	return len(s) > 0 && s[0] >= 'A' && s[0] <= 'Z'
}
// isKey reports whether s denotes a key: its first byte is a lowercase
// ASCII letter. An empty string is not a key (previously this indexed s[0]
// unconditionally and panicked on "").
func isKey(s string) bool {
	return len(s) > 0 && s[0] >= 'a' && s[0] <= 'z'
}
// loadLine parses one input row into the maze, recording the start position '@'
// into o and collecting key/door cells into the supplied maps.
// NOTE(review): maze.w is left as the LAST column index (len-1), not the width,
// and maze.h counts rows; print and isValid depend on these exact semantics.
func loadLine(maze *maze, line string, o *pos, keys, doors map[pos]string) {
	for i, rune := range line {
		ch := string(rune)
		pos := pos{i, maze.h} // current row index is maze.h (incremented after the loop)
		maze.grid[pos] = ch
		if ch == "@" {
			o.x = i
			o.y = maze.h
		} else if isKey(ch) {
			keys[pos] = ch
		} else if isDoor(ch) {
			doors[pos] = ch
		}
		maze.w = i // ends as the index of the last column of this row
	}
	maze.h = maze.h + 1
}
// isValid reports whether p lies inside the grid bounds.
// NOTE(review): this uses p.x < m.w while m.w is the last column INDEX, so the
// rightmost column is treated as out of bounds; harmless for '#'-framed mazes
// but worth confirming against the intended width semantics.
func (p *pos) isValid(m *maze) bool {
	return p.x >= 0 && p.y >= 0 && p.x < m.w && p.y < m.h
}

// osets lists the four orthogonal neighbor offsets (left, up, right, down).
var osets = []pos{{-1, 0}, {0, -1}, {1, 0}, {0, 1}}

// queue is a simple FIFO of positions (appears unused in this chunk).
type queue []pos
// enqueueNeighbors appends every unvisited, in-bounds, non-wall neighbor of p
// to q, marking each one visited, and returns the extended queue.
func enqueueNeighbors(p pos, m *maze, visited map[pos]bool, q []pos) []pos {
	for _, delta := range osets {
		next := p.add(delta)
		if !next.isValid(m) || visited[next] {
			continue
		}
		if m.grid[next] == "#" {
			continue
		}
		visited[next] = true
		q = append(q, next)
	}
	return q
}
// dist records a reachable cell of interest: its position, the path length to
// it, and the doors that must be opened along the way.
type dist struct {
	p       pos
	d       int
	reqKeys []string // door names blocking the path (uppercase, per distanceDFS)
}

// priorityQueue is a min-heap of *dist ordered by distance, implementing
// container/heap's heap.Interface.
type priorityQueue []*dist

func (pq priorityQueue) Len() int           { return len(pq) }
func (pq priorityQueue) Less(i, j int) bool { return pq[i].d < pq[j].d }
func (pq priorityQueue) Swap(i, j int)      { pq[i], pq[j] = pq[j], pq[i] }

// Push appends an item; call via heap.Push, never directly.
func (pq *priorityQueue) Push(x interface{}) {
	item := x.(*dist)
	*pq = append(*pq, item)
}

// index returns the queue slot holding position p, or -1 if absent.
// Linear scan; appears unused in this chunk.
func (pq priorityQueue) index(p pos) int {
	for i, d := range pq {
		if d.p == p {
			return i
		}
	}
	return -1
}

// Pop removes and returns the last element; call via heap.Pop, never directly.
func (pq *priorityQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]
	old[n-1] = nil // avoid memory leak
	*pq = old[0 : n-1]
	return item
}
// getNeighbors returns the walkable (in-bounds, non-wall) cells adjacent to p.
func getNeighbors(p pos, m *maze) []pos {
	result := make([]pos, 0, 4)
	for _, delta := range osets {
		next := p.add(delta)
		if next.isValid(m) && m.grid[next] != "#" {
			result = append(result, next)
		}
	}
	return result
}
// copyDoors returns an independent copy of the doors slice so the caller can
// mutate it without aliasing the original backing array.
func copyDoors(doors []string) []string {
	dup := make([]string, len(doors))
	copy(dup, doors)
	return dup
}
// distanceDFS walks from p, recording into distance the path length and the
// blocking doors for every key/door cell reached. doorsInWay accumulates the
// doors on the current path (copied on extension so branches don't share
// backing arrays); visited is unwound on return so sibling branches may revisit.
// NOTE(review): DFS path lengths are not guaranteed shortest on cyclic grids,
// and later visits overwrite earlier distance entries — confirm this is the
// intended approximation for the inputs used.
func (m *maze) distanceDFS(p pos, visited map[pos]bool, doorsInWay []string, curDist int, distance map[pos]dist) {
	visited[p] = true
	curDist++ // each recursion level is one step away from the previous cell
	for _, neighbor := range getNeighbors(p, m) {
		if visited[neighbor] {
			continue
		}
		if m.isDoor(neighbor) || m.isKey(neighbor) {
			// record this door or key
			if m.isDoor(neighbor) {
				doorsInWay = append(copyDoors(doorsInWay), m.grid[neighbor])
			}
			distance[neighbor] = dist{neighbor, curDist, copyDoors(doorsInWay)}
			m.distanceDFS(neighbor, visited, doorsInWay, curDist, distance)
		} else {
			// traverse this node
			m.distanceDFS(neighbor, visited, doorsInWay, curDist, distance)
		}
	}
	delete(visited, p)
}
// countDistanceToOthers computes, for the cell at p, the distance (and blocking
// doors) to every reachable key/door via distanceDFS. The commented-out block
// below is a superseded BFS implementation kept for reference.
func (m *maze) countDistanceToOthers(p pos) map[pos]dist {
	log.Printf("Calculating adjency for %v @ %v", m.grid[p], p)
	visited := make(map[pos]bool)
	//enqueueNeighbors(p, m, visited, q)
	distance := make(map[pos]dist)
	doorsInWay := make([]string, 0, 100)
	m.distanceDFS(p, visited, doorsInWay, 0, distance)
	log.Printf("Adjacency for %v @ %v: %v", p, m.grid[p], distance)
	/*
		for len(q) > 0 {
			//log.Printf("Queue: %v", q)
			nQ := make([]pos, 0, 100)
			for _, cand := range q {
				val := m.grid[cand]
				if isKey(val) || isDoor(val) {
					distance = append(distance, dist{cand, rounds})
				}
				nQ = enqueueNeighbors(cand, m, visited, nQ)
			}
			q = nQ
			rounds++
		}
	*/
	return distance
}
// printCosts dumps the per-cell cost table to stdout (debug helper;
// map iteration order is random).
func (m *maze) printCosts(cost map[pos]int) {
	for k, v := range cost {
		fmt.Printf("%v @ %v -> %v\n", m.grid[k], k, v)
	}
}

// INFINITY is just a big number: a sentinel larger than any realistic path cost.
const INFINITY = 32000000
/*
func (m *maze) solve(p pos, doors, keys map[pos]string) int {
cost := make(map[pos]int)
// Calculate the reachable distance between each point.
distTo := make(map[pos][]dist)
for k, v := range m.grid {
if v == "@" || isDoor(v) || isKey(v) {
distTo[k] = m.countDistanceToOthers(k)
cost[k] = INFINITY
}
}
log.Printf("----DISTANCE----")
for k, v := range distTo {
log.Printf("%v -> %v\n", k, v)
}
cost[p] = 0
foundKeys := make(map[pos]map[string]bool)
for k := range distTo {
foundKeys[k] = make(map[string]bool)
}
q := make(priorityQueue, 0, len(keys))
heap.Init(&q)
heap.Push(&q, &dist{p, 0})
prev := make(map[pos]pos)
for len(q) > 0 {
m.printCosts(cost)
n := heap.Pop(&q).(*dist)
if cost[n.p] < n.d {
continue
}
if k, ok := keys[n.p]; ok {
foundKeys[n.p][strings.ToUpper(k)] = true
}
if len(foundKeys) == len(keys) {
return cost[n.p]
}
log.Printf("Processing %v -> %v", n, m.grid[n.p])
for _, d := range distTo[n.p] {
log.Printf(" \\-> Checking %v -> %v", d.p, m.grid[d.p])
log.Printf(" \\-> Held keys %v", foundKeys[n.p])
if isDoor(m.grid[d.p]) && !foundKeys[n.p][m.grid[d.p]] {
log.Printf(" \\-> missing key.")
continue
}
for k := range foundKeys[n.p] {
foundKeys[d.p][k] = true
}
if isKey(m.grid[d.p]) {
foundKeys[d.p][strings.ToUpper(m.grid[d.p])] = true
}
alt := n.d + d.d
log.Printf(" n: %v alt: %v", d, alt)
if alt < cost[d.p] {
cost[d.p] = alt
prev[d.p] = d.p
log.Printf(" \\-> Adding search from %v -> %v", d.p, m.grid[d.p])
heap.Push(&q, &dist{d.p, alt})
}
}
}
log.Printf("%v", cost)
maxCost := 0
for k, v := range cost {
if isKey(m.grid[k]) {
maxCost += v
}
}
return maxCost
}
*/
// hasKey reports whether the raw cell value at p is among the held keys.
// NOTE(review): pickupKey stores UPPERCASED names while this checks the raw
// (lowercase) grid value — asymmetric; this helper appears unused in this chunk.
func hasKey(m *maze, heldKeys map[string]bool, p pos) bool {
	return heldKeys[m.grid[p]]
}

// pickupKey records the key at p, uppercased so it matches door names.
func pickupKey(m *maze, heldKeys map[string]bool, p pos) {
	heldKeys[strings.ToUpper(m.grid[p])] = true
}

// dropKey removes the (uppercased) key at p, undoing pickupKey when backtracking.
func dropKey(m *maze, heldKeys map[string]bool, p pos) {
	delete(heldKeys, strings.ToUpper(m.grid[p]))
}
// hasNecessaryKeys reports whether every door in the list has its key held.
// An empty door list is vacuously satisfied.
func hasNecessaryKeys(heldKeys map[string]bool, doors []string) bool {
	for _, door := range doors {
		if !heldKeys[door] {
			return false
		}
	}
	return true
}
// printStack logs the path as a chain of cell glyphs, e.g. " -> @ -> a -> b".
func (m *maze) printStack(stack []pos) {
	line := ""
	for _, p := range stack {
		line += " -> " + m.grid[p]
	}
	log.Printf("%s\n", line)
}
// runDfs explores key-collection orders depth-first over the precomputed
// adjacency distTo, returning the best total distance found that beats toBeat.
// At the root (dist == 0) each branch gets deep-copied state and runs in its
// own goroutine; deeper levels share state and run effectively sequentially
// (the recursion goroutine is awaited immediately on the stop channel).
// NOTE(review): toBeat improvements are only propagated between sequential
// siblings; the root-level parallel branches do not see each other's bounds.
func (m *maze) runDfs(stack []pos, visited map[pos]bool, heldKeys map[string]bool, target int, distTo map[pos]map[pos]dist, dist int, toBeat int) int {
	if dist > toBeat {
		return toBeat // prune: already worse than the best known answer
	}
	p := stack[len(stack)-1]
	if len(heldKeys) == target {
		return dist // all keys collected
	}
	visited[p] = true
	log.Printf("%v :: Searching from %v -> %v, dist:%v", stack, p, m.grid[p], dist)
	result := toBeat
	waitFor := 0 // number of root-level goroutines still outstanding
	resCh := make(chan int)
	for _, dst := range distTo[p] {
		if visited[dst.p] {
			continue
		}
		log.Printf(" \\-> Considering %v -> %v, haveKeys %v", dst, m.grid[dst.p], heldKeys)
		if m.isDoor(dst.p) || m.isKey(dst.p) {
			if !hasNecessaryKeys(heldKeys, dst.reqKeys) {
				// Blocked: some door on the way has no key yet.
			} else {
				newStack := make([]pos, len(stack)+1)
				copy(newStack, stack)
				newStack[len(stack)] = dst.p
				if dist == 0 {
					// Root level: fork with copied state so branches are independent.
					newVisited := make(map[pos]bool)
					for k, v := range visited {
						newVisited[k] = v
					}
					newKeys := make(map[string]bool)
					for k, v := range heldKeys {
						newKeys[k] = v
					}
					waitFor++
					go func() {
						resCh <- m.runDfs(newStack, newVisited, newKeys, target, distTo, dist+dst.d, toBeat)
					}()
				} else {
					if m.isKey(dst.p) {
						pickupKey(m, heldKeys, dst.p)
					}
					// Synchronous recursion via goroutine + immediate receive.
					stop := make(chan int)
					go func() {
						stop <- m.runDfs(newStack, visited, heldKeys, target, distTo, dist+dst.d, toBeat)
					}()
					res := <-stop
					if res < result {
						if len(heldKeys) == target {
							log.Printf("New shortest path: %v", res)
						}
						result = res
						toBeat = res // tighten the pruning bound for later siblings
					}
					if m.isKey(dst.p) {
						dropKey(m, heldKeys, dst.p) // backtrack
					}
				}
			}
		}
	}
	// Collect the root-level parallel branches.
	for waitFor > 0 {
		res := <-resCh
		log.Printf("New candidate: %v", res)
		if res < result {
			result = res
		}
		waitFor--
	}
	delete(visited, p)
	return result
}
// solve2 solves using DFS: precompute pairwise reachability between the origin
// and all keys/doors, then search key-collection orders with runDfs.
// NOTE(review): the initial bound 7902 is a hardcoded upper bound, apparently
// for one specific input; any maze whose true answer exceeds it will be
// reported as 7902.
func (m *maze) solve2(p pos, doors, keys map[pos]string) int {
	visited := make(map[pos]bool)
	heldKeys := make(map[string]bool)
	distTo := make(map[pos]map[pos]dist)
	for k, v := range m.grid {
		if v == "@" || isDoor(v) || isKey(v) {
			distTo[k] = m.countDistanceToOthers(k)
		}
	}
	log.Printf("----------------------------------")
	stack := make([]pos, 1)
	stack[0] = p
	return m.runDfs(stack, visited, heldKeys, len(keys), distTo, 0, 7902)
}
// main reads the maze from stdin, reports what was parsed, and solves it.
func main() {
	maze := &maze{}
	maze.grid = make(map[pos]string)
	origin := &pos{}
	keys := make(map[pos]string)
	doors := make(map[pos]string)
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		loadLine(maze, scanner.Text(), origin, keys, doors)
	}
	// NOTE(review): a scanner error is only logged; solving proceeds on partial input.
	if err := scanner.Err(); err != nil {
		log.Println(err)
	}
	maze.print()
	fmt.Printf("origin: %v\n", origin)
	fmt.Printf("keys (%v): %v\n", len(keys), keys)
	fmt.Printf("doors (%v): %v\n", len(doors), doors)
	fmt.Println("---solving---")
	steps := maze.solve2(*origin, doors, keys)
	fmt.Printf("Steps needed %v\n", steps)
} | day18/main.go | 0.538498 | 0.435541 | main.go | starcoder |
package types
// NOTE(review): these declarations mirror the Chrome DevTools Protocol
// "LayerTree" domain and appear machine-generated; the underscore-style names
// are intentional for that mapping, so do not "fix" them by hand.

// Unique Layer identifier.
type LayerTree_LayerId string

// Unique snapshot identifier.
type LayerTree_SnapshotId string

// Rectangle where scrolling happens on the main thread.
type LayerTree_ScrollRect struct {
	// Rectangle itself.
	Rect DOM_Rect `json:"rect"`
	// Reason for rectangle to force scrolling on the main thread
	Type string `json:"type"`
}

// Sticky position constraints.
type LayerTree_StickyPositionConstraint struct {
	// Layout rectangle of the sticky element before being shifted
	StickyBoxRect DOM_Rect `json:"stickyBoxRect"`
	// Layout rectangle of the containing block of the sticky element
	ContainingBlockRect DOM_Rect `json:"containingBlockRect"`
	// The nearest sticky layer that shifts the sticky box
	NearestLayerShiftingStickyBox *LayerTree_LayerId `json:"nearestLayerShiftingStickyBox,omitempty"`
	// The nearest sticky layer that shifts the containing block
	NearestLayerShiftingContainingBlock *LayerTree_LayerId `json:"nearestLayerShiftingContainingBlock,omitempty"`
}

// Serialized fragment of layer picture along with its offset within the layer.
type LayerTree_PictureTile struct {
	// Offset from owning layer left boundary
	X float32 `json:"x"`
	// Offset from owning layer top boundary
	Y float32 `json:"y"`
	// Base64-encoded snapshot data.
	Picture string `json:"picture"`
}

// Information about a compositing layer.
type LayerTree_Layer struct {
	// The unique id for this layer.
	LayerId LayerTree_LayerId `json:"layerId"`
	// The id of parent (not present for root).
	ParentLayerId *LayerTree_LayerId `json:"parentLayerId,omitempty"`
	// The backend id for the node associated with this layer.
	BackendNodeId *DOM_BackendNodeId `json:"backendNodeId,omitempty"`
	// Offset from parent layer, X coordinate.
	OffsetX float32 `json:"offsetX"`
	// Offset from parent layer, Y coordinate.
	OffsetY float32 `json:"offsetY"`
	// Layer width.
	Width float32 `json:"width"`
	// Layer height.
	Height float32 `json:"height"`
	// Transformation matrix for layer, default is identity matrix
	Transform []float32 `json:"transform,omitempty"`
	// Transform anchor point X, absent if no transform specified
	AnchorX *float32 `json:"anchorX,omitempty"`
	// Transform anchor point Y, absent if no transform specified
	AnchorY *float32 `json:"anchorY,omitempty"`
	// Transform anchor point Z, absent if no transform specified
	AnchorZ *float32 `json:"anchorZ,omitempty"`
	// Indicates how many time this layer has painted.
	PaintCount int `json:"paintCount"`
	// Indicates whether this layer hosts any content, rather than being used for transform/scrolling purposes only.
	DrawsContent bool `json:"drawsContent"`
	// Set if layer is not visible.
	Invisible *bool `json:"invisible,omitempty"`
	// Rectangles scrolling on main thread only.
	ScrollRects []LayerTree_ScrollRect `json:"scrollRects,omitempty"`
	// Sticky position constraint information
	StickyPositionConstraint *LayerTree_StickyPositionConstraint `json:"stickyPositionConstraint,omitempty"`
}
// Array of timings, one per paint step.
type LayerTree_PaintProfile []float32 | types/layertree.go | 0.795896 | 0.409929 | layertree.go | starcoder |
package stats
import (
"context"
"sort"
"strconv"
"strings"
"sync"
)
// TrackerMock collects stats for tests, both aggregated across labels (Values)
// and per label set (LabeledValues). It is safe for concurrent use.
type TrackerMock struct {
	mu            sync.Mutex
	values        map[string]float64 // keyed by metric name only
	labeledValues map[string]float64 // keyed by name{sorted,escaped labels}
}
// escaper escapes label values for the Prometheus-style exposition strings.
// NOTE(review): "\n" maps to a double backslash + n; the Prometheus text format
// uses a single `\n` — confirm the double escape is intentional for this mock.
var escaper = strings.NewReplacer("\n", `\\n`, `\`, `\\`, `"`, `\"`)

// labelsString renders key/value label pairs as a deterministic, escaped,
// key-sorted `k1="v1",k2="v2"` string. It panics on malformed input
// (odd-length slice or empty key), treating that as programmer error.
func labelsString(labels []string) string {
	if len(labels) == 0 {
		return ""
	}
	isKey := true
	key := ""
	if len(labels)%2 != 0 {
		panic("malformed pairs")
	}
	type kv struct {
		k, v string
	}
	lb := make([]kv, 0, len(labels)/2)
	for _, l := range labels {
		if isKey {
			if l == "" {
				panic("empty key received in labels")
			}
			key = l
			isKey = false
		} else {
			lb = append(lb, kv{k: key, v: l})
			isKey = true
		}
	}
	// BUG FIX: the comparator previously compared a key against a VALUE
	// (lb[i].k < lb[j].v), yielding an inconsistent ordering and therefore
	// non-deterministic, wrongly sorted label strings.
	sort.Slice(lb, func(i, j int) bool {
		return lb[i].k < lb[j].k
	})
	res := ""
	for _, i := range lb {
		res += i.k + `="` + escaper.Replace(i.v) + `",`
	}
	return res[0 : len(res)-1] // strip the trailing comma
}
// Add accumulates increment into both the aggregated and the labeled series.
func (t *TrackerMock) Add(_ context.Context, name string, increment float64, labels ...string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.values == nil {
		// Lazy initialization so the zero-value mock is usable.
		t.values = map[string]float64{}
		t.labeledValues = map[string]float64{}
	}
	labeledKey := name + "{" + labelsString(labels) + "}"
	t.values[name] += increment
	t.labeledValues[labeledKey] += increment
}
// Set overwrites both the aggregated and the labeled series with an absolute value.
func (t *TrackerMock) Set(_ context.Context, name string, absolute float64, labels ...string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.values == nil {
		// Lazy initialization so the zero-value mock is usable.
		t.values = map[string]float64{}
		t.labeledValues = map[string]float64{}
	}
	labeledKey := name + "{" + labelsString(labels) + "}"
	t.values[name] = absolute
	t.labeledValues[labeledKey] = absolute
}
// Value returns the collected value by name; if labels are given, the labeled
// series is consulted instead of the aggregate. Unknown names yield 0.
func (t *TrackerMock) Value(name string, labels ...string) float64 {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.values == nil {
		return 0
	}
	if len(labels) == 0 {
		return t.values[name]
	}
	return t.labeledValues[name+"{"+labelsString(labels)+"}"]
}
// Int returns the collected value as an integer (truncated toward zero).
// It delegates to Value instead of duplicating its lock-and-lookup logic,
// which keeps the two accessors consistent by construction.
func (t *TrackerMock) Int(name string, labels ...string) int {
	return int(t.Value(name, labels...))
}
// Values returns a snapshot of the aggregated (label-free) series.
// The result is always a fresh map, never an alias of internal state.
func (t *TrackerMock) Values() map[string]float64 {
	t.mu.Lock()
	defer t.mu.Unlock()
	snapshot := make(map[string]float64, len(t.values))
	for name, v := range t.values {
		snapshot[name] = v
	}
	return snapshot
}
// LabeledValues returns a snapshot of the labeled series.
// The result is always a fresh map, never an alias of internal state.
func (t *TrackerMock) LabeledValues() map[string]float64 {
	t.mu.Lock()
	defer t.mu.Unlock()
	snapshot := make(map[string]float64, len(t.labeledValues))
	for series, v := range t.labeledValues {
		snapshot[series] = v
	}
	return snapshot
}
// Metrics renders the labeled series in Prometheus exposition format,
// one "name{labels} value" line per series, sorted for determinism.
func (t *TrackerMock) Metrics() string {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.labeledValues == nil {
		return ""
	}
	lines := make([]string, 0, len(t.labeledValues))
	for series, v := range t.labeledValues {
		lines = append(lines, series+" "+strconv.FormatFloat(v, 'g', -1, 64))
	}
	sort.Strings(lines)
	return strings.Join(lines, "\n")
}
// StatsTracker is a provider: it returns the mock itself so it can be wired
// wherever a Tracker is expected.
func (t *TrackerMock) StatsTracker() Tracker {
	return t
} | mock.go | 0.678966 | 0.42483 | mock.go | starcoder |
package quickhull
import (
"log"
"math"
"github.com/golang/geo/r3"
)
const (
	// defaultEpsilon is used when the caller passes a non-positive epsilon.
	defaultEpsilon = 0.0000001
)

// QuickHull can be used to calculate the convex hull of a point cloud.
// See: https://en.wikipedia.org/wiki/Quickhull
// All state is (re)initialized by buildMesh, so instances are reusable.
type QuickHull struct {
	epsilon                  float64 // absolute tolerance: caller epsilon scaled by cloud extent
	epsilonSquared           float64
	planar                   bool // set when all points appear to lie on a 2-D subspace
	planarPointCloudTemp     []r3.Vector
	vertexData               []r3.Vector
	mesh                     meshBuilder
	extremeValueIndices      [6]int // indices of the extreme points (see extremeValues)
	diagnostics              diagnostics
	newFaceIndices           []int
	newHalfEdgeIndices       []int
	disabledFacePointVectors [][]int
}

type diagnostics struct {
	failedHorizonEdges int // How many times QuickHull failed to solve the horizon edge. Failures lead to degenerated convex hulls.
}
// ConvexHull calculates the convex hull of the given point cloud using the Quickhull algorithm.
// If epsilon is <= 0 a default value will be used.
// ccw and useOriginalIndices are forwarded to newConvexHull (presumably winding
// order and index mapping — confirm against that constructor).
func (qh *QuickHull) ConvexHull(pointCloud []r3.Vector, ccw bool, useOriginalIndices bool, epsilon float64) ConvexHull {
	qh.buildMesh(pointCloud, epsilon)
	return newConvexHull(qh.mesh, qh.vertexData, ccw, useOriginalIndices)
}
// ConvexHullAsMesh calculates the convex hull of the given point cloud using the Quickhull algorithm and returns it as a HalfEdgeMesh.
// If epsilon is <= 0 a default value will be used.
func (qh *QuickHull) ConvexHullAsMesh(pointCloud []r3.Vector, epsilon float64) HalfEdgeMesh {
	qh.buildMesh(pointCloud, epsilon)
	return newHalfEdgeMesh(qh.mesh, qh.vertexData)
}
// buildMesh runs the Quickhull pipeline on pointCloud, leaving the result in
// qh.mesh/qh.vertexData. The effective tolerance is epsilon (or the default)
// scaled by the cloud's extent. Empty clouds are a no-op.
func (qh *QuickHull) buildMesh(pointCloud []r3.Vector, epsilon float64) {
	if len(pointCloud) == 0 {
		return
	}
	if epsilon <= 0 {
		epsilon = defaultEpsilon
	}
	qh.vertexData = pointCloud
	// Very first: find extreme values and use them to compute the scale of the point cloud.
	qh.extremeValueIndices = extremeValues(qh.vertexData)
	scale := scale(qh.vertexData, qh.extremeValueIndices) // TODO: maybe pass extreme values
	// Epsilon we use depends on the scale
	qh.epsilon = epsilon * scale
	qh.epsilonSquared = qh.epsilon * qh.epsilon
	// Reset diagnostics
	qh.diagnostics = diagnostics{}
	qh.planar = false // The planar case happens when all the points appear to lie on a two dimensional subspace of R^3.
	qh.createConvexHalfEdgeMesh()
	if qh.planar {
		// Planar fixup: remap half edges referencing the last temp-cloud point
		// back to vertex 0 and restore the original vertex data (the extra
		// point is presumably added by the planar path — confirm in
		// createConvexHalfEdgeMesh).
		extraPointIdx := len(qh.planarPointCloudTemp) - 1
		for i := range qh.mesh.halfEdges {
			if qh.mesh.halfEdges[i].EndVertex == extraPointIdx {
				qh.mesh.halfEdges[i].EndVertex = 0
			}
		}
		qh.vertexData = pointCloud
		qh.planarPointCloudTemp = qh.planarPointCloudTemp[:0]
	}
}
// createConvexHalfEdgeMesh runs the main QuickHull iteration. This will update
// qh.mesh, from which we create the ConvexHull object that the getConvexHull
// function returns.
func (qh *QuickHull) createConvexHalfEdgeMesh() {
	// Faces visible from the current active point, and the half edges forming
	// the horizon loop around them; reused (reset to length 0) each iteration.
	var visibleFaces []int
	var horizontalEdges []int
	type faceData struct {
		faceIndex           int
		enteredFromHalfEdge int // If the Face turns out not to be visible, this half edge will be marked as horizon edge
	}
	var possiblyVisibleFaces []faceData
	// Compute base tetrahedron
	qh.mesh = qh.initialTetrahedron()
	assertTrue(len(qh.mesh.faces) == 4)
	// Seed the processing stack with every tetrahedron face that still has
	// points on its positive side.
	var faceList []int
	for i := 0; i < 4; i++ {
		f := &qh.mesh.faces[i]
		if len(f.pointsOnPositiveSide) > 0 {
			faceList = append(faceList, i)
			f.inFaceStack = true
		}
	}
	// Process Faces until the Face list is empty.
	iter := 0
	for len(faceList) > 0 {
		iter++
		if iter == maxInt {
			// Visible Face traversal marks visited Faces with iteration counter (to mark that the Face has been visited on this iteration) and the max value represents unvisited Faces. At this point we have to reset iteration counter. This shouldn't be an
			// issue on 64 bit machines.
			iter = 0
		}
		// Pop the first face from the stack.
		var topFaceIndex int
		topFaceIndex, faceList = faceList[0], faceList[1:]
		tf := &qh.mesh.faces[topFaceIndex]
		tf.inFaceStack = false
		assertTrue(tf.pointsOnPositiveSide == nil || len(tf.pointsOnPositiveSide) > 0)
		if tf.pointsOnPositiveSide == nil || tf.isDisabled() {
			continue
		}
		// Pick the most distant point to this triangle plane as the point to which we extrude
		activePoint := qh.vertexData[tf.mostDistantPoint]
		activePointIndex := tf.mostDistantPoint
		// Clear outer vars (length 0, capacity retained for reuse)
		horizontalEdges = horizontalEdges[:0]
		visibleFaces = visibleFaces[:0]
		possiblyVisibleFaces = possiblyVisibleFaces[:0]
		// Find out the Faces that have our active point on their positive side (these are the "visible Faces").
		// The Face on top of the stack of course is one of them. At the same time, we create a list of horizon edges.
		possiblyVisibleFaces = append(possiblyVisibleFaces, faceData{faceIndex: topFaceIndex, enteredFromHalfEdge: maxInt})
		for len(possiblyVisibleFaces) > 0 {
			// Depth-first traversal: pop the most recently pushed candidate.
			fd := possiblyVisibleFaces[len(possiblyVisibleFaces)-1]
			possiblyVisibleFaces = possiblyVisibleFaces[:len(possiblyVisibleFaces)-1]
			pvf := &qh.mesh.faces[fd.faceIndex]
			assertTrue(!pvf.isDisabled())
			if pvf.visibilityCheckedOnIteration == iter {
				// Already classified this iteration. If it was visible, nothing
				// more to do; if not, the edge we entered from is a horizon edge.
				if pvf.isVisibleFaceOnCurrentIteration {
					continue
				}
			} else {
				p := pvf.plane
				pvf.visibilityCheckedOnIteration = iter
				// Signed distance of the active point from the face plane.
				d := p.n.Dot(activePoint) + p.d
				if d > 0 {
					// Face is visible: push its neighbors (except the face we
					// entered from) to continue the visibility flood fill.
					pvf.isVisibleFaceOnCurrentIteration = true
					pvf.horizonEdgesOnCurrentIteration = 0
					visibleFaces = append(visibleFaces, fd.faceIndex)
					for _, heIndex := range qh.mesh.halfEdgeIndicesOfFace(*pvf) {
						opp := qh.mesh.halfEdges[heIndex].Opp
						if opp != fd.enteredFromHalfEdge {
							possiblyVisibleFaces = append(possiblyVisibleFaces, faceData{faceIndex: qh.mesh.halfEdges[opp].Face, enteredFromHalfEdge: heIndex})
						}
					}
					continue
				}
				assertTrue(fd.faceIndex != topFaceIndex)
			}
			// The Face is not visible. Therefore, the halfedge we came from is part of the horizon edge.
			pvf.isVisibleFaceOnCurrentIteration = false
			horizontalEdges = append(horizontalEdges, fd.enteredFromHalfEdge)
			// Store which half edge is the horizon edge. The other half edges of the Face will not be part of the final mesh so their data slots can be recycled.
			halfEdges := qh.mesh.halfEdgeIndicesOfFace(qh.mesh.faces[qh.mesh.halfEdges[fd.enteredFromHalfEdge].Face])
			// ind is the position (0..2) of the horizon edge within its face;
			// recorded as a bit so the recycling pass below can skip it.
			var ind byte
			if halfEdges[0] != fd.enteredFromHalfEdge {
				if halfEdges[1] == fd.enteredFromHalfEdge {
					ind = 1
				} else {
					ind = 2
				}
			}
			qh.mesh.faces[qh.mesh.halfEdges[fd.enteredFromHalfEdge].Face].horizonEdgesOnCurrentIteration |= 1 << ind
		}
		nHorizontalEdges := len(horizontalEdges)
		// Order horizon edges so that they form a loop. This may fail due to numerical instability in which case we give up trying to solve horizon edge for this point and accept a minor degeneration in the convex hull.
		if !qh.reorderHorizontalEdges(horizontalEdges) {
			qh.diagnostics.failedHorizonEdges++
			log.Println("Failed to solve horizon edge")
			// Remove the active point from this face so it is not retried.
			for i := range tf.pointsOnPositiveSide {
				if tf.pointsOnPositiveSide[i] == activePointIndex {
					tf.pointsOnPositiveSide = append(tf.pointsOnPositiveSide[:i], tf.pointsOnPositiveSide[i+1:]...)
					break
				}
			}
			/*
				TODO: optimize
				if len(tf.pointsOnPositiveSide) == 0 {
					reclaimToIndexVectorPool(tf.m_pointsOnPositiveSide);
				}
			*/
			continue
		}
		// Except for the horizon edges, all half edges of the visible Faces can be marked as disabled. Their data slots will be reused.
		// The Faces will be disabled as well, but we need to remember the points that were on the positive side of them - therefore
		// we save pointers to them.
		qh.newFaceIndices = qh.newFaceIndices[:0]
		qh.newHalfEdgeIndices = qh.newHalfEdgeIndices[:0]
		qh.disabledFacePointVectors = qh.disabledFacePointVectors[:0]
		var nDisabled int
		for _, faceIdx := range visibleFaces {
			disabledFace := qh.mesh.faces[faceIdx]
			halfEdges := qh.mesh.halfEdgeIndicesOfFace(disabledFace)
			for i := uint(0); i < 3; i++ {
				// Skip horizon edges (their bits were set during traversal).
				if disabledFace.horizonEdgesOnCurrentIteration&(1<<i) == 0 {
					if nDisabled < nHorizontalEdges*2 {
						// Use on this iteration
						qh.newHalfEdgeIndices = append(qh.newHalfEdgeIndices, halfEdges[i])
						nDisabled++
					} else {
						// Mark for reusal on later iteration step
						qh.mesh.disableHalfEdge(halfEdges[i])
					}
				}
			}
			// Disable the Face, but retain pointer to the points that were on the positive side of it. We need to assign those points
			// to the new Faces we create shortly.
			t := qh.mesh.disableFace(faceIdx)
			if t != nil {
				assertTrue(len(t) > 0)
				qh.disabledFacePointVectors = append(qh.disabledFacePointVectors, t)
			}
		}
		if nDisabled < nHorizontalEdges*2 {
			// Not enough recycled slots: allocate the remaining new half edges.
			nNewHalfEdgesNeeded := nHorizontalEdges*2 - nDisabled
			for i := 0; i < nNewHalfEdgesNeeded; i++ {
				qh.newHalfEdgeIndices = append(qh.newHalfEdgeIndices, qh.mesh.addHalfEdge())
			}
		}
		// Create new Faces using the edgeloop: each horizon edge ab spawns a
		// triangle (a, b, activePoint) with two new half edges bc and ca.
		for i := 0; i < nHorizontalEdges; i++ {
			ab := horizontalEdges[i]
			horizonEdgeVertexIndices := qh.mesh.vertexIndicesOfHalfEdge(qh.mesh.halfEdges[ab])
			a, b, c := horizonEdgeVertexIndices[0], horizonEdgeVertexIndices[1], activePointIndex
			newFaceIdx := qh.mesh.addFace()
			qh.newFaceIndices = append(qh.newFaceIndices, newFaceIdx)
			ca, bc := qh.newHalfEdgeIndices[2*i+0], qh.newHalfEdgeIndices[2*i+1]
			qh.mesh.halfEdges[ab].Next = bc
			qh.mesh.halfEdges[bc].Next = ca
			qh.mesh.halfEdges[ca].Next = ab
			qh.mesh.halfEdges[bc].Face = newFaceIdx
			qh.mesh.halfEdges[ca].Face = newFaceIdx
			qh.mesh.halfEdges[ab].Face = newFaceIdx
			qh.mesh.halfEdges[ca].EndVertex = a
			qh.mesh.halfEdges[bc].EndVertex = c
			newFace := &qh.mesh.faces[newFaceIdx]
			planeNormal := triangleNormal(qh.vertexData[a], qh.vertexData[b], activePoint)
			newFace.plane = newPlane(planeNormal, activePoint)
			newFace.halfEdgeIndex = ab
			// Link Opp pointers between neighboring new faces: this face's ca
			// pairs with the previous face's bc (wrapping around the loop).
			var idx int
			if i > 0 {
				idx = i*2 - 1
			} else {
				idx = 2*nHorizontalEdges - 1
			}
			qh.mesh.halfEdges[ca].Opp = qh.newHalfEdgeIndices[idx]
			qh.mesh.halfEdges[bc].Opp = qh.newHalfEdgeIndices[((i+1)*2)%(nHorizontalEdges*2)]
		}
		// Reassign the points of the disabled faces to the newly created faces.
		for _, disabledPoints := range qh.disabledFacePointVectors {
			assertTrue(disabledPoints != nil)
			for _, pointIdx := range disabledPoints {
				// The active point is now a hull vertex; drop it.
				if pointIdx == activePointIndex {
					continue
				}
				for i := 0; i < nHorizontalEdges; i++ {
					if qh.addPointToFace(&qh.mesh.faces[qh.newFaceIndices[i]], pointIdx) {
						break
					}
				}
			}
			/* TODO: optimize
			// The points are no longer needed: we can move them to the vector pool for reuse.
			reclaimToIndexVectorPool(disabledPoints);
			*/
		}
		// Increase Face stack size if needed
		for _, newFaceIdx := range qh.newFaceIndices {
			newFace := &qh.mesh.faces[newFaceIdx]
			if newFace.pointsOnPositiveSide != nil {
				assertTrue(len(newFace.pointsOnPositiveSide) > 0)
				if !newFace.inFaceStack {
					faceList = append(faceList, newFaceIdx)
					newFace.inFaceStack = true
				}
			}
		}
	}
	/* TODO: optimize
	// Cleanup
	m_indexVectorPool.clear();
	*/
}
// initialTetrahedron creates a half edge mesh representing the base tetrahedron
// from which the QuickHull iteration proceeds. qh.extremeValueIndices must be
// properly set up when this is called. Degenerate point clouds (those spanning
// only a point, a line, or a plane) are handled by returning a possibly
// degenerate tetrahedron; in the planar case one synthetic point is appended
// (via qh.planarPointCloudTemp) so the hull has volume, and qh.planar is set.
func (qh *QuickHull) initialTetrahedron() meshBuilder {
	nVertices := len(qh.vertexData)
	// If we have at most 3 points, just return a degenerate tetrahedron:
	if nVertices <= 3 {
		v := [4]int{
			0,
			int(math.Min(1, float64(nVertices-1))),
			int(math.Min(2, float64(nVertices-1))),
			nVertices - 1,
		}
		n := triangleNormal(qh.vertexData[v[0]], qh.vertexData[v[1]], qh.vertexData[v[2]])
		trianglePlane := newPlane(n, qh.vertexData[v[0]])
		// Enforce CCW orientation by swapping two vertices if needed.
		if trianglePlane.isPointOnPositiveSide(qh.vertexData[v[3]]) {
			v[0], v[1] = v[1], v[0]
		}
		return newMeshBuilder(v[0], v[1], v[2], v[3])
	}
	// Find two most distant extreme points.
	maxD := qh.epsilonSquared
	var p1, p2 int
	for i := 0; i < 6; i++ {
		for j := i + 1; j < 6; j++ {
			dv := qh.vertexData[qh.extremeValueIndices[i]].Sub(qh.vertexData[qh.extremeValueIndices[j]])
			dSq := dv.X*dv.X + dv.Y*dv.Y + dv.Z*dv.Z
			if dSq > maxD {
				maxD = dSq
				p1 = qh.extremeValueIndices[i]
				p2 = qh.extremeValueIndices[j]
			}
		}
	}
	if maxD == qh.epsilonSquared {
		// A degenerate case: the point cloud seems to consist of a single point
		return newMeshBuilder(0, int(math.Min(1, float64(nVertices-1))), int(math.Min(2, float64(nVertices-1))), int(math.Min(3, float64(nVertices-1))))
	}
	assertTrue(p1 != p2)
	// Find the most distant point to the line between the two chosen extreme points.
	r := newRay(qh.vertexData[p1], qh.vertexData[p2].Sub(qh.vertexData[p1]))
	maxD = qh.epsilonSquared
	maxI := maxInt
	for i, v := range qh.vertexData {
		distToRay := squaredDistanceBetweenPointAndRay(v, r)
		if distToRay > maxD {
			maxD = distToRay
			maxI = i
		}
	}
	if maxD == qh.epsilonSquared {
		// It appears that the point cloud belongs to a 1 dimensional subspace of R^3: convex hull has no volume => return a thin triangle
		// Pick the first point other than p1 and p2 as the third point of the
		// triangle, falling back to p1 if no such point exists.
		// (The previous code took the LAST match and detected "no match" by
		// comparing against the last element, which misfired whenever the
		// chosen point happened to be the final vertex.)
		p3 := p1
		for i, v := range qh.vertexData {
			if v != qh.vertexData[p1] && v != qh.vertexData[p2] {
				p3 = i
				break
			}
		}
		p4 := p1
		for i, v := range qh.vertexData {
			if v != qh.vertexData[p1] && v != qh.vertexData[p2] && v != qh.vertexData[p3] {
				p4 = i
				break
			}
		}
		return newMeshBuilder(p1, p2, p3, p4)
	}
	// These three points form the base triangle for our tetrahedron.
	assertTrue(p1 != maxI && p2 != maxI)
	baseTriangle := [3]int{p1, p2, maxI}
	baseTriangleVertices := [3]r3.Vector{qh.vertexData[baseTriangle[0]], qh.vertexData[baseTriangle[1]], qh.vertexData[baseTriangle[2]]}
	// Next step is to find the 4th vertex of the tetrahedron.
	// We naturally choose the point farthest away from the triangle plane.
	maxD = qh.epsilon
	maxI = 0
	{
		n := triangleNormal(baseTriangleVertices[0], baseTriangleVertices[1], baseTriangleVertices[2])
		{
			trianglePlane := newPlane(n, baseTriangleVertices[0])
			for i := 0; i < nVertices; i++ {
				d := math.Abs(signedDistanceToPlane(qh.vertexData[i], trianglePlane))
				if d > maxD {
					maxD = d
					maxI = i
				}
			}
		}
		if maxD == qh.epsilon {
			// All the points seem to lie on a 2D subspace of R^3. How to handle this?
			// Well, let's add one extra point to the point cloud so that the convex hull will have volume.
			qh.planar = true
			n := triangleNormal(baseTriangleVertices[1], baseTriangleVertices[2], baseTriangleVertices[0])
			// Copy the original points into the temporary planar cloud.
			// NOTE: this must append INTO planarPointCloudTemp; the previous
			// code appended to qh.vertexData, which could alias the caller's
			// backing array and let the extra point clobber caller data.
			qh.planarPointCloudTemp = append(qh.planarPointCloudTemp[:0], qh.vertexData...)
			extraPoint := n.Add(qh.vertexData[0])
			qh.planarPointCloudTemp = append(qh.planarPointCloudTemp, extraPoint)
			maxI = len(qh.planarPointCloudTemp) - 1
			qh.vertexData = qh.planarPointCloudTemp
		}
		// Enforce CCW orientation (if user prefers clockwise orientation, swap two Vertices in each triangle when final mesh is created)
		triPlane := newPlane(n, baseTriangleVertices[0])
		if triPlane.isPointOnPositiveSide(qh.vertexData[maxI]) {
			baseTriangle[0], baseTriangle[1] = baseTriangle[1], baseTriangle[0]
		}
	}
	// Create a tetrahedron half edge mesh and compute planes defined by each triangle
	mesh := newMeshBuilder(baseTriangle[0], baseTriangle[1], baseTriangle[2], maxI)
	for i := range mesh.faces {
		v := mesh.vertexIndicesOfFace(mesh.faces[i])
		va := qh.vertexData[v[0]]
		vb := qh.vertexData[v[1]]
		vc := qh.vertexData[v[2]]
		n := triangleNormal(va, vb, vc)
		mesh.faces[i].plane = newPlane(n, va)
	}
	// Finally we assign a Face for each vertex outside the tetrahedron (Vertices inside the tetrahedron have no role anymore)
	for i := 0; i < nVertices; i++ {
		for j := range mesh.faces {
			if qh.addPointToFace(&mesh.faces[j], i) {
				break
			}
		}
	}
	return mesh
}
// addPointToFace associates a point with a Face if the point resides on the
// positive side of the Face's plane (beyond the epsilon tolerance).
// It returns true when the point was added, updating the Face's record of its
// most distant point along the way.
func (qh *QuickHull) addPointToFace(face *meshBuilderFace, pointIndex int) bool {
	dist := signedDistanceToPlane(qh.vertexData[pointIndex], face.plane)
	// Reject points on the plane or its negative side; the squared comparison
	// scales the epsilon by the (unnormalized) plane normal's squared length.
	if dist <= 0 || dist*dist <= qh.epsilonSquared*face.plane.sqrNLength {
		return false
	}
	face.pointsOnPositiveSide = append(face.pointsOnPositiveSide, pointIndex)
	// Track the farthest point so the iteration knows where to extrude next.
	if dist > face.mostDistantPointDist {
		face.mostDistantPointDist = dist
		face.mostDistantPoint = pointIndex
	}
	return true
}
// reorderHorizontalEdges rearranges horizontalEdges in place so that
// consecutive edges share endpoints, i.e. they form a loop. It returns true on
// success; failure (no chaining edge found) can occur due to numerical
// instability, in which case the caller abandons the current active point.
//
// The receiver is a pointer for consistency with the type's other methods and
// to avoid copying the QuickHull struct on every call.
func (qh *QuickHull) reorderHorizontalEdges(horizontalEdges []int) bool {
	nEdges := len(horizontalEdges)
	for i := 0; i < nEdges-1; i++ {
		// The next edge in the loop must begin where the current one ends.
		endVertex := qh.mesh.halfEdges[horizontalEdges[i]].EndVertex
		var foundNext bool
		for j := i + 1; j < nEdges; j++ {
			// An edge's begin vertex is the end vertex of its opposite edge.
			beginVertex := qh.mesh.halfEdges[qh.mesh.halfEdges[horizontalEdges[j]].Opp].EndVertex
			if beginVertex == endVertex {
				horizontalEdges[i+1], horizontalEdges[j] = horizontalEdges[j], horizontalEdges[i+1]
				foundNext = true
				break
			}
		}
		if !foundNext {
			return false
		}
	}
	return true
}
// assertTrue panics when b is false. It enforces internal invariants of the
// QuickHull algorithm; a panic here indicates a programmer error, not bad input.
func assertTrue(b bool) {
	if b {
		return
	}
	panic("Assertion failed")
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.