code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package pars
// seqParser matches a fixed sequence of sub-parsers in order.
type seqParser struct {
	parsers []Parser
}

//Seq returns a parser that matches all of its given parsers in order or none of them.
func Seq(parsers ...Parser) Parser {
	return &seqParser{parsers: parsers}
}

// Parse runs every sub-parser in order and collects their results into a
// []interface{}. If any sub-parser fails, all input matched so far is unread
// and a seqError carrying the failing index and the inner error is returned.
func (s *seqParser) Parse(src *Reader) (interface{}, error) {
	values := make([]interface{}, len(s.parsers))
	for i, parser := range s.parsers {
		val, err := parser.Parse(src)
		if err != nil {
			// Roll back everything the earlier parsers consumed so the
			// reader is left untouched on failure.
			unreadParsers(s.parsers[:i], src)
			return nil, seqError{index: i, innerError: err}
		}
		values[i] = val
	}
	return values, nil
}

// Unread pushes back the input consumed by all sub-parsers.
func (s *seqParser) Unread(src *Reader) {
	unreadParsers(s.parsers, src)
}

// Clone returns a deep copy of the sequence with each sub-parser cloned.
func (s *seqParser) Clone() Parser {
	s2 := &seqParser{parsers: make([]Parser, len(s.parsers))}
	for i, parser := range s.parsers {
		s2.parsers[i] = parser.Clone()
	}
	return s2
}
// someParser repeatedly matches clones of a prototype parser.
type someParser struct {
	prototype Parser
	// used holds the clones that successfully matched, in order, so Unread
	// can roll them back.
	used []Parser
}

//Some returns a parser that matches a given parser zero or more times. Not matching at all is not an error.
func Some(parser Parser) Parser {
	return &someParser{prototype: parser}
}

// Parse matches the prototype as many times as possible and returns the
// collected values (nil slice when nothing matched). It never fails.
func (s *someParser) Parse(src *Reader) (interface{}, error) {
	var values []interface{}
	for {
		next := s.prototype.Clone()
		s.used = append(s.used, next)
		nextVal, nextErr := next.Parse(src)
		if nextErr != nil {
			// The last clone did not match; drop it from the undo list.
			s.used = s.used[:len(s.used)-1]
			break
		}
		values = append(values, nextVal)
	}
	return values, nil
}

// Unread rolls back every successful match and resets the undo list.
func (s *someParser) Unread(src *Reader) {
	unreadParsers(s.used, src)
	s.used = nil
}

// Clone returns a fresh someParser around a clone of the prototype.
// Note: the used list is intentionally not copied — a clone starts unused.
func (s *someParser) Clone() Parser {
	return &someParser{prototype: s.prototype.Clone()}
}
//Many returns a parser that matches a given parser one or more times. Not matching at all is an error.
func Many(parser Parser) Parser {
	// One required match followed by zero or more, spliced into a flat slice.
	return SplicingSeq(parser, Some(parser))
}
// orParser tries a set of alternatives and remembers which one matched.
type orParser struct {
	parsers []Parser
	// selected is the parser that produced the last successful match; it is
	// the one that must be unread to roll back.
	selected Parser
}

//Or returns a parser that matches the first of a given set of parsers. A later parser will not be tried if an earlier match was found.
//The returned parser uses the error message of the last parser verbatim.
func Or(parsers ...Parser) Parser {
	return &orParser{parsers: parsers}
}

// Parse tries each alternative in order, returning the first success.
// On total failure the last alternative's error is returned (each failing
// alternative is expected to have unread its own input).
// NOTE(review): with zero alternatives this returns (nil, nil) — confirm
// callers never construct an empty Or.
func (o *orParser) Parse(src *Reader) (val interface{}, err error) {
	for _, parser := range o.parsers {
		val, err = parser.Parse(src)
		if err == nil {
			o.selected = parser
			return
		}
	}
	return
}

// Unread rolls back the alternative that matched, if any.
func (o *orParser) Unread(src *Reader) {
	if o.selected != nil {
		o.selected.Unread(src)
		o.selected = nil
	}
}

// Clone returns a deep copy with each alternative cloned.
func (o *orParser) Clone() Parser {
	o2 := &orParser{parsers: make([]Parser, len(o.parsers))}
	for i, parser := range o.parsers {
		o2.parsers[i] = parser.Clone()
	}
	return o2
}
// exceptParser wraps a parser so that it fails whenever an excluded parser
// would match at the same position. Unread is inherited from the embedded
// Parser.
type exceptParser struct {
	Parser
	except Parser
}

//Except returns a parser that wraps another parser so that it fails if a third, excepted parser would succeed.
func Except(parser, except Parser) Parser {
	return &exceptParser{Parser: parser, except: except}
}

// Parse first probes the excepted parser; if it matches, its input is unread
// and errExceptionMatched is returned. Otherwise the wrapped parser runs
// normally.
func (e *exceptParser) Parse(src *Reader) (val interface{}, err error) {
	_, err = e.except.Parse(src)
	if err == nil {
		// The exception matched: roll its input back and refuse the match.
		e.except.Unread(src)
		return nil, errExceptionMatched
	}
	val, err = e.Parser.Parse(src)
	return
}

// Clone returns a deep copy of both the wrapped and the excepted parser.
func (e *exceptParser) Clone() Parser {
	return Except(e.Parser.Clone(), e.except.Clone())
}
// optionalParser wraps another parser, turning its failure into a nil result.
type optionalParser struct {
	// read records whether the last Parse actually consumed a match, so
	// Unread only rolls back when something was read.
	read bool
	Parser
}

//Optional returns a parser that reads exactly one result according to a given other parser. If it fails, the error is discarded and nil is returned.
func Optional(parser Parser) Parser {
	return &optionalParser{Parser: parser}
}

// Parse attempts the wrapped parser; on failure it reports success with a
// nil value instead of an error.
// NOTE(review): read is not cleared on a failed Parse, so reusing the same
// instance after an earlier success could make Unread roll back stale input —
// confirm instances are not reused across parses.
func (o *optionalParser) Parse(src *Reader) (interface{}, error) {
	val, err := o.Parser.Parse(src)
	if err == nil {
		o.read = true
		return val, nil
	}
	return nil, nil
}

// Unread rolls back the wrapped parser only if the last Parse consumed input.
func (o *optionalParser) Unread(src *Reader) {
	if o.read {
		o.Parser.Unread(src)
		o.read = false
	}
}

// Clone returns a fresh optional wrapper around a clone of the inner parser.
func (o *optionalParser) Clone() Parser {
	return &optionalParser{Parser: o.Parser.Clone()}
}
// discardLeftParser matches two parsers in order but keeps only the right
// result.
type discardLeftParser struct {
	leftParser  Parser
	rightParser Parser
}

//DiscardLeft returns a parser that calls two other parsers but only returns the result of the second parser. Both parsers must succeed.
func DiscardLeft(left, right Parser) Parser {
	return &discardLeftParser{leftParser: left, rightParser: right}
}

// Parse runs left then right; the left result is thrown away. If right
// fails, left's input is unread so the reader is restored.
func (d *discardLeftParser) Parse(src *Reader) (interface{}, error) {
	_, err := d.leftParser.Parse(src)
	if err != nil {
		return nil, err
	}
	val, err := d.rightParser.Parse(src)
	if err != nil {
		d.leftParser.Unread(src)
		return nil, err
	}
	return val, err
}

// Unread rolls back both parsers in reverse order of consumption.
func (d *discardLeftParser) Unread(src *Reader) {
	d.rightParser.Unread(src)
	d.leftParser.Unread(src)
}

// Clone returns a deep copy with both sides cloned.
func (d *discardLeftParser) Clone() Parser {
	return DiscardLeft(d.leftParser.Clone(), d.rightParser.Clone())
}
// discardRightParser matches two parsers in order but keeps only the left
// result.
type discardRightParser struct {
	leftParser  Parser
	rightParser Parser
}

//DiscardRight returns a parser that calls two other parsers but only returns the result of the first parser. Both parsers must succeed.
func DiscardRight(left, right Parser) Parser {
	return &discardRightParser{leftParser: left, rightParser: right}
}

// Parse runs left then right; the right result is thrown away. If right
// fails, left's input is unread so the reader is restored.
func (d *discardRightParser) Parse(src *Reader) (interface{}, error) {
	val, err := d.leftParser.Parse(src)
	if err != nil {
		return nil, err
	}
	_, err = d.rightParser.Parse(src)
	if err != nil {
		d.leftParser.Unread(src)
		return nil, err
	}
	return val, nil
}

// Unread rolls back both parsers in reverse order of consumption.
func (d *discardRightParser) Unread(src *Reader) {
	d.rightParser.Unread(src)
	d.leftParser.Unread(src)
}

// Clone returns a deep copy with both sides cloned.
func (d *discardRightParser) Clone() Parser {
	return DiscardRight(d.leftParser.Clone(), d.rightParser.Clone())
}
//SplicingSeq returns a parser that works like a Seq but joins slices returned by its subparsers into a single slice.
func SplicingSeq(parsers ...Parser) Parser {
	// Seq yields a []interface{}; splice flattens one level of nesting.
	return Transformer(Seq(parsers...), splice)
}
// splice flattens one level of nesting: any element of the input slice that
// is itself a []interface{} has its elements appended individually, while
// every other element is appended unchanged. The error result is always nil.
func splice(val interface{}) (interface{}, error) {
	in := val.([]interface{})
	out := make([]interface{}, 0, len(in))
	for _, item := range in {
		switch inner := item.(type) {
		case []interface{}:
			out = append(out, inner...)
		default:
			out = append(out, item)
		}
	}
	return out, nil
}
//Sep returns a parser that parses a sequence of items according to a first parser that are separated by matches of a second parser.
func Sep(item, separator Parser) Parser {
	// Grammar: item (separator item)* — spliced into one flat result slice.
	return SplicingSeq(item, Some(DiscardLeft(separator, item)))
}
// recursiveParser defers construction of its inner parser to a factory so a
// grammar can refer to itself.
type recursiveParser struct {
	// parser is the instance built by the last Parse, kept for Unread.
	parser  Parser
	factory func() Parser
}

//Recursive allows to recursively define a parser in terms of itself.
func Recursive(factory func() Parser) Parser {
	return &recursiveParser{factory: factory}
}

// Parse builds a fresh parser from the factory on every call (breaking the
// recursion at construction time) and delegates to it, unreading on failure.
func (r *recursiveParser) Parse(src *Reader) (interface{}, error) {
	r.parser = r.factory()
	val, err := r.parser.Parse(src)
	if err != nil {
		r.parser.Unread(src)
		return nil, err
	}
	return val, nil
}

// Unread rolls back the parser built by the last successful Parse, if any.
func (r *recursiveParser) Unread(src *Reader) {
	if r.parser != nil {
		r.parser.Unread(src)
		r.parser = nil
	}
}
func (r *recursiveParser) Clone() Parser {
return Recursive(r.factory)
} | combinators.go | 0.882915 | 0.449091 | combinators.go | starcoder |
package govote
import (
"math/rand"
"sort"
"time"
)
// min returns the lesser of a and b.
func min(a, b int) (r int) {
	if a >= b {
		return b
	}
	return a
}
// max returns the greater of a and b.
func max(a, b int) (r int) {
	if a <= b {
		return b
	}
	return a
}
// CPair represents two candidates' indices for pairwise comparison
type CPair struct{ A, B int }

// CScore represents a candidate and the candidate's score
type CScore struct {
	Name  string
	Score int
}
// sortScoresAsc sorts a string-key/int-value map by value in ascending order
// and returns the entries as a slice of CScores.
func sortScoresAsc(m map[string]int) (res []CScore) {
	vs := newValSorter(m)
	vs.sortAsc()
	for i, key := range vs.keys {
		res = append(res, CScore{key, vs.vals[i]})
	}
	return
}
// sortScoresDesc sorts a string-key/int-value map by value in descending
// order and returns the entries as a slice of CScores.
func sortScoresDesc(m map[string]int) (res []CScore) {
	vs := newValSorter(m)
	vs.sortDesc()
	for i, key := range vs.keys {
		res = append(res, CScore{key, vs.vals[i]})
	}
	return
}
// valSorter pairs map keys with their values so both can be sorted together
// by value, keeping each key aligned with its value.
type valSorter struct {
	keys []string
	vals []int
}

// newValSorter copies the entries of m into a fresh valSorter.
// Note: map iteration order is random, so the initial entry order is
// arbitrary until one of the sort methods is called.
func newValSorter(m map[string]int) *valSorter {
	vs := &valSorter{
		keys: make([]string, 0, len(m)),
		vals: make([]int, 0, len(m)),
	}
	for key, val := range m {
		vs.keys = append(vs.keys, key)
		vs.vals = append(vs.vals, val)
	}
	return vs
}

// sortAsc orders the entries by ascending value.
func (vs *valSorter) sortAsc() { sort.Sort(vs) }

// sortDesc orders the entries by descending value.
func (vs *valSorter) sortDesc() { sort.Sort(sort.Reverse(vs)) }

// sort.Interface implementation: entries compare by value; Swap keeps the
// parallel key slice in sync.
func (vs *valSorter) Len() int           { return len(vs.vals) }
func (vs *valSorter) Less(i, j int) bool { return vs.vals[i] < vs.vals[j] }
func (vs *valSorter) Swap(i, j int) {
	vs.keys[i], vs.keys[j] = vs.keys[j], vs.keys[i]
	vs.vals[i], vs.vals[j] = vs.vals[j], vs.vals[i]
}
// Platform-dependent integer limits derived from the bit width of uint/int.
const maxUint = ^uint(0)
const minUint = 0
const maxInt = int(maxUint >> 1)
const minInt = -maxInt - 1
// randIntn returns a pseudo-random integer in the half-open interval [0, n).
// Non-positive n yields 0 instead of panicking (rand.Intn panics for n <= 0;
// the original only guarded n == 0, so a negative n would panic).
func randIntn(n int) int {
	if n <= 0 {
		return 0
	}
	// NOTE(review): seeding a new source on every call is wasteful and can
	// repeat values when called in a tight loop; kept here to preserve the
	// file's behavior and imports, but a package-level source would be
	// preferable.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return r.Intn(n)
}
// removeDuplicates removes duplicate strings from *xs in place, keeping the
// first occurrence of each value and preserving relative order. The slice is
// truncated to the number of unique elements; its backing array is reused.
// (Also removes dataset metadata fused onto the closing brace, which would
// not compile.)
func removeDuplicates(xs *[]string) {
	seen := make(map[string]bool, len(*xs))
	j := 0
	for _, x := range *xs {
		if seen[x] {
			continue
		}
		seen[x] = true
		(*xs)[j] = x
		j++
	}
	*xs = (*xs)[:j]
}
package z85
import (
"encoding/binary"
"errors"
"fmt"
)
// ErrLength results from encoding or decoding wrongly aligned input data.
var ErrLength = errors.New("z85: wrongly aligned input data")

// InvalidByteError values describe errors resulting from an invalid byte in a z85 encoded data.
type InvalidByteError byte

func (e InvalidByteError) Error() string {
	return fmt.Sprintf("z85: invalid input byte: %#U", rune(e))
}

// minDigit and maxDigit bound the ASCII range occupied by the Z85 alphabet;
// decodeLookup is indexed by (byte - minDigit).
const (
	minDigit = '!'
	maxDigit = '}'
)

var (
	// digits is the Z85 alphabet in encoding order.
	digits = []byte(
		"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-:+=^!/*?&<>()[]{}@%$#")
	// decodeLookup maps an in-range byte back to its alphabet index + 1;
	// 0 marks bytes that are not part of the alphabet (filled in init).
	decodeLookup = [maxDigit - minDigit + 1]byte{}
)
// init builds the reverse lookup table from the Z85 alphabet.
func init() {
	for i, d := range digits {
		decodeLookup[d-minDigit] = byte(i + 1) // +1 to use 0 as an invalid byte marker
	}
}
// Decode decodes z85 encoded src into DecodedLen(len(src)) bytes of dst, returning the
// number of bytes written to dst, always DecodedLen(len(src)).
// The len(src) must be divisible by 5, otherwise an ErrLength is returned.
// If Decode encounters invalid input bytes, it returns an InvalidByteError.
func Decode(dst, src []byte) (n int, err error) {
	if len(src)%5 != 0 {
		return 0, ErrLength
	}
	n = DecodedLen(len(src))
	for len(src) > 0 {
		// Each group of 5 base-85 digits decodes to one big-endian uint32.
		var v uint32
		for i := 0; i < 5; i++ {
			digit := src[i]
			if digit < minDigit || digit > maxDigit {
				return 0, InvalidByteError(digit)
			}
			m := uint32(decodeLookup[digit-minDigit])
			if m == 0 {
				// In the ASCII range but not part of the Z85 alphabet.
				return 0, InvalidByteError(digit)
			}
			v = v*85 + (m - 1) // -1 readjust due to invalid byte marker
		}
		binary.BigEndian.PutUint32(dst, v)
		src = src[5:]
		dst = dst[4:]
	}
	return
}
// DecodedLen returns the length in bytes of the decoded data corresponding
// to n bytes of z85-encoded data: every 5 encoded bytes yield 4 raw bytes.
func DecodedLen(n int) int {
	quads := n * 4
	return quads / 5
}
// Encode encodes src into EncodedLen(len(src)) bytes of dst using z85 encoding, returning the
// number of bytes written to dst, always EncodedLen(len(src)).
// The len(src) must be divisible by 4, otherwise an ErrLength is returned.
func Encode(dst, src []byte) (n int, err error) {
	if len(src)%4 != 0 {
		return 0, ErrLength
	}
	n = EncodedLen(len(src))
	for len(src) > 0 {
		// Each big-endian uint32 becomes 5 base-85 digits, most
		// significant digit first (hence the descending loop).
		v := binary.BigEndian.Uint32(src)
		for i := 4; i >= 0; i-- {
			dst[i] = digits[v%85]
			v /= 85
		}
		src = src[4:]
		dst = dst[5:]
	}
	return
}
// EncodedLen returns the length in bytes of the z85 encoding of an input
// buffer of length n: every 4 raw bytes become 5 encoded bytes. (Also
// removes dataset metadata fused onto the closing brace, which would not
// compile.)
func EncodedLen(n int) int {
	return n * 5 / 4
}
package mystrings
import (
"strings"
)
// ReverseString reverses s in place using two converging indices and returns
// the same (mutated) slice for convenience.
// Time complexity: O(n) — n/2 swaps. Space complexity: O(1).
func ReverseString(s []byte) []byte {
	left, right := 0, len(s)-1
	for left < right {
		s[left], s[right] = s[right], s[left]
		left++
		right--
	}
	return s
}
// ReverseWords reverses the characters of every space-separated word in s
// while preserving whitespace and the original word order.
// Time complexity: O(n). Space complexity: O(n).
func ReverseWords(s string) string {
	words := strings.Split(s, " ")
	for i, w := range words {
		runes := []rune(w)
		for a, b := 0, len(runes)-1; a < b; a, b = a+1, b-1 {
			runes[a], runes[b] = runes[b], runes[a]
		}
		words[i] = string(runes)
	}
	return strings.Join(words, " ")
}
// reverse returns s with its runes in reverse order.
func reverse(s string) string {
	runes := []rune(s)
	n := len(runes)
	out := make([]rune, n)
	for i, r := range runes {
		out[n-1-i] = r
	}
	return string(out)
}
// LengthOfLongestSubstring returns the length of the longest substring of s
// containing no repeated byte, using a sliding window with a last-seen map.
// Time complexity: O(n). Space complexity: O(min(m, n)) for the index map.
func LengthOfLongestSubstring(s string) int {
	lastSeen := map[byte]int{}
	best, left := 0, 0
	for right := 0; right < len(s); right++ {
		// Jump the window start past the previous occurrence, if newer.
		if pos, seen := lastSeen[s[right]]; seen && pos > left {
			left = pos
		}
		if width := right - left + 1; width > best {
			best = width
		}
		lastSeen[s[right]] = right + 1
	}
	return best
}
// max returns the greater of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// CheckInclusion reports whether s2 contains a permutation of s1 as a
// contiguous substring, by sliding a window of length len(s1) across s2 and
// comparing byte frequency counts.
// Time complexity: O(l1 + k*(l2-l1)) where k is the number of distinct bytes
// in s1. Space complexity: O(1) — each map holds at most 256 byte keys.
func CheckInclusion(s1 string, s2 string) bool {
	// Bug fix: the original indexed s2[i] for i < len(s1) without checking
	// lengths, panicking with an out-of-range error when len(s1) > len(s2).
	if len(s1) > len(s2) {
		return false
	}
	need := map[byte]int{}
	window := map[byte]int{}
	for i := 0; i < len(s1); i++ {
		need[s1[i]]++
		window[s2[i]]++
	}
	// counts-match check, inlined so this block stands alone.
	matches := func() bool {
		for k, v := range need {
			if window[k] != v {
				return false
			}
		}
		return true
	}
	for i := 0; i < len(s2)-len(s1); i++ {
		if matches() {
			return true
		}
		window[s2[i+len(s1)]]++
		window[s2[i]]--
	}
	return matches()
}
// match reports whether every key of m1 maps to exactly the same count in
// m2. Keys present only in m2 are ignored (a one-directional comparison,
// which is what the sliding-window caller relies on, since decremented
// window counts remain as zero entries). (Also removes dataset metadata
// fused onto the closing brace, which would not compile.)
func match(m1, m2 map[byte]int) bool {
	for k, v := range m1 {
		if m2[k] != v {
			return false
		}
	}
	return true
}
package graph
import (
"container/list"
"errors"
"sort"
)
// Vertex is a vertex for DAG.
type Vertex struct {
	Value interface{}
}

// DAG is a Directed Acyclic Graph implemented with an adjacency list.
// Adjacency maps every vertex to the list of its outgoing neighbours
// (elements are *Vertex values stored in a container/list.List).
type DAG struct {
	Root      *Vertex
	Adjacency map[*Vertex]*list.List
}
// NewDAG constructs a new graph with a single root vertex.
func NewDAG(v interface{}) *DAG {
	root := &Vertex{Value: v}
	return &DAG{
		Root:      root,
		Adjacency: map[*Vertex]*list.List{root: list.New()},
	}
}
// Add inserts a vertex holding a value v in the graph.
// The new vertex starts with no edges; connect it with AddEdge.
func (g *DAG) Add(v interface{}) *Vertex {
	vert := &Vertex{Value: v}
	g.Adjacency[vert] = list.New()
	return vert
}
// Edges returns the outgoing edges for v.
// NOTE(review): if v is not a vertex of g, g.Adjacency[v] is a nil
// *list.List and the Len call panics — confirm callers only pass vertices
// obtained from this graph.
func (g *DAG) Edges(v *Vertex) []*Vertex {
	vs := make([]*Vertex, 0, g.Adjacency[v].Len())
	g.walk(v, func(v *Vertex) error {
		vs = append(vs, v)
		return nil
	})
	return vs
}
// Get returns the vertex for the corresponding value.
// It scans all vertices linearly; with duplicate values, which vertex is
// returned is unspecified (map iteration order is random).
func (g *DAG) Get(v interface{}) (*Vertex, bool) {
	for vert := range g.Adjacency {
		if v == vert.Value {
			return vert, true
		}
	}
	return nil, false
}
// AddEdge inserts an directed edge between two vertecies.
// Both vertices must already be in the graph. After inserting, the whole
// graph is re-checked for cycles; the error from that check is returned.
// NOTE(review): the edge is not removed again when the cycle check fails —
// confirm whether callers rely on the graph staying acyclic after an error.
func (g *DAG) AddEdge(to, from *Vertex) error {
	if _, ok := g.Adjacency[to]; !ok {
		return errors.New("to vertex not found")
	}
	l, ok := g.Adjacency[from]
	if !ok {
		return errors.New("from vertex not found")
	}
	l.PushBack(to)
	return g.cyclicCheck()
}
// cyclicCheck walks the whole graph from the root and returns an error if a
// vertex is reached again before its own exploration has finished (a back
// edge, i.e. a cycle).
func (g *DAG) cyclicCheck() error {
	visited := make(map[*Vertex]bool, len(g.Adjacency))
	visiting := make(map[*Vertex]bool, len(g.Adjacency))
	var walker WalkFunc
	walker = func(v *Vertex) error {
		if visited[v] {
			// Fully explored already; cannot contribute a new cycle.
			return nil
		}
		if visiting[v] && !visited[v] {
			// Entered again while still on the current DFS path.
			return errors.New("cycle detected")
		}
		visiting[v] = true
		if err := g.walk(v, walker); err != nil {
			return err
		}
		visited[v] = true
		return nil
	}
	return walker(g.Root)
}
// WalkFunc is a common func for all graph walking methods.
type WalkFunc func(*Vertex) error

// walk applies fn to each direct (outgoing) neighbour of v in insertion
// order, stopping at the first error.
func (g *DAG) walk(v *Vertex, fn WalkFunc) error {
	if edges := g.Adjacency[v]; edges != nil {
		for e := edges.Front(); e != nil; e = e.Next() {
			if err := fn(e.Value.(*Vertex)); err != nil {
				return err
			}
		}
	}
	return nil
}
// vertDepth pairs a vertex with its subtree depth for depth-ordered walks.
type vertDepth struct {
	vert  *Vertex
	depth int
}

// byDepth sorts vertDepth entries by decreasing depth (deepest first).
type byDepth []vertDepth

func (d byDepth) Len() int           { return len(d) }
func (d byDepth) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
func (d byDepth) Less(i, j int) bool { return d[i].depth > d[j].depth } // sort by greater depth
// walkByDepth applies fn to each direct neighbour of v, visiting neighbours
// with deeper subtrees first, stopping at the first error.
func (g *DAG) walkByDepth(v *Vertex, fn WalkFunc) error {
	edges := g.Adjacency[v]
	if edges == nil {
		return nil
	}
	// Collect neighbours together with their subtree depths, then sort.
	vbd := byDepth{}
	for e := edges.Front(); e != nil; e = e.Next() {
		vert := e.Value.(*Vertex)
		vbd = append(vbd, vertDepth{vert, g.depth(vert)})
	}
	sort.Sort(vbd)
	for _, vd := range vbd {
		if err := fn(vd.vert); err != nil {
			return err
		}
	}
	return nil
}
// depth returns the length of the longest outgoing path from v (0 for a
// vertex with no outgoing edges).
// NOTE(review): depths are recomputed without memoization, so on DAGs with
// heavily shared sub-paths this can do exponential work — confirm graphs
// stay small enough for this to be acceptable.
func (g *DAG) depth(v *Vertex) int {
	edges := g.Adjacency[v]
	if edges == nil {
		return 0
	}
	max := 0
	for e := edges.Front(); e != nil; e = e.Next() {
		if depth := g.depth(e.Value.(*Vertex)); depth > max {
			max = depth
		}
	}
	return 1 + max
}
// BFS walks the graph in breadth-first order.
// fn is applied exactly once per reachable vertex; the first error aborts
// the walk and is returned.
func (g *DAG) BFS(fn WalkFunc) error {
	l := list.New()
	l.PushBack(g.Root)
	visited := map[*Vertex]bool{}
	for e := l.Front(); e != nil; e = e.Next() {
		v := e.Value.(*Vertex)
		if v	isited[v] {
			// A vertex may be enqueued once per incoming edge; skip repeats.
			continue
		}
		if err := fn(v); err != nil {
			return err
		}
		visited[v] = true
		// Enqueue all outgoing neighbours of v.
		l.PushBackList(g.Adjacency[v])
	}
	return nil
}
// DFS walks the graph in depth-first order.
func (g *DAG) DFS(fn WalkFunc) error {
	return g.dfs(g.Root, make(map[*Vertex]bool, len(g.Adjacency)), fn)
}

// dfs applies fn to v (pre-order) and then recurses into its unvisited
// neighbours, stopping at the first error.
func (g *DAG) dfs(v *Vertex, visited map[*Vertex]bool, fn WalkFunc) error {
	if visited[v] {
		return nil
	}
	if err := fn(v); err != nil {
		return err
	}
	visited[v] = true
	walker := func(v *Vertex) error {
		if visited[v] {
			return nil
		}
		return g.dfs(v, visited, fn)
	}
	return g.walk(v, walker)
}
// ReverseDFS walks the graph in reverse depth-first order.
func (g *DAG) ReverseDFS(fn WalkFunc) error {
	return g.rdfs(g.Root, make(map[*Vertex]bool, len(g.Adjacency)), fn)
}
func (g *DAG) rdfs(v *Vertex, visited map[*Vertex]bool, fn WalkFunc) error {
visited[v] = true
walker := func(v *Vertex) error {
if visited[v] {
return nil
}
return g.rdfs(v, visited, fn)
}
if err := g.walkByDepth(v, walker); err != nil {
return err
}
return fn(v)
} | graph/dag.go | 0.791378 | 0.439447 | dag.go | starcoder |
package Challenge2_Structurally_Unique_Binary_Search_Trees
/*
Given a number ‘n’, write a function to return all structurally unique Binary Search Trees (BST) that can store values 1 to ‘n’?
Input: 2
Output: List containing root nodes of all structurally unique BSTs.
Explanation: Here are the 2 structurally unique BSTs storing all numbers from 1 to 2:
Input: 3
Output: List containing root nodes of all structurally unique BSTs.
Explanation: Here are the 5 structurally unique BSTs storing all numbers from 1 to 3:
ref: https://leetcode-cn.com/problems/unique-binary-search-trees/
ref: https://leetcode-cn.com/problems/unique-binary-search-trees-ii/
*/
// TreeNode is a binary-tree node holding an int value.
type TreeNode struct {
	Val         int
	Left, Right *TreeNode
}
// https://leetcode-cn.com/problems/unique-binary-search-trees-ii/
// generateTrees returns the roots of every structurally unique BST that
// stores the values 1..n, built by choosing each value in [start, end] as
// root and combining all left-subtree/right-subtree possibilities.
func generateTrees(n int) []*TreeNode {
	var bfs func(int, int) []*TreeNode
	bfs = func(start, end int) []*TreeNode {
		if start == end {
			// Single value: exactly one (leaf) tree.
			return []*TreeNode{{start, nil, nil}}
		}
		var res []*TreeNode
		for i := start; i <= end; i++ {
			if i == start {
				// Smallest value as root: no left subtree possible.
				right := bfs(i+1, end)
				for _, v := range right {
					root := &TreeNode{i, nil, nil}
					root.Right = v
					res = append(res, root)
				}
				continue
			}
			if i == end {
				// Largest value as root: no right subtree possible.
				left := bfs(start, i-1)
				for _, v := range left {
					root := &TreeNode{i, nil, nil}
					root.Left = v
					res = append(res, root)
				}
				continue
			}
			// Interior root: cross-product of left and right variants.
			left := bfs(start, i-1)
			right := bfs(i+1, end)
			for _, v1 := range left {
				for _, v2 := range right {
					root := &TreeNode{i, nil, nil}
					root.Left = v1
					root.Right = v2
					res = append(res, root)
				}
			}
		}
		return res
	}
	return bfs(1, n)
}
// https://leetcode-cn.com/problems/unique-binary-search-trees/
// numTrees returns the number of structurally unique BSTs that can store the
// values 1..n, computed recursively with memoization keyed on each subtree's
// value range. Returns 0 for n < 1, matching the original implementation.
func numTrees(n int) int {
	if n < 1 {
		return 0
	}
	// Local memo key; an empty range counts as exactly one (absent) subtree.
	type span struct{ lo, hi int }
	memo := map[span]int{}
	var count func(lo, hi int) int
	count = func(lo, hi int) int {
		if lo >= hi {
			return 1
		}
		if cached, ok := memo[span{lo, hi}]; ok {
			return cached
		}
		total := 0
		for root := lo; root <= hi; root++ {
			total += count(lo, root-1) * count(root+1, hi)
		}
		memo[span{lo, hi}] = total
		return total
	}
	return count(1, n)
}
// Range is a memoization key identifying the inclusive value interval
// [start, end] of a subtree. (Also removes dataset metadata fused onto the
// closing brace, which would not compile.)
type Range struct {
	start int
	end   int
}
package fp
import ()
// FilterString returns the elements of input (paired with their indices when
// calling f) for which f reports true, preserving order. The result is never
// nil, even for empty input.
func FilterString(f func(string, int) bool, input []string) (output []string) {
	output = make([]string, 0)
	for i := range input {
		if f(input[i], i) {
			output = append(output, input[i])
		}
	}
	return
}
// FilterInt returns the elements of input (paired with their indices when
// calling f) for which f reports true, preserving order. The result is never
// nil, even for empty input.
func FilterInt(f func(int, int) bool, input []int) (output []int) {
	output = make([]int, 0)
	for i := range input {
		if f(input[i], i) {
			output = append(output, input[i])
		}
	}
	return
}
// FilterInt8 returns the values for which f reports true, preserving order.
func FilterInt8(f func(int8, int) bool, input []int8) (output []int8) {
	output = make([]int8, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterInt16 returns the values for which f reports true, preserving order.
func FilterInt16(f func(int16, int) bool, input []int16) (output []int16) {
	output = make([]int16, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterInt32 returns the values for which f reports true, preserving order.
func FilterInt32(f func(int32, int) bool, input []int32) (output []int32) {
	output = make([]int32, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterInt64 returns the values for which f reports true, preserving order.
func FilterInt64(f func(int64, int) bool, input []int64) (output []int64) {
	output = make([]int64, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterUint8 returns the values for which f reports true, preserving order.
func FilterUint8(f func(uint8, int) bool, input []uint8) (output []uint8) {
	output = make([]uint8, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterUint16 returns the values for which f reports true, preserving order.
func FilterUint16(f func(uint16, int) bool, input []uint16) (output []uint16) {
	output = make([]uint16, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterUint32 returns the values for which f reports true, preserving order.
func FilterUint32(f func(uint32, int) bool, input []uint32) (output []uint32) {
	output = make([]uint32, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterUint64 returns the values for which f reports true, preserving order.
func FilterUint64(f func(uint64, int) bool, input []uint64) (output []uint64) {
	output = make([]uint64, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterFloat32 returns the values for which f reports true, preserving order.
func FilterFloat32(f func(float32, int) bool, input []float32) (output []float32) {
	output = make([]float32, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterFloat64 returns the values for which f reports true, preserving order.
func FilterFloat64(f func(float64, int) bool, input []float64) (output []float64) {
	output = make([]float64, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}

// FilterByte returns the values for which f reports true, preserving order.
// Note: byte is an alias of uint8, so this duplicates FilterUint8 under a
// second name.
func FilterByte(f func(byte, int) bool, input []byte) (output []byte) {
	output = make([]byte, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}
// Filter returns the elements of input (paired with their indices when
// calling f) for which f reports true, preserving order. The result is never
// nil, even for empty input. (Also removes dataset metadata fused onto the
// closing brace, which would not compile.)
func Filter(f func(any, int) bool, input []any) (output []any) {
	output = make([]any, 0)
	for idx, data := range input {
		if f(data, idx) {
			output = append(output, data)
		}
	}
	return
}
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// ApprovalSettings models the approval requirements of a policy.
// NOTE(review): this file appears to be generated (kiota-style serialization
// plumbing); prefer regenerating over hand-editing.
type ApprovalSettings struct {
	// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
	additionalData map[string]interface{}
	// One of NoApproval, SingleStage or Serial. The NoApproval is used when isApprovalRequired is false.
	approvalMode *string
	// If approval is required, the one or two elements of this collection define each of the stages of approval. An empty array if no approval is required.
	approvalStages []ApprovalStageable
	// If false, then approval is not required for requests in this policy.
	isApprovalRequired *bool
	// If false, then approval is not required for a user who already has an assignment to extend their assignment.
	isApprovalRequiredForExtension *bool
	// Indicates whether the requestor is required to supply a justification in their request.
	isRequestorJustificationRequired *bool
}
// NewApprovalSettings instantiates a new approvalSettings and sets the default values.
func NewApprovalSettings() *ApprovalSettings {
	m := &ApprovalSettings{}
	m.SetAdditionalData(make(map[string]interface{}))
	return m
}
// CreateApprovalSettingsFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// This type has no subtypes, so the parse node is unused and a plain
// ApprovalSettings is always returned.
func CreateApprovalSettingsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
	return NewApprovalSettings(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ApprovalSettings) GetAdditionalData() map[string]interface{} {
	if m == nil {
		return nil
	}
	return m.additionalData
}
// GetApprovalMode gets the approvalMode property value. One of NoApproval, SingleStage or Serial. The NoApproval is used when isApprovalRequired is false.
func (m *ApprovalSettings) GetApprovalMode() *string {
	if m == nil {
		return nil
	}
	return m.approvalMode
}
// GetApprovalStages gets the approvalStages property value. If approval is required, the one or two elements of this collection define each of the stages of approval. An empty array if no approval is required.
func (m *ApprovalSettings) GetApprovalStages() []ApprovalStageable {
	if m == nil {
		return nil
	}
	return m.approvalStages
}
// GetFieldDeserializers the deserialization information for the current model.
// Each entry maps a JSON property name to a closure that reads the value
// from the parse node and stores it on the receiver via the setter.
func (m *ApprovalSettings) GetFieldDeserializers() map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
	res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error)
	res["approvalMode"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetApprovalMode(val)
		}
		return nil
	}
	res["approvalStages"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateApprovalStageFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			// Downcast each parsed element to the stage interface.
			res := make([]ApprovalStageable, len(val))
			for i, v := range val {
				res[i] = v.(ApprovalStageable)
			}
			m.SetApprovalStages(res)
		}
		return nil
	}
	res["isApprovalRequired"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetBoolValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetIsApprovalRequired(val)
		}
		return nil
	}
	res["isApprovalRequiredForExtension"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetBoolValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetIsApprovalRequiredForExtension(val)
		}
		return nil
	}
	res["isRequestorJustificationRequired"] = func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetBoolValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetIsRequestorJustificationRequired(val)
		}
		return nil
	}
	return res
}
// GetIsApprovalRequired gets the isApprovalRequired property value. If false, then approval is not required for requests in this policy.
func (m *ApprovalSettings) GetIsApprovalRequired() *bool {
	if m == nil {
		return nil
	}
	return m.isApprovalRequired
}
// GetIsApprovalRequiredForExtension gets the isApprovalRequiredForExtension property value. If false, then approval is not required for a user who already has an assignment to extend their assignment.
func (m *ApprovalSettings) GetIsApprovalRequiredForExtension() *bool {
	if m == nil {
		return nil
	}
	return m.isApprovalRequiredForExtension
}
// GetIsRequestorJustificationRequired gets the isRequestorJustificationRequired property value. Indicates whether the requestor is required to supply a justification in their request.
func (m *ApprovalSettings) GetIsRequestorJustificationRequired() *bool {
	if m == nil {
		return nil
	}
	return m.isRequestorJustificationRequired
}
// Serialize serializes information the current object.
// Each property is written in turn; the first writer error aborts
// serialization and is returned.
func (m *ApprovalSettings) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter) error {
	{
		err := writer.WriteStringValue("approvalMode", m.GetApprovalMode())
		if err != nil {
			return err
		}
	}
	if m.GetApprovalStages() != nil {
		// Upcast the stage slice to the writer's Parsable element type.
		cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetApprovalStages()))
		for i, v := range m.GetApprovalStages() {
			cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
		}
		err := writer.WriteCollectionOfObjectValues("approvalStages", cast)
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteBoolValue("isApprovalRequired", m.GetIsApprovalRequired())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteBoolValue("isApprovalRequiredForExtension", m.GetIsApprovalRequiredForExtension())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteBoolValue("isRequestorJustificationRequired", m.GetIsRequestorJustificationRequired())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteAdditionalData(m.GetAdditionalData())
		if err != nil {
			return err
		}
	}
	return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ApprovalSettings) SetAdditionalData(value map[string]interface{}) {
	if m == nil {
		return
	}
	m.additionalData = value
}
// SetApprovalMode sets the approvalMode property value. One of NoApproval, SingleStage or Serial. The NoApproval is used when isApprovalRequired is false.
func (m *ApprovalSettings) SetApprovalMode(value *string) {
	if m == nil {
		return
	}
	m.approvalMode = value
}
// SetApprovalStages sets the approvalStages property value. If approval is required, the one or two elements of this collection define each of the stages of approval. An empty array if no approval is required.
func (m *ApprovalSettings) SetApprovalStages(value []ApprovalStageable) {
	if m == nil {
		return
	}
	m.approvalStages = value
}
// SetIsApprovalRequired sets the isApprovalRequired property value. If false, then approval is not required for requests in this policy.
func (m *ApprovalSettings) SetIsApprovalRequired(value *bool) {
	if m == nil {
		return
	}
	m.isApprovalRequired = value
}
// SetIsApprovalRequiredForExtension sets the isApprovalRequiredForExtension property value. If false, then approval is not required for a user who already has an assignment to extend their assignment.
func (m *ApprovalSettings) SetIsApprovalRequiredForExtension(value *bool) {
	if m == nil {
		return
	}
	m.isApprovalRequiredForExtension = value
}
// SetIsRequestorJustificationRequired sets the isRequestorJustificationRequired property value. Indicates whether the requestor is required to supply a justification in their request.
func (m *ApprovalSettings) SetIsRequestorJustificationRequired(value *bool)() {
if m != nil {
m.isRequestorJustificationRequired = value
}
} | models/approval_settings.go | 0.582135 | 0.413951 | approval_settings.go | starcoder |
package palette
import "image/color"
// blendRGBA returns the interpolation between two sRGB colors with
// pre-multiplied alpha. x is the blend position: 0 yields a, 1 yields b.
// The general path converts to linear RGB before interpolating, which is
// the perceptually correct way to blend sRGB values.
func blendRGBA(a, b color.RGBA, x float64) color.RGBA {
	// Below this per-channel difference, blending directly in sRGB space is
	// indistinguishable from blending in linear space, so we skip the
	// conversion round trip.
	const linThresh = 5
	// diff8 returns |a - b| for two uint8 channels without underflow.
	diff8 := func(a, b uint8) uint8 {
		if a < b {
			return b - a
		}
		return a - b
	}
	if a.A == 255 && b.A == 255 && diff8(a.R, b.R) <= linThresh && diff8(a.G, b.G) <= linThresh && diff8(a.B, b.B) <= linThresh {
		// Perform a quick linear interpolation.
		blend8 := func(a, b uint8, x float64) uint8 {
			c := float64(a)*(1-x) + float64(b)*x
			if c <= 0 {
				return 0
			} else if c >= 255 {
				return 255
			}
			return uint8(c)
		}
		return color.RGBA{
			blend8(a.R, b.R, x),
			blend8(a.G, b.G, x),
			blend8(a.B, b.B, x),
			255,
		}
	}
	// blend interpolates one channel through linear-light space and clamps
	// the sRGB result to lim (used to keep channels <= alpha when the color
	// is still premultiplied).
	blend := func(a, b uint8, x float64, lim uint8) uint8 {
		// Map to linear RGB, blend in linear RGB, and map
		// back to sRGB.
		// NOTE(review): the 1<<16-1 bound implies sRGB8ToLinear yields a
		// 16-bit linear value — confirm against its definition.
		al, bl := sRGB8ToLinear(a), sRGB8ToLinear(b)
		cl := float64(al)*(1-x) + float64(bl)*x
		if cl < 0 {
			return 0
		} else if cl >= 1<<16-1 {
			return 255
		}
		out := linearTosRGB8(uint16(cl))
		if out > lim {
			out = lim
		}
		return out
	}
	// linear interpolates a channel directly in 8-bit space (used for the
	// alpha channel, which is not gamma-encoded).
	linear := func(a, b uint8, x float64) uint8 {
		c := int(float64(a)*(1-x) + float64(b)*x)
		if c <= 0 {
			return 0
		} else if c >= 255 {
			return 255
		}
		return uint8(c)
	}
	if a.A == b.A {
		// No need to undo the alpha pre-multiplication.
		return color.RGBA{
			blend(a.R, b.R, x, a.A),
			blend(a.G, b.G, x, a.A),
			blend(a.B, b.B, x, a.A),
			a.A,
		}
	}
	// Un-premultiply the alpha, map to linear RGB, blend in
	// linear RGB, map back to sRGB, and re-premultiply the alpha.
	// A fully transparent endpoint contributes no color of its own, so keep
	// the other endpoint's color and only interpolate alpha.
	if a.A == 0 {
		return color.RGBA{b.R, b.G, b.B, linear(a.A, b.A, x)}
	} else if b.A == 0 {
		return color.RGBA{a.R, a.G, a.B, linear(a.A, b.A, x)}
	}
	// TODO: This loses precision. Maybe use 16 bit sRGB?
	a.R = uint8(uint16(a.R) * 255 / uint16(a.A))
	a.G = uint8(uint16(a.G) * 255 / uint16(a.A))
	a.B = uint8(uint16(a.B) * 255 / uint16(a.A))
	b.R = uint8(uint16(b.R) * 255 / uint16(b.A))
	b.G = uint8(uint16(b.G) * 255 / uint16(b.A))
	b.B = uint8(uint16(b.B) * 255 / uint16(b.A))
	c := color.RGBA{
		blend(a.R, b.R, x, 255),
		blend(a.G, b.G, x, 255),
		blend(a.B, b.B, x, 255),
		linear(a.A, b.A, x),
	}
	c.R = uint8(uint16(c.R) * uint16(c.A) / 255)
	c.G = uint8(uint16(c.G) * uint16(c.A) / 255)
	c.B = uint8(uint16(c.B) * uint16(c.A) / 255)
	return c
}
package utility
import (
"math/rand"
"time"
)
// ZeroTime represents 0 in epoch time (the Unix epoch). Note this differs
// from Go's zero time.Time value; see IsZeroTime for a check covering both.
var ZeroTime time.Time = time.Unix(0, 0)
// MaxTime represents the latest useful golang date (219248499-12-06 15:30:07.999999999 +0000 UTC)
var MaxTime time.Time = time.Unix(1<<63-62135596801, 999999999)
// IsZeroTime reports whether t is either Go's zero time.Time or the Unix
// epoch (the package-level ZeroTime).
func IsZeroTime(t time.Time) bool {
	return t.IsZero() || t.Equal(ZeroTime)
}
// fromNanoSeconds returns milliseconds of a duration for queries in the database.
func FromNanoseconds(duration time.Duration) int64 {
return int64(duration) / 1000000
}
// fromNanoSeconds returns milliseconds of a duration for queries in the database.
func ToNanoseconds(duration time.Duration) time.Duration {
return duration * 1000000
}
// FromPythonTime returns a time.Time that corresponds to the float style
// python time which is <seconds>.<fractional_seconds> from unix epoch.
func FromPythonTime(pyTime float64) time.Time {
sec := int64(pyTime)
toNano := int64(1000000000)
asNano := int64(pyTime * float64(toNano))
nano := asNano % toNano
res := time.Unix(sec, nano)
return res
}
// ToPythonTime returns a number in the format that python's time.time()
// returns: seconds since the unix epoch with a fractional part. Zero times
// (per IsZeroTime) map to 0.
func ToPythonTime(t time.Time) float64 {
	if IsZeroTime(t) {
		return 0
	}
	return float64(t.UnixNano()) / float64(1000000000)
}
// JitterInterval returns a duration that some value between the
// interval and 2x the interval.
func JitterInterval(interval time.Duration) time.Duration {
return time.Duration(rand.Float64()*float64(interval)) + interval
}
// RoundPartOfDay produces a time value with the hour value
// rounded down to the most recent interval. Valid intervals are 1-12
// hours; out-of-range values round to the start of the day (see
// findPartHour).
func RoundPartOfDay(n int) time.Time { return findPartHour(time.Now(), n) }
// RoundPartOfHour produces a time value with the minute value
// rounded down to the most recent interval. Valid intervals are 1-30
// minutes; out-of-range values round to the start of the hour (see
// findPartMin).
func RoundPartOfHour(n int) time.Time { return findPartMin(time.Now(), n) }
// RoundPartOfMinute produces a time value with the second value
// rounded down to the most recent interval. Valid intervals are 1-30
// seconds; out-of-range values round to the start of the minute (see
// findPartSec).
func RoundPartOfMinute(n int) time.Time { return findPartSec(time.Now(), n) }
// this implements the logic of RoundPartOfDay, but takes time as an
// argument for testability.
func findPartHour(now time.Time, num int) time.Time {
var hour int
if num > now.Hour() || num > 12 || num <= 0 {
hour = 0
} else {
hour = now.Hour() - (now.Hour() % num)
}
return time.Date(now.Year(), now.Month(), now.Day(), hour, 0, 0, 0, time.UTC)
}
// this implements the logic of RoundPartOfHour, but takes time as an
// argument for testability.
func findPartMin(now time.Time, num int) time.Time {
var min int
if num > now.Minute() || num > 30 || num <= 0 {
min = 0
} else {
min = now.Minute() - (now.Minute() % num)
}
return time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), min, 0, 0, time.UTC)
}
// this implements the logic of RoundPartOfMinute, but takes time as an
// argument for testability.
func findPartSec(now time.Time, num int) time.Time {
var sec int
if num > now.Second() || num > 30 || num <= 0 {
sec = 0
} else {
sec = now.Second() - (now.Second() % num)
}
return time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), sec, 0, time.UTC)
}
// Creates and returns a time.Time corresponding to the start of the UTC day containing the given date.
func GetUTCDay(date time.Time) time.Time {
// Convert to UTC.
date = date.In(time.UTC)
// Create a new time.Time for the beginning of the day.
year, month, day := date.Date()
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC)
}
// Creates and returns a time.Time corresponding to the start of the UTC hour containing the given date.
func GetUTCHour(date time.Time) time.Time {
// Convert to UTC.
date = date.In(time.UTC)
// Create a new time.Time for the beginning of the hour.
year, month, day := date.Date()
hour := date.Hour()
return time.Date(year, month, day, hour, 0, 0, 0, time.UTC)
} | vendor/github.com/evergreen-ci/timber/vendor/github.com/evergreen-ci/utility/time.go | 0.831451 | 0.546557 | time.go | starcoder |
package eval
import (
"errors"
"math"
// "fmt"
"expression-parsing/ast"
"expression-parsing/token"
)
// variables holds named values created by ast.Assignment nodes and read by
// ast.Identifier nodes. It is package-level shared state with no locking,
// so concurrent Evaluate calls are not safe without external synchronization.
var variables = make(map[string]float64)
func IsIntegral(value float64) bool {
return value == float64(int64(value))
}
// Evaluate recursively evaluates expr and returns its numeric value.
// Booleans are encoded as floats: 1 for true, 0 for false. Identifier
// lookups and assignments go through the package-level variables map.
func Evaluate(expr ast.Expression) (float64, error) {
	if expr == nil {
		return 0, errors.New("Undefined behaviour.")
	}
	switch expr := expr.(type) {
	case *ast.Literal:
		return expr.Value, nil
	case *ast.Identifier:
		// Identifiers must have been assigned previously; otherwise error.
		value, ok := variables[expr.Token.Lexeme]
		if !ok {
			return 0, errors.New(expr.Token.Lexeme + " not defined.")
		}
		return value, nil
	case *ast.Grouped:
		return Evaluate(expr.Group)
	case *ast.Prefix:
		return EvaluatePrefix(expr)
	case *ast.Infix:
		return EvaluateInfix(expr)
	case *ast.Assignment:
		// Evaluate the right-hand side first, then store it under the
		// assignment's name; the expression's value is the stored value.
		name := expr.Token.Lexeme
		value, err := Evaluate(expr.Value)
		if err != nil {
			return 0, err
		}
		variables[name] = value
		return value, nil
	default:
		// NOTE(review): unknown node types silently evaluate to (0, nil)
		// rather than returning an error — confirm this is intentional.
		return 0, nil
	}
}
// EvaluatePrefix evaluates a unary operator node: SUB is arithmetic
// negation, LNOT is logical not (1 if the operand is 0, else 0), and any
// other operator is treated as bitwise NOT, which requires an integral
// operand.
func EvaluatePrefix(expr *ast.Prefix) (float64, error) {
	right, err := Evaluate(expr.Right)
	if err != nil {
		return 0, err
	}
	if expr.Op.Type == token.SUB {
		return -right, nil
	}
	if expr.Op.Type == token.LNOT {
		// Logical not: any non-zero value is truthy.
		if right != 0 {
			return 0, nil
		}
		return 1, nil
	}
	// Fallthrough: bitwise NOT on the int64 representation.
	if IsIntegral(right) {
		return float64(^int64(right)), nil
	} else {
		return 0, errors.New("Not operand must be integral.")
	}
}
// EvaluateInfix evaluates a binary operator node. Both operands are
// evaluated eagerly, so the logical operators LOR/LAND do not short-circuit
// (side effects in the right operand, such as assignments, always run).
// Comparison and logical operators return 1 for true and 0 for false.
// Bitwise, shift, and modulus operators require integral operands.
func EvaluateInfix(expr *ast.Infix) (float64, error) {
	left, errLeft := Evaluate(expr.Left)
	if errLeft != nil {
		return 0, errLeft
	}
	right, errRight := Evaluate(expr.Right)
	if errRight != nil {
		return 0, errRight
	}
	switch expr.Op.Type {
	// Arithmetic operators.
	case token.ADD:
		return left + right, nil
	case token.SUB:
		return left - right, nil
	case token.MUL:
		return left * right, nil
	case token.DIV:
		if right != 0 {
			return left / right, nil
		}
		return 0, errors.New("Division by zero.")
	case token.MOD:
		if IsIntegral(left) && IsIntegral(right) {
			if right != 0 {
				return float64(int64(left) % int64(right)), nil
			}
			return 0, errors.New("Division by zero.")
		}
		return 0, errors.New("Modulus operand must be integral.")
	case token.EXP:
		return math.Pow(left, right), nil
	// Bitwise operators (operate on the int64 representation).
	case token.OR:
		if IsIntegral(left) && IsIntegral(right) {
			return float64(int64(left) | int64(right)), nil
		}
		return 0, errors.New("Or operand must be integral.")
	case token.XOR:
		if IsIntegral(left) && IsIntegral(right) {
			return float64(int64(left) ^ int64(right)), nil
		}
		return 0, errors.New("Xor operand must be integral.")
	case token.AND:
		if IsIntegral(left) && IsIntegral(right) {
			return float64(int64(left) & int64(right)), nil
		}
		return 0, errors.New("And operand must be integral.")
	// Shift operators: the shift amount must additionally be non-negative.
	case token.LEFT:
		if IsIntegral(left) && IsIntegral(right) {
			if right >= 0 {
				return float64(int64(left) << uint64(right)), nil
			}
			return 0, errors.New("Left shift operand must be integral. Shift value must be unsigned.")
		}
		return 0, errors.New("Left shift operand must be integral. Shift value must be unsigned.")
	case token.RIGHT:
		if IsIntegral(left) && IsIntegral(right) {
			if right >= 0 {
				return float64(int64(left) >> uint64(right)), nil
			}
			return 0, errors.New("Right shift operand must be integral. Shift value must be unsigned.")
		}
		return 0, errors.New("Right shift operand must be integral. Shift value must be unsigned.")
	// Logical operators (non-short-circuiting; see doc comment).
	case token.LOR:
		if left != 0 || right != 0 {
			return 1, nil
		}
		return 0, nil
	case token.LAND:
		if left != 0 && right != 0 {
			return 1, nil
		}
		return 0, nil
	// Comparison operators.
	case token.EQEQ:
		if left == right {
			return 1, nil
		}
		return 0, nil
	case token.NEQ:
		if left != right {
			return 1, nil
		}
		return 0, nil
	case token.GT:
		if left > right {
			return 1, nil
		}
		return 0, nil
	case token.GTEQ:
		if left >= right {
			return 1, nil
		}
		return 0, nil
	case token.LT:
		if left < right {
			return 1, nil
		}
		return 0, nil
	case token.LTEQ:
		if left <= right {
			return 1, nil
		}
		return 0, nil
	default:
		// NOTE(review): unknown operators silently evaluate to (0, nil)
		// rather than returning an error — confirm this is intentional.
		return 0, nil
	}
}
package schema
import (
"go/ast"
"go/token"
"go/types"
"github.com/bflad/tfproviderlint/helper/astutils"
)
const (
	// TypeNameCustomizeDiffFunc is the name of the CustomizeDiffFunc type,
	// used when matching named types from the helper/customdiff package.
	TypeNameCustomizeDiffFunc = `CustomizeDiffFunc`
)
// IsFuncTypeCustomizeDiffFunc returns true if the FuncType matches expected parameters and results types
func IsFuncTypeCustomizeDiffFunc(node ast.Node, info *types.Info) bool {
funcType := astutils.FuncTypeFromNode(node)
if funcType == nil {
return false
}
return isFuncTypeCustomizeDiffFuncV1(funcType, info) || isFuncTypeCustomizeDiffFuncV2(funcType, info)
}
// IsTypeCustomizeDiffFunc returns if the type is CustomizeDiffFunc from the customdiff package
func IsTypeCustomizeDiffFunc(t types.Type) bool {
switch t := t.(type) {
case *types.Named:
return IsNamedType(t, TypeNameCustomizeDiffFunc)
case *types.Pointer:
return IsTypeCustomizeDiffFunc(t.Elem())
default:
return false
}
}
// isFuncTypeCustomizeDiffFuncV1 returns true if the FuncType matches expected parameters and results types of V1
func isFuncTypeCustomizeDiffFuncV1(funcType *ast.FuncType, info *types.Info) bool {
if !astutils.HasFieldListLength(funcType.Params, 2) {
return false
}
if !astutils.IsFieldListTypePackageType(funcType.Params, 0, info, PackagePathVersion(1), TypeNameResourceDiff) {
return false
}
if !astutils.IsFieldListType(funcType.Params, 1, astutils.IsExprTypeInterface) {
return false
}
if !astutils.HasFieldListLength(funcType.Results, 1) {
return false
}
return astutils.IsFieldListType(funcType.Results, 0, astutils.IsExprTypeError)
}
// isFuncTypeCustomizeDiffFuncV2 returns true if the FuncType matches expected parameters and results types of V2
func isFuncTypeCustomizeDiffFuncV2(funcType *ast.FuncType, info *types.Info) bool {
if !astutils.HasFieldListLength(funcType.Params, 3) {
return false
}
if !astutils.IsFieldListTypePackageType(funcType.Params, 0, info, "context", "Context") {
return false
}
if !astutils.IsFieldListTypePackageType(funcType.Params, 1, info, PackagePathVersion(2), TypeNameResourceDiff) {
return false
}
if !astutils.IsFieldListType(funcType.Params, 2, astutils.IsExprTypeInterface) {
return false
}
if !astutils.HasFieldListLength(funcType.Results, 1) {
return false
}
return astutils.IsFieldListType(funcType.Results, 0, astutils.IsExprTypeError)
}
// CustomizeDiffFuncInfo represents all gathered CustomizeDiffFunc data for easier access
type CustomizeDiffFuncInfo struct {
AstFuncDecl *ast.FuncDecl
AstFuncLit *ast.FuncLit
Body *ast.BlockStmt
Node ast.Node
Pos token.Pos
Type *ast.FuncType
TypesInfo *types.Info
}
// NewCustomizeDiffFuncInfo instantiates a CustomizeDiffFuncInfo
func NewCustomizeDiffFuncInfo(node ast.Node, info *types.Info) *CustomizeDiffFuncInfo {
result := &CustomizeDiffFuncInfo{
TypesInfo: info,
}
switch node := node.(type) {
case *ast.FuncDecl:
result.AstFuncDecl = node
result.Body = node.Body
result.Node = node
result.Pos = node.Pos()
result.Type = node.Type
case *ast.FuncLit:
result.AstFuncLit = node
result.Body = node.Body
result.Node = node
result.Pos = node.Pos()
result.Type = node.Type
}
return result
} | vendor/github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema/type_customizedifffunc.go | 0.709221 | 0.601857 | type_customizedifffunc.go | starcoder |
package dfl
import (
"fmt"
"reflect"
"strings"
"github.com/pkg/errors"
)
// Assign is a BinaryOperator which sets the value of the right side to the attribute or variable defined by the left side.
// The left node is expected to be an Attribute (written into the context)
// or a Variable (written into the vars map); see Evaluate.
type Assign struct {
	*BinaryOperator
}
// Dfl renders the assignment in DFL syntax using the ":=" operator. In
// pretty mode, attribute/variable assignments from Function or Pipe nodes
// keep the indented builder layout; all other pretty combinations fall back
// to the generic BinaryOperator rendering.
func (a Assign) Dfl(quotes []string, pretty bool, tabs int) string {
	b := a.Builder(":=", quotes, tabs)
	if pretty {
		b = b.Indent(tabs + 1).Pretty(pretty).Tabs(tabs + 1).TrimRight(pretty)
		switch a.Left.(type) {
		case *Attribute:
			switch a.Right.(type) {
			case *Function, *Pipe:
				return b.Dfl()
			}
		case *Variable:
			switch a.Right.(type) {
			case *Function, *Pipe:
				return b.Dfl()
			}
		}
		return a.BinaryOperator.Dfl(":=", quotes, pretty, tabs)
	}
	return b.Dfl()
}
// Sql renders the assignment as a SQL statement that selects into a temp
// table named after the variable. Only Variable left-hand sides are
// supported; any other left node yields an empty string.
func (a Assign) Sql(pretty bool, tabs int) string {
	if pretty {
		switch left := a.Left.(type) {
		case *Variable:
			str := strings.Repeat("  ", tabs) + "WHERE " + a.Right.Sql(pretty, tabs) + "\n"
			str += strings.Repeat("  ", tabs) + "INTO TEMP TABLE " + left.Sql(pretty, tabs) + ";"
			return str
		}
		return ""
	}
	switch left := a.Left.(type) {
	case *Variable:
		return "WHERE " + a.Right.Sql(pretty, tabs) + " INTO TEMP TABLE " + left.Sql(pretty, tabs) + ";" // #nosec
	}
	return ""
}
// Map returns a map representation of the assignment node, tagged with the
// operator name "assign".
func (a Assign) Map() map[string]interface{} {
	return a.BinaryOperator.Map("assign", a.Left, a.Right)
}
// Compile returns a compiled version of this node.
// Both children are compiled and wrapped in a fresh Assign.
func (a Assign) Compile() Node {
	left := a.Left.Compile()
	right := a.Right.Compile()
	return &Assign{&BinaryOperator{Left: left, Right: right}}
}
func (a Assign) Evaluate(vars map[string]interface{}, ctx interface{}, funcs FunctionMap, quotes []string) (map[string]interface{}, interface{}, error) {
switch lva := a.Left.(type) {
case Attribute:
vars, rv, err := a.Right.Evaluate(vars, ctx, funcs, quotes)
if err != nil {
return vars, rv, errors.Wrap(err, "error processing right value of "+a.Dfl(quotes, false, 0))
}
if t := reflect.TypeOf(ctx); t.Kind() != reflect.Map {
ctx = map[string]interface{}{}
}
path := lva.Name
obj := ctx
for len(path) > 0 {
if !strings.Contains(path, ".") {
reflect.ValueOf(obj).SetMapIndex(reflect.ValueOf(path), reflect.ValueOf(rv))
break
}
pair := strings.SplitN(path, ".", 2)
v := reflect.ValueOf(obj)
next := v.MapIndex(reflect.ValueOf(pair[0]))
if (reflect.TypeOf(next.Interface()).Kind() != reflect.Map) || (!v.IsValid()) || v.IsNil() {
m := map[string]interface{}{}
v.SetMapIndex(reflect.ValueOf(pair[0]), reflect.ValueOf(m))
obj = m
} else {
obj = next.Interface()
}
path = pair[1]
}
return vars, ctx, nil
case Variable:
vars, rv, err := a.Right.Evaluate(vars, ctx, funcs, quotes)
if err != nil {
return vars, rv, errors.Wrap(err, "error processing right value of "+a.Dfl(quotes, false, 0))
}
path := lva.Name
var obj interface{}
obj = vars
for len(path) > 0 {
if !strings.Contains(path, ".") {
reflect.ValueOf(obj).SetMapIndex(reflect.ValueOf(path), reflect.ValueOf(rv))
break
}
pair := strings.SplitN(path, ".", 2)
v := reflect.ValueOf(obj)
next := v.MapIndex(reflect.ValueOf(pair[0]))
if (reflect.TypeOf(next.Interface()).Kind() != reflect.Map) || (!v.IsValid()) || v.IsNil() {
m := map[string]interface{}{}
v.SetMapIndex(reflect.ValueOf(pair[0]), reflect.ValueOf(m))
obj = m
} else {
obj = next.Interface()
}
path = pair[1]
}
return vars, ctx, nil
}
return vars, ctx, errors.New("error evaluating declare. left value (" + a.Left.Dfl(quotes, false, 0) + ") must be an attribute node but is of type " + fmt.Sprint(reflect.TypeOf(a.Left)))
} | pkg/dfl/Assign.go | 0.686685 | 0.437343 | Assign.go | starcoder |
package main
// Note 1: The supply stores ten preset data groups, M0-M9. M0 is the group
// loaded by default at power-on; M1 and M2 can be recalled quickly from the
// front panel; M3-M9 are ordinary storage groups. The start address of a
// group is 0x0050 + groupNumber*0x0010 — for example, M3 starts at
// 0x0050 + 3*0x0010 = 0x0080.
// Note 2: Key lock: write 0 to unlock the panel keys, 1 to lock them.
// Note 3: Protection state reads 0-3: 0 = normal operation, 1 = OVP
// (over-voltage), 2 = OCP (over-current), 3 = OPP (over-power).
// Note 4: Regulation mode reads 0 for CV (constant voltage) and 1 for CC
// (constant current).
// Note 5: Output switch: read/write 0 for off, 1 for on.
// Note 6: Backlight brightness ranges 0-5 (0 darkest, 5 brightest).
// Note 7: Writing a group number 0-9 to the fast-recall register loads that
// group's stored settings into the active configuration.
const (
	// Live control and status registers.
	voltageSetRegister    = 0
	currentSetRegister    = 1
	voltageOutRegister    = 2
	powerOutRegister      = 3
	supplyVoltageRegister = 4
	lockRegister          = 6
	protectionRegister    = 7
	modeRegister          = 8 // Constant Current or Voltage
	onOffRegister         = 9
	ledBrightnessRegister = 10
	modelRegister         = 11
	versionRegister       = 12
	loadMemoryRegister    = 35 // loads presets
	// M0 preset group (power-on defaults), base 0x50. Each group occupies
	// 0x10 registers; group N starts at 0x50 + N*0x10.
	defaultVoltageSetRegister             = 80
	defaultCurrentSetRegister             = 81
	defaultOverVoltageProtectionRegister  = 82
	defaultOverCurrentProtectionRegister  = 83
	defaultOverPowerProtectionRegister    = 84
	defaultLedBrightnessRegister          = 85
	defaultDataRecallRegister             = 86
	defaultpowerOutputSwitchStateRegister = 87
	// M1 preset group, base 0x60 (panel quick-recall).
	m1VoltageSetRegister             = 0x60
	m1CurrentSetRegister             = 0x61
	m1OverVoltageProtectionRegister  = 0x62
	m1OverCurrentProtectionRegister  = 0x63
	m1OverPowerProtectionRegister    = 0x64
	m1LedBrightnessRegister          = 0x65
	m1DataRecallRegister             = 0x66
	m1powerOutputSwitchStateRegister = 0x67
	// M2 preset group, base 0x70 (panel quick-recall).
	m2VoltageSetRegister             = 0x70
	m2CurrentSetRegister             = 0x71
	m2OverVoltageProtectionRegister  = 0x72
	m2OverCurrentProtectionRegister  = 0x73
	m2OverPowerProtectionRegister    = 0x74
	m2LedBrightnessRegister          = 0x75
	m2DataRecallRegister             = 0x76
	m2powerOutputSwitchStateRegister = 0x77
	// M3 preset group, base 0x80.
	m3VoltageSetRegister             = 0x80
	m3CurrentSetRegister             = 0x81
	m3OverVoltageProtectionRegister  = 0x82
	m3OverCurrentProtectionRegister  = 0x83
	m3OverPowerProtectionRegister    = 0x84
	m3LedBrightnessRegister          = 0x85
	m3DataRecallRegister             = 0x86
	m3powerOutputSwitchStateRegister = 0x87
	// M4 preset group, base 0x90.
	m4VoltageSetRegister             = 0x90
	m4CurrentSetRegister             = 0x91
	m4OverVoltageProtectionRegister  = 0x92
	m4OverCurrentProtectionRegister  = 0x93
	m4OverPowerProtectionRegister    = 0x94
	m4LedBrightnessRegister          = 0x95
	m4DataRecallRegister             = 0x96
	m4powerOutputSwitchStateRegister = 0x97
	// M5 preset group, base 0xa0.
	m5VoltageSetRegister             = 0xa0
	m5CurrentSetRegister             = 0xa1
	m5OverVoltageProtectionRegister  = 0xa2
	m5OverCurrentProtectionRegister  = 0xa3
	m5OverPowerProtectionRegister    = 0xa4
	m5LedBrightnessRegister          = 0xa5
	m5DataRecallRegister             = 0xa6
	m5powerOutputSwitchStateRegister = 0xa7
	// M6 preset group, base 0xb0.
	m6VoltageSetRegister             = 0xb0
	m6CurrentSetRegister             = 0xb1
	m6OverVoltageProtectionRegister  = 0xb2
	m6OverCurrentProtectionRegister  = 0xb3
	m6OverPowerProtectionRegister    = 0xb4
	m6LedBrightnessRegister          = 0xb5
	m6DataRecallRegister             = 0xb6
	m6powerOutputSwitchStateRegister = 0xb7
	// M7 preset group, base 0xc0.
	m7VoltageSetRegister             = 0xc0
	m7CurrentSetRegister             = 0xc1
	m7OverVoltageProtectionRegister  = 0xc2
	m7OverCurrentProtectionRegister  = 0xc3
	m7OverPowerProtectionRegister    = 0xc4
	m7LedBrightnessRegister          = 0xc5
	m7DataRecallRegister             = 0xc6
	m7powerOutputSwitchStateRegister = 0xc7
	// M8 preset group, base 0xd0.
	m8VoltageSetRegister             = 0xd0
	m8CurrentSetRegister             = 0xd1
	m8OverVoltageProtectionRegister  = 0xd2
	m8OverCurrentProtectionRegister  = 0xd3
	m8OverPowerProtectionRegister    = 0xd4
	m8LedBrightnessRegister          = 0xd5
	m8DataRecallRegister             = 0xd6
	m8powerOutputSwitchStateRegister = 0xd7
	// M9 preset group, base 0xe0.
	m9VoltageSetRegister             = 0xe0
	m9CurrentSetRegister             = 0xe1
	m9OverVoltageProtectionRegister  = 0xe2
	m9OverCurrentProtectionRegister  = 0xe3
	m9OverPowerProtectionRegister    = 0xe4
	m9LedBrightnessRegister          = 0xe5
	m9DataRecallRegister             = 0xe6
	m9powerOutputSwitchStateRegister = 0xe7
)
package pt
import "math"
// SphericalHarmonic is a shape whose surface is the absolute value of a real
// spherical harmonic Y_l^m, rendered via a signed-distance-function mesh.
type SphericalHarmonic struct {
	PositiveMaterial Material               // material for the positive lobes of the harmonic
	NegativeMaterial Material               // material for the negative lobes
	harmonicFunction func(Vector) float64   // evaluates Y_l^m at a unit direction
	mesh             *Mesh                  // SDF-derived triangle mesh used for intersection
}
// NewSphericalHarmonic creates a spherical harmonic shape of degree l and
// order m, with separate materials for positive (pm) and negative (nm)
// lobes. The surface mesh is extracted from the SDF over the unit-cube
// bounding box at a step of 0.01. Panics for unsupported (l, m) pairs
// (see shFunc).
func NewSphericalHarmonic(l, m int, pm, nm Material) Shape {
	sh := &SphericalHarmonic{}
	sh.PositiveMaterial = pm
	sh.NegativeMaterial = nm
	sh.harmonicFunction = shFunc(l, m)
	sh.mesh = NewSDFMesh(sh, sh.BoundingBox(), 0.01)
	return sh
}
// Compile prepares the underlying mesh's acceleration structure.
func (s *SphericalHarmonic) Compile() {
	s.mesh.Compile()
}
// BoundingBox returns the axis-aligned unit cube [-1, 1]^3, which contains
// any spherical harmonic surface since |Y_l^m| on the unit sphere defines a
// radius of at most 1 here.
func (s *SphericalHarmonic) BoundingBox() Box {
	const r = 1
	return Box{Vector{-r, -r, -r}, Vector{r, r, r}}
}
// Intersect tests the ray against the extracted mesh and reports the shape
// itself (not the mesh triangle) as the hit object, so that material and
// normal queries go through the analytic functions below.
func (s *SphericalHarmonic) Intersect(r Ray) Hit {
	hit := s.mesh.Intersect(r)
	if !hit.Ok() {
		return NoHit
	}
	// TODO: refine T value
	return Hit{s, hit.T, nil}
}
// UV returns the zero vector; texture coordinates are not implemented for
// this shape.
func (s *SphericalHarmonic) UV(p Vector) Vector {
	return Vector{}
}
// MaterialAt selects a material by the sign of the harmonic at p: negative
// lobes get NegativeMaterial, zero or positive lobes get PositiveMaterial.
func (s *SphericalHarmonic) MaterialAt(p Vector) Material {
	if s.EvaluateHarmonic(p) < 0 {
		return s.NegativeMaterial
	}
	return s.PositiveMaterial
}
// NormalAt estimates the surface normal at p by central differences of the
// signed distance function along each axis, normalized to unit length.
func (s *SphericalHarmonic) NormalAt(p Vector) Vector {
	const e = 0.0001 // finite-difference step
	x, y, z := p.X, p.Y, p.Z
	n := Vector{
		s.Evaluate(Vector{x - e, y, z}) - s.Evaluate(Vector{x + e, y, z}),
		s.Evaluate(Vector{x, y - e, z}) - s.Evaluate(Vector{x, y + e, z}),
		s.Evaluate(Vector{x, y, z - e}) - s.Evaluate(Vector{x, y, z + e}),
	}
	return n.Normalize()
}
// EvaluateHarmonic evaluates the (signed) harmonic at the direction of p,
// ignoring p's magnitude.
func (s *SphericalHarmonic) EvaluateHarmonic(p Vector) float64 {
	return s.harmonicFunction(p.Normalize())
}
// Evaluate is the signed distance function of the shape: the distance of p
// from the origin minus the harmonic magnitude along p's direction. It is
// negative inside a lobe and positive outside.
func (s *SphericalHarmonic) Evaluate(p Vector) float64 {
	return p.Length() - math.Abs(s.harmonicFunction(p.Normalize()))
}
// The sh* functions below evaluate the real spherical harmonics Y_l^m for
// l = 0..4 at a unit direction d. Each leading numeric literal is the
// precomputed normalization constant shown in the accompanying formula
// comment.
func sh00(d Vector) float64 {
	// 0.5 * sqrt(1/pi)
	return 0.282095
}
func sh1n1(d Vector) float64 {
	// -sqrt(3/(4pi)) * y
	return -0.488603 * d.Y
}
func sh10(d Vector) float64 {
	// sqrt(3/(4pi)) * z
	return 0.488603 * d.Z
}
func sh1p1(d Vector) float64 {
	// -sqrt(3/(4pi)) * x
	return -0.488603 * d.X
}
func sh2n2(d Vector) float64 {
	// 0.5 * sqrt(15/pi) * x * y
	return 1.092548 * d.X * d.Y
}
func sh2n1(d Vector) float64 {
	// -0.5 * sqrt(15/pi) * y * z
	return -1.092548 * d.Y * d.Z
}
func sh20(d Vector) float64 {
	// 0.25 * sqrt(5/pi) * (-x^2-y^2+2z^2)
	return 0.315392 * (-d.X*d.X - d.Y*d.Y + 2.0*d.Z*d.Z)
}
func sh2p1(d Vector) float64 {
	// -0.5 * sqrt(15/pi) * x * z
	return -1.092548 * d.X * d.Z
}
func sh2p2(d Vector) float64 {
	// 0.25 * sqrt(15/pi) * (x^2 - y^2)
	return 0.546274 * (d.X*d.X - d.Y*d.Y)
}
func sh3n3(d Vector) float64 {
	// -0.25 * sqrt(35/(2pi)) * y * (3x^2 - y^2)
	return -0.590044 * d.Y * (3.0*d.X*d.X - d.Y*d.Y)
}
func sh3n2(d Vector) float64 {
	// 0.5 * sqrt(105/pi) * x * y * z
	return 2.890611 * d.X * d.Y * d.Z
}
func sh3n1(d Vector) float64 {
	// -0.25 * sqrt(21/(2pi)) * y * (4z^2-x^2-y^2)
	return -0.457046 * d.Y * (4.0*d.Z*d.Z - d.X*d.X - d.Y*d.Y)
}
func sh30(d Vector) float64 {
	// 0.25 * sqrt(7/pi) * z * (2z^2 - 3x^2 - 3y^2)
	return 0.373176 * d.Z * (2.0*d.Z*d.Z - 3.0*d.X*d.X - 3.0*d.Y*d.Y)
}
func sh3p1(d Vector) float64 {
	// -0.25 * sqrt(21/(2pi)) * x * (4z^2-x^2-y^2)
	return -0.457046 * d.X * (4.0*d.Z*d.Z - d.X*d.X - d.Y*d.Y)
}
func sh3p2(d Vector) float64 {
	// 0.25 * sqrt(105/pi) * z * (x^2 - y^2)
	return 1.445306 * d.Z * (d.X*d.X - d.Y*d.Y)
}
func sh3p3(d Vector) float64 {
	// -0.25 * sqrt(35/(2pi)) * x * (x^2-3y^2)
	return -0.590044 * d.X * (d.X*d.X - 3.0*d.Y*d.Y)
}
func sh4n4(d Vector) float64 {
	// 0.75 * sqrt(35/pi) * x * y * (x^2-y^2)
	return 2.503343 * d.X * d.Y * (d.X*d.X - d.Y*d.Y)
}
func sh4n3(d Vector) float64 {
	// -0.75 * sqrt(35/(2pi)) * y * z * (3x^2-y^2)
	return -1.770131 * d.Y * d.Z * (3.0*d.X*d.X - d.Y*d.Y)
}
func sh4n2(d Vector) float64 {
	// 0.75 * sqrt(5/pi) * x * y * (7z^2-1)
	return 0.946175 * d.X * d.Y * (7.0*d.Z*d.Z - 1.0)
}
func sh4n1(d Vector) float64 {
	// -0.75 * sqrt(5/(2pi)) * y * z * (7z^2-3)
	return -0.669047 * d.Y * d.Z * (7.0*d.Z*d.Z - 3.0)
}
func sh40(d Vector) float64 {
	// 3/16 * sqrt(1/pi) * (35z^4-30z^2+3)
	z2 := d.Z * d.Z
	return 0.105786 * (35.0*z2*z2 - 30.0*z2 + 3.0)
}
func sh4p1(d Vector) float64 {
	// -0.75 * sqrt(5/(2pi)) * x * z * (7z^2-3)
	return -0.669047 * d.X * d.Z * (7.0*d.Z*d.Z - 3.0)
}
func sh4p2(d Vector) float64 {
	// 3/8 * sqrt(5/pi) * (x^2 - y^2) * (7z^2 - 1)
	return 0.473087 * (d.X*d.X - d.Y*d.Y) * (7.0*d.Z*d.Z - 1.0)
}
func sh4p3(d Vector) float64 {
	// -0.75 * sqrt(35/(2pi)) * x * z * (x^2 - 3y^2)
	return -1.770131 * d.X * d.Z * (d.X*d.X - 3.0*d.Y*d.Y)
}
func sh4p4(d Vector) float64 {
	// 3/16*sqrt(35/pi) * (x^2 * (x^2 - 3y^2) - y^2 * (3x^2 - y^2))
	x2 := d.X * d.X
	y2 := d.Y * d.Y
	return 0.625836 * (x2*(x2-3.0*y2) - y2*(3.0*x2-y2))
}
func shFunc(l, m int) func(Vector) float64 {
var f func(Vector) float64
if l == 0 && m == 0 {
f = sh00
} else if l == 1 && m == -1 {
f = sh1n1
} else if l == 1 && m == 0 {
f = sh10
} else if l == 1 && m == 1 {
f = sh1p1
} else if l == 2 && m == -2 {
f = sh2n2
} else if l == 2 && m == -1 {
f = sh2n1
} else if l == 2 && m == 0 {
f = sh20
} else if l == 2 && m == 1 {
f = sh2p1
} else if l == 2 && m == 2 {
f = sh2p2
} else if l == 3 && m == -3 {
f = sh3n3
} else if l == 3 && m == -2 {
f = sh3n2
} else if l == 3 && m == -1 {
f = sh3n1
} else if l == 3 && m == 0 {
f = sh30
} else if l == 3 && m == 1 {
f = sh3p1
} else if l == 3 && m == 2 {
f = sh3p2
} else if l == 3 && m == 3 {
f = sh3p3
} else if l == 4 && m == -4 {
f = sh4n4
} else if l == 4 && m == -3 {
f = sh4n3
} else if l == 4 && m == -2 {
f = sh4n2
} else if l == 4 && m == -1 {
f = sh4n1
} else if l == 4 && m == 0 {
f = sh40
} else if l == 4 && m == 1 {
f = sh4p1
} else if l == 4 && m == 2 {
f = sh4p2
} else if l == 4 && m == 3 {
f = sh4p3
} else if l == 4 && m == 4 {
f = sh4p4
} else {
panic("unsupported spherical harmonic")
}
return f
} | pt/sh.go | 0.653127 | 0.614365 | sh.go | starcoder |
package ent
import (
"context"
"errors"
"fmt"
"time"
"github.com/empiricaly/recruitment/internal/ent/participant"
"github.com/empiricaly/recruitment/internal/ent/participation"
"github.com/empiricaly/recruitment/internal/ent/run"
"github.com/empiricaly/recruitment/internal/ent/step"
"github.com/empiricaly/recruitment/internal/ent/steprun"
"github.com/facebook/ent/dialect/sql/sqlgraph"
"github.com/facebook/ent/schema/field"
)
// StepRunCreate is the builder for creating a StepRun entity.
// NOTE: this appears to be ent-generated builder code; avoid hand-editing.
type StepRunCreate struct {
	config                       // embedded driver/config shared by all builders
	mutation *StepRunMutation    // accumulates field and edge changes until Save
	hooks    []Hook              // mutation middleware applied (in reverse) during Save
}
// Field setters. Each Set* records a value on the builder's mutation; the
// SetNillable* variants only apply the value when the pointer is non-nil,
// so optional fields stay unset (and pick up defaults) when nil is passed.
// SetCreatedAt sets the created_at field.
func (src *StepRunCreate) SetCreatedAt(t time.Time) *StepRunCreate {
	src.mutation.SetCreatedAt(t)
	return src
}
// SetNillableCreatedAt sets the created_at field if the given value is not nil.
func (src *StepRunCreate) SetNillableCreatedAt(t *time.Time) *StepRunCreate {
	if t != nil {
		src.SetCreatedAt(*t)
	}
	return src
}
// SetUpdatedAt sets the updated_at field.
func (src *StepRunCreate) SetUpdatedAt(t time.Time) *StepRunCreate {
	src.mutation.SetUpdatedAt(t)
	return src
}
// SetNillableUpdatedAt sets the updated_at field if the given value is not nil.
func (src *StepRunCreate) SetNillableUpdatedAt(t *time.Time) *StepRunCreate {
	if t != nil {
		src.SetUpdatedAt(*t)
	}
	return src
}
// SetStatus sets the status field.
func (src *StepRunCreate) SetStatus(s steprun.Status) *StepRunCreate {
	src.mutation.SetStatus(s)
	return src
}
// SetStartedAt sets the startedAt field.
func (src *StepRunCreate) SetStartedAt(t time.Time) *StepRunCreate {
	src.mutation.SetStartedAt(t)
	return src
}
// SetNillableStartedAt sets the startedAt field if the given value is not nil.
func (src *StepRunCreate) SetNillableStartedAt(t *time.Time) *StepRunCreate {
	if t != nil {
		src.SetStartedAt(*t)
	}
	return src
}
// SetEndedAt sets the endedAt field.
func (src *StepRunCreate) SetEndedAt(t time.Time) *StepRunCreate {
	src.mutation.SetEndedAt(t)
	return src
}
// SetNillableEndedAt sets the endedAt field if the given value is not nil.
func (src *StepRunCreate) SetNillableEndedAt(t *time.Time) *StepRunCreate {
	if t != nil {
		src.SetEndedAt(*t)
	}
	return src
}
// SetIndex sets the index field.
func (src *StepRunCreate) SetIndex(i int) *StepRunCreate {
	src.mutation.SetIndex(i)
	return src
}
// SetParticipantsCount sets the participantsCount field.
func (src *StepRunCreate) SetParticipantsCount(i int) *StepRunCreate {
	src.mutation.SetParticipantsCount(i)
	return src
}
// SetHitID sets the hitID field.
func (src *StepRunCreate) SetHitID(s string) *StepRunCreate {
	src.mutation.SetHitID(s)
	return src
}
// SetNillableHitID sets the hitID field if the given value is not nil.
func (src *StepRunCreate) SetNillableHitID(s *string) *StepRunCreate {
	if s != nil {
		src.SetHitID(*s)
	}
	return src
}
// SetUrlToken sets the urlToken field.
func (src *StepRunCreate) SetUrlToken(s string) *StepRunCreate {
	src.mutation.SetUrlToken(s)
	return src
}
// SetID sets the id field.
func (src *StepRunCreate) SetID(s string) *StepRunCreate {
	src.mutation.SetID(s)
	return src
}
// AddCreatedParticipantIDs adds the createdParticipants edge to Participant by ids.
func (src *StepRunCreate) AddCreatedParticipantIDs(ids ...string) *StepRunCreate {
src.mutation.AddCreatedParticipantIDs(ids...)
return src
}
// AddCreatedParticipants adds the createdParticipants edges to Participant.
func (src *StepRunCreate) AddCreatedParticipants(p ...*Participant) *StepRunCreate {
ids := make([]string, len(p))
for i := range p {
ids[i] = p[i].ID
}
return src.AddCreatedParticipantIDs(ids...)
}
// AddParticipantIDs adds the participants edge to Participant by ids.
func (src *StepRunCreate) AddParticipantIDs(ids ...string) *StepRunCreate {
src.mutation.AddParticipantIDs(ids...)
return src
}
// AddParticipants adds the participants edges to Participant.
func (src *StepRunCreate) AddParticipants(p ...*Participant) *StepRunCreate {
ids := make([]string, len(p))
for i := range p {
ids[i] = p[i].ID
}
return src.AddParticipantIDs(ids...)
}
// AddParticipationIDs adds the participations edge to Participation by ids.
func (src *StepRunCreate) AddParticipationIDs(ids ...string) *StepRunCreate {
src.mutation.AddParticipationIDs(ids...)
return src
}
// AddParticipations adds the participations edges to Participation.
func (src *StepRunCreate) AddParticipations(p ...*Participation) *StepRunCreate {
ids := make([]string, len(p))
for i := range p {
ids[i] = p[i].ID
}
return src.AddParticipationIDs(ids...)
}
// SetStepID sets the step edge to Step by id.
func (src *StepRunCreate) SetStepID(id string) *StepRunCreate {
src.mutation.SetStepID(id)
return src
}
// SetStep sets the step edge to Step.
func (src *StepRunCreate) SetStep(s *Step) *StepRunCreate {
return src.SetStepID(s.ID)
}
// SetRunID sets the run edge to Run by id.
func (src *StepRunCreate) SetRunID(id string) *StepRunCreate {
src.mutation.SetRunID(id)
return src
}
// SetNillableRunID sets the run edge to Run by id if the given value is not nil.
// A nil pointer leaves the mutation untouched, which lets callers pass through
// optional values without a conditional.
func (src *StepRunCreate) SetNillableRunID(id *string) *StepRunCreate {
	if id != nil {
		src = src.SetRunID(*id)
	}
	return src
}
// SetRun sets the run edge to Run, extracting the ID from the entity.
func (src *StepRunCreate) SetRun(r *Run) *StepRunCreate {
	return src.SetRunID(r.ID)
}
// Mutation returns the StepRunMutation object of the builder, exposing the
// accumulated field/edge changes to hooks and callers.
func (src *StepRunCreate) Mutation() *StepRunMutation {
	return src.mutation
}
// Save creates the StepRun in the database.
//
// Defaults are applied first; then, when no hooks are registered, the builder
// validates and inserts directly. Otherwise the insert is wrapped in a Mutator
// and the hooks are composed around it (outermost hook first), so each hook
// can observe or alter the mutation before the SQL write happens.
func (src *StepRunCreate) Save(ctx context.Context) (*StepRun, error) {
	var (
		err  error
		node *StepRun
	)
	src.defaults()
	if len(src.hooks) == 0 {
		// Fast path: no hooks, validate and write directly.
		if err = src.check(); err != nil {
			return nil, err
		}
		node, err = src.sqlSave(ctx)
	} else {
		// Innermost mutator: performs validation and the actual insert.
		var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
			mutation, ok := m.(*StepRunMutation)
			if !ok {
				return nil, fmt.Errorf("unexpected mutation type %T", m)
			}
			if err = src.check(); err != nil {
				return nil, err
			}
			// Adopt the (possibly hook-modified) mutation before saving.
			src.mutation = mutation
			node, err = src.sqlSave(ctx)
			mutation.done = true
			return node, err
		})
		// Wrap hooks in reverse order so hooks[0] runs first.
		for i := len(src.hooks) - 1; i >= 0; i-- {
			mut = src.hooks[i](mut)
		}
		if _, err := mut.Mutate(ctx, src.mutation); err != nil {
			return nil, err
		}
	}
	// node/err were captured by the closure above (or set on the fast path).
	return node, err
}
// SaveX calls Save and panics if Save returns an error.
func (src *StepRunCreate) SaveX(ctx context.Context) *StepRun {
	v, err := src.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
// defaults sets the default values of the builder before save.
// Only fields the caller has not explicitly set are filled in.
func (src *StepRunCreate) defaults() {
	if _, ok := src.mutation.CreatedAt(); !ok {
		v := steprun.DefaultCreatedAt()
		src.mutation.SetCreatedAt(v)
	}
	if _, ok := src.mutation.UpdatedAt(); !ok {
		v := steprun.DefaultUpdatedAt()
		src.mutation.SetUpdatedAt(v)
	}
}
// check runs all checks and user-defined validators on the builder.
// It enforces presence of required fields, runs the field validators for
// status and id, and requires the mandatory "step" edge.
func (src *StepRunCreate) check() error {
	if _, ok := src.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New("ent: missing required field \"created_at\"")}
	}
	if _, ok := src.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New("ent: missing required field \"updated_at\"")}
	}
	if _, ok := src.mutation.Status(); !ok {
		return &ValidationError{Name: "status", err: errors.New("ent: missing required field \"status\"")}
	}
	if v, ok := src.mutation.Status(); ok {
		if err := steprun.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf("ent: validator failed for field \"status\": %w", err)}
		}
	}
	if _, ok := src.mutation.Index(); !ok {
		return &ValidationError{Name: "index", err: errors.New("ent: missing required field \"index\"")}
	}
	if _, ok := src.mutation.ParticipantsCount(); !ok {
		return &ValidationError{Name: "participantsCount", err: errors.New("ent: missing required field \"participantsCount\"")}
	}
	if _, ok := src.mutation.UrlToken(); !ok {
		return &ValidationError{Name: "urlToken", err: errors.New("ent: missing required field \"urlToken\"")}
	}
	// The ID is optional at this point (it may be generated), but when set it
	// must pass the schema's validator.
	if v, ok := src.mutation.ID(); ok {
		if err := steprun.IDValidator(v); err != nil {
			return &ValidationError{Name: "id", err: fmt.Errorf("ent: validator failed for field \"id\": %w", err)}
		}
	}
	if _, ok := src.mutation.StepID(); !ok {
		return &ValidationError{Name: "step", err: errors.New("ent: missing required edge \"step\"")}
	}
	return nil
}
// sqlSave builds the create spec and executes the INSERT, converting SQL
// constraint violations into the package's constraint error type.
func (src *StepRunCreate) sqlSave(ctx context.Context) (*StepRun, error) {
	_node, _spec := src.createSpec()
	if err := sqlgraph.CreateNode(ctx, src.driver, _spec); err != nil {
		if cerr, ok := isSQLConstraintError(err); ok {
			err = cerr
		}
		return nil, err
	}
	return _node, nil
}
// createSpec builds the in-memory StepRun node together with the sqlgraph
// spec used to insert it. Every field that was set on the mutation becomes a
// FieldSpec (and is mirrored onto the returned node), and every edge becomes
// an EdgeSpec carrying the target IDs.
func (src *StepRunCreate) createSpec() (*StepRun, *sqlgraph.CreateSpec) {
	var (
		_node = &StepRun{config: src.config}
		_spec = &sqlgraph.CreateSpec{
			Table: steprun.Table,
			ID: &sqlgraph.FieldSpec{
				Type:   field.TypeString,
				Column: steprun.FieldID,
			},
		}
	)
	if id, ok := src.mutation.ID(); ok {
		_node.ID = id
		_spec.ID.Value = id
	}
	// --- scalar fields ---
	if value, ok := src.mutation.CreatedAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: steprun.FieldCreatedAt,
		})
		_node.CreatedAt = value
	}
	if value, ok := src.mutation.UpdatedAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: steprun.FieldUpdatedAt,
		})
		_node.UpdatedAt = value
	}
	if value, ok := src.mutation.Status(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeEnum,
			Value:  value,
			Column: steprun.FieldStatus,
		})
		_node.Status = value
	}
	// StartedAt/EndedAt/HitID are nillable on the node, hence the &value.
	if value, ok := src.mutation.StartedAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: steprun.FieldStartedAt,
		})
		_node.StartedAt = &value
	}
	if value, ok := src.mutation.EndedAt(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeTime,
			Value:  value,
			Column: steprun.FieldEndedAt,
		})
		_node.EndedAt = &value
	}
	if value, ok := src.mutation.Index(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeInt,
			Value:  value,
			Column: steprun.FieldIndex,
		})
		_node.Index = value
	}
	if value, ok := src.mutation.ParticipantsCount(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeInt,
			Value:  value,
			Column: steprun.FieldParticipantsCount,
		})
		_node.ParticipantsCount = value
	}
	if value, ok := src.mutation.HitID(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeString,
			Value:  value,
			Column: steprun.FieldHitID,
		})
		_node.HitID = &value
	}
	if value, ok := src.mutation.UrlToken(); ok {
		_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
			Type:   field.TypeString,
			Value:  value,
			Column: steprun.FieldUrlToken,
		})
		_node.UrlToken = value
	}
	// --- edges ---
	if nodes := src.mutation.CreatedParticipantsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   steprun.CreatedParticipantsTable,
			Columns: []string{steprun.CreatedParticipantsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: participant.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	if nodes := src.mutation.ParticipantsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2M,
			Inverse: false,
			Table:   steprun.ParticipantsTable,
			Columns: steprun.ParticipantsPrimaryKey,
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: participant.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	if nodes := src.mutation.ParticipationsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   steprun.ParticipationsTable,
			Columns: []string{steprun.ParticipationsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: participation.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	if nodes := src.mutation.StepIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2O,
			Inverse: false,
			Table:   steprun.StepTable,
			Columns: []string{steprun.StepColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: step.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	if nodes := src.mutation.RunIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   steprun.RunTable,
			Columns: []string{steprun.RunColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: &sqlgraph.FieldSpec{
					Type:   field.TypeString,
					Column: run.FieldID,
				},
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
// StepRunCreateBulk is the builder for creating a bulk of StepRun entities
// in a single batch statement.
type StepRunCreateBulk struct {
	config
	builders []*StepRunCreate
}
// Save creates the StepRun entities in the database.
//
// Each builder's hooks are composed around a mutator; the mutators are
// chained so that invoking mutators[0] runs every builder's hooks in order,
// and the last mutator in the chain performs one batched INSERT for all rows.
func (srcb *StepRunCreateBulk) Save(ctx context.Context) ([]*StepRun, error) {
	specs := make([]*sqlgraph.CreateSpec, len(srcb.builders))
	nodes := make([]*StepRun, len(srcb.builders))
	mutators := make([]Mutator, len(srcb.builders))
	for i := range srcb.builders {
		// The closure captures i and the root context per iteration.
		func(i int, root context.Context) {
			builder := srcb.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*StepRunMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				nodes[i], specs[i] = builder.createSpec()
				var err error
				if i < len(mutators)-1 {
					// Hand off to the next builder's mutator chain.
					_, err = mutators[i+1].Mutate(root, srcb.builders[i+1].mutation)
				} else {
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, srcb.driver, &sqlgraph.BatchCreateSpec{Nodes: specs}); err != nil {
						if cerr, ok := isSQLConstraintError(err); ok {
							err = cerr
						}
					}
				}
				mutation.done = true
				if err != nil {
					return nil, err
				}
				return nodes[i], nil
			})
			// Wrap the builder's hooks in reverse order so hooks[0] runs first.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain with the first builder's mutation.
		if _, err := mutators[0].Mutate(ctx, srcb.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
// SaveX calls Save and panics if Save returns an error.
func (srcb *StepRunCreateBulk) SaveX(ctx context.Context) []*StepRun {
v, err := srcb.Save(ctx)
if err != nil {
panic(err)
}
return v
} | internal/ent/steprun_create.go | 0.562417 | 0.448849 | steprun_create.go | starcoder |
package gengo
import (
"io"
"github.com/ipld/go-ipld-prime/schema"
)
// NewGeneratorForKindStruct returns a typedNodeGenerator for a schema type
// with struct kind. It panics (via the type assertion) if t is not a
// schema.TypeStruct. Map-kind rejections are embedded because structs are
// presented to IPLD as maps.
func NewGeneratorForKindStruct(t schema.Type) typedNodeGenerator {
	return generateKindStruct{
		t.(schema.TypeStruct),
		generateKindedRejections_Map{
			mungeTypeNodeIdent(t),
			string(t.Name()),
		},
	}
}
// generateKindStruct emits Go code for a struct-kinded schema type.
// The embedded generateKindedRejections_Map supplies the error stubs for
// non-map node accessors.
type generateKindStruct struct {
	Type schema.TypeStruct
	generateKindedRejections_Map
	// FUTURE: probably some adjunct config data should come with here as well.
	// FUTURE: perhaps both a global one (e.g. output package name) and a per-type one.
}
// EmitNativeType writes the native Go type declaration for the struct.
func (gk generateKindStruct) EmitNativeType(w io.Writer) {
	// The data is actually the content type, just embedded in an unexported field,
	// which means we get immutability, plus initializing the object is essentially a memmove.
	doTemplate(`
type {{ .Type | mungeTypeNodeIdent }} struct{
d {{ .Type | mungeTypeNodeIdent }}__Content
}
`, w, gk)
}
// EmitNativeAccessors writes one FieldX getter per struct field; optional or
// nullable fields are returned wrapped in their Maybe type.
func (gk generateKindStruct) EmitNativeAccessors(w io.Writer) {
	doTemplate(`
{{- $type := .Type -}} {{- /* ranging modifies dot, unhelpfully */ -}}
{{- range $field := .Type.Fields -}}
func (x {{ $type | mungeTypeNodeIdent }}) Field{{ $field.Name | titlize }}()
{{- if or $field.IsOptional $field.IsNullable }}Maybe{{end}}{{ $field.Type | mungeTypeNodeIdent }} {
return x.d.{{ $field.Name | titlize }}
}
{{end}}
`, w, gk)
}
// EmitNativeBuilder writes the __Content builder type plus its Build and
// MustBuild methods. Build rejects absent values on optional-only fields and
// null values on nullable-only fields; fields carrying both modifiers accept
// anything.
func (gk generateKindStruct) EmitNativeBuilder(w io.Writer) {
	doTemplate(`
type {{ .Type | mungeTypeNodeIdent }}__Content struct {
{{- range $field := .Type.Fields}}
{{ $field.Name | titlize }} {{if or $field.IsOptional $field.IsNullable }}Maybe{{end}}{{ $field.Type | mungeTypeNodeIdent }}
{{- end}}
}
func (b {{ .Type | mungeTypeNodeIdent }}__Content) Build() ({{ .Type | mungeTypeNodeIdent }}, error) {
{{- range $field := .Type.Fields -}}
{{- if or $field.IsOptional $field.IsNullable }}
{{- /* if both modifiers present, anything goes */ -}}
{{- else if $field.IsOptional }}
if b.{{ $field.Name | titlize }}.Maybe == schema.Maybe_Null {
return {{ $field.Type | mungeTypeNodeIdent }}{}, fmt.Errorf("cannot be absent")
}
{{- else if $field.IsNullable }}
if b.{{ $field.Name | titlize }}.Maybe == schema.Maybe_Absent {
return {{ $field.Type | mungeTypeNodeIdent }}{}, fmt.Errorf("cannot be null")
}
{{- end}}
{{- end}}
x := {{ .Type | mungeTypeNodeIdent }}{b}
// FUTURE : want to support customizable validation.
// but 'if v, ok := x.(schema.Validatable); ok {' doesn't fly: need a way to work on concrete types.
return x, nil
}
func (b {{ .Type | mungeTypeNodeIdent }}__Content) MustBuild() {{ .Type | mungeTypeNodeIdent }} {
if x, err := b.Build(); err != nil {
panic(err)
} else {
return x
}
}
`, w, gk)
}
func (gk generateKindStruct) EmitNativeMaybe(w io.Writer) {
doTemplate(`
type Maybe{{ .Type | mungeTypeNodeIdent }} struct {
Maybe schema.Maybe
Value {{ .Type | mungeTypeNodeIdent }}
}
func (m Maybe{{ .Type | mungeTypeNodeIdent }}) Must() {{ .Type | mungeTypeNodeIdent }} {
if m.Maybe != schema.Maybe_Value {
panic("unbox of a maybe rejected")
}
return m.Value
}
`, w, gk)
} | schema/gen/go/genKindStruct.go | 0.531453 | 0.423995 | genKindStruct.go | starcoder |
package hck
import (
"io"
"golang.org/x/net/html"
)
// index retrieves the index of the first matching node.
// It returns -1 if no match is found.
func index(ns []*Node, m Matcher) int {
	for pos := range ns {
		if m.Match(ns[pos]) {
			return pos
		}
	}
	return -1
}
// splice copies ns, modifies it and retrieves the copy.
// Starting at i, del nodes are replaced with n; the result is
// ns[:i] + n + ns[i+del:]. A negative del yields nil.
//
// Fix: the previous index arithmetic (`di += del - copy(...)`) advanced the
// destination cursor by del-len(n) instead of len(n), and never skipped the
// deleted source elements. For a pure insertion (del < len(n)) near the
// front it computed a negative slice index and panicked; otherwise the
// inserted nodes were overwritten by ns[i:].
func splice(ns []*Node, i, del int, n ...*Node) []*Node {
	if del < 0 {
		return nil
	}
	dest := make([]*Node, len(ns)+len(n)-del)
	di := copy(dest, ns[:i])       // keep the prefix
	di += copy(dest[di:], n)       // insert the replacement nodes
	copy(dest[di:], ns[i+del:])    // keep the suffix, skipping del elements
	return dest
}
// Siblings is an ordered list of sibling nodes.
type Siblings []*Node
// Index retrieves the index of the first matching node.
// It returns -1 if no match is found.
func (s Siblings) Index(m Matcher) int {
	return index(s, m)
}
// Splice copies the siblings and modifies it by deleting del nodes starting
// at i and inserting ns there.
func (s Siblings) Splice(i, del int, ns ...*Node) Siblings {
	return splice(s, i, del, ns...)
}
// convert nodes to /x/net/html.Node siblings under parent.
// Document node children are integrated as siblings. Nils are skipped.
// It returns the first and last converted siblings (nil, nil when nothing
// was produced).
//
// Fixes: (1) flattened document children previously never had PrevSibling
// linked back to the preceding sibling, leaving the chain one-directional;
// (2) an empty DocumentNode reset prev to nil, which severed the sibling
// chain for every node that followed it.
func (s Siblings) convert(parent *html.Node) (first, last *html.Node) {
	var prev *html.Node
	for _, n := range s {
		if n == nil {
			continue
		}
		if n.Type == html.DocumentNode {
			// Splice the document's children into the current chain.
			start, end := n.Children.convert(parent)
			if start == nil {
				// Empty document: nothing to link; keep the current tail.
				continue
			}
			if prev != nil {
				prev.NextSibling = start
				start.PrevSibling = prev
			} else {
				first = start
			}
			prev = end
			continue
		}
		h := n.convert()
		h.Parent = parent
		h.PrevSibling = prev
		if prev != nil {
			prev.NextSibling = h
		} else {
			first = h
		}
		prev = h
	}
	return first, prev
}
// Render nodes to a writer by wrapping them in a synthetic document node.
// nil nodes are skipped.
func (s Siblings) Render(w io.Writer) error {
	doc := &html.Node{
		Type: html.DocumentNode,
	}
	first, last := s.convert(doc)
	doc.FirstChild = first
	doc.LastChild = last
	return html.Render(w, doc)
}
// SplitBefore retrieves siblings up to and starting with the first node from
// which a match is reachable. If no match is found, back is empty.
func (s Siblings) SplitBefore(m Matcher) (front, back Siblings) {
	f := Document(s...).Find(m)
	if f.Next() == nil {
		return s, nil
	}
	// p[0] is the synthetic document; p[1] is the top-level sibling that
	// contains the match (presumably — Find/Path are defined elsewhere).
	p := f.Path()
	i := s.Index(p[1])
	// Full slice expression caps front so appends to it cannot clobber back.
	return s[:i:i], s[i:]
}
// Node is an alternative to golang.org/x/net/html.Node intended for dom mutation.
// It stores a minimal amount of references that have to be updated on
// transformations: children only, no parent or sibling pointers.
type Node struct {
	Children   Siblings
	Namespace  string
	Data       string
	Attributes
	Type html.NodeType
}
// Parse a tree from r.
func Parse(r io.Reader) (*Node, error) {
	h, err := html.Parse(r)
	if err != nil {
		return nil, err
	}
	return Convert(h), nil
}
// Convert a /x/net/html.Node (and, recursively, its children) to a Node.
func Convert(h *html.Node) *Node {
	n := &Node{
		Namespace:  h.Namespace,
		Data:       h.Data,
		Attributes: Attributes(h.Attr),
		Type:       h.Type,
	}
	for child := h.FirstChild; child != nil; child = child.NextSibling {
		n.Children = append(n.Children, Convert(child))
	}
	return n
}
// Clone retrieves a shallow copy of the node: the Children and Attributes
// slices are duplicated, but the referenced child nodes are shared.
func (n *Node) Clone() *Node {
	if n == nil {
		return n
	}
	c := *n
	c.Children = append(Siblings{}, n.Children...)
	c.Attributes = append(Attributes{}, n.Attributes...)
	return &c
}
// Swap state with another node and retrieve that node.
// All fields (children, namespace, data, attributes, type) are exchanged.
func (n *Node) Swap(n2 *Node) *Node {
	*n, *n2 = *n2, *n
	return n2
}
// convert a Node to a /x/net/html.Node, recursively converting children.
// Note: it mutates the receiver's Attributes in place via atomize
// (normalization — defined elsewhere; presumably interning via html atoms).
func (n *Node) convert() *html.Node {
	if n == nil {
		return nil
	}
	h := &html.Node{
		Namespace: n.Namespace,
		Data:      n.Data,
		Type:      n.Type,
	}
	// normalize strings
	h.DataAtom, h.Data = atomize(n.Data)
	n.Attributes.atomize()
	h.Attr = []html.Attribute(n.Attributes)
	// add children
	h.FirstChild, h.LastChild = n.Children.convert(h)
	return h
}
// Convert a Node to a /x/net/html.Node.
// If a child node is an ancestor of its own parent, an error will be
// returned instead of letting the recursive conversion loop forever.
func (n *Node) Convert() (*html.Node, error) {
	if n.HasCycle() {
		return nil, loopError{}
	}
	return n.convert(), nil
}
// Attribute retrieves a pointer to the Attribute with the given key and
// namespace. If none exists (or the receiver is nil), nil is returned.
func (n *Node) Attribute(key, namespace string) *html.Attribute {
	if n == nil {
		return nil
	}
	return n.Attributes.find(key, namespace)
}
// Attr retrieves the value of an attribute (in the empty namespace).
func (n *Node) Attr(key string) string {
	return n.Attributes.Get(key, "")
}
// AttrNS retrieves the value of an attribute with a namespace.
func (n *Node) AttrNS(key, namespace string) string {
	return n.Attributes.Get(key, namespace)
}
// SetAttr sets an attribute (in the empty namespace) and returns its
// previous value.
func (n *Node) SetAttr(key, value string) (was string) {
	return n.Attributes.Set(key, "", value)
}
// SetAttrNS sets an attribute with a namespace and returns its previous value.
func (n *Node) SetAttrNS(key, namespace, value string) (was string) {
	return n.Attributes.Set(key, namespace, value)
}
// Match reports pointer identity, letting a *Node be used as a Matcher.
func (n *Node) Match(m *Node) bool {
	return n == m
}
// Render writes the node's HTML to w. Non-document nodes are wrapped in a
// temporary document node because html.Render expects a well-formed tree.
// Conversion errors (cyclic graphs) are returned unchanged.
func (n *Node) Render(w io.Writer) error {
	doc, err := n.Convert()
	if err != nil {
		return err
	}
	if n.Type != html.DocumentNode {
		tmp := &html.Node{
			Type:       html.DocumentNode,
			FirstChild: doc,
			LastChild:  doc,
		}
		doc.Parent = tmp
		doc = tmp
	}
	return html.Render(w, doc)
}
// HasCycle reports whether any reachable node is the ancestor of its own
// parents, i.e. whether the child graph contains a loop.
func (n *Node) HasCycle() bool {
	return n.hasCycle(nil, make(map[*Node][]*Node))
}
// hasCycle reports whether a cycle is reachable from n. It runs a DFS and
// uses the parents map as the set of nodes on the current root-to-n path
// (the recorded parent values are kept for compatibility but only key
// presence matters). A node revisited while still on the path is a cycle;
// nodes are removed on backtrack so shared subtrees (a DAG) are not
// mis-reported.
//
// Fix: the previous implementation snapshotted parents[n] before any
// traversal (always empty on the single external call) and truncated its
// BFS worklist inside the loop condition, so it terminated after one
// iteration and unconditionally returned false — cyclic trees then hung
// forever inside convert().
func (n *Node) hasCycle(parent *Node, parents map[*Node][]*Node) bool {
	if n == nil {
		return false
	}
	if _, onPath := parents[n]; onPath {
		// n is an ancestor of itself: cycle found.
		return true
	}
	parents[n] = append(parents[n], parent)
	for _, c := range n.Children {
		if c.hasCycle(n, parents) {
			return true
		}
	}
	// Backtrack: n is no longer on the current path.
	delete(parents, n)
	return false
}
type loopError struct{}
func (e loopError) Error() string {
return "graph contains loops"
} | nodes.go | 0.770119 | 0.404566 | nodes.go | starcoder |
package delta
// LineSource indicates the origin of the solution line.
type LineSource string

// These are valid values for LineSource.
const (
	Unknown          LineSource = ""
	LineFromA        LineSource = "<" // line originates from input A
	LineFromB        LineSource = ">" // line originates from input B
	LineFromBoth     LineSource = "=" // line appears in both inputs
	LineFromBothEdit LineSource = "~" // line appears in both, with an edit
)
// DiffSolution contains a set of lines, where each element of
// Lines comprises the left line, the right line, and the LineSource
// (as a string) recording whether the change was from A or B.
type DiffSolution struct {
	Lines [][3]string
}
// addLineA appends a line present only in A (left side).
func (d *DiffSolution) addLineA(a string) {
	d.addLine(a, "", LineFromA)
}
// addLineB appends a line present only in B (right side).
func (d *DiffSolution) addLineB(b string) {
	d.addLine("", b, LineFromB)
}
// addLine appends a left/right line pair tagged with its origin.
func (d *DiffSolution) addLine(a, b string, l LineSource) {
	d.Lines = append(d.Lines, [3]string{a, b, string(l)})
}
// addSolution appends all lines of another solution to this one.
func (d *DiffSolution) addSolution(e *DiffSolution) {
	d.Lines = append(d.Lines, e.Lines...)
}
// PostProcess loops over the solution. For each changed region, see if we can
// move it forward. i.e. if we have the following changeset:
// a [b c d] b c
// then we move the modified region forward so we have instead:
// a b c [d b c]
// this heuristic only moves additions or deletions (but never both in a move).
func (d *DiffSolution) PostProcess() {
lastChangeStartIndex := -1
lastChangeType := Unknown
lastLineType := LineFromBoth
for i, word := range d.Lines {
currentLineType := LineSource(word[2])
// we've reached the end of a region. Now we try find a section to move forward.
if currentLineType == LineFromBoth && currentLineType != lastLineType {
if lastChangeType != LineFromB && lastChangeType != LineFromA {
// don't try to move if it wasn't an addition or deletion
goto ContinueProcessing
}
// walk the change region to find a match
p1 := lastChangeStartIndex
p2 := i
for ((lastChangeType == LineFromA && d.Lines[p1][0] == d.Lines[p2][0]) ||
(lastChangeType == LineFromB && d.Lines[p1][1] == d.Lines[p2][1])) &&
LineSource(d.Lines[p2][2]) == LineFromBoth {
d.Lines[p1], d.Lines[p2] = d.Lines[p2], d.Lines[p1]
p1++
p2++
if p2 >= len(d.Lines) {
break
}
}
}
// we've reached the beginning of a region. Update pointers.
if lastLineType != currentLineType {
lastChangeStartIndex = i
lastChangeType = currentLineType
}
ContinueProcessing:
lastLineType = currentLineType
}
} | lib/solution.go | 0.570451 | 0.480662 | solution.go | starcoder |
package processor
import (
"context"
"fmt"
"time"
"github.com/benthosdev/benthos/v4/internal/component/metrics"
"github.com/benthosdev/benthos/v4/internal/component/processor"
"github.com/benthosdev/benthos/v4/internal/docs"
"github.com/benthosdev/benthos/v4/internal/interop"
"github.com/benthosdev/benthos/v4/internal/log"
"github.com/benthosdev/benthos/v4/internal/message"
)
// init registers the "resource" processor constructor together with its
// user-facing documentation in the global Constructors table.
func init() {
	Constructors[TypeResource] = TypeSpec{
		constructor: NewResource,
		Categories: []string{
			"Utility",
		},
		Summary: `
Resource is a processor type that runs a processor resource identified by its label.`,
		Description: `
This processor allows you to reference the same configured processor resource in multiple places, and can also tidy up large nested configs. For example, the config:
` + "```yaml" + `
pipeline:
processors:
- bloblang: |
root.message = this
root.meta.link_count = this.links.length()
root.user.age = this.user.age.number()
` + "```" + `
Is equivalent to:
` + "```yaml" + `
pipeline:
processors:
- resource: foo_proc
processor_resources:
- label: foo_proc
bloblang: |
root.message = this
root.meta.link_count = this.links.length()
root.user.age = this.user.age.number()
` + "```" + `
You can find out more about resources [in this document.](/docs/configuration/resources)`,
		Config: docs.FieldString("", "").HasDefault(""),
	}
}
//------------------------------------------------------------------------------
// Resource is a processor that returns the result of a processor resource,
// looked up by name in the manager on every invocation.
type Resource struct {
	mgr  interop.Manager
	name string
	log  log.Modular
}
// NewResource returns a resource processor. It fails fast if no processor
// resource with the configured label exists in the manager.
func NewResource(
	conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type,
) (processor.V1, error) {
	if !mgr.ProbeProcessor(conf.Resource) {
		return nil, fmt.Errorf("processor resource '%v' was not found", conf.Resource)
	}
	return &Resource{
		mgr:  mgr,
		name: conf.Resource,
		log:  log,
	}, nil
}
//------------------------------------------------------------------------------
// ProcessMessage applies the processor to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
// The named resource is resolved through the manager at call time, so the
// underlying processor may be swapped between invocations.
func (r *Resource) ProcessMessage(msg *message.Batch) (msgs []*message.Batch, res error) {
	if err := r.mgr.AccessProcessor(context.Background(), r.name, func(p processor.V1) {
		// msgs/res are the named results, captured by the closure.
		msgs, res = p.ProcessMessage(msg)
	}); err != nil {
		r.log.Errorf("Failed to obtain processor resource '%v': %v", r.name, err)
		return nil, err
	}
	return msgs, res
}
// CloseAsync shuts down the processor and stops processing requests.
// The underlying resource is owned by the manager, so there is nothing to do.
func (r *Resource) CloseAsync() {
}
// WaitForClose blocks until the processor has closed down.
func (r *Resource) WaitForClose(timeout time.Duration) error {
return nil
} | internal/old/processor/resource.go | 0.768038 | 0.419886 | resource.go | starcoder |
package box2d
import (
"fmt"
"math"
)
/// Distance joint definition. This requires defining an
/// anchor point on both bodies and the non-zero length of the
/// distance joint. The definition uses local anchor points
/// so that the initial configuration can violate the constraint
/// slightly. This helps when saving and loading a game.
/// @warning Do not use a zero or short length.
type B2DistanceJointDef struct {
	B2JointDef

	/// The local anchor point relative to bodyA's origin.
	LocalAnchorA B2Vec2

	/// The local anchor point relative to bodyB's origin.
	LocalAnchorB B2Vec2

	/// The natural length between the anchor points.
	Length float64

	/// The mass-spring-damper frequency in Hertz. A value of 0
	/// disables softness.
	FrequencyHz float64

	/// The damping ratio. 0 = no damping, 1 = critical damping.
	DampingRatio float64
}
// MakeB2DistanceJointDef returns a distance joint definition with the
// default values: zero anchors, unit length, rigid (frequency 0) and
// undamped.
func MakeB2DistanceJointDef() B2DistanceJointDef {
	res := B2DistanceJointDef{
		B2JointDef: MakeB2JointDef(),
	}
	res.Type = B2JointType.E_distanceJoint
	res.LocalAnchorA.Set(0.0, 0.0)
	res.LocalAnchorB.Set(0.0, 0.0)
	res.Length = 1.0
	res.FrequencyHz = 0.0
	res.DampingRatio = 0.0
	return res
}
/// A distance joint constrains two points on two bodies
/// to remain at a fixed distance from each other. You can view
/// this as a massless, rigid rod.
type B2DistanceJoint struct {
	*B2Joint

	M_frequencyHz  float64
	M_dampingRatio float64
	M_bias         float64

	// Solver shared
	M_localAnchorA B2Vec2
	M_localAnchorB B2Vec2
	M_gamma        float64
	M_impulse      float64
	M_length       float64

	// Solver temp (populated per step by InitVelocityConstraints)
	M_indexA       int
	M_indexB       int
	M_u            B2Vec2
	M_rA           B2Vec2
	M_rB           B2Vec2
	M_localCenterA B2Vec2
	M_localCenterB B2Vec2
	M_invMassA     float64
	M_invMassB     float64
	M_invIA        float64
	M_invIB        float64
	M_mass         float64
}
/// The local anchor point relative to bodyA's origin.
func (joint B2DistanceJoint) GetLocalAnchorA() B2Vec2 {
	return joint.M_localAnchorA
}
/// The local anchor point relative to bodyB's origin.
func (joint B2DistanceJoint) GetLocalAnchorB() B2Vec2 {
	return joint.M_localAnchorB
}
// SetLength sets the natural length between the anchor points.
func (joint *B2DistanceJoint) SetLength(length float64) {
	joint.M_length = length
}
// GetLength returns the natural length between the anchor points.
func (joint B2DistanceJoint) GetLength() float64 {
	return joint.M_length
}
// SetFrequency sets the spring frequency in Hertz (0 disables softness).
func (joint *B2DistanceJoint) SetFrequency(hz float64) {
	joint.M_frequencyHz = hz
}
// GetFrequency returns the spring frequency in Hertz.
func (joint B2DistanceJoint) GetFrequency() float64 {
	return joint.M_frequencyHz
}
// SetDampingRatio sets the damping ratio (0 = none, 1 = critical).
func (joint *B2DistanceJoint) SetDampingRatio(ratio float64) {
	joint.M_dampingRatio = ratio
}
// GetDampingRatio returns the damping ratio.
func (joint B2DistanceJoint) GetDampingRatio() float64 {
	return joint.M_dampingRatio
}
// 1-D constrained system
// m (v2 - v1) = lambda
// v2 + (beta/h) * x1 + gamma * lambda = 0, gamma has units of inverse mass.
// x2 = x1 + h * v2
// 1-D mass-damper-spring system
// m (v2 - v1) + h * d * v2 + h * k *
// C = norm(p2 - p1) - L
// u = (p2 - p1) / norm(p2 - p1)
// Cdot = dot(u, v2 + cross(w2, r2) - v1 - cross(w1, r1))
// J = [-u -cross(r1, u) u cross(r2, u)]
// K = J * invM * JT
// = invMass1 + invI1 * cross(r1, u)^2 + invMass2 + invI2 * cross(r2, u)^2
// Initialize fills the definition from two bodies and world-space anchor
// points: local anchors are derived from the world anchors, and the rest
// length is set to the current anchor separation.
func (joint *B2DistanceJointDef) Initialize(b1 *B2Body, b2 *B2Body, anchor1 B2Vec2, anchor2 B2Vec2) {
	joint.BodyA = b1
	joint.BodyB = b2
	joint.LocalAnchorA = joint.BodyA.GetLocalPoint(anchor1)
	joint.LocalAnchorB = joint.BodyB.GetLocalPoint(anchor2)
	d := B2Vec2Sub(anchor2, anchor1)
	joint.Length = d.Length()
}
// MakeB2DistanceJoint constructs a distance joint from its definition with
// zeroed solver state (impulse, gamma, bias).
func MakeB2DistanceJoint(def *B2DistanceJointDef) *B2DistanceJoint {
	res := B2DistanceJoint{
		B2Joint: MakeB2Joint(def),
	}
	res.M_localAnchorA = def.LocalAnchorA
	res.M_localAnchorB = def.LocalAnchorB
	res.M_length = def.Length
	res.M_frequencyHz = def.FrequencyHz
	res.M_dampingRatio = def.DampingRatio
	res.M_impulse = 0.0
	res.M_gamma = 0.0
	res.M_bias = 0.0
	return &res
}
// InitVelocityConstraints caches per-step solver data: body indices, anchor
// arms, the constraint axis u, the effective mass, the soft-constraint
// parameters (gamma/bias) when a spring frequency is set, and warm-starts
// the bodies with last step's impulse.
func (joint *B2DistanceJoint) InitVelocityConstraints(data B2SolverData) {
	joint.M_indexA = joint.M_bodyA.M_islandIndex
	joint.M_indexB = joint.M_bodyB.M_islandIndex
	joint.M_localCenterA = joint.M_bodyA.M_sweep.LocalCenter
	joint.M_localCenterB = joint.M_bodyB.M_sweep.LocalCenter
	joint.M_invMassA = joint.M_bodyA.M_invMass
	joint.M_invMassB = joint.M_bodyB.M_invMass
	joint.M_invIA = joint.M_bodyA.M_invI
	joint.M_invIB = joint.M_bodyB.M_invI

	cA := data.Positions[joint.M_indexA].C
	aA := data.Positions[joint.M_indexA].A
	vA := data.Velocities[joint.M_indexA].V
	wA := data.Velocities[joint.M_indexA].W

	cB := data.Positions[joint.M_indexB].C
	aB := data.Positions[joint.M_indexB].A
	vB := data.Velocities[joint.M_indexB].V
	wB := data.Velocities[joint.M_indexB].W

	qA := MakeB2RotFromAngle(aA)
	qB := MakeB2RotFromAngle(aB)

	// Anchor arms relative to each body's center of mass, in world frame.
	joint.M_rA = B2RotVec2Mul(qA, B2Vec2Sub(joint.M_localAnchorA, joint.M_localCenterA))
	joint.M_rB = B2RotVec2Mul(qB, B2Vec2Sub(joint.M_localAnchorB, joint.M_localCenterB))
	// Constraint axis: from anchor A to anchor B.
	joint.M_u = B2Vec2Sub(B2Vec2Sub(B2Vec2Add(cB, joint.M_rB), cA), joint.M_rA)

	// Handle singularity.
	length := joint.M_u.Length()
	if length > B2_linearSlop {
		joint.M_u.OperatorScalarMulInplace(1.0 / length)
	} else {
		joint.M_u.Set(0.0, 0.0)
	}

	// Effective mass along u: K = J * invM * J^T (see derivation comments above).
	crAu := B2Vec2Cross(joint.M_rA, joint.M_u)
	crBu := B2Vec2Cross(joint.M_rB, joint.M_u)
	invMass := joint.M_invMassA + joint.M_invIA*crAu*crAu + joint.M_invMassB + joint.M_invIB*crBu*crBu

	// Compute the effective mass matrix.
	if invMass != 0.0 {
		joint.M_mass = 1.0 / invMass
	} else {
		joint.M_mass = 0
	}

	if joint.M_frequencyHz > 0.0 {
		// Soft constraint: fold the spring/damper into gamma and bias.
		C := length - joint.M_length

		// Frequency
		omega := 2.0 * B2_pi * joint.M_frequencyHz

		// Damping coefficient
		d := 2.0 * joint.M_mass * joint.M_dampingRatio * omega

		// Spring stiffness
		k := joint.M_mass * omega * omega

		// magic formulas
		h := data.Step.Dt
		joint.M_gamma = h * (d + h*k)
		if joint.M_gamma != 0.0 {
			joint.M_gamma = 1.0 / joint.M_gamma
		} else {
			joint.M_gamma = 0.0
		}
		joint.M_bias = C * h * k * joint.M_gamma

		invMass += joint.M_gamma
		if invMass != 0.0 {
			joint.M_mass = 1.0 / invMass
		} else {
			joint.M_mass = 0.0
		}
	} else {
		joint.M_gamma = 0.0
		joint.M_bias = 0.0
	}

	if data.Step.WarmStarting {
		// Scale the impulse to support a variable time step.
		joint.M_impulse *= data.Step.DtRatio
		P := B2Vec2MulScalar(joint.M_impulse, joint.M_u)
		vA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
		wA -= joint.M_invIA * B2Vec2Cross(joint.M_rA, P)
		vB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
		wB += joint.M_invIB * B2Vec2Cross(joint.M_rB, P)
	} else {
		joint.M_impulse = 0.0
	}

	// Write the locals back into the solver's Velocities slice.
	data.Velocities[joint.M_indexA].V = vA
	data.Velocities[joint.M_indexA].W = wA
	data.Velocities[joint.M_indexB].V = vB
	data.Velocities[joint.M_indexB].W = wB
}
// SolveVelocityConstraints applies one sequential-impulse iteration: the
// relative anchor velocity along u (plus bias/gamma softening) yields an
// impulse that is accumulated and applied to both bodies.
func (joint *B2DistanceJoint) SolveVelocityConstraints(data B2SolverData) {
	vA := data.Velocities[joint.M_indexA].V
	wA := data.Velocities[joint.M_indexA].W
	vB := data.Velocities[joint.M_indexB].V
	wB := data.Velocities[joint.M_indexB].W

	// Cdot = dot(u, v + cross(w, r))
	vpA := B2Vec2Add(vA, B2Vec2CrossScalarVector(wA, joint.M_rA))
	vpB := B2Vec2Add(vB, B2Vec2CrossScalarVector(wB, joint.M_rB))
	Cdot := B2Vec2Dot(joint.M_u, B2Vec2Sub(vpB, vpA))

	impulse := -joint.M_mass * (Cdot + joint.M_bias + joint.M_gamma*joint.M_impulse)
	joint.M_impulse += impulse

	P := B2Vec2MulScalar(impulse, joint.M_u)
	vA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
	wA -= joint.M_invIA * B2Vec2Cross(joint.M_rA, P)
	vB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
	wB += joint.M_invIB * B2Vec2Cross(joint.M_rB, P)

	// Write the locals back into the solver's Velocities slice.
	data.Velocities[joint.M_indexA].V = vA
	data.Velocities[joint.M_indexA].W = wA
	data.Velocities[joint.M_indexB].V = vB
	data.Velocities[joint.M_indexB].W = wB
}
// SolvePositionConstraints corrects positional drift for the rigid case and
// reports whether the remaining error is within the linear slop. Soft
// (spring) joints skip position correction entirely.
func (joint *B2DistanceJoint) SolvePositionConstraints(data B2SolverData) bool {
	if joint.M_frequencyHz > 0.0 {
		// There is no position correction for soft distance constraints.
		return true
	}

	cA := data.Positions[joint.M_indexA].C
	aA := data.Positions[joint.M_indexA].A
	cB := data.Positions[joint.M_indexB].C
	aB := data.Positions[joint.M_indexB].A

	qA := MakeB2RotFromAngle(aA)
	qB := MakeB2RotFromAngle(aB)

	rA := B2RotVec2Mul(qA, B2Vec2Sub(joint.M_localAnchorA, joint.M_localCenterA))
	rB := B2RotVec2Mul(qB, B2Vec2Sub(joint.M_localAnchorB, joint.M_localCenterB))
	u := B2Vec2Sub(B2Vec2Sub(B2Vec2Add(cB, rB), cA), rA)

	length := u.Normalize()
	// Clamp the correction so a single step cannot over-shoot.
	C := length - joint.M_length
	C = B2FloatClamp(C, -B2_maxLinearCorrection, B2_maxLinearCorrection)

	impulse := -joint.M_mass * C
	P := B2Vec2MulScalar(impulse, u)

	cA.OperatorMinusInplace(B2Vec2MulScalar(joint.M_invMassA, P))
	aA -= joint.M_invIA * B2Vec2Cross(rA, P)
	cB.OperatorPlusInplace(B2Vec2MulScalar(joint.M_invMassB, P))
	aB += joint.M_invIB * B2Vec2Cross(rB, P)

	// Write the locals back into the solver's Positions slice.
	data.Positions[joint.M_indexA].C = cA
	data.Positions[joint.M_indexA].A = aA
	data.Positions[joint.M_indexB].C = cB
	data.Positions[joint.M_indexB].A = aB

	return math.Abs(C) < B2_linearSlop
}
// GetAnchorA returns the joint's anchor on body A in world coordinates.
func (joint B2DistanceJoint) GetAnchorA() B2Vec2 {
	return joint.M_bodyA.GetWorldPoint(joint.M_localAnchorA)
}
// GetAnchorB returns the joint's anchor on body B in world coordinates.
func (joint B2DistanceJoint) GetAnchorB() B2Vec2 {
	return joint.M_bodyB.GetWorldPoint(joint.M_localAnchorB)
}
// GetReactionForce returns the constraint force along the joint axis,
// reconstructed from the accumulated impulse scaled by inv_dt (1/timestep).
func (joint B2DistanceJoint) GetReactionForce(inv_dt float64) B2Vec2 {
	return B2Vec2MulScalar((inv_dt * joint.M_impulse), joint.M_u)
}
// GetReactionTorque always returns 0: a distance joint only constrains the
// separation of the anchors and applies no torque.
func (joint B2DistanceJoint) GetReactionTorque(inv_dt float64) float64 {
	return 0.0
}
// Dump writes the joint's definition to stdout as C++-style
// b2DistanceJointDef initialization code, mirroring Box2D's
// b2DistanceJoint::Dump.
func (joint B2DistanceJoint) Dump() {
	indexA := joint.M_bodyA.M_islandIndex
	indexB := joint.M_bodyB.M_islandIndex
	fmt.Printf(" b2DistanceJointDef jd;\n")
	fmt.Printf(" jd.bodyA = bodies[%d];\n", indexA)
	fmt.Printf(" jd.bodyB = bodies[%d];\n", indexB)
	// %v instead of %d: if M_collideConnected is a bool (as in upstream
	// Box2D), %d would print "%!d(bool=...)"; %v is correct either way.
	fmt.Printf(" jd.collideConnected = bool(%v);\n", joint.M_collideConnected)
	// %.15ef instead of %.15lef: the C length modifier "l" is not a valid
	// Go fmt flag and would corrupt the output with "%!l(float64=...)".
	fmt.Printf(" jd.localAnchorA.Set(%.15ef, %.15ef);\n", joint.M_localAnchorA.X, joint.M_localAnchorA.Y)
	fmt.Printf(" jd.localAnchorB.Set(%.15ef, %.15ef);\n", joint.M_localAnchorB.X, joint.M_localAnchorB.Y)
	fmt.Printf(" jd.length = %.15ef;\n", joint.M_length)
	fmt.Printf(" jd.frequencyHz = %.15ef;\n", joint.M_frequencyHz)
	fmt.Printf(" jd.dampingRatio = %.15ef;\n", joint.M_dampingRatio)
	fmt.Printf(" joints[%d] = m_world.CreateJoint(&jd);\n", joint.M_index)
} | DynamicsB2JointDistance.go | 0.847527 | 0.842734 | DynamicsB2JointDistance.go | starcoder |
package main
import (
"fmt"
"sync"
"time"
)
// merge combines two individually sorted int slices into one sorted slice.
// When elements compare equal, the element from right is emitted first.
func merge(left []int, right []int) []int {
	merged := make([]int, 0, len(left)+len(right))
	i, j := 0, 0
	// Walk both slices, always taking the smaller head element.
	for i < len(left) && j < len(right) {
		if left[i] < right[j] {
			merged = append(merged, left[i])
			i++
		} else {
			merged = append(merged, right[j])
			j++
		}
	}
	// At most one of these appends anything: the unexhausted remainder.
	merged = append(merged, left[i:]...)
	merged = append(merged, right[j:]...)
	return merged
}
// mergeSortParallel sorts arr with merge sort, handing the left half of each
// split to a new goroutine while the current goroutine sorts the right half.
func mergeSortParallel(arr []int) []int {
	/*
		A practical refinement would be a size cutoff, e.g.:

			max := 2048
			if len(arr) < max {
				return mergeSortSequential(arr)
			}

		Below some size, spawning a goroutine and waiting on the scheduler
		costs more than just sorting the small slice sequentially.
	*/
	length := len(arr)
	if length < 2 {
		// Slices of length 0 or 1 are already sorted.
		return arr
	}
	// WaitGroup waits for the goroutine sorting the left half.
	var waitGroup sync.WaitGroup
	// One outstanding goroutine per split.
	waitGroup.Add(1)
	left := arr[0 : length/2]
	right := arr[length/2:]
	// Left half is sorted on a separate goroutine; the Done/Wait pair gives
	// the happens-before edge that makes the write to left visible below.
	go func() {
		defer waitGroup.Done()
		left = mergeSortParallel(left)
	}()
	// Right half is sorted on the current goroutine.
	right = mergeSortParallel(right)
	// Block until the left half is ready before merging.
	waitGroup.Wait()
	return merge(left, right)
}
// main sorts a small fixed slice with the parallel merge sort and prints the
// result together with the elapsed wall-clock time.
func main() {
	arr := []int{3, 5, 1, 6, 1, 7, 2, 4, 5, 1}
	start := time.Now() // timestamp taken just before sorting starts
	arr = mergeSortParallel(arr)
	end := time.Now() // timestamp taken right after sorting finishes
	fmt.Println("sorted array is")
	fmt.Println(arr)
	fmt.Println("time taken by algorithm is")
	fmt.Println(end.Sub(start))
}
/*
input/output sample
sorted array is
[1 1 1 2 3 4 5 5 6 7]
time taken by algorithm is
51.359µs
Time Complexity: O(n*Log(n)) total work in worst case
Space Complexity: O(n) in worst case
*/ | Go/sort/merge_sort_parallel.go | 0.524395 | 0.544983 | merge_sort_parallel.go | starcoder |
package geom
import (
"fmt"
"math"
)
type Rect struct {
Min, Max Coord
}
// NilRect returns the "empty" rectangle: Min sits at +Inf and Max at -Inf,
// so it contains no coordinate and the first ExpandToContainCoord call
// collapses it onto the supplied point.
func NilRect() (r Rect) {
	posInf := math.Inf(1)
	negInf := math.Inf(-1)
	r.Min.X, r.Min.Y = posInf, posInf
	r.Max.X, r.Max.Y = negInf, negInf
	return
}
func (r Rect) Width() float64 {
return r.Max.X - r.Min.X
}
func (r Rect) Height() float64 {
return r.Max.Y - r.Min.Y
}
func (r Rect) Size() (w, h float64) {
return r.Max.X - r.Min.X, r.Max.Y - r.Min.Y
}
func (r Rect) Center() (center Coord) {
center.X = 0.5 * (r.Min.X + r.Max.X)
center.Y = 0.5 * (r.Min.Y + r.Max.Y)
return
}
// ContainsCoord reports whether p lies within the rectangle.
// NOTE(review): delegates to Coord.QuadPP/QuadMM — presumably "p is in the
// (+,+) quadrant of Min" and "p is in the (-,-) quadrant of Max", i.e. an
// inclusive bounds check; confirm against Coord's implementation.
func (r Rect) ContainsCoord(p Coord) bool {
	return r.Min.QuadPP(p) && r.Max.QuadMM(p)
}
func (r Rect) ContainsRect(o Rect) bool {
return r.ContainsCoord(o.Min) && r.ContainsCoord(o.Max)
}
func (r *Rect) Translate(offset Coord) {
r.Min = r.Min.Plus(offset)
r.Max = r.Max.Plus(offset)
}
// Scale multiplies both corners by the per-axis factors xf and yf in place.
// A negative factor mirrors the rectangle across the origin on that axis,
// which would leave Min greater than Max; the swaps below restore the
// Min <= Max invariant.
func (r *Rect) Scale(xf, yf float64) {
	r.Min.Scale(xf, yf)
	r.Max.Scale(xf, yf)
	if xf < 0 {
		r.Min.X, r.Max.X = r.Max.X, r.Min.X
	}
	if yf < 0 {
		r.Min.Y, r.Max.Y = r.Max.Y, r.Min.Y
	}
}
func (r *Rect) ExpandToContain(ch <-chan Coord) {
for p := range ch {
r.ExpandToContainCoord(p)
}
}
func (r *Rect) ExpandToContainCoord(p Coord) {
r.Min.X = minf(r.Min.X, p.X)
r.Min.Y = minf(r.Min.Y, p.Y)
r.Max.X = maxf(r.Max.X, p.X)
r.Max.Y = maxf(r.Max.Y, p.Y)
}
func (r *Rect) ExpandToContainRect(q Rect) {
r.ExpandToContainCoord(q.Min)
r.ExpandToContainCoord(q.Max)
}
func (r Rect) Bounds() (bounds Rect) {
bounds = r
return
}
func (r Rect) Equals(oi interface{}) bool {
or, ok := oi.(Rect)
return ok && RectsEqual(r, or)
}
// RectsIntersect reports whether r1 and r2 overlap. Boundary contact counts:
// rectangles that merely share an edge or corner are considered intersecting.
func RectsIntersect(r1, r2 Rect) bool {
	// ov reports whether the 1-D closed intervals [min1,max1] and
	// [min2,max2] overlap. Testing endpoint containment in both directions
	// is equivalent to min1 <= max2 && min2 <= max1 for well-formed
	// (min <= max) intervals.
	ov := func(min1, max1, min2, max2 float64) (overlap bool) {
		if min1 <= min2 && max1 >= min2 {
			return true
		}
		if min1 <= max2 && max1 >= max2 {
			return true
		}
		if min2 <= min1 && max2 >= min1 {
			return true
		}
		if min2 <= max1 && max2 >= max1 {
			return true
		}
		return false
	}
	dbg("RI(%v, %v)", r1, r2)
	// Separating-axis test: the rects intersect iff their projections
	// overlap on both X and Y.
	xoverlap := ov(r1.Min.X, r1.Max.X, r2.Min.X, r2.Max.X)
	yoverlap := ov(r1.Min.Y, r1.Max.Y, r2.Min.Y, r2.Max.Y)
	dbg("%v %v", xoverlap, yoverlap)
	return xoverlap && yoverlap
}
// RectsIntersectStrict reports whether r1 and r2 overlap with positive area:
// shared edges or corners alone do not count.
// NOTE(review): with the strict comparisons below, intervals whose endpoints
// coincide (e.g. two identical rectangles) report no overlap at all —
// confirm that this is the intended semantics.
func RectsIntersectStrict(r1, r2 Rect) bool {
	// ov: strict 1-D overlap test; same endpoint-containment structure as in
	// RectsIntersect but with "<"/">" instead of "<="/">=".
	ov := func(min1, max1, min2, max2 float64) (overlap bool) {
		if min1 < min2 && max1 > min2 {
			return true
		}
		if min1 < max2 && max1 > max2 {
			return true
		}
		if min2 < min1 && max2 > min1 {
			return true
		}
		if min2 < max1 && max2 > max1 {
			return true
		}
		return false
	}
	dbg("RI(%v, %v)", r1, r2)
	// Strict overlap is required on both axes.
	xoverlap := ov(r1.Min.X, r1.Max.X, r2.Min.X, r2.Max.X)
	yoverlap := ov(r1.Min.Y, r1.Max.Y, r2.Min.Y, r2.Max.Y)
	dbg("%v %v", xoverlap, yoverlap)
	return xoverlap && yoverlap
}
// RectsIntersection returns the axis-aligned intersection of r1 and r2.
// NOTE(review): when the rects are disjoint the result is an inverted rect
// (Min > Max on some axis) rather than an empty sentinel — callers should
// gate on RectsIntersect first.
func RectsIntersection(r1, r2 Rect) (ri Rect) {
	ri.Min.X = math.Max(r1.Min.X, r2.Min.X)
	ri.Min.Y = math.Max(r1.Min.Y, r2.Min.Y)
	ri.Max.X = math.Min(r1.Max.X, r2.Max.X)
	ri.Max.Y = math.Min(r1.Max.Y, r2.Max.Y)
	return
}
func RectsEqual(r1, r2 Rect) bool {
if !r1.Min.EqualsCoord(r2.Min) {
return false
}
if !r1.Max.EqualsCoord(r2.Max) {
return false
}
return true
}
func (r Rect) String() string {
return fmt.Sprintf("{%v %v}", r.Min, r.Max)
} | vendor/github.com/skelterjohn/geom/rect.go | 0.751739 | 0.503845 | rect.go | starcoder |
package grid
import (
"github.com/adamcolton/geom/calc/cmpr"
"github.com/adamcolton/geom/d2"
"github.com/adamcolton/geom/geomerr"
)
// Pt is a cell in a grid
type Pt struct {
X, Y int
}
// Area always returns a positive value.
func (pt Pt) Area() int {
a := pt.X * pt.Y
if a < 0 {
return -a
}
return a
}
// D2 converts a Pt to a d2.D2.
func (pt Pt) D2() d2.D2 {
return d2.D2{float64(pt.X), float64(pt.Y)}
}
// Abs returns a copy of pt with any negative coordinate negated, so both X
// and Y are non-negative.
func (pt Pt) Abs() Pt {
	x, y := pt.X, pt.Y
	if x < 0 {
		x = -x
	}
	if y < 0 {
		y = -y
	}
	return Pt{X: x, Y: y}
}
// Add two Pts
func (pt Pt) Add(pt2 Pt) Pt {
return Pt{
X: pt.X + pt2.X,
Y: pt.Y + pt2.Y,
}
}
// Subtract two points
func (pt Pt) Subtract(pt2 Pt) Pt {
return Pt{
X: pt.X - pt2.X,
Y: pt.Y - pt2.Y,
}
}
// Multiply a Pt by a scale value
func (pt Pt) Multiply(scale float64) Pt {
return Pt{
X: int(float64(pt.X) * scale),
Y: int(float64(pt.Y) * scale),
}
}
// To creates an Iterator between two points
func (pt Pt) To(pt2 Pt) Iterator {
return Range{pt, pt2}.Iter()
}
// Iter creates an Iterator from the origin to this Pt
func (pt Pt) Iter() Iterator {
return Pt{}.To(pt)
}
// Scale is used to convert a Grid Pt to two float64 values, often
type Scale struct {
X, Y, DX, DY float64
}
// T returns the scaled values corresponding to the point. Typically these are
// used as parametric values.
func (s Scale) T(pt Pt) (float64, float64) {
return float64(pt.X)*s.X + s.DX, float64(pt.Y)*s.Y + s.DY
}
// AssertEqual fulfils geomtest.AssertEqualizer
func (pt Pt) AssertEqual(actual interface{}, t cmpr.Tolerance) error {
if err := geomerr.NewTypeMismatch(pt, actual); err != nil {
return err
}
pt2 := actual.(Pt)
if pt.X != pt2.X || pt.Y != pt2.Y {
return geomerr.NotEqual(pt, pt2)
}
return nil
}
const (
// TwoMask sets the one bit to zero, the result is always divisible by 2.
TwoMask int = (^int(0)) ^ 1
)
// Mask performs and AND operation with the mask on the X and Y value of the
// given point.
func (pt Pt) Mask(and int) Pt {
return Pt{
X: (pt.X) & and,
Y: (pt.Y) & and,
}
}
// Index returns the point reached at idx when incrementing by X and wrapping Y,
// i.e. the idx-th cell of a pt.X by pt.Y grid in row-major order.
func (pt Pt) Index(idx int) Pt {
	// Wrap idx so it cycles over all Area() cells; Area() is always
	// non-negative even for negative X or Y.
	idx = idx % pt.Area()
	// NOTE(review): assumes idx >= 0 and pt.X > 0. Go's % keeps the sign of
	// the dividend, so a negative idx (or negative dimensions) yields
	// negative coordinates — confirm callers never pass those.
	return Pt{
		X: idx % pt.X,
		Y: idx / pt.X,
	}
}
package leetcode
/*
Approach:
Using the max area rectangle in skyline approach
with a mono stack
*/
func maximalRectangle(matrix [][]byte) int {
R := len(matrix)
if R == 0 {
return 0
}
C := len(matrix[0])
dp := make([][]int, R)
for r := 0; r < R; r++ {
dp[r] = make([]int, C+1)
}
for c := 0; c < C; c++ {
curr := 0
for r := 0; r < R; r++ {
if matrix[r][c] == '0' {
curr = 0
continue
}
curr++
dp[r][c] = curr
}
}
res := 0
for r := 0; r < R; r++ {
stack := []int{}
for c := 0; c < C+1; c++ {
for len(stack) > 0 && dp[r][stack[len(stack)-1]] >= dp[r][c] {
h := dp[r][stack[len(stack)-1]]
stack = stack[:len(stack)-1]
var w int
if len(stack) > 0 {
w = c - stack[len(stack)-1] - 1
} else {
w = c
}
if w*h > res {
res = w * h
}
}
stack = append(stack, c)
}
}
return res
}
/*
Approach:
Create dp where dp[i][j] is the sum of continues 1s to the left in row i including for j itself
For each column of the dp, for each row, look for continues rows without zero, take their min times the count of rows
That's the maximum area we can achieve
Note that we need to start at each row once
Also, we can break if we find a row that is zero
*/
func maximalRectangle2(matrix [][]byte) int {
R := len(matrix)
if R == 0 {
return 0
}
C := len(matrix[0])
dp := make([][]int, R)
for r := 0; r < R; r++ {
dp[r] = make([]int, C)
}
for r := 0; r < R; r++ {
for c := 0; c < C; c++ {
if matrix[r][c] == '0' {
continue
}
if c > 0 {
dp[r][c] = 1 + dp[r][c-1]
} else {
dp[r][c] = 1
}
}
}
res := 0
for c := 0; c < C; c++ {
for r := 0; r < R; r++ {
curr, count := 0, 0
for r1 := r; r1 < R; r1++ {
if curr == 0 {
if dp[r1][c] == 0 {
break
} else {
curr = dp[r1][c]
count = 1
if curr > res {
res = curr
}
}
} else {
if dp[r1][c] == 0 {
curr = 0
count = 0
break
} else {
if dp[r1][c] > res {
res = dp[r1][c]
}
if dp[r1][c] < curr {
curr = dp[r1][c]
}
count++
if count*curr > res {
res = count * curr
}
}
}
}
}
}
return res
} | go/maximal_rectangle.go | 0.609989 | 0.495911 | maximal_rectangle.go | starcoder |
package som
import (
"container/heap"
"fmt"
"math"
"gonum.org/v1/gonum/mat"
)
// Metric is distance metric
type Metric int
const (
// Euclidean metric
Euclidean Metric = iota
)
// Distance calculates given metric distance between vectors a and b and returns it.
// If unsupported metric is requested it returns default distance which is Euclidean distance.
// It returns error if the supplied vectors are either nil or have different dimensions
func Distance(m Metric, a, b []float64) (float64, error) {
if a == nil || b == nil {
return 0.0, fmt.Errorf("invalid vectors supplied. a: %v, b: %v", a, b)
}
if len(a) != len(b) {
return 0.0, fmt.Errorf("incorrect vector dims. a: %d, b: %d", len(a), len(b))
}
switch m {
case Euclidean:
return euclideanVec(a, b), nil
default:
return euclideanVec(a, b), nil
}
}
// DistanceMx calculates given metric distance matrix for the supplied matrix.
// Distance matrix is also known in literature as dissimilarity matrix.
// DistanceMx returns a hollow symmetric matrix where an item x_ij stores the distance between
// vectors stored in rows i and j. If an unknown metric is supplied Euclidean distance is computed.
// It returns error if the supplied matrix is nil.
func DistanceMx(m Metric, mat *mat.Dense) (*mat.Dense, error) {
if mat == nil {
return nil, fmt.Errorf("invalid matrix supplied: %v", mat)
}
switch m {
case Euclidean:
return euclideanMx(mat), nil
default:
return euclideanMx(mat), nil
}
}
// ClosestVec returns the index of the row of mat closest to v under the
// supplied metric m (unsupported metrics fall back to Euclidean inside
// Distance). If several rows are equally close, the first (topmost) wins.
// On failure it returns -1 and an error: v empty, mat nil, or mismatched
// dimensions (reported by Distance).
func ClosestVec(m Metric, v []float64, mat *mat.Dense) (int, error) {
	if len(v) == 0 {
		return -1, fmt.Errorf("invalid vector: %v", v)
	}
	if mat == nil {
		return -1, fmt.Errorf("invalid matrix: %v", mat)
	}
	rows, _ := mat.Dims()
	// Linear scan tracking the smallest distance seen; the strict "<"
	// comparison is what keeps the first row on ties.
	closest := 0
	dist := math.MaxFloat64
	for i := 0; i < rows; i++ {
		d, err := Distance(m, v, mat.RawRowView(i))
		if err != nil {
			return -1, err
		}
		if d < dist {
			dist = d
			closest = i
		}
	}
	return closest, nil
}
// ClosestNVec finds the N closest vectors to v in the list of vectors stored in m rows
// using the supplied distance metric. It returns a slice which contains indices to the m
// rows. The length of the slice is the same as number of requested closest vectors - n.
// ClosestNVec fails in the same way as ClosestVec. If n is higher than the number of
// rows in m, or if it is not a positive integer, it fails with error too.
func ClosestNVec(m Metric, n int, v []float64, mat *mat.Dense) ([]int, error) {
if len(v) == 0 {
return nil, fmt.Errorf("invalid vector: %v", v)
}
if mat == nil {
return nil, fmt.Errorf("invalid matrix: %v", mat)
}
rows, _ := mat.Dims()
if n <= 0 || n > rows {
return nil, fmt.Errorf("invalid number of closest vectors requested: %d", n)
}
closest := make([]int, n)
switch {
case n == 1:
idx, err := ClosestVec(m, v, mat)
if err != nil {
return nil, err
}
closest[0] = idx
default:
h, _ := newFloat64Heap(n)
rows, _ := mat.Dims()
for i := 0; i < rows; i++ {
d, err := Distance(m, v, mat.RawRowView(i))
if err != nil {
return nil, err
}
f := &float64Item{val: d, index: i}
heap.Push(h, f)
}
for j := 0; j < n; j++ {
closest[j] = (heap.Pop(h).(*float64Item)).index
}
}
return closest, nil
}
// BMUs returns a slice which contains indices of the Best Match Unit (BMU) codebook vectors for each
// vector stored in data rows. Each item in the returned slice correspnds to index of BMU in codebook for
// a particular data sample. If some data row has more than one BMU the index of the first one found is used.
// It returns error if either the data or codebook are nil or if their dimensions are mismatched.
func BMUs(data, codebook *mat.Dense) ([]int, error) {
if data == nil {
return nil, fmt.Errorf("invalid data supplied: %v", data)
}
if codebook == nil {
return nil, fmt.Errorf("invalid codebook supplied: %v", codebook)
}
rows, _ := data.Dims()
bmus := make([]int, rows)
for i := 0; i < rows; i++ {
idx, err := ClosestVec(Euclidean, data.RawRowView(i), codebook)
if err != nil {
return nil, err
}
bmus[i] = idx
}
return bmus, nil
}
// euclideanVec returns the Euclidean (L2) distance between the equal-length
// vectors a and b.
func euclideanVec(a, b []float64) float64 {
	var sum float64
	for i, av := range a {
		diff := av - b[i]
		sum += diff * diff
	}
	return math.Sqrt(sum)
}
// euclideanMx computes the symmetric, hollow (zero-diagonal) matrix of
// pairwise Euclidean distances between the rows of m:
// out[i][j] = euclideanVec(row i, row j).
func euclideanMx(m *mat.Dense) *mat.Dense {
	rows, _ := m.Dims()
	out := mat.NewDense(rows, rows, nil)
	// Only the upper triangle is computed; each distance is mirrored into
	// the lower triangle.
	for row := 0; row < rows-1; row++ {
		a := m.RawRowView(row)
		for i := row + 1; i < rows; i++ {
			// i starts at row+1, so the original "if i != row" guard was
			// always true and has been removed.
			dist := euclideanVec(a, m.RawRowView(i))
			out.Set(row, i, dist)
			out.Set(i, row, dist)
		}
	}
	return out
} | som/distance.go | 0.834811 | 0.565359 | distance.go | starcoder |
package tilecover
import (
"fmt"
"log"
"github.com/paulmach/orb"
"github.com/paulmach/orb/maptile"
)
// Geometry returns the covering set of tiles for the given geometry.
func Geometry(g orb.Geometry, z maptile.Zoom) maptile.Set {
if g == nil {
return nil
}
switch g := g.(type) {
case orb.Point:
return Point(g, z)
case orb.MultiPoint:
return MultiPoint(g, z)
case orb.LineString:
return LineString(g, z)
case orb.MultiLineString:
return MultiLineString(g, z)
case orb.Ring:
return Ring(g, z)
case orb.Polygon:
return Polygon(g, z)
case orb.MultiPolygon:
return MultiPolygon(g, z)
case orb.Collection:
return Collection(g, z)
case orb.Bound:
return Bound(g, z)
}
panic(fmt.Sprintf("geometry type not supported: %T", g))
}
// GeometryCount returns the covering set of tiles for the given geometry.
func GeometryCount(g orb.Geometry, z maptile.Zoom) int64 {
if g == nil {
return 0
}
switch g := g.(type) {
case orb.Point:
return int64(len(Point(g, z)))
case orb.MultiPoint:
return int64(len(MultiPoint(g, z)))
case orb.LineString:
return LineStringCount(g, z)
case orb.MultiLineString:
return MultiLineStringCount(g, z)
case orb.Ring:
return RingCount(g, z)
case orb.Polygon:
return PolygonCount(g, z)
case orb.MultiPolygon:
return MultiPolygonCount(g, z)
case orb.Collection:
return CollectionCount(g, z)
case orb.Bound:
return BoundCount(g, z)
}
panic(fmt.Sprintf("geometry type not supported: %T", g))
}
// GeometryChannel returns the covering set of tiles for the given geometry.
func GeometryChannel(g orb.Geometry, z maptile.Zoom, ch chan<- maptile.Tile) {
defer func() {
if recover() != nil {
log.Println("buffer got closed...")
}
}()
if g == nil {
return
}
switch g := g.(type) {
case orb.Point:
PointChannel(g, z, ch)
case orb.MultiPoint:
MultiPointChannel(g, z, ch)
case orb.LineString:
LineStringChannel(g, z, ch)
case orb.MultiLineString:
MultiLineStringChannel(g, z, ch)
case orb.Ring:
RingChannel(g, z, ch)
case orb.Polygon:
PolygonChannel(g, z, ch)
case orb.MultiPolygon:
MultiPolygonChannel(g, z, ch)
case orb.Collection:
CollectionChannel(g, z, ch)
case orb.Bound:
BoundChannel(g, z, ch)
}
}
// Point creates a tile cover for the point, i.e. just the tile
// containing the point.
func Point(ll orb.Point, z maptile.Zoom) maptile.Set {
return maptile.Set{
maptile.At(ll, z): true,
}
}
// PointChannel creates a tile cover for the point, i.e. just the tile
// containing the point.
func PointChannel(ll orb.Point, z maptile.Zoom, ch chan<- maptile.Tile) {
defer func() {
if recover() != nil {
log.Println("buffer got closed...")
}
}()
ch <- maptile.At(ll, z)
}
// MultiPoint creates a tile cover for the set of points,
func MultiPoint(mp orb.MultiPoint, z maptile.Zoom) maptile.Set {
set := make(maptile.Set)
for _, p := range mp {
set[maptile.At(p, z)] = true
}
return set
}
// MultiPointChannel creates a tile cover for the point, i.e. just the tile
// containing the point.
func MultiPointChannel(mp orb.MultiPoint, z maptile.Zoom, ch chan<- maptile.Tile) {
defer func() {
if recover() != nil {
log.Println("buffer got closed...")
}
}()
for _, p := range mp {
ch <- maptile.At(p, z)
}
}
// Bound creates a tile cover for the bound. i.e. all the tiles
// that intersect the bound.
func Bound(b orb.Bound, z maptile.Zoom) maptile.Set {
	lo := maptile.At(b.Min, z)
	hi := maptile.At(b.Max, z)
	// Tile Y grows southward while latitude grows northward, so the tile at
	// b.Min carries the larger Y: X runs lo.X..hi.X but Y runs hi.Y..lo.Y.
	result := make(maptile.Set, (hi.X-lo.X+1)*(lo.Y-hi.Y+1))
	for x := lo.X; x <= hi.X; x++ {
		for y := hi.Y; y <= lo.Y; y++ {
			result[maptile.Tile{X: x, Y: y, Z: z}] = true
		}
	}
	return result
}
// BoundCount creates a tile cover for the bound. i.e. all the tiles
// that intersect the bound.
func BoundCount(b orb.Bound, z maptile.Zoom) int64 {
lo := maptile.At(b.Min, z)
hi := maptile.At(b.Max, z)
cnt := int64(hi.X-lo.X+1) * int64(lo.Y-hi.Y+1)
if cnt == 0 {
return 1
}
return cnt
}
// BoundChannel creates a tile cover for the bound. i.e. all the tiles
// that intersect the bound.
func BoundChannel(b orb.Bound, z maptile.Zoom, ch chan<- maptile.Tile) {
defer func() {
if recover() != nil {
log.Println("buffer got closed...")
}
}()
lo := maptile.At(b.Min, z)
hi := maptile.At(b.Max, z)
for x := lo.X; x <= hi.X; x++ {
for y := hi.Y; y <= lo.Y; y++ {
ch <- maptile.Tile{X: x, Y: y, Z: z}
}
}
}
// Collection returns the covering set of tiles for the
// geoemtry collection.
func Collection(c orb.Collection, z maptile.Zoom) maptile.Set {
set := make(maptile.Set)
for _, g := range c {
set.Merge(Geometry(g, z))
}
return set
}
// CollectionCount returns the covering set of tiles for the
// geoemtry collection.
func CollectionCount(c orb.Collection, z maptile.Zoom) int64 {
var cnt int64
for _, g := range c {
cnt += GeometryCount(g, z)
}
return cnt
}
// CollectionChannel returns the covering set of tiles for the
// geoemtry collection.
func CollectionChannel(c orb.Collection, z maptile.Zoom, ch chan<- maptile.Tile) {
defer close(ch)
for _, g := range c {
GeometryChannel(g, z, ch)
}
} | maptile/tilecover/helpers.go | 0.720172 | 0.545528 | helpers.go | starcoder |
package elastic
import (
"math"
"github.com/unchartedsoftware/veldt"
"github.com/unchartedsoftware/veldt/binning"
"github.com/unchartedsoftware/veldt/util/json"
)
// BinnedTopHits represents an elasticsearch implementation of the binned top
// hits tile.
type BinnedTopHits struct {
Elastic
Bivariate
TopHits
}
// NewBinnedTopHits instantiates and returns a new tile struct.
func NewBinnedTopHits(host, port string) veldt.TileCtor {
return func() (veldt.Tile, error) {
b := &BinnedTopHits{}
b.Host = host
b.Port = port
return b, nil
}
}
// Parse parses the provided JSON object and populates the tiles attributes.
func (b *BinnedTopHits) Parse(params map[string]interface{}) error {
err := b.TopHits.Parse(params)
if err != nil {
return err
}
return b.Bivariate.Parse(params)
}
// Create generates a binned top-hits tile for the given URI, tile coordinate
// and query: the tile is divided into Resolution x Resolution bins, the top
// hits per bin are fetched via a nested aggregation, and the result is JSON
// holding the bin-center points plus the hits for each bin.
func (b *BinnedTopHits) Create(uri string, coord *binning.TileCoord, query veldt.Query) ([]byte, error) {
	// create search service
	search, err := b.CreateSearchService(uri)
	if err != nil {
		return nil, err
	}
	// create root query
	q, err := b.CreateQuery(query)
	if err != nil {
		return nil, err
	}
	// add tiling query
	q.Must(b.Bivariate.GetQuery(coord))
	// set the query
	search.Query(q)
	// nest the top-hits aggregation inside the bivariate (x/y binning) aggs
	topHitsAggs := b.TopHits.GetAggs()
	aggs := b.Bivariate.GetAggsWithNested(coord, "top-hits", topHitsAggs["top-hits"])
	// set the aggregation
	search.Aggregation("x", aggs["x"])
	// send query
	res, err := search.Do()
	if err != nil {
		return nil, err
	}
	// get bins
	buckets, err := b.Bivariate.GetBins(coord, &res.Aggregations)
	if err != nil {
		return nil, err
	}
	// convert each non-empty bucket into its list of top hits; empty bins
	// remain nil
	bins := make([][]map[string]interface{}, len(buckets))
	for i, bucket := range buckets {
		if bucket != nil {
			hits, err := b.TopHits.GetTopHits(&bucket.Aggregations)
			if err != nil {
				return nil, err
			}
			bins[i] = hits
		}
	}
	// bin width in tile pixels
	binSize := binning.MaxTileResolution / float64(b.Resolution)
	halfSize := float64(binSize / 2)
	// emit the center point of every non-empty bin; bins are row-major, so
	// i%Resolution is the column and i/Resolution the row
	points := make([]float32, len(bins)*2)
	numPoints := 0
	for i, bin := range bins {
		if bin != nil {
			x := float32(float64(i%b.Resolution)*binSize + halfSize)
			y := float32(math.Floor(float64(i/b.Resolution))*binSize + halfSize)
			points[numPoints*2] = x
			points[numPoints*2+1] = y
			numPoints++
		}
	}
	// encode; points is truncated to the populated prefix so it stays in
	// sync with the non-nil entries of hits
	return json.Marshal(map[string]interface{}{
		"points": points[0 : numPoints*2],
		"hits":   bins,
	})
} | generation/elastic/binned_top_hits.go | 0.78469 | 0.402157 | binned_top_hits.go | starcoder |
package neural
import (
"fmt"
"math"
"math/rand"
)
// Hidden or output layer of a neural network
type Layer struct {
Nodes []*Node
Inputs []float64
}
type Learner interface {
Learn(outputs, targets []float64)
}
// Defines the feed forward neural network
// Layers includes all hidden layers + the output layer
type Network struct {
Inputs []float64
Layers []*Layer
}
// NewNetwork builds a fully connected feed-forward network with the given
// number of input values, one hidden layer per entry of hiddenLayers (sized
// by that entry, using hiddenAct), and a final output layer of size outputs
// using outAct. Layers therefore holds all hidden layers plus the output
// layer. It panics if any hidden layer size is below 1.
func NewNetwork(inputs, outputs int, hiddenLayers []int, hiddenAct, outAct ActivationFunction) *Network {
	net := &Network{
		Inputs: make([]float64, inputs),
		Layers: make([]*Layer, len(hiddenLayers)+1),
	}
	// prevNodes tracks the fan-in (input count) of the layer being built.
	prevNodes := inputs
	for h, hidden := range hiddenLayers {
		if hidden < 1 {
			panic(fmt.Sprintf("NewNetwork ERROR number of hidden nodes %d in layer %d is below 1", hidden, h))
		}
		net.Layers[h] = createLayer(hidden, prevNodes, hiddenAct)
		prevNodes = hidden
	}
	// Add the output as the last layer
	net.Layers[len(hiddenLayers)] = createLayer(outputs, prevNodes, outAct)
	return net
}
// Trains the sample data set on the neural network updating weights for each sample
// using the stochastic gradient descent method.
func (n *Network) Train(samples, targets [][]float64, learner Learner) {
for d, sample := range samples {
outputs := n.Evaluate(sample)
learner.Learn(outputs, targets[d])
}
}
// Evaluate feeds the inputs forward through every layer and returns the
// output layer's activations. The values are copied into the network, so the
// caller's slice is not retained. It panics if len(inputs) differs from the
// network's input count.
func (n *Network) Evaluate(inputs []float64) []float64 {
	if len(inputs) != len(n.Inputs) {
		panic(fmt.Sprintf("Network.PushInputs failed input lengths do not match %d %d", len(inputs), len(n.Inputs)))
	}
	copy(n.Inputs, inputs)
	// Each layer's outputs become the next layer's inputs.
	outputs := n.Inputs
	for i := 0; i < len(n.Layers); i++ {
		outputs = propagateInputs(outputs, n.Layers[i])
	}
	return outputs
}
// Initializes weights in the network for each layer in the network
// Hidden layers are assigned weights randomly bound by high and low values.
// Output node's weights are set to 1.
func (n *Network) RandomizeWeights(low, high float64, randSrc rand.Source) {
rnd := rand.New(randSrc)
for _, layer := range n.Layers {
randomizeWeights(layer.Nodes, low, high, rnd)
}
}
// Adds inputs to a layer, computes each node of that layer,
// SKips the first input of the node because it is the bias.
func propagateInputs(inputs []float64, layer *Layer) []float64 {
outputs := make([]float64, len(layer.Nodes))
layer.Inputs[0] = 1 // x_j0 always 1
copy(layer.Inputs[1:], inputs)
for i := 0; i < len(layer.Nodes); i++ {
outputs[i] = layer.Nodes[i].Compute()
}
return outputs
}
// Creates a new neural layer, with the specified number of nodes and inputs
func createLayer(nodes, inputs int, act ActivationFunction) *Layer {
// Add room for the x_j0 and w_0 bias
inputs++
layer := &Layer{
Inputs: make([]float64, inputs),
Nodes: make([]*Node, nodes),
}
for i := 0; i < nodes; i++ {
layer.Nodes[i] = &Node{
Inputs: layer.Inputs,
Weights: make([]float64, inputs),
Activation: act,
}
}
return layer
}
// randomizeWeights initializes every weight of every node to a uniformly
// random value in the half-open interval [low, high).
func randomizeWeights(nodes []*Node, low, high float64, rnd *rand.Rand) {
	rng := high - low
	for _, node := range nodes {
		for i := 0; i < len(node.Weights); i++ {
			// Map [0,1) onto [low, high): FMA(r, rng, low) = r*rng + low.
			// The previous expression subtracted math.Abs(low), which only
			// yields the requested interval when low == -high (e.g.
			// low=0.5, high=1.0 produced weights in [-0.5, 0)).
			node.Weights[i] = math.FMA(rnd.Float64(), rng, low)
		}
	}
} | network.go | 0.837055 | 0.712895 | network.go | starcoder |
package ondatra
import (
opb "github.com/openconfig/ondatra/proto"
)
const (
maxFlowLabel uint32 = (1 << 20) - 1
maxPort uint32 = (1 << 16) - 1
)
// UIntRange is a range of unsigned integers.
type UIntRange struct {
pb *opb.UIntRange
}
// WithMin sets the minimum value of the range.
func (r *UIntRange) WithMin(min uint32) *UIntRange {
r.pb.Min = min
return r
}
// WithMax sets the maximum value of the range.
func (r *UIntRange) WithMax(max uint32) *UIntRange {
r.pb.Max = max
return r
}
// WithStep sets the distance between adjacent values in the range; must not be zero.
func (r *UIntRange) WithStep(step uint32) *UIntRange {
r.pb.Step = step
return r
}
// WithCount sets the number of values in the range; must not be zero.
func (r *UIntRange) WithCount(count uint32) *UIntRange {
r.pb.Count = count
return r
}
// WithRandom sets the values in the range to be chosen randomly.
func (r *UIntRange) WithRandom() *UIntRange {
r.pb.Random = true
return r
}
// AddressRange is a range of addresses.
type AddressRange struct {
AddressIncRange
}
// WithMin sets the minimum value of the range.
func (r *AddressRange) WithMin(min string) *AddressRange {
r.AddressIncRange.WithMin(min)
return r
}
// WithMax sets the maximum value of the range.
func (r *AddressRange) WithMax(max string) *AddressRange {
r.AddressIncRange.WithMax(max)
return r
}
// WithCount sets the number of values in the range; must not be zero.
func (r *AddressRange) WithCount(count uint32) *AddressRange {
r.AddressIncRange.WithCount(count)
return r
}
// WithStep sets the step address between values in the range.
// If not specified, the default depends on whether the range is random.
// For non-random ranges, step defaults to the largest step that fits the specified count of values.
// For random ranges, the step defaults to a single address.
func (r *AddressRange) WithStep(step string) *AddressRange {
r.AddressIncRange.WithStep(step)
return r
}
// WithRandom sets the values in the range to be chosen randomly.
func (r *AddressRange) WithRandom() *AddressRange {
r.pb.Random = true
return r
}
// AddressIncRange is a range network addresses that increment by a fixed step.
type AddressIncRange struct {
pb *opb.AddressRange
}
// WithMin sets the minimum value of the range.
func (r *AddressIncRange) WithMin(addr string) *AddressIncRange {
r.pb.Min = addr
return r
}
// WithMax sets the maximum value of the range.
func (r *AddressIncRange) WithMax(max string) *AddressIncRange {
r.pb.Max = max
return r
}
// WithCount sets the number of values in the range; must not be zero.
func (r *AddressIncRange) WithCount(count uint32) *AddressIncRange {
r.pb.Count = count
return r
}
// WithStep sets the step address between values in the range.
// If not specified, defaults to the largest step that fits the specified count of values.
func (r *AddressIncRange) WithStep(step string) *AddressIncRange {
r.pb.Step = step
return r
}
// StringIncRange is an range of strings that increments by a fixed step.
type StringIncRange struct {
pb *opb.StringIncRange
}
// WithStart sets the start of the range.
func (r *StringIncRange) WithStart(start string) *StringIncRange {
r.pb.Start = start
return r
}
// WithStep sets the step between values in the range.
func (r *StringIncRange) WithStep(step string) *StringIncRange {
r.pb.Step = step
return r
}
// UInt32IncRange is an range of 32-bit integers that increments by a fixed
// step.
type UInt32IncRange struct {
pb *opb.UInt32IncRange
}
// WithStart sets the start of the range.
func (r *UInt32IncRange) WithStart(start uint32) *UInt32IncRange {
r.pb.Start = start
return r
}
// WithStep sets the step between values in the range.
func (r *UInt32IncRange) WithStep(step uint32) *UInt32IncRange {
r.pb.Step = step
return r
}
func intRangeSingle(i uint32) *opb.UIntRange {
return &opb.UIntRange{Min: i, Max: i, Count: 1}
}
func addrRangeSingle(a string) *opb.AddressRange {
return &opb.AddressRange{Min: a, Max: a, Count: 1}
}
func newPortRange() *opb.UIntRange {
return &opb.UIntRange{Min: 1, Max: maxPort}
}
func newFlowLabelRange() *opb.UIntRange {
return &opb.UIntRange{Max: maxFlowLabel}
}
func newMACAddrRange() *opb.AddressRange {
return &opb.AddressRange{Min: "00:00:00:00:00:01", Max: "FF:FF:FF:FF:FF:FE"}
}
func newIPv4AddrRange() *opb.AddressRange {
return &opb.AddressRange{Min: "0.0.0.1", Max: "255.255.255.254"}
}
func newIPv6AddrRange() *opb.AddressRange {
return &opb.AddressRange{Min: "::1", Max: "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"}
} | range.go | 0.826502 | 0.594257 | range.go | starcoder |
package sketchy
import (
"github.com/tdewolff/canvas"
)
// defaultCapacity is the number of points a node holds before subdividing.
const defaultCapacity = 4

// QuadTree is a point quadtree over a rectangular region. Each node stores
// up to capacity points directly; once full it subdivides into four child
// quadrants and overflowing points descend into the children.
type QuadTree struct {
	capacity int           // max points stored directly at this node
	points   []IndexPoint  // points held at this node
	boundary Rect          // region covered by this node
	nw       *QuadTree     // child quadrants; all nil until subdivide runs
	ne       *QuadTree
	se       *QuadTree
	sw       *QuadTree
}

// NewQuadTree returns an empty quadtree covering r with the default
// per-node capacity.
func NewQuadTree(r Rect) *QuadTree {
	return &QuadTree{
		capacity: defaultCapacity,
		boundary: r,
	}
}

// NewQuadTreeWithCapacity returns an empty quadtree covering r whose nodes
// hold up to c points before subdividing.
func NewQuadTreeWithCapacity(r Rect, c int) *QuadTree {
	return &QuadTree{
		capacity: c,
		boundary: r,
	}
}

// Insert adds p to the tree, returning false if p lies outside this node's
// boundary. Points already stored at a node stay there after a subdivide;
// only the overflowing point descends into a child quadrant.
func (q *QuadTree) Insert(p IndexPoint) bool {
	if !q.boundary.ContainsPoint(p.Point) {
		return false
	}
	// Leaf with spare room: store the point here. q.ne doubles as the
	// "has children" flag throughout this type.
	if len(q.points) < q.capacity && q.ne == nil {
		q.points = append(q.points, p)
		return true
	}
	if q.ne == nil {
		q.subdivide()
	}
	// Try each child in turn; a point on a shared edge goes to the first
	// child whose boundary accepts it.
	if q.se.Insert(p) {
		return true
	}
	if q.sw.Insert(p) {
		return true
	}
	if q.nw.Insert(p) {
		return true
	}
	if q.ne.Insert(p) {
		return true
	}
	return false
}
// Search returns a pointer to a copy of the stored point whose coordinates
// equal p's, or nil if no such point exists in this subtree.
func (q *QuadTree) Search(p IndexPoint) *IndexPoint {
	if !q.boundary.ContainsPoint(p.Point) {
		return nil
	}
	for i := range q.points {
		if q.points[i].Point.IsEqual(p.Point) {
			found := q.points[i]
			return &found
		}
	}
	if q.ne == nil {
		// Leaf node: nothing more to search.
		return nil
	}
	// Recurse into the children in the same order as before: ne, se, sw, nw.
	for _, child := range []*QuadTree{q.ne, q.se, q.sw, q.nw} {
		if hit := child.Search(p); hit != nil {
			return hit
		}
	}
	return nil
}
// UpdateIndex finds the stored point whose coordinates equal p's, sets its
// Index to index in place, and returns a pointer to the stored entry.
// It returns nil if the point is not in this subtree.
func (q *QuadTree) UpdateIndex(p IndexPoint, index int) *IndexPoint {
	var result *IndexPoint
	if !q.boundary.ContainsPoint(p.Point) {
		return nil
	}
	// Index into the slice (rather than ranging by value) so the stored
	// element itself is mutated.
	for i := range q.points {
		if q.points[i].Point.IsEqual(p.Point) {
			q.points[i].Index = index
			return &q.points[i]
		}
	}
	if q.ne == nil {
		return nil
	}
	result = q.ne.UpdateIndex(p, index)
	if result != nil {
		return result
	}
	result = q.se.UpdateIndex(p, index)
	if result != nil {
		return result
	}
	result = q.sw.UpdateIndex(p, index)
	if result != nil {
		return result
	}
	result = q.nw.UpdateIndex(p, index)
	if result != nil {
		return result
	}
	return nil
}
// Query returns every point in this subtree that lies inside r. The result
// is never nil, even when nothing matches.
func (q *QuadTree) Query(r Rect) []Point {
	results := []Point{}
	if q.boundary.IsDisjoint(r) {
		return results
	}
	for _, ip := range q.points {
		if r.ContainsPoint(ip.Point) {
			results = append(results, ip.Point)
		}
	}
	if q.ne == nil {
		return results
	}
	// Gather matches from each child quadrant (order: ne, se, sw, nw).
	for _, child := range []*QuadTree{q.ne, q.se, q.sw, q.nw} {
		results = append(results, child.Query(r)...)
	}
	return results
}
// QueryExcludeIndex returns every point in this subtree that lies inside r,
// skipping points whose Index equals index (typically the query point
// itself). The result is never nil.
func (q *QuadTree) QueryExcludeIndex(r Rect, index int) []Point {
	var results = []Point{}
	if q.boundary.IsDisjoint(r) {
		return results
	}
	for _, p := range q.points {
		if r.ContainsPoint(p.Point) && p.Index != index {
			results = append(results, p.Point)
		}
	}
	if q.ne == nil {
		return results
	}
	results = append(results, q.ne.QueryExcludeIndex(r, index)...)
	results = append(results, q.se.QueryExcludeIndex(r, index)...)
	results = append(results, q.sw.QueryExcludeIndex(r, index)...)
	results = append(results, q.nw.QueryExcludeIndex(r, index)...)
	return results
}
// QueryCircle returns every point strictly within radius of center.
func (q *QuadTree) QueryCircle(center Point, radius float64) []Point {
	// Bounding box of the circle: it spans 2*radius in each dimension.
	// (W and H were previously set to radius, so the box only covered the
	// quadrant below/left of center and points on the other side of the
	// center were silently missed.)
	rect := Rect{
		X: center.X - radius,
		Y: center.Y - radius,
		W: 2 * radius,
		H: 2 * radius,
	}
	rectQuery := q.Query(rect)
	var results []Point
	R2 := radius * radius
	// Refine the rectangle candidates with an exact (squared) distance test.
	for _, p := range rectQuery {
		if SquaredDistance(center, p) < R2 {
			results = append(results, p)
		}
	}
	return results
}
// QueryCircleExcludeIndex returns every point strictly within radius of
// center, skipping points whose Index equals index.
func (q *QuadTree) QueryCircleExcludeIndex(center Point, radius float64, index int) []Point {
	// Bounding box of the circle: it spans 2*radius in each dimension.
	// (W and H were previously set to radius, which only covered the
	// quadrant below/left of center.)
	rect := Rect{
		X: center.X - radius,
		Y: center.Y - radius,
		W: 2 * radius,
		H: 2 * radius,
	}
	rectQuery := q.QueryExcludeIndex(rect, index)
	var results []Point
	R2 := radius * radius
	// Refine the rectangle candidates with an exact (squared) distance test.
	for _, p := range rectQuery {
		if SquaredDistance(center, p) < R2 {
			results = append(results, p)
		}
	}
	return results
}
// NearestNeighbors returns up to k stored points closest to point, nearest
// first, excluding entries that share point's Index. Returns nil for k <= 0.
func (q *QuadTree) NearestNeighbors(point IndexPoint, k int) []IndexPoint {
	var result []IndexPoint
	if k <= 0 {
		return result
	}
	// Max-heap on distance keeps the k best candidates seen so far.
	ph := NewMaxPointHeap()
	q.pushOnHeap(point, ph, k)
	points := ph.ReportReversed()
	for _, p := range points {
		result = append(result, p.ToIndexPoint())
	}
	return result
}

// Clear removes all points and children, keeping the boundary and capacity.
func (q *QuadTree) Clear() {
	q.points = []IndexPoint{}
	q.ne = nil
	q.se = nil
	q.sw = nil
	q.nw = nil
}

// Size returns the total number of points stored in this subtree.
func (q *QuadTree) Size() int {
	var count int
	count += len(q.points)
	if q.ne == nil {
		return count
	}
	count += q.ne.Size()
	count += q.se.Size()
	count += q.sw.Size()
	count += q.nw.Size()
	return count
}

// Draw renders the cell boundaries of this subtree onto ctx.
func (q *QuadTree) Draw(ctx *canvas.Context) {
	q.boundary.Draw(ctx)
	if q.ne == nil {
		return
	}
	q.ne.Draw(ctx)
	q.se.Draw(ctx)
	q.sw.Draw(ctx)
	q.nw.Draw(ctx)
}

// DrawWithPoints renders cell boundaries and the stored points (drawn at
// size s) of this subtree onto ctx.
func (q *QuadTree) DrawWithPoints(s float64, ctx *canvas.Context) {
	for _, p := range q.points {
		p.Draw(s, ctx)
	}
	q.boundary.Draw(ctx)
	if q.ne == nil {
		return
	}
	q.ne.DrawWithPoints(s, ctx)
	q.se.DrawWithPoints(s, ctx)
	q.sw.DrawWithPoints(s, ctx)
	q.nw.DrawWithPoints(s, ctx)
}

// subdivide splits this node into four equal child quadrants.
// NOTE(review): the ne/se/sw/nw labels assume a particular Y-axis
// orientation (here "ne" is at x+w, y); confirm against Rect's coordinate
// convention if the compass names matter.
func (q *QuadTree) subdivide() {
	x := q.boundary.X
	y := q.boundary.Y
	w := q.boundary.W / 2
	h := q.boundary.H / 2
	q.ne = NewQuadTree(Rect{X: x + w, Y: y, W: w, H: h})
	q.se = NewQuadTree(Rect{X: x + w, Y: y + h, W: w, H: h})
	q.sw = NewQuadTree(Rect{X: x, Y: y + h, W: w, H: h})
	q.nw = NewQuadTree(Rect{X: x, Y: y, W: w, H: h})
}
// pushOnHeap walks this subtree, maintaining in h the k nearest points to
// target (a max-heap keyed by squared distance, so the worst of the current
// best k sits at the top). Points sharing target's Index are skipped.
func (q *QuadTree) pushOnHeap(target IndexPoint, h *PointHeap, k int) {
	for _, p := range q.points {
		if p.Index == target.Index {
			continue
		}
		metric := SquaredDistance(target.Point, p.Point)
		mp := MetricPoint{
			Metric: metric,
			Index:  p.Index,
			Point:  p.Point,
		}
		if h.Len() < k {
			h.Push(mp)
		} else {
			// Heap is full: replace the current farthest only if p is closer.
			if metric < h.Peek().Metric {
				_ = h.Pop()
				h.Push(mp)
			}
		}
	}
	if q.ne == nil {
		return
	}
	// NOTE(review): all four children are always visited; there is no
	// boundary-distance pruning here.
	q.ne.pushOnHeap(target, h, k)
	q.se.pushOnHeap(target, h, k)
	q.sw.pushOnHeap(target, h, k)
	q.nw.pushOnHeap(target, h, k)
} | quadtree.go | 0.743354 | 0.409693 | quadtree.go | starcoder |
package exec
import (
"reflect"
)
//Tracker tracks per-field mutation of a struct as a bitset, with one nested
//Tracker per struct-typed field.
type Tracker struct {
	init     []uint64   // pristine bitset copied back over Mutation on Reset
	Mutation []uint64   // one bit per field; set when the field was mutated
	Nested   []*Tracker // per-field trackers; nil for non-struct fields
}

//Set marks the field addressed by pos as mutated. A multi-element pos
//addresses a field inside the corresponding nested tracker; the bit is set
//at every level along the path.
func (t *Tracker) Set(pos []uint16) {
	Uint64s(t.Mutation).SetBit(int(pos[0]))
	if len(pos) > 1 {
		t.Nested[pos[0]].Set(pos[1:])
	}
}
//Changed returns true if the field addressed by pos was mutated. A
//multi-element pos addresses a field inside the corresponding nested
//tracker, mirroring Set.
func (t *Tracker) Changed(pos ...uint16) bool {
	if len(pos) > 1 {
		// Descend into the nested tracker for pos[0]. The previous code
		// recursed on t itself with pos[1:], so it tested the wrong bitset
		// (the top-level bit of the *last* path element).
		nested := t.Nested[pos[0]]
		if nested == nil {
			return false
		}
		return nested.Changed(pos[1:]...)
	}
	return Uint64s(t.Mutation).HasBit(int(pos[0]))
}
//Reset restores the pristine modification status by copying init back over
//Mutation, recursively clearing all nested trackers.
func (t *Tracker) Reset() {
	copy(t.Mutation, t.init)
	if len(t.Nested) == 0 {
		return
	}
	for i := range t.Nested {
		if t.Nested[i] == nil {
			continue
		}
		t.Nested[i].Reset()
	}
}

//Clone returns a deep copy of the tracker tree with a fresh Mutation bitset.
//The init slice is shared with the original, which is safe as long as init
//is only ever read (as the visible methods do).
func (t *Tracker) Clone() *Tracker {
	var result = &Tracker{
		init:     t.init,
		Mutation: make([]uint64, len(t.init)),
		Nested:   make([]*Tracker, len(t.Nested)),
	}
	for i, item := range t.Nested {
		if item == nil {
			continue
		}
		result.Nested[i] = t.Nested[i].Clone()
	}
	return result
}
//NewTracker builds a Tracker for the given struct type (pointers are
//dereferenced first). It returns nil when target is not a struct. Each
//struct-typed field gets its own nested tracker.
func NewTracker(target reflect.Type) *Tracker {
	if target.Kind() == reflect.Ptr {
		target = target.Elem()
	}
	if target.Kind() != reflect.Struct {
		return nil
	}
	n := target.NumField()
	words := 1 + n/64
	result := &Tracker{
		init:     make([]uint64, words),
		Mutation: make([]uint64, words),
		Nested:   make([]*Tracker, n),
	}
	for i := 0; i < n; i++ {
		fieldType := target.Field(i).Type
		if fieldType.Kind() == reflect.Ptr {
			fieldType = fieldType.Elem()
		}
		if fieldType.Kind() == reflect.Struct {
			result.Nested[i] = NewTracker(fieldType)
		}
	}
	return result
}
// Uint64s is a bitset packed into 64-bit words.
type Uint64s []uint64

//HasBit returns true if the bit at position pos is set.
func (o Uint64s) HasBit(pos int) bool {
	// The old expression `1 << pos % 64` parsed as `(1 << pos) % 64` because
	// << and % share precedence and associate left-to-right, which made
	// every pos >= 6 report false. Parenthesize like SetBit/ClearBit do.
	return o[pos/64]&(1<<(pos%64)) != 0
}
//ClearBit clears the bit at position pos in the set.
func (o Uint64s) ClearBit(pos int) {
	o[index(pos)] &= ^(1 << (pos % 64))
}

//SetBit sets the bit at position pos in the set.
func (o Uint64s) SetBit(pos int) {
	o[index(pos)] |= 1 << (pos % 64)
}
// index returns the offset of the 64-bit word containing bit position pos.
func index(pos int) int {
	return pos / 64
} | exec/tracker.go | 0.599133 | 0.42054 | tracker.go | starcoder |
package marker
import (
"fmt"
"regexp"
"sort"
"strings"
)
// MatcherFunc returns a Match which contains information about found patterns
type MatcherFunc func(string) Match

// Match contains information about found patterns by MatcherFunc
type Match struct {
	Template string   // input with each found pattern replaced by "%s"
	Patterns []string // the patterns, in the order they were found
}

// MatchAll creates a MatcherFunc that matches all occurrences of pattern in
// the given string.
func MatchAll(pattern string) MatcherFunc {
	return func(str string) Match {
		count := strings.Count(str, pattern)
		return Match{
			Template: strings.ReplaceAll(str, pattern, "%s"),
			Patterns: fillSlice(make([]string, count), pattern),
		}
	}
}

// MatchN creates a MatcherFunc that matches the first n occurrences of
// pattern in the given string.
// NOTE(review): a negative n makes strings.Replace substitute every
// occurrence while min(n, count) stays negative, so make([]string, count)
// would panic — presumably callers always pass n >= 0; confirm.
func MatchN(pattern string, n int) MatcherFunc {
	return func(str string) Match {
		count := min(n, strings.Count(str, pattern))
		return Match{
			Template: strings.Replace(str, pattern, "%s", n),
			Patterns: fillSlice(make([]string, count), pattern),
		}
	}
}

// MatchMultiple creates a MatcherFunc that matches all string patterns from
// the given slice, reporting them in order of appearance.
func MatchMultiple(patternsToMatch []string) MatcherFunc {
	return func(str string) Match {
		patternMatchIndexes := findPatternMatchIndexes(str, patternsToMatch)
		patterns := getPatternsInOrder(patternMatchIndexes)
		return Match{
			Template: replaceMultiple(str, patternsToMatch, "%s"),
			Patterns: patterns,
		}
	}
}

// MatchRegexp creates a MatcherFunc that matches the given regexp in the
// given string.
func MatchRegexp(r *regexp.Regexp) MatcherFunc {
	return func(str string) Match {
		return Match{
			Template: r.ReplaceAllString(str, "%s"),
			Patterns: r.FindAllString(str, -1),
		}
	}
}

// MatchTimestamp creates a MatcherFunc that matches the given time layout
// pattern in the given string.
// NOTE(review): an unknown layout yields a nil *regexp.Regexp from the map
// and MatchRegexp will panic on it — verify that all supported layouts are
// present in timestampLayoutRegexps.
func MatchTimestamp(layout string) MatcherFunc {
	return func(str string) Match {
		r := timestampLayoutRegexps[layout]
		return MatchRegexp(r)(str)
	}
}
// MatchSurrounded creates a MatcherFunc that matches the patterns surrounded
// by the given opening and closure strings.
// NOTE(review): the escaped opening is placed inside a [^...] character
// class, where it acts as a set of characters rather than a sequence, so the
// match is only exact for single-character openings.
func MatchSurrounded(opening string, closure string) MatcherFunc {
	metaEscapedOpening := regexp.QuoteMeta(opening)
	metaEscapedClosure := regexp.QuoteMeta(closure)
	matchPattern := fmt.Sprintf("%s[^%s]*%s", metaEscapedOpening, metaEscapedOpening, metaEscapedClosure)
	// Compile once up front instead of on every invocation of the returned
	// matcher (the old code also discarded the Compile error). QuoteMeta
	// output always forms a valid pattern, so MustCompile cannot panic here.
	r := regexp.MustCompile(matchPattern)
	return func(str string) Match {
		return MatchRegexp(r)(str)
	}
}
// MatchBracketSurrounded is a helper utility for easy matching of text
// surrounded by square brackets.
func MatchBracketSurrounded() MatcherFunc {
	return MatchSurrounded("[", "]")
}

// MatchParensSurrounded is a helper utility for easy matching of text
// surrounded in parentheses.
func MatchParensSurrounded() MatcherFunc {
	return MatchSurrounded("(", ")")
}

// MatchEmail creates a MatcherFunc that matches emails which meet the
// conditions of the RFC 5322 standard (as encoded by EmailRegexp).
func MatchEmail() MatcherFunc {
	return func(str string) Match {
		return MatchRegexp(EmailRegexp)(str)
	}
}

// daysOfWeek lists the day names matched by MatchDaysOfWeek, in lowercase
// and capitalized form.
var daysOfWeek = [14]string{"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday",
	"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"}

// MatchDaysOfWeek creates a MatcherFunc that matches days of the week in the
// given string.
func MatchDaysOfWeek() MatcherFunc {
	return func(str string) Match {
		return MatchMultiple(daysOfWeek[:])(str)
	}
}
// findPatternMatchIndexes locates every literal occurrence of the given
// patterns in str and returns a map from match start index to the pattern
// text matched there. Earlier patterns win at equal positions, and
// overlapping later matches are skipped (regexp scanning is leftmost,
// non-overlapping).
func findPatternMatchIndexes(str string, patternsToMatch []string) map[int]string {
	patternMatchIndexes := make(map[int]string)
	// Quote each pattern so regexp metacharacters (+ * ? ( ) [ ...) are
	// matched literally; the previous code spliced raw patterns into the
	// expression, panicking or mis-matching on special characters, and was
	// inconsistent with the literal replacement done by replaceMultiple.
	quoted := make([]string, len(patternsToMatch))
	for i, p := range patternsToMatch {
		quoted[i] = regexp.QuoteMeta(p)
	}
	patternRegex := regexp.MustCompile(strings.Join(quoted, "|"))
	for _, v := range patternRegex.FindAllStringIndex(str, -1) {
		start, end := v[0], v[1]
		patternMatchIndexes[start] = str[start:end]
	}
	return patternMatchIndexes
}
// getPatternsInOrder flattens the position->pattern map into a slice of
// patterns sorted by match position (ascending).
func getPatternsInOrder(patternMatchIndexes map[int]string) []string {
	positions := getKeys(patternMatchIndexes)
	sort.Ints(positions)
	ordered := make([]string, 0, len(patternMatchIndexes))
	for _, pos := range positions {
		ordered = append(ordered, patternMatchIndexes[pos])
	}
	return ordered
}

// getKeys returns the keys of m in unspecified order.
func getKeys(m map[int]string) []int {
	out := make([]int, 0, len(m))
	for k := range m {
		out = append(out, k)
	}
	return out
}
// replaceMultiple substitutes every occurrence of each pattern in str with
// the replacement string and returns the result.
func replaceMultiple(str string, patterns []string, with string) string {
	result := str
	for _, p := range patterns {
		result = strings.ReplaceAll(result, p, with)
	}
	return result
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// fillSlice sets every element of s to v and returns s for convenience.
func fillSlice(s []string, v string) []string {
	for i := range s {
		s[i] = v
	}
	return s
} | matcher.go | 0.730578 | 0.416263 | matcher.go | starcoder |
package main
import (
"fmt"
"math"
)
// cFunc for continuous function. A type definition for convenience.
type cFunc func(float64) float64

// main integrates exp over [-3, 3] with 5 Gauss-Legendre nodes and prints
// the result (exact value: e^3 - e^-3 ≈ 20.0357).
func main() {
	fmt.Println("integral:", glq(math.Exp, -3, 3, 5))
}

// glq integrates f from a to b by Guass-Legendre quadrature using n nodes.
// For the task, it also shows the intermediate values determining the nodes:
// the n roots of the order n Legendre polynomal and the corresponding n
// weights used for the integration.
func glq(f cFunc, a, b float64, n int) float64 {
	x, w := glqNodes(n, f)
	show := func(label string, vs []float64) {
		fmt.Printf("%8s: ", label)
		for _, v := range vs {
			fmt.Printf("%8.5f ", v)
		}
		fmt.Println()
	}
	show("nodes", x)
	show("weights", w)
	var sum float64
	// Affine map from the canonical interval [-1, 1] to [a, b]:
	// t = bma2*x + bpa2, with the Jacobian factor bma2 applied at the end.
	bma2 := (b - a) * .5
	bpa2 := (b + a) * .5
	for i, xi := range x {
		sum += w[i] * f(bma2*xi+bpa2)
	}
	return bma2 * sum
}
// glqNodes computes both nodes and weights for a Gauss-Legendre Quadrature
// integration with n nodes. The second parameter is unused — the node
// computation never needs the integrand — and is kept only so existing call
// sites keep compiling; the blank name makes that explicit. Return values
// have len n.
func glqNodes(n int, _ cFunc) (node []float64, weight []float64) {
	p := legendrePoly(n)
	pn := p[n]
	n64 := float64(n)
	// Derivative of Pn via the identity
	// Pn'(x) = n*(x*Pn(x) - P(n-1)(x)) / (x^2 - 1).
	dn := func(x float64) float64 {
		return (x*pn(x) - p[n-1](x)) * n64 / (x*x - 1)
	}
	node = make([]float64, n)
	for i := range node {
		// Chebyshev-style initial guess for the i-th root, refined by Newton.
		x0 := math.Cos(math.Pi * (float64(i+1) - .25) / (n64 + .5))
		node[i] = newtonRaphson(pn, dn, x0)
	}
	weight = make([]float64, n)
	for i, x := range node {
		dnx := dn(x)
		weight[i] = 2 / ((1 - x*x) * dnx * dnx)
	}
	return
}
// legendrePoly returns closures evaluating the Legendre polynomials
// P0 through Pn, composed via Bonnet's recurrence:
//
//	i*Pi(x) = (2i-1)*x*P(i-1)(x) - (i-1)*P(i-2)(x)
//
// The result has length n+1.
func legendrePoly(n int) []cFunc {
	polys := make([]cFunc, n+1)
	polys[0] = func(float64) float64 { return 1 }
	polys[1] = func(x float64) float64 { return x }
	for i := 2; i <= n; i++ {
		// Capture the per-degree coefficients so each closure is
		// self-contained.
		a := float64(i*2 - 1)
		b := float64(i - 1)
		pPrev := polys[i-1]
		pPrev2 := polys[i-2]
		inv := 1 / float64(i)
		polys[i] = func(x float64) float64 {
			return (a*x*pPrev(x) - b*pPrev2(x)) * inv
		}
	}
	return polys
}
// newtonRaphson is general purpose, although totally primitive, simply
// panicking after a fixed number (30) of iterations without convergence to
// a relative error of about 1e-15. Parameter f must be a continuous
// function, df its derivative, x0 an initial guess.
func newtonRaphson(f, df cFunc, x0 float64) float64 {
	for i := 0; i < 30; i++ {
		x1 := x0 - f(x0)/df(x0)
		// Converged when the step is negligible relative to x0.
		if math.Abs(x1-x0) <= math.Abs(x0*1e-15) {
			return x1
		}
		x0 = x1
	}
	panic("no convergence")
package d3
import (
"math"
"strconv"
"strings"
"github.com/adamcolton/geom/angle"
"github.com/adamcolton/geom/calc/cmpr"
)
// Q is a quaternion used for rotations. B, C and D correspond to the X, Y and Z
// axis.
type Q struct {
	// A is the scalar part; B, C, D are the i, j, k vector components.
	A, B, C, D float64
}
// QX returns the quaternion for a rotation of ang around the X axis.
func QX(ang angle.Rad) Q {
	sin, cos := (ang / 2.0).Sincos()
	return Q{A: cos, B: -sin}
}

// QY returns the quaternion for a rotation of ang around the Y axis.
func QY(ang angle.Rad) Q {
	sin, cos := (ang / 2.0).Sincos()
	return Q{A: cos, C: -sin}
}

// QZ returns the quaternion for a rotation of ang around the Z axis.
func QZ(ang angle.Rad) Q {
	sin, cos := (ang / 2.0).Sincos()
	return Q{A: cos, D: -sin}
}
// QV produces an instance of Q such that Q applied to V{1,0,0} will point in
// the same direction as the argument v.
func QV(v V) Q {
	// First rotate about Z to align the XY heading of v with the X axis.
	s, c := (angle.Atan(v.Y, v.X) / 2.0).Sincos()
	qy := Q{c, 0, 0, -s}
	// NOTE(review): asymmetric tolerance (1e-5 vs -1e-1) — the negative
	// bound looks like a typo for -1e-5; confirm before relying on it.
	if v.Y < 1e-5 && v.Y > -1e-1 {
		qy = Q{1, 0, 0, 0}
	}
	v = qy.TInv().V(v)
	// Then rotate about Y to lift toward v's Z component.
	s, c = (angle.Atan(v.Z, v.X) / 2.0).Sincos()
	qz := Q{c, 0, s, 0}
	// NOTE(review): same asymmetric tolerance as above.
	if v.Z < 1e-5 && v.Z > -1e-1 {
		qz = Q{1, 0, 0, 0}
	}
	out := qz.Product(qy)
	return out
}

// Normalize returns an instance of Q pointint in the same direction with a
// magnitude of 1. The zero quaternion normalizes to the zero value, and a
// quaternion already within tolerance of unit length is returned unchanged.
func (q Q) Normalize() Q {
	d := q.A*q.A + q.B*q.B + q.C*q.C + q.D*q.D
	if d == 0 {
		return Q{}
	}
	const small cmpr.Tolerance = 1e-10
	if small.Equal(1.0, d) {
		return q
	}
	d = math.Sqrt(d)
	return Q{
		A: q.A / d,
		B: q.B / d,
		C: q.C / d,
		D: q.D / d,
	}
}

// Product applies the rotation of q2 to q (Hamilton product q*q2; note that
// quaternion multiplication is not commutative).
func (q Q) Product(q2 Q) Q {
	return Q{
		A: q.A*q2.A - q.B*q2.B - q.C*q2.C - q.D*q2.D,
		B: q.A*q2.B + q.B*q2.A + q.C*q2.D - q.D*q2.C,
		C: q.A*q2.C - q.B*q2.D + q.C*q2.A + q.D*q2.B,
		D: q.A*q2.D + q.B*q2.C - q.C*q2.B + q.D*q2.A,
	}
}

// T produces the rotation transform matrix equal to Q. The formula assumes q
// has unit magnitude (see Normalize).
func (q Q) T() *T {
	return &T{
		{
			1 - 2*q.C*q.C - 2*q.D*q.D,
			2*q.B*q.C + 2*q.A*q.D,
			2*q.B*q.D - 2*q.A*q.C,
			0,
		}, {
			2*q.B*q.C - 2*q.A*q.D,
			1 - 2*q.B*q.B - 2*q.D*q.D,
			2*q.C*q.D + 2*q.A*q.B,
			0,
		}, {
			2*q.B*q.D + 2*q.A*q.C,
			2*q.C*q.D - 2*q.A*q.B,
			1 - 2*q.B*q.B - 2*q.C*q.C,
			0,
		}, {
			0,
			0,
			0,
			1,
		},
	}
}

// TInv fulfills TGenInv: it returns the transform of the conjugate
// quaternion, i.e. the inverse rotation (for unit q).
func (q Q) TInv() *T {
	return Q{q.A, -q.B, -q.C, -q.D}.T()
}

// String fullfils Stringer, formatting the quaternion as
// "Q(a + bi + cj + dk)" with Prec digits after the decimal point.
func (q Q) String() string {
	return strings.Join([]string{
		"Q(",
		strconv.FormatFloat(q.A, 'f', Prec, 64),
		" + ",
		strconv.FormatFloat(q.B, 'f', Prec, 64),
		"i + ",
		strconv.FormatFloat(q.C, 'f', Prec, 64),
		"j + ",
		strconv.FormatFloat(q.D, 'f', Prec, 64),
		"k)",
	}, "")
} | d3/q.go | 0.785473 | 0.677773 | q.go | starcoder |
package main
import (
"flag"
"fmt"
"os"
)
// Parameters is a collection of all program parameters, populated from the
// command line by ParseCommandLine and mutated during the run (NumLPSolves).
type Parameters struct {
	TTName string // Name of the input truth-table file
	MinQ float64 // Minimum quadratic coefficient
	MaxQ float64 // Maximum quadratic coefficient
	MinL float64 // Minimum linear coefficient
	MaxL float64 // Maximum linear coefficient
	RoundTo float64 // Value to which to round all coefficients
	MaxAncillae uint // Maximum number of additional variables we're allowed to add
	ProfName string // Name of a pprof performance-profile file
	Tolerance float64 // Smallest-in-magnitude values for the LP solver to consider nonzero
	NumLPSolves uint64 // Tally of the number of LP solver invocations
	Approach ReductionApproach // How to reduce the exponential search space
	Rank int // The current process's rank in the parallel computation
	NumRanks int // The total number of ranks in the parallel computation
}

// A ReductionApproach defines an approach to reduce the search space.
type ReductionApproach int

// These are the acceptable values for a ReductionApproach.
const (
	ReduceHeuristic ReductionApproach = iota // Use a heuristic approach.
	ReduceBruteForce // Try all possibilities until one succeeds.
	ReduceBruteForceAll // Try all possibilities and tally successes/failures.
)

// String returns a ReductionApproach as a string. Together with Set it makes
// the type usable as a flag.Value. It panics on an out-of-range value, which
// indicates a programming error rather than bad user input.
func (ra *ReductionApproach) String() string {
	switch *ra {
	case ReduceHeuristic:
		return "heuristic"
	case ReduceBruteForce:
		return "brute-force"
	case ReduceBruteForceAll:
		return "full-brute-force"
	default:
		panic(fmt.Sprintf("unexpected ReductionApproach %d", *ra))
	}
}

// Set assigns a ReductionApproach from a string, returning an error for
// unrecognized names (reported by the flag package as a usage error).
func (ra *ReductionApproach) Set(s string) error {
	switch s {
	case "heuristic":
		*ra = ReduceHeuristic
	case "brute-force":
		*ra = ReduceBruteForce
	case "full-brute-force":
		*ra = ReduceBruteForceAll
	default:
		return fmt.Errorf("unexpected reduction approach %q", s)
	}
	return nil
}

// ParseCommandLine parses parameters from the command line into p and
// validates them, aborting the program (via notify.Fatal — presumably a
// fatal logger; confirm) on invalid combinations.
func ParseCommandLine(p *Parameters) {
	// Parse the command line.
	flag.Usage = func() {
		fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [<options>] [<input.tt>]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Float64Var(&p.MinQ, "qmin", -1.0, "Minimum quadratic coefficient")
	flag.Float64Var(&p.MaxQ, "qmax", 1.0, "Maximum quadratic coefficient")
	flag.Float64Var(&p.MinL, "lmin", -1.0, "Minimum linear coefficient")
	flag.Float64Var(&p.MaxL, "lmax", 1.0, "Maximum linear coefficient")
	flag.Float64Var(&p.RoundTo, "round", 0, "Value to which to round coefficients or 0 for no rounding")
	flag.UintVar(&p.MaxAncillae, "max-ancillae", 10, "Maximum number of ancilllary variables the program is allowed to add")
	flag.StringVar(&p.ProfName, "profile", "", "Name of a pprof performance file to write")
	flag.Float64Var(&p.Tolerance, "tolerance", 1e-10, "Smallest-in-magnitude values for the LP solver to consider nonzero")
	flag.Var(&p.Approach, "approach", `Approach to reducing the search space, one of "heuristic", "brute-force", or "full-brute-force" (default: heuristic)`)
	flag.Parse()
	// The optional positional argument names the truth-table input file.
	if flag.NArg() >= 1 {
		p.TTName = flag.Arg(0)
	}
	// Validate the arguments. Only the first failing check is reported.
	switch {
	case p.MinQ >= p.MaxQ:
		notify.Fatal("--qmin must specify a value that is less than --qmax")
	case p.MinL >= p.MaxL:
		notify.Fatal("--lmin must specify a value that is less than --lmax")
	case p.MinL >= 0.0:
		notify.Fatal("--lmin must be negative")
	case p.MinQ >= 0.0:
		notify.Fatal("--qmin must be negative")
	case p.MaxL <= 0.0:
		notify.Fatal("--lmax must be positive")
	case p.MaxQ <= 0.0:
		notify.Fatal("--qmax must be positive")
	case p.RoundTo < 0.0:
		notify.Fatal("--round must be non-negative")
	case p.Tolerance < 0.0:
		notify.Fatal("--tolerance must be non-negative")
	}
} | params.go | 0.633977 | 0.446495 | params.go | starcoder |
package fp
// MergeInt merges map1 and map2 into a new map[int]int. On duplicate keys
// the value from map2 wins. The result is never nil, even when both inputs
// are nil: ranging over a nil map is a no-op, so the explicit nil branches
// of the previous implementation were redundant and have been removed.
func MergeInt(map1, map2 map[int]int) map[int]int {
	newMap := make(map[int]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeIntInt64 merges map1 and map2 into a new map[int]int64; on duplicate
// keys the value from map2 wins. The result is never nil, even when both
// inputs are nil (ranging over a nil map is a no-op, so the explicit nil
// branches of the previous implementation were redundant).
func MergeIntInt64(map1, map2 map[int]int64) map[int]int64 {
	newMap := make(map[int]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntInt32 merges map1 and map2 into a new map[int]int32; on duplicate
// keys the value from map2 wins. The result is never nil.
func MergeIntInt32(map1, map2 map[int]int32) map[int]int32 {
	newMap := make(map[int]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntInt16 merges map1 and map2 into a new map[int]int16; on duplicate
// keys the value from map2 wins. The result is never nil.
func MergeIntInt16(map1, map2 map[int]int16) map[int]int16 {
	newMap := make(map[int]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntInt8 merges map1 and map2 into a new map[int]int8; on duplicate
// keys the value from map2 wins. The result is never nil.
func MergeIntInt8(map1, map2 map[int]int8) map[int]int8 {
	newMap := make(map[int]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntUint merges map1 and map2 into a new map[int]uint; on duplicate
// keys the value from map2 wins. The result is never nil.
func MergeIntUint(map1, map2 map[int]uint) map[int]uint {
	newMap := make(map[int]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntUint64 merges map1 and map2 into a new map[int]uint64; on
// duplicate keys the value from map2 wins. The result is never nil.
func MergeIntUint64(map1, map2 map[int]uint64) map[int]uint64 {
	newMap := make(map[int]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntUint32 merges map1 and map2 into a new map[int]uint32; on
// duplicate keys the value from map2 wins. The result is never nil.
func MergeIntUint32(map1, map2 map[int]uint32) map[int]uint32 {
	newMap := make(map[int]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntUint16 merges map1 and map2 into a new map[int]uint16; on
// duplicate keys the value from map2 wins. The result is never nil.
func MergeIntUint16(map1, map2 map[int]uint16) map[int]uint16 {
	newMap := make(map[int]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntUint8 merges map1 and map2 into a new map[int]uint8; on duplicate
// keys the value from map2 wins. The result is never nil.
func MergeIntUint8(map1, map2 map[int]uint8) map[int]uint8 {
	newMap := make(map[int]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntStr merges map1 and map2 into a new map[int]string; on duplicate
// keys the value from map2 wins. The result is never nil.
func MergeIntStr(map1, map2 map[int]string) map[int]string {
	newMap := make(map[int]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntBool merges map1 and map2 into a new map[int]bool; on duplicate
// keys the value from map2 wins. The result is never nil.
func MergeIntBool(map1, map2 map[int]bool) map[int]bool {
	newMap := make(map[int]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntFloat32 merges map1 and map2 into a new map[int]float32; on
// duplicate keys the value from map2 wins. The result is never nil.
func MergeIntFloat32(map1, map2 map[int]float32) map[int]float32 {
	newMap := make(map[int]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}

// MergeIntFloat64 merges map1 and map2 into a new map[int]float64; on
// duplicate keys the value from map2 wins. The result is never nil.
func MergeIntFloat64(map1, map2 map[int]float64) map[int]float64 {
	newMap := make(map[int]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt64Int takes two inputs: map[int64]int and map[int64]int and merge two maps and returns a new map[int64]int.
func MergeInt64Int(map1, map2 map[int64]int) map[int64]int {
if map1 == nil && map2 == nil {
return map[int64]int{}
}
newMap := make(map[int64]int)
if map1 == nil {
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
if map2 == nil {
for k, v := range map1 {
newMap[k] = v
}
return newMap
}
for k, v := range map1 {
newMap[k] = v
}
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
// MergeInt64 takes two inputs: map[int64]int64 and map[int64]int64 and merge two maps and returns a new map[int64]int64.
func MergeInt64(map1, map2 map[int64]int64) map[int64]int64 {
if map1 == nil && map2 == nil {
return map[int64]int64{}
}
newMap := make(map[int64]int64)
if map1 == nil {
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
if map2 == nil {
for k, v := range map1 {
newMap[k] = v
}
return newMap
}
for k, v := range map1 {
newMap[k] = v
}
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
// MergeInt64Int32 takes two inputs: map[int64]int32 and map[int64]int32 and merge two maps and returns a new map[int64]int32.
func MergeInt64Int32(map1, map2 map[int64]int32) map[int64]int32 {
if map1 == nil && map2 == nil {
return map[int64]int32{}
}
newMap := make(map[int64]int32)
if map1 == nil {
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
if map2 == nil {
for k, v := range map1 {
newMap[k] = v
}
return newMap
}
for k, v := range map1 {
newMap[k] = v
}
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
// MergeInt64Int16 takes two inputs: map[int64]int16 and map[int64]int16 and merge two maps and returns a new map[int64]int16.
func MergeInt64Int16(map1, map2 map[int64]int16) map[int64]int16 {
if map1 == nil && map2 == nil {
return map[int64]int16{}
}
newMap := make(map[int64]int16)
if map1 == nil {
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
if map2 == nil {
for k, v := range map1 {
newMap[k] = v
}
return newMap
}
for k, v := range map1 {
newMap[k] = v
}
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
// MergeInt64Int8 takes two inputs: map[int64]int8 and map[int64]int8 and merge two maps and returns a new map[int64]int8.
func MergeInt64Int8(map1, map2 map[int64]int8) map[int64]int8 {
if map1 == nil && map2 == nil {
return map[int64]int8{}
}
newMap := make(map[int64]int8)
if map1 == nil {
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
if map2 == nil {
for k, v := range map1 {
newMap[k] = v
}
return newMap
}
for k, v := range map1 {
newMap[k] = v
}
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
// MergeInt64Uint takes two inputs: map[int64]uint and map[int64]uint and merge two maps and returns a new map[int64]uint.
func MergeInt64Uint(map1, map2 map[int64]uint) map[int64]uint {
if map1 == nil && map2 == nil {
return map[int64]uint{}
}
newMap := make(map[int64]uint)
if map1 == nil {
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
if map2 == nil {
for k, v := range map1 {
newMap[k] = v
}
return newMap
}
for k, v := range map1 {
newMap[k] = v
}
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
// MergeInt64Uint64 takes two inputs: map[int64]uint64 and map[int64]uint64 and merge two maps and returns a new map[int64]uint64.
func MergeInt64Uint64(map1, map2 map[int64]uint64) map[int64]uint64 {
if map1 == nil && map2 == nil {
return map[int64]uint64{}
}
newMap := make(map[int64]uint64)
if map1 == nil {
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
if map2 == nil {
for k, v := range map1 {
newMap[k] = v
}
return newMap
}
for k, v := range map1 {
newMap[k] = v
}
for k, v := range map2 {
newMap[k] = v
}
return newMap
}
// MergeInt64Uint32 merges map1 and map2 into a new map[int64]uint32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt64Uint32(map1, map2 map[int64]uint32) map[int64]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int64]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt64Uint16 merges map1 and map2 into a new map[int64]uint16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt64Uint16(map1, map2 map[int64]uint16) map[int64]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int64]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt64Uint8 merges map1 and map2 into a new map[int64]uint8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt64Uint8(map1, map2 map[int64]uint8) map[int64]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int64]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt64Str merges map1 and map2 into a new map[int64]string; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt64Str(map1, map2 map[int64]string) map[int64]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int64]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt64Bool merges map1 and map2 into a new map[int64]bool; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt64Bool(map1, map2 map[int64]bool) map[int64]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int64]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt64Float32 merges map1 and map2 into a new map[int64]float32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt64Float32(map1, map2 map[int64]float32) map[int64]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int64]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt64Float64 merges map1 and map2 into a new map[int64]float64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt64Float64(map1, map2 map[int64]float64) map[int64]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int64]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Int merges map1 and map2 into a new map[int32]int; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Int(map1, map2 map[int32]int) map[int32]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Int64 merges map1 and map2 into a new map[int32]int64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Int64(map1, map2 map[int32]int64) map[int32]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32 merges map1 and map2 into a new map[int32]int32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32(map1, map2 map[int32]int32) map[int32]int32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Int16 merges map1 and map2 into a new map[int32]int16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Int16(map1, map2 map[int32]int16) map[int32]int16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Int8 merges map1 and map2 into a new map[int32]int8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Int8(map1, map2 map[int32]int8) map[int32]int8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Uint merges map1 and map2 into a new map[int32]uint; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Uint(map1, map2 map[int32]uint) map[int32]uint {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Uint64 merges map1 and map2 into a new map[int32]uint64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Uint64(map1, map2 map[int32]uint64) map[int32]uint64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Uint32 merges map1 and map2 into a new map[int32]uint32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Uint32(map1, map2 map[int32]uint32) map[int32]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Uint16 merges map1 and map2 into a new map[int32]uint16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Uint16(map1, map2 map[int32]uint16) map[int32]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Uint8 merges map1 and map2 into a new map[int32]uint8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Uint8(map1, map2 map[int32]uint8) map[int32]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Str merges map1 and map2 into a new map[int32]string; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Str(map1, map2 map[int32]string) map[int32]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Bool merges map1 and map2 into a new map[int32]bool; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Bool(map1, map2 map[int32]bool) map[int32]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Float32 merges map1 and map2 into a new map[int32]float32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Float32(map1, map2 map[int32]float32) map[int32]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt32Float64 merges map1 and map2 into a new map[int32]float64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt32Float64(map1, map2 map[int32]float64) map[int32]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int32]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Int merges map1 and map2 into a new map[int16]int; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Int(map1, map2 map[int16]int) map[int16]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Int64 merges map1 and map2 into a new map[int16]int64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Int64(map1, map2 map[int16]int64) map[int16]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Int32 merges map1 and map2 into a new map[int16]int32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Int32(map1, map2 map[int16]int32) map[int16]int32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16 merges map1 and map2 into a new map[int16]int16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16(map1, map2 map[int16]int16) map[int16]int16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Int8 merges map1 and map2 into a new map[int16]int8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Int8(map1, map2 map[int16]int8) map[int16]int8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Uint merges map1 and map2 into a new map[int16]uint; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Uint(map1, map2 map[int16]uint) map[int16]uint {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Uint64 merges map1 and map2 into a new map[int16]uint64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Uint64(map1, map2 map[int16]uint64) map[int16]uint64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Uint32 merges map1 and map2 into a new map[int16]uint32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Uint32(map1, map2 map[int16]uint32) map[int16]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Uint16 merges map1 and map2 into a new map[int16]uint16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Uint16(map1, map2 map[int16]uint16) map[int16]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Uint8 merges map1 and map2 into a new map[int16]uint8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Uint8(map1, map2 map[int16]uint8) map[int16]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Str merges map1 and map2 into a new map[int16]string; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Str(map1, map2 map[int16]string) map[int16]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Bool merges map1 and map2 into a new map[int16]bool; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Bool(map1, map2 map[int16]bool) map[int16]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Float32 merges map1 and map2 into a new map[int16]float32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Float32(map1, map2 map[int16]float32) map[int16]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt16Float64 merges map1 and map2 into a new map[int16]float64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt16Float64(map1, map2 map[int16]float64) map[int16]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int16]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Int merges map1 and map2 into a new map[int8]int; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Int(map1, map2 map[int8]int) map[int8]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Int64 merges map1 and map2 into a new map[int8]int64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Int64(map1, map2 map[int8]int64) map[int8]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Int32 merges map1 and map2 into a new map[int8]int32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Int32(map1, map2 map[int8]int32) map[int8]int32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Int16 merges map1 and map2 into a new map[int8]int16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Int16(map1, map2 map[int8]int16) map[int8]int16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8 merges map1 and map2 into a new map[int8]int8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8(map1, map2 map[int8]int8) map[int8]int8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Uint merges map1 and map2 into a new map[int8]uint; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Uint(map1, map2 map[int8]uint) map[int8]uint {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Uint64 merges map1 and map2 into a new map[int8]uint64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Uint64(map1, map2 map[int8]uint64) map[int8]uint64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Uint32 merges map1 and map2 into a new map[int8]uint32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Uint32(map1, map2 map[int8]uint32) map[int8]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Uint16 merges map1 and map2 into a new map[int8]uint16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Uint16(map1, map2 map[int8]uint16) map[int8]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Uint8 merges map1 and map2 into a new map[int8]uint8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Uint8(map1, map2 map[int8]uint8) map[int8]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Str merges map1 and map2 into a new map[int8]string; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Str(map1, map2 map[int8]string) map[int8]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Bool merges map1 and map2 into a new map[int8]bool; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Bool(map1, map2 map[int8]bool) map[int8]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Float32 merges map1 and map2 into a new map[int8]float32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Float32(map1, map2 map[int8]float32) map[int8]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeInt8Float64 merges map1 and map2 into a new map[int8]float64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeInt8Float64(map1, map2 map[int8]float64) map[int8]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[int8]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintInt merges map1 and map2 into a new map[uint]int; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintInt(map1, map2 map[uint]int) map[uint]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintInt64 merges map1 and map2 into a new map[uint]int64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintInt64(map1, map2 map[uint]int64) map[uint]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintInt32 merges map1 and map2 into a new map[uint]int32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintInt32(map1, map2 map[uint]int32) map[uint]int32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintInt16 merges map1 and map2 into a new map[uint]int16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintInt16(map1, map2 map[uint]int16) map[uint]int16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintInt8 merges map1 and map2 into a new map[uint]int8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintInt8(map1, map2 map[uint]int8) map[uint]int8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUint merges map1 and map2 into a new map[uint]uint; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUint(map1, map2 map[uint]uint) map[uint]uint {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintUint64 merges map1 and map2 into a new map[uint]uint64; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintUint64(map1, map2 map[uint]uint64) map[uint]uint64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintUint32 merges map1 and map2 into a new map[uint]uint32; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintUint32(map1, map2 map[uint]uint32) map[uint]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintUint16 merges map1 and map2 into a new map[uint]uint16; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintUint16(map1, map2 map[uint]uint16) map[uint]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintUint8 merges map1 and map2 into a new map[uint]uint8; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintUint8(map1, map2 map[uint]uint8) map[uint]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintStr merges map1 and map2 into a new map[uint]string; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintStr(map1, map2 map[uint]string) map[uint]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintBool merges map1 and map2 into a new map[uint]bool; on duplicate
// keys map2 wins. Nil inputs are treated as empty; the result is never nil.
func MergeUintBool(map1, map2 map[uint]bool) map[uint]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of no overlapping keys.
	newMap := make(map[uint]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeUintFloat32 returns a new map[uint]float32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUintFloat32(map1, map2 map[uint]float32) map[uint]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUintFloat64 returns a new map[uint]float64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUintFloat64(map1, map2 map[uint]float64) map[uint]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Int returns a new map[uint64]int containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Int(map1, map2 map[uint64]int) map[uint64]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Int64 returns a new map[uint64]int64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Int64(map1, map2 map[uint64]int64) map[uint64]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Int32 returns a new map[uint64]int32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Int32(map1, map2 map[uint64]int32) map[uint64]int32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Int16 returns a new map[uint64]int16 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Int16(map1, map2 map[uint64]int16) map[uint64]int16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Int8 returns a new map[uint64]int8 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Int8(map1, map2 map[uint64]int8) map[uint64]int8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Uint returns a new map[uint64]uint containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Uint(map1, map2 map[uint64]uint) map[uint64]uint {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64 returns a new map[uint64]uint64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64(map1, map2 map[uint64]uint64) map[uint64]uint64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Uint32 returns a new map[uint64]uint32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Uint32(map1, map2 map[uint64]uint32) map[uint64]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Uint16 returns a new map[uint64]uint16 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Uint16(map1, map2 map[uint64]uint16) map[uint64]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Uint8 returns a new map[uint64]uint8 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Uint8(map1, map2 map[uint64]uint8) map[uint64]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Str returns a new map[uint64]string containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Str(map1, map2 map[uint64]string) map[uint64]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Bool returns a new map[uint64]bool containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Bool(map1, map2 map[uint64]bool) map[uint64]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Float32 returns a new map[uint64]float32 containing all entries
// of map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Float32(map1, map2 map[uint64]float32) map[uint64]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint64Float64 returns a new map[uint64]float64 containing all entries
// of map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint64Float64(map1, map2 map[uint64]float64) map[uint64]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint64]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Int returns a new map[uint32]int containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Int(map1, map2 map[uint32]int) map[uint32]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Int64 returns a new map[uint32]int64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Int64(map1, map2 map[uint32]int64) map[uint32]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Int32 returns a new map[uint32]int32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Int32(map1, map2 map[uint32]int32) map[uint32]int32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Int16 returns a new map[uint32]int16 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Int16(map1, map2 map[uint32]int16) map[uint32]int16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Int8 returns a new map[uint32]int8 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Int8(map1, map2 map[uint32]int8) map[uint32]int8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Uint returns a new map[uint32]uint containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Uint(map1, map2 map[uint32]uint) map[uint32]uint {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Uint64 returns a new map[uint32]uint64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Uint64(map1, map2 map[uint32]uint64) map[uint32]uint64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32 returns a new map[uint32]uint32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32(map1, map2 map[uint32]uint32) map[uint32]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Uint16 returns a new map[uint32]uint16 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Uint16(map1, map2 map[uint32]uint16) map[uint32]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Uint8 returns a new map[uint32]uint8 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Uint8(map1, map2 map[uint32]uint8) map[uint32]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Str returns a new map[uint32]string containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Str(map1, map2 map[uint32]string) map[uint32]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Bool returns a new map[uint32]bool containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Bool(map1, map2 map[uint32]bool) map[uint32]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Float32 returns a new map[uint32]float32 containing all entries
// of map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Float32(map1, map2 map[uint32]float32) map[uint32]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint32Float64 returns a new map[uint32]float64 containing all entries
// of map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint32Float64(map1, map2 map[uint32]float64) map[uint32]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint32]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Int returns a new map[uint16]int containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Int(map1, map2 map[uint16]int) map[uint16]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Int64 returns a new map[uint16]int64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Int64(map1, map2 map[uint16]int64) map[uint16]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Int32 returns a new map[uint16]int32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Int32(map1, map2 map[uint16]int32) map[uint16]int32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Int16 returns a new map[uint16]int16 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Int16(map1, map2 map[uint16]int16) map[uint16]int16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Int8 returns a new map[uint16]int8 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Int8(map1, map2 map[uint16]int8) map[uint16]int8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Uint returns a new map[uint16]uint containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Uint(map1, map2 map[uint16]uint) map[uint16]uint {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Uint64 returns a new map[uint16]uint64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Uint64(map1, map2 map[uint16]uint64) map[uint16]uint64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Uint32 returns a new map[uint16]uint32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Uint32(map1, map2 map[uint16]uint32) map[uint16]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16 returns a new map[uint16]uint16 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16(map1, map2 map[uint16]uint16) map[uint16]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Uint8 returns a new map[uint16]uint8 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Uint8(map1, map2 map[uint16]uint8) map[uint16]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Str returns a new map[uint16]string containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Str(map1, map2 map[uint16]string) map[uint16]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Bool returns a new map[uint16]bool containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Bool(map1, map2 map[uint16]bool) map[uint16]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Float32 returns a new map[uint16]float32 containing all entries
// of map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Float32(map1, map2 map[uint16]float32) map[uint16]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint16Float64 returns a new map[uint16]float64 containing all entries
// of map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint16Float64(map1, map2 map[uint16]float64) map[uint16]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint16]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Int returns a new map[uint8]int containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Int(map1, map2 map[uint8]int) map[uint8]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Int64 returns a new map[uint8]int64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Int64(map1, map2 map[uint8]int64) map[uint8]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Int32 returns a new map[uint8]int32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Int32(map1, map2 map[uint8]int32) map[uint8]int32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Int16 returns a new map[uint8]int16 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Int16(map1, map2 map[uint8]int16) map[uint8]int16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Int8 returns a new map[uint8]int8 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Int8(map1, map2 map[uint8]int8) map[uint8]int8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Uint returns a new map[uint8]uint containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Uint(map1, map2 map[uint8]uint) map[uint8]uint {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Uint64 returns a new map[uint8]uint64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Uint64(map1, map2 map[uint8]uint64) map[uint8]uint64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Uint32 returns a new map[uint8]uint32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Uint32(map1, map2 map[uint8]uint32) map[uint8]uint32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Uint16 returns a new map[uint8]uint16 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Uint16(map1, map2 map[uint8]uint16) map[uint8]uint16 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8 returns a new map[uint8]uint8 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8(map1, map2 map[uint8]uint8) map[uint8]uint8 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Str returns a new map[uint8]string containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Str(map1, map2 map[uint8]string) map[uint8]string {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Bool returns a new map[uint8]bool containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Bool(map1, map2 map[uint8]bool) map[uint8]bool {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Float32 returns a new map[uint8]float32 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Float32(map1, map2 map[uint8]float32) map[uint8]float32 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeUint8Float64 returns a new map[uint8]float64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeUint8Float64(map1, map2 map[uint8]float64) map[uint8]float64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[uint8]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeStrInt returns a new map[string]int containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeStrInt(map1, map2 map[string]int) map[string]int {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[string]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeStrInt64 returns a new map[string]int64 containing all entries of
// map1 and map2; on duplicate keys the value from map2 wins. The result is
// never nil — nil inputs are treated as empty maps.
func MergeStrInt64(map1, map2 map[string]int64) map[string]int64 {
	// Ranging over a nil map is a no-op, so no explicit nil checks are needed.
	// Pre-size for the worst case of fully disjoint key sets.
	newMap := make(map[string]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v // map2 overrides map1
	}
	return newMap
}
// MergeStrInt32 takes two inputs: map[string]int32 and map[string]int32 and merge two maps and returns a new map[string]int32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrInt32(map1, map2 map[string]int32) map[string]int32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrInt16 takes two inputs: map[string]int16 and map[string]int16 and merge two maps and returns a new map[string]int16.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrInt16(map1, map2 map[string]int16) map[string]int16 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrInt8 takes two inputs: map[string]int8 and map[string]int8 and merge two maps and returns a new map[string]int8.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrInt8(map1, map2 map[string]int8) map[string]int8 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrUint takes two inputs: map[string]uint and map[string]uint and merge two maps and returns a new map[string]uint.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrUint(map1, map2 map[string]uint) map[string]uint {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrUint64 takes two inputs: map[string]uint64 and map[string]uint64 and merge two maps and returns a new map[string]uint64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrUint64(map1, map2 map[string]uint64) map[string]uint64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrUint32 takes two inputs: map[string]uint32 and map[string]uint32 and merge two maps and returns a new map[string]uint32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrUint32(map1, map2 map[string]uint32) map[string]uint32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrUint16 takes two inputs: map[string]uint16 and map[string]uint16 and merge two maps and returns a new map[string]uint16.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrUint16(map1, map2 map[string]uint16) map[string]uint16 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrUint8 takes two inputs: map[string]uint8 and map[string]uint8 and merge two maps and returns a new map[string]uint8.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrUint8(map1, map2 map[string]uint8) map[string]uint8 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStr takes two inputs: map[string]string and map[string]string and merge two maps and returns a new map[string]string.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStr(map1, map2 map[string]string) map[string]string {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrBool takes two inputs: map[string]bool and map[string]bool and merge two maps and returns a new map[string]bool.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrBool(map1, map2 map[string]bool) map[string]bool {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrFloat32 takes two inputs: map[string]float32 and map[string]float32 and merge two maps and returns a new map[string]float32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrFloat32(map1, map2 map[string]float32) map[string]float32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeStrFloat64 takes two inputs: map[string]float64 and map[string]float64 and merge two maps and returns a new map[string]float64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeStrFloat64(map1, map2 map[string]float64) map[string]float64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[string]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolInt takes two inputs: map[bool]int and map[bool]int and merge two maps and returns a new map[bool]int.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolInt(map1, map2 map[bool]int) map[bool]int {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolInt64 takes two inputs: map[bool]int64 and map[bool]int64 and merge two maps and returns a new map[bool]int64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolInt64(map1, map2 map[bool]int64) map[bool]int64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolInt32 takes two inputs: map[bool]int32 and map[bool]int32 and merge two maps and returns a new map[bool]int32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolInt32(map1, map2 map[bool]int32) map[bool]int32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolInt16 takes two inputs: map[bool]int16 and map[bool]int16 and merge two maps and returns a new map[bool]int16.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolInt16(map1, map2 map[bool]int16) map[bool]int16 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolInt8 takes two inputs: map[bool]int8 and map[bool]int8 and merge two maps and returns a new map[bool]int8.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolInt8(map1, map2 map[bool]int8) map[bool]int8 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolUint takes two inputs: map[bool]uint and map[bool]uint and merge two maps and returns a new map[bool]uint.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolUint(map1, map2 map[bool]uint) map[bool]uint {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolUint64 takes two inputs: map[bool]uint64 and map[bool]uint64 and merge two maps and returns a new map[bool]uint64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolUint64(map1, map2 map[bool]uint64) map[bool]uint64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolUint32 takes two inputs: map[bool]uint32 and map[bool]uint32 and merge two maps and returns a new map[bool]uint32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolUint32(map1, map2 map[bool]uint32) map[bool]uint32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolUint16 takes two inputs: map[bool]uint16 and map[bool]uint16 and merge two maps and returns a new map[bool]uint16.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolUint16(map1, map2 map[bool]uint16) map[bool]uint16 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolUint8 takes two inputs: map[bool]uint8 and map[bool]uint8 and merge two maps and returns a new map[bool]uint8.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolUint8(map1, map2 map[bool]uint8) map[bool]uint8 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolStr takes two inputs: map[bool]string and map[bool]string and merge two maps and returns a new map[bool]string.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolStr(map1, map2 map[bool]string) map[bool]string {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBool takes two inputs: map[bool]bool and map[bool]bool and merge two maps and returns a new map[bool]bool.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBool(map1, map2 map[bool]bool) map[bool]bool {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolFloat32 takes two inputs: map[bool]float32 and map[bool]float32 and merge two maps and returns a new map[bool]float32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolFloat32(map1, map2 map[bool]float32) map[bool]float32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeBoolFloat64 takes two inputs: map[bool]float64 and map[bool]float64 and merge two maps and returns a new map[bool]float64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeBoolFloat64(map1, map2 map[bool]float64) map[bool]float64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[bool]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Int takes two inputs: map[float32]int and map[float32]int and merge two maps and returns a new map[float32]int.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Int(map1, map2 map[float32]int) map[float32]int {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Int64 takes two inputs: map[float32]int64 and map[float32]int64 and merge two maps and returns a new map[float32]int64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Int64(map1, map2 map[float32]int64) map[float32]int64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Int32 takes two inputs: map[float32]int32 and map[float32]int32 and merge two maps and returns a new map[float32]int32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Int32(map1, map2 map[float32]int32) map[float32]int32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Int16 takes two inputs: map[float32]int16 and map[float32]int16 and merge two maps and returns a new map[float32]int16.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Int16(map1, map2 map[float32]int16) map[float32]int16 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Int8 takes two inputs: map[float32]int8 and map[float32]int8 and merge two maps and returns a new map[float32]int8.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Int8(map1, map2 map[float32]int8) map[float32]int8 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Uint takes two inputs: map[float32]uint and map[float32]uint and merge two maps and returns a new map[float32]uint.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Uint(map1, map2 map[float32]uint) map[float32]uint {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Uint64 takes two inputs: map[float32]uint64 and map[float32]uint64 and merge two maps and returns a new map[float32]uint64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Uint64(map1, map2 map[float32]uint64) map[float32]uint64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Uint32 takes two inputs: map[float32]uint32 and map[float32]uint32 and merge two maps and returns a new map[float32]uint32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Uint32(map1, map2 map[float32]uint32) map[float32]uint32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Uint16 takes two inputs: map[float32]uint16 and map[float32]uint16 and merge two maps and returns a new map[float32]uint16.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Uint16(map1, map2 map[float32]uint16) map[float32]uint16 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Uint8 takes two inputs: map[float32]uint8 and map[float32]uint8 and merge two maps and returns a new map[float32]uint8.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Uint8(map1, map2 map[float32]uint8) map[float32]uint8 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Str takes two inputs: map[float32]string and map[float32]string and merge two maps and returns a new map[float32]string.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Str(map1, map2 map[float32]string) map[float32]string {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Bool takes two inputs: map[float32]bool and map[float32]bool and merge two maps and returns a new map[float32]bool.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Bool(map1, map2 map[float32]bool) map[float32]bool {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32 takes two inputs: map[float32]float32 and map[float32]float32 and merge two maps and returns a new map[float32]float32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32(map1, map2 map[float32]float32) map[float32]float32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat32Float64 takes two inputs: map[float32]float64 and map[float32]float64 and merge two maps and returns a new map[float32]float64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat32Float64(map1, map2 map[float32]float64) map[float32]float64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float32]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Int takes two inputs: map[float64]int and map[float64]int and merge two maps and returns a new map[float64]int.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Int(map1, map2 map[float64]int) map[float64]int {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]int, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Int64 takes two inputs: map[float64]int64 and map[float64]int64 and merge two maps and returns a new map[float64]int64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Int64(map1, map2 map[float64]int64) map[float64]int64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]int64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Int32 takes two inputs: map[float64]int32 and map[float64]int32 and merge two maps and returns a new map[float64]int32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Int32(map1, map2 map[float64]int32) map[float64]int32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]int32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Int16 takes two inputs: map[float64]int16 and map[float64]int16 and merge two maps and returns a new map[float64]int16.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Int16(map1, map2 map[float64]int16) map[float64]int16 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]int16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Int8 takes two inputs: map[float64]int8 and map[float64]int8 and merge two maps and returns a new map[float64]int8.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Int8(map1, map2 map[float64]int8) map[float64]int8 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]int8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Uint takes two inputs: map[float64]uint and map[float64]uint and merge two maps and returns a new map[float64]uint.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Uint(map1, map2 map[float64]uint) map[float64]uint {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]uint, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Uint64 takes two inputs: map[float64]uint64 and map[float64]uint64 and merge two maps and returns a new map[float64]uint64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Uint64(map1, map2 map[float64]uint64) map[float64]uint64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]uint64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Uint32 takes two inputs: map[float64]uint32 and map[float64]uint32 and merge two maps and returns a new map[float64]uint32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Uint32(map1, map2 map[float64]uint32) map[float64]uint32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]uint32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Uint16 takes two inputs: map[float64]uint16 and map[float64]uint16 and merge two maps and returns a new map[float64]uint16.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Uint16(map1, map2 map[float64]uint16) map[float64]uint16 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]uint16, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Uint8 takes two inputs: map[float64]uint8 and map[float64]uint8 and merge two maps and returns a new map[float64]uint8.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Uint8(map1, map2 map[float64]uint8) map[float64]uint8 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]uint8, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Str takes two inputs: map[float64]string and map[float64]string and merge two maps and returns a new map[float64]string.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Str(map1, map2 map[float64]string) map[float64]string {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]string, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Bool takes two inputs: map[float64]bool and map[float64]bool and merge two maps and returns a new map[float64]bool.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Bool(map1, map2 map[float64]bool) map[float64]bool {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]bool, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64Float32 takes two inputs: map[float64]float32 and map[float64]float32 and merge two maps and returns a new map[float64]float32.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64Float32(map1, map2 map[float64]float32) map[float64]float32 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]float32, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// MergeFloat64 takes two inputs: map[float64]float64 and map[float64]float64 and merge two maps and returns a new map[float64]float64.
// Keys present in both maps take their value from map2. Nil inputs behave as
// empty maps; the result is always a non-nil map.
func MergeFloat64(map1, map2 map[float64]float64) map[float64]float64 {
	// Ranging over a nil map iterates zero times, so the explicit nil-check
	// branches of the original are unnecessary; pre-size for both inputs.
	newMap := make(map[float64]float64, len(map1)+len(map2))
	for k, v := range map1 {
		newMap[k] = v
	}
	for k, v := range map2 {
		newMap[k] = v
	}
	return newMap
}
// Package sqlserver handles schema and data migrations from sqlserver.
package sqlserver
import (
"github.com/cloudspannerecosystem/harbourbridge/common/constants"
"github.com/cloudspannerecosystem/harbourbridge/internal"
"github.com/cloudspannerecosystem/harbourbridge/schema"
"github.com/cloudspannerecosystem/harbourbridge/spanner/ddl"
)
const (
stringLimit int64 = 2621440
)
// ToDdlImpl is the SQL Server specific implementation of the ToDdl
// interface. It carries no state; all behavior lives in its methods.
type ToDdlImpl struct {
}
// ToSpannerType maps a scalar source schema type (defined by id and
// mods) into a Spanner type. This is the core source-to-Spanner type
// mapping. It returns the Spanner type together with any type
// conversion issues encountered; for the experimental PostgreSQL
// target, the mapped type is further adjusted via overrideExperimentalType.
func (tdi ToDdlImpl) ToSpannerType(conv *internal.Conv, columnType schema.Type) (ddl.Type, []internal.SchemaIssue) {
	spannerType, issues := toSpannerTypeInternal(conv, columnType.Name, columnType.Mods)
	if conv.TargetDb != constants.TargetExperimentalPostgres {
		return spannerType, issues
	}
	return overrideExperimentalType(columnType, spannerType), issues
}
// toSpannerTypeInternal maps a scalar source schema type (defined by id and
// mods) into a Spanner type. This is the core source-to-Spanner type
// mapping. It returns the Spanner type and a list of type conversion
// issues encountered. Types that widen (e.g. int -> INT64) or lose
// fidelity (e.g. datetime -> TIMESTAMP) report a matching SchemaIssue.
func toSpannerTypeInternal(conv *internal.Conv, id string, mods []int64) (ddl.Type, []internal.SchemaIssue) {
	switch id {
	case "bit":
		return ddl.Type{Name: ddl.Bool}, nil
	case "uniqueidentifier":
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "binary", "varbinary", "image":
		return ddl.Type{Name: ddl.Bytes, Len: ddl.MaxLength}, nil
	case "date":
		return ddl.Type{Name: ddl.Date}, nil
	case "float", "real":
		return ddl.Type{Name: ddl.Float64}, []internal.SchemaIssue{internal.Widened}
	case "bigint":
		return ddl.Type{Name: ddl.Int64}, nil
	case "tinyint", "smallint", "int":
		return ddl.Type{Name: ddl.Int64}, []internal.SchemaIssue{internal.Widened}
	case "numeric", "money", "smallmoney", "decimal":
		return ddl.Type{Name: ddl.Numeric}, nil
	case "ntext", "text", "xml":
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "smalldatetime", "datetimeoffset", "datetime2", "datetime":
		return ddl.Type{Name: ddl.Timestamp}, []internal.SchemaIssue{internal.Timestamp}
	case "time":
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, []internal.SchemaIssue{internal.Time}
	case "varchar", "char", "nvarchar", "nchar":
		// No length modifier available: default to STRING(MAX).
		// (The original code indexed mods[0] unguarded here, panicking
		// on an empty mods slice.)
		if len(mods) == 0 {
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		}
		// Sets the length only if the source length falls within the
		// allowed length range in Spanner.
		if mods[0] > 0 && mods[0] <= stringLimit {
			return ddl.Type{Name: ddl.String, Len: mods[0]}, nil
		}
		// Raise issue when
		// 1. mods[0] value greater than string limit
		// 2. mods[0] value less than 0 (SQL Server returns -1 when the
		//    type length is set to max)
		if mods[0] > stringLimit || mods[0] < 0 {
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, []internal.SchemaIssue{internal.StringOverflow}
		}
		// mods[0] == 0: fall back to STRING(MAX) without an issue.
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "timestamp":
		// SQL Server "timestamp" is a row-version counter, mapped to INT64.
		return ddl.Type{Name: ddl.Int64}, nil
	default:
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, []internal.SchemaIssue{internal.NoGoodType}
	}
}
// Override the types to map to experimental postgres types.
func overrideExperimentalType(columnType schema.Type, originalType ddl.Type) ddl.Type {
if columnType.Name == "date" {
return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}
}
return originalType
} | sources/sqlserver/toddl.go | 0.631708 | 0.419291 | toddl.go | starcoder |
package pe
import (
"github.com/gonum/matrix/mat64"
"github.com/volkerp/goquadtree/quadtree"
)
/*
entity.go
by <NAME>
Structure and functionality of an entity.
*/
//Entity Constants
//Shape identifiers stored in Entity.shape to select the rigid-body kind.
const (
	ShapeCircle int = 0
	ShapeRectangle int = 1
)
//Entity -: Data structure containing Physics data for a single rigid body.
type Entity struct {
	ID int // Unique ID
	//Movement
	LinearVelocity *mat64.Vector // 2D velocity vector
	Mass           float64
	InverseMass    float64       // cached 1/Mass
	LinearPosition *mat64.Vector // 2D position vector; also the center of mass
	Force          *mat64.Vector // 2D accumulated force vector
	shape          int           // The kind of Rigidbody this is using (ShapeCircle or ShapeRectangle).
	//Circle
	CircleRadius float64
	//Rectangle
	Min        *mat64.Vector // Bottom left corner - Local Coord
	Max        *mat64.Vector // Top right corner - Local Coord
	rectHeight float64       // internal, derived in Calculate()
	rectWidth  float64       // internal, derived in Calculate()
	//Rotation
	AngularVelocity float64 // Change in current angle
	Inertia         float64
	InverseInertia  float64 // cached 1/Inertia
	AngularPosition float64 // Current angle
	Torque          float64 // Rotational force accumulator
	//Bounding Box
	BoundingBoxOffSet []float64 // 4 offsets from LinearPosition: xa, xb, ya, yb
} //End Entity
//New -: Create new Entity with default mass, radius and zeroed
//vectors. The receiver is unused; the configured entity is returned.
func (e *Entity) New(shape int) *Entity {
	ent := &Entity{
		LinearVelocity: mat64.NewVector(2, []float64{0, 0}),
		Mass:           10.0,
		LinearPosition: mat64.NewVector(2, []float64{0, 0}),
		Force:          mat64.NewVector(2, []float64{0, 0}),
		shape:          shape,
		CircleRadius:   10,
	}
	ent.InverseMass = 1 / ent.Mass
	return ent
} //End New()
//Calculate -: run entity Calculations: derives inertia, the local center
//of mass and the bounding-box offsets from the current shape parameters.
func (e *Entity) Calculate() {
	if e.shape == ShapeCircle { //Circle
		e.LinearPosition = mat64.NewVector(2, []float64{0, 0})
		e.Inertia = (e.Mass * e.CircleRadius * e.CircleRadius) / 4
		e.InverseInertia = 1 / e.Inertia
		e.BoundingBoxOffSet = []float64{e.CircleRadius * -1, e.CircleRadius, e.CircleRadius * -1, e.CircleRadius}
	} else if e.shape == ShapeRectangle { //Rectangle
		// Min is the bottom-left and Max the top-right corner, so positive
		// dimensions are Max-Min. The previous code computed Min-Max
		// (negative dimensions) and read the width with column index 1,
		// which is out of range for a mat64.Vector (a single-column matrix).
		e.rectHeight = e.Max.At(1, 0) - e.Min.At(1, 0) //Calc Height
		e.rectWidth = e.Max.At(0, 0) - e.Min.At(0, 0)  //Calc Width
		e.LinearPosition = mat64.NewVector(2, []float64{e.rectWidth / 2, e.rectHeight / 2}) //Calc COM
		e.Inertia = (e.Mass * (e.rectHeight*e.rectHeight + e.rectWidth*e.rectWidth)) / 12   //Calc Inertia
		e.InverseInertia = 1 / e.Inertia
		//Bounding Box OffSet: use half of the larger dimension so the box
		//covers the rectangle in any orientation.
		tc := e.rectWidth / 2
		if e.rectHeight > e.rectWidth {
			tc = e.rectHeight / 2
		}
		e.BoundingBoxOffSet = []float64{tc * -1, tc, tc * -1, tc}
	}
} //End Calculate()
//BoundingBox -: implementing method to match type BoundingBoxer in QuadTree.
//The box is the entity position shifted by the precomputed offsets.
func (e *Entity) BoundingBox() quadtree.BoundingBox {
	x := e.LinearPosition.At(0, 0)
	y := e.LinearPosition.At(1, 0)
	return quadtree.NewBoundingBox(
		x+e.BoundingBoxOffSet[0],
		x+e.BoundingBoxOffSet[1],
		y+e.BoundingBoxOffSet[2],
		y+e.BoundingBoxOffSet[3])
} //End BoundingBox()
//SetForce -: Assign Force to the entity, replacing any accumulated force.
func (e *Entity) SetForce(v *mat64.Vector) {
	e.Force = v
} //End SetForce

//AddForce -: Add Force to the entity's force accumulator.
func (e *Entity) AddForce(v *mat64.Vector) {
	e.Force.AddVec(e.Force, v)
} //End AddForce

//SetTorque -: Assign Torque to the entity, replacing any accumulated torque.
func (e *Entity) SetTorque(v float64) {
	e.Torque = v
} //End SetTorque

//AddTorque -: Add Torque to the entity's torque accumulator.
func (e *Entity) AddTorque(v float64) {
	e.Torque += v
} //End AddTorque

//ApplyForces -: Apply a force vector and a torque to the entity in one call.
//A nil force is treated as the zero vector.
func (e *Entity) ApplyForces(f *mat64.Vector, v float64) {
	if f == nil {
		f = mat64.NewVector(2, nil)
	}
	e.AddForce(f)
	e.AddTorque(v)
} //End ApplyForces
//UpdatePosition -: Update Position of the entity by integrating the
//accumulated force over the timestep dt (semi-implicit Euler).
func (e *Entity) UpdatePosition(dt float64) {
	// Velocity: V(t+1) = V(t) + F(t)*(1/m)*dt
	dv := mat64.NewVector(2, nil)
	dv.CopyVec(e.Force)
	dv.ScaleVec(e.InverseMass, dv) // acceleration a = F/m
	dv.ScaleVec(dt, dv)            // velocity change over the step
	e.LinearVelocity.AddVec(e.LinearVelocity, dv)
	// Position: P(t+1) = P(t) + V(t+1)*dt
	dp := mat64.NewVector(2, nil)
	dp.CopyVec(e.LinearVelocity)
	dp.ScaleVec(dt, dp) // displacement over the step
	e.LinearPosition.AddVec(e.LinearPosition, dp)
} //End UpdatePosition
//UpdateRotation -: Update Rotation of the entity by integrating the
//accumulated torque over the timestep dt (semi-implicit Euler).
func (e *Entity) UpdateRotation(dt float64) {
	// Angular velocity: AV(t+1) = AV(t) + torque*(1/I)*dt
	angAccel := e.Torque * e.InverseInertia
	e.AngularVelocity += angAccel * dt
	// Angle: A(t+1) = A(t) + AV(t+1)*dt
	e.AngularPosition += e.AngularVelocity * dt
} //End UpdateRotation
package p384
import (
"crypto/elliptic"
"crypto/subtle"
"math/big"
"github.com/cloudflare/circl/math"
)
// Curve is used to provide the extended functionality and performance of
// elliptic.Curve interface.
type Curve interface {
	elliptic.Curve
	// IsAtInfinity returns True is the point is the identity point.
	IsAtInfinity(X, Y *big.Int) bool
	// SimultaneousMult calculates P=mG+nQ, where G is the generator and
	// Q=(Qx,Qy). The scalars m and n are positive integers in big-endian form.
	// Runs in non-constant time to be used in signature verification.
	SimultaneousMult(Qx, Qy *big.Int, m, n []byte) (Px, Py *big.Int)
}
// P384 returns a Curve which implements P-384 (see FIPS 186-3, section D.2.4).
func P384() Curve { return p384 }

// curve embeds elliptic.CurveParams to satisfy elliptic.Curve while adding
// the extended Curve methods.
type curve struct{ *elliptic.CurveParams }

// p384 is the package-level singleton returned by P384.
var p384 curve

func init() {
	// Reuse the standard library's P-384 parameters.
	p384.CurveParams = elliptic.P384().Params()
}
// IsAtInfinity returns true when (X,Y) is the identity point, which this
// implementation represents as (0,0).
func (c curve) IsAtInfinity(X, Y *big.Int) bool {
	return Y.Sign() == 0 && X.Sign() == 0
}
// IsOnCurve reports whether the given (x,y) lies on the curve, i.e. whether
// y^2 == x^3 - 3x + b holds in the field. All arithmetic is carried out in
// the Montgomery domain.
func (c curve) IsOnCurve(X, Y *big.Int) bool {
	// bMon is the curve's B parameter encoded. bMon = B*R mod p.
	bMon := &fp384{
		0xcc, 0x2d, 0x41, 0x9d, 0x71, 0x88, 0x11, 0x08,
		0xec, 0x32, 0x4c, 0x7a, 0xd8, 0xad, 0x29, 0xf7,
		0x2e, 0x02, 0x20, 0x19, 0x9b, 0x20, 0xf2, 0x77,
		0xe2, 0x8a, 0x93, 0x94, 0xee, 0x4b, 0x37, 0xe3,
		0x94, 0x20, 0x02, 0x1f, 0xf4, 0x21, 0x2b, 0xb6,
		0xf9, 0xbf, 0x4f, 0x60, 0x4b, 0x11, 0x08, 0xcd,
	}
	// Convert the coordinates into Montgomery form.
	x, y := &fp384{}, &fp384{}
	x.SetBigInt(X)
	y.SetBigInt(Y)
	montEncode(x, x)
	montEncode(y, y)
	// y2 = y^2 ; x3 = x^3 - 3x + b
	y2, x3 := &fp384{}, &fp384{}
	fp384Sqr(y2, y)
	fp384Sqr(x3, x)
	fp384Mul(x3, x3, x)
	threeX := &fp384{}
	fp384Add(threeX, x, x)
	fp384Add(threeX, threeX, x)
	fp384Sub(x3, x3, threeX)
	fp384Add(x3, x3, bMon)
	return *y2 == *x3
}
// Add returns the sum of (x1,y1) and (x2,y2).
func (c curve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) {
	sum := newAffinePoint(x1, y1).toJacobian()
	sum.mixadd(sum, newAffinePoint(x2, y2))
	return sum.toAffine().toInt()
}
// Double returns 2*(x,y).
func (c curve) Double(x1, y1 *big.Int) (x, y *big.Int) {
	twice := newAffinePoint(x1, y1).toJacobian()
	twice.double()
	return twice.toAffine().toInt()
}
// reduceScalar shortens a scalar modulo the order of the curve and
// left-pads the result to exactly sizeFp bytes.
func (c curve) reduceScalar(k []byte) []byte {
	const size = sizeFp
	if len(k) > size {
		bigK := new(big.Int).SetBytes(k)
		bigK.Mod(bigK, c.Params().N)
		k = bigK.Bytes()
	}
	if pad := size - len(k); pad > 0 {
		k = append(make([]byte, pad), k...)
	}
	return k
}
// toOdd performs k = (-k mod N) if k is even. It returns the (possibly
// negated) scalar together with a flag telling whether the input was even,
// using a constant-time copy for the selection so the choice does not leak.
func (c curve) toOdd(k []byte) ([]byte, int) {
	var X, Y big.Int
	X.SetBytes(k)
	Y.Neg(&X).Mod(&Y, c.Params().N) // Y = -k mod N
	isEven := 1 - int(X.Bit(0))
	x := X.Bytes()
	y := Y.Bytes()
	// Left-pad the shorter encoding so both candidates have equal length
	// before the constant-time selection below.
	if len(x) < len(y) {
		x = append(make([]byte, len(y)-len(x)), x...)
	} else if len(x) > len(y) {
		y = append(make([]byte, len(x)-len(y)), y...)
	}
	// x = y iff k was even; the copy runs in constant time.
	subtle.ConstantTimeCopy(isEven, x, y)
	return x, isEven
}
// ScalarMult returns (Qx,Qy)=k*(Px,Py) where k is a number in big-endian form.
// It recodes the scalar into signed digits of width omega and selects each
// table entry with constant-time moves, so the memory access pattern does not
// depend on the secret scalar.
func (c curve) ScalarMult(Px, Py *big.Int, k []byte) (Qx, Qy *big.Int) {
	const omega = uint(5)
	k = c.reduceScalar(k)
	// The signed-digit recoding requires an odd scalar; if k is even, work
	// with -k mod N and undo the negation on the result at the end.
	oddK, isEvenK := c.toOdd(k)
	var scalar big.Int
	scalar.SetBytes(oddK)
	if scalar.Sign() == 0 {
		return new(big.Int), new(big.Int)
	}
	L := math.SignedDigit(&scalar, omega)
	var Q, R jacobianPoint
	TabP := newAffinePoint(Px, Py).oddMultiples(omega)
	for i := len(L) - 1; i >= 0; i-- {
		for j := uint(0); j < omega-1; j++ {
			Q.double()
		}
		// Scan the whole table, conditionally moving the digit's odd
		// multiple into R so the lookup index stays secret.
		idx := absolute(L[i]) >> 1
		for j := range TabP {
			R.cmov(&TabP[j], subtle.ConstantTimeEq(int32(j), idx))
		}
		// Negate R when the digit is negative (sign bit of L[i]).
		R.cneg(int(L[i]>>31) & 1)
		Q.add(&Q, &R)
	}
	// Undo the toOdd negation if the original scalar was even.
	Q.cneg(isEvenK)
	return Q.toAffine().toInt()
}
// ScalarBaseMult returns k*G, where G is the base point of the group
// and k is an integer in big-endian form.
func (c curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
	params := c.Params()
	return c.ScalarMult(params.Gx, params.Gy, k)
}
// SimultaneousMult calculates P=mG+nQ, where G is the generator and Q=(x,y,z).
// The scalars m and n are integers in big-endian form. Non-constant time.
// It interleaves the two omega-NAF recodings in a single double-and-add loop
// (Shamir/Strauss trick), drawing from a precomputed table of odd multiples
// of G and a freshly computed table of odd multiples of Q.
func (c curve) SimultaneousMult(Qx, Qy *big.Int, m, n []byte) (Px, Py *big.Int) {
	const nOmega = uint(5)
	var k big.Int
	k.SetBytes(m)
	nafM := math.OmegaNAF(&k, baseOmega)
	k.SetBytes(n)
	nafN := math.OmegaNAF(&k, nOmega)
	// Pad the shorter recoding with zero digits so both have equal length.
	if len(nafM) > len(nafN) {
		nafN = append(nafN, make([]int32, len(nafM)-len(nafN))...)
	} else if len(nafM) < len(nafN) {
		nafM = append(nafM, make([]int32, len(nafN)-len(nafM))...)
	}
	TabQ := newAffinePoint(Qx, Qy).oddMultiples(nOmega)
	var P, jR jacobianPoint
	var aR affinePoint
	for i := len(nafN) - 1; i >= 0; i-- {
		P.double()
		// Generator point: digit nafM[i] selects a precomputed odd multiple of G.
		if nafM[i] != 0 {
			idxM := absolute(nafM[i]) >> 1
			aR = baseOddMultiples[idxM]
			if nafM[i] < 0 {
				aR.neg()
			}
			P.mixadd(&P, &aR)
		}
		// Input point: digit nafN[i] selects a computed odd multiple of Q.
		if nafN[i] != 0 {
			idxN := absolute(nafN[i]) >> 1
			jR = TabQ[idxN]
			if nafN[i] < 0 {
				jR.neg()
			}
			P.add(&P, &jR)
		}
	}
	return P.toAffine().toInt()
}
// absolute returns the absolute value of x without branching, so the
// running time does not depend on the sign of the (possibly secret) input.
func absolute(x int32) int32 {
	sign := x >> 31 // 0 for non-negative, -1 for negative
	return (x ^ sign) - sign
}
package share
import (
"math/big"
"github.com/henrycg/prio/utils"
)
// PRGHints is the compressed representation of secret-shared data: a PRG key
// that regenerates the pseudorandom shares, plus (for the leader only) the
// correction values Delta.
type PRGHints struct {
	Key utils.PRGKey
	Delta []*big.Int
}

// A server uses a ReplayPRG to recover the shared values
// that the client sent it (in the form of a PRGHints struct).
type ReplayPRG struct {
	serverIdx int // index of this server
	leaderIdx int // index of the leader server, which applies Delta
	rand *utils.BufPRGReader // PRG seeded from the imported hints key
	hints *PRGHints
	cur int // position of the next Delta entry to consume
}

// A client uses a GenPRG to split values into shares
// (one share per server) using a PRG to compress the
// shares.
type GenPRG struct {
	nServers int
	leaderIdx int
	rand []*utils.BufPRGReader // one PRG per server
	delta []*big.Int // per-value corrections, handed to the leader
}
// NewReplayPRG produces a new ReplayPRG for the given server/leader combo.
func NewReplayPRG(serverIdx int, leaderIdx int) *ReplayPRG {
	return &ReplayPRG{serverIdx: serverIdx, leaderIdx: leaderIdx}
}

// Import loads the compressed secret-shared values from hints and resets
// the replay position to the first shared value.
func (p *ReplayPRG) Import(hints *PRGHints) {
	p.hints = hints
	p.rand = utils.NewBufPRG(utils.NewPRG(&p.hints.Key))
	p.cur = 0
}

// Get recovers the next secret-shared value in a field with modulus mod.
// The leader additionally applies the stored correction Delta.
func (p *ReplayPRG) Get(mod *big.Int) *big.Int {
	share := p.rand.RandInt(mod)
	if p.IsLeader() {
		share.Add(share, p.hints.Delta[p.cur])
		share.Mod(share, mod)
	}
	p.cur++
	return share
}

// IsLeader reports whether this server is the leader.
func (p *ReplayPRG) IsLeader() bool {
	return p.leaderIdx == p.serverIdx
}
// NewGenPRG creates a new GenPRG for producing compressed secret-shared
// values across nServers servers, with the leader at leaderIdx.
func NewGenPRG(nServers int, leaderIdx int) *GenPRG {
	g := &GenPRG{
		nServers:  nServers,
		leaderIdx: leaderIdx,
		rand:      make([]*utils.BufPRGReader, nServers),
		delta:     make([]*big.Int, 0),
	}
	for i := range g.rand {
		g.rand[i] = utils.NewBufPRG(utils.RandomPRG())
	}
	return g
}
// Share splits value into nServers shares using modulus mod. Each share is
// drawn from that server's PRG; the leader's share is corrected by delta so
// the shares sum to value.
func (g *GenPRG) Share(mod *big.Int, value *big.Int) []*big.Int {
	shares := make([]*big.Int, g.nServers)
	sum := new(big.Int)
	for i := range shares {
		shares[i] = g.rand[i].RandInt(mod)
		sum.Add(sum, shares[i])
	}
	// delta corrects the pseudorandom sum so the shares add up to value.
	delta := new(big.Int).Sub(value, sum)
	delta.Mod(delta, mod)
	g.delta = append(g.delta, delta)
	leader := shares[g.leaderIdx]
	leader.Add(leader, delta)
	leader.Mod(leader, mod)
	return shares
}
// ShareRand splits a random value into shares using modulus mod and returns
// that value. No correction is needed, so a zero delta is recorded.
func (g *GenPRG) ShareRand(mod *big.Int) *big.Int {
	total := new(big.Int)
	for _, r := range g.rand {
		total.Add(total, r.RandInt(mod))
	}
	total.Mod(total, mod)
	g.delta = append(g.delta, utils.Zero)
	return total
}
// Generate the hints that serverIdx can use to recover the shares.
func (g *GenPRG) Hints(serverIdx int) *PRGHints {
out := new(PRGHints)
out.Key = g.rand[serverIdx].Key
if serverIdx == g.leaderIdx {
out.Delta = g.delta
}
return out
} | share/prg.go | 0.717705 | 0.453564 | prg.go | starcoder |
package f32
import (
"context"
"log"
"reflect"
)
func init() {
	// Register the type so it can be created via the generic Matrix factory.
	RegisterMatrix(reflect.TypeOf((*SparseVector)(nil)).Elem())
}

// SparseVector is compressed storage by indices: only non-zero values are
// kept, with indices holding the (sorted) vector positions of values.
type SparseVector struct {
	l int // length of the sparse vector
	values []float32
	indices []int
}
// NewSparseVector returns an empty SparseVector of length l.
func NewSparseVector(l int) *SparseVector {
	return newSparseVector(l, 0)
}

// NewSparseVectorFromArray returns a SparseVector populated from data.
func NewSparseVectorFromArray(data []float32) *SparseVector {
	vector := newSparseVector(len(data), 0)
	for i, v := range data {
		vector.SetVec(i, v)
	}
	return vector
}

// newSparseVector returns a SparseVector of length l with s value slots.
func newSparseVector(l, s int) *SparseVector {
	return &SparseVector{
		l:       l,
		values:  make([]float32, s),
		indices: make([]int, s),
	}
}
// Length of the vector
func (s *SparseVector) Length() int {
	return s.l
}

// AtVec returns the value of a vector element at i-th; absent elements are 0.
func (s *SparseVector) AtVec(i int) float32 {
	if i < 0 || i >= s.Length() {
		log.Panicf("Length '%+v' is invalid", i)
	}
	pos, count, _ := s.index(i)
	if pos < count && s.indices[pos] == i {
		return s.values[pos]
	}
	return 0
}
// SetVec sets the value at i-th of the vector. Storing zero deletes the
// element so the representation stays sparse.
func (s *SparseVector) SetVec(i int, value float32) {
	if i < 0 || i >= s.Length() {
		log.Panicf("Length '%+v' is invalid", i)
	}
	pos, count, _ := s.index(i)
	present := pos < count && s.indices[pos] == i
	switch {
	case present && value == 0:
		s.remove(pos)
	case present:
		s.values[pos] = value
	default:
		s.insert(pos, i, value)
	}
}
// Columns the number of columns of the vector; a vector is one column wide.
func (s *SparseVector) Columns() int {
	return 1
}

// Rows the number of rows of the vector, equal to its length.
func (s *SparseVector) Rows() int {
	return s.l
}
// Update does a At and Set on the vector element at r-th, c-th, applying f
// to the current value.
func (s *SparseVector) Update(r, c int, f func(float32) float32) {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	if c < 0 || c >= s.Columns() {
		log.Panicf("Column '%+v' is invalid", c)
	}
	s.SetVec(r, f(s.AtVec(r)))
}

// At returns the value of a vector element at r-th, c-th
func (s *SparseVector) At(r, c int) (value float32) {
	// Read through Update with an identity function so the same bounds
	// checks apply.
	s.Update(r, c, func(v float32) float32 {
		value = v
		return v
	})
	return
}
// Set sets the value at r-th, c-th of the vector.
func (s *SparseVector) Set(r, c int, value float32) {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	if c < 0 || c >= s.Columns() {
		log.Panicf("Column '%+v' is invalid", c)
	}
	s.SetVec(r, value)
}
// ColumnsAt return the columns at c-th; for a vector this is a copy of itself.
func (s *SparseVector) ColumnsAt(c int) Vector {
	if c < 0 || c >= s.Columns() {
		log.Panicf("Column '%+v' is invalid", c)
	}
	return s.copy()
}

// RowsAt return the rows at r-th as a one-element vector.
func (s *SparseVector) RowsAt(r int) Vector {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	row := NewSparseVector(1)
	row.SetVec(0, s.AtVec(r))
	return row
}

// RowsAtToArray return the rows at r-th as a one-element slice.
func (s *SparseVector) RowsAtToArray(r int) []float32 {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	return []float32{s.AtVec(r)}
}
// insert places (i, value) at position pointer, shifting later entries
// right. Zero values are never stored.
func (s *SparseVector) insert(pointer, i int, value float32) {
	if value == 0 {
		return
	}
	s.indices = append(s.indices, 0)
	copy(s.indices[pointer+1:], s.indices[pointer:])
	s.indices[pointer] = i
	s.values = append(s.values, 0)
	copy(s.values[pointer+1:], s.values[pointer:])
	s.values[pointer] = value
}

// remove deletes the entry at position pointer, shifting later entries left.
func (s *SparseVector) remove(pointer int) {
	copy(s.indices[pointer:], s.indices[pointer+1:])
	s.indices = s.indices[:len(s.indices)-1]
	copy(s.values[pointer:], s.values[pointer+1:])
	s.values = s.values[:len(s.values)-1]
}
// index binary-searches the sorted indices slice for vector position i.
// It returns the position of i if stored, or the insertion point that keeps
// indices sorted, together with the current number of stored entries.
//
// The previous implementation short-circuited with "if i > length", which
// compared a vector index against the count of stored entries and could
// return an insertion point past smaller stored indices (e.g. SetVec(500, x)
// followed by SetVec(3, y) produced the unsorted indices [500, 3]),
// corrupting every later lookup. The plain binary search below naturally
// returns length when i is greater than all stored indices.
func (s *SparseVector) index(i int) (int, int, error) {
	length := len(s.indices)
	start, end := 0, length
	for start < end {
		p := (start + end) / 2
		switch {
		case s.indices[p] > i:
			end = p
		case s.indices[p] < i:
			start = p + 1
		default:
			return p, length, nil
		}
	}
	return start, length, nil
}
// copy returns a deep copy of the vector.
func (s *SparseVector) copy() *SparseVector {
	dup := newSparseVector(s.l, len(s.indices))
	copy(dup.values, s.values)
	copy(dup.indices, s.indices)
	return dup
}

// Copy copies the vector
func (s *SparseVector) Copy() Matrix {
	return s.copy()
}
// Scalar multiplication of a vector by alpha
func (s *SparseVector) Scalar(alpha float32) Matrix {
	ctx := context.Background()
	return Scalar(ctx, s, alpha)
}

// Multiply multiplies a vector by another vector
func (s *SparseVector) Multiply(m Matrix) Matrix {
	result := newMatrix(m.Rows(), s.Columns(), nil)
	MatrixMatrixMultiply(context.Background(), s, m, nil, result)
	return result
}

// Add addition of a metrix by another metrix
func (s *SparseVector) Add(m Matrix) Matrix {
	result := s.Copy()
	Add(context.Background(), s, m, nil, result)
	return result
}

// Subtract subtracts one metrix from another metrix
func (s *SparseVector) Subtract(m Matrix) Matrix {
	result := m.Copy()
	Subtract(context.Background(), s, m, nil, result)
	return result
}

// Negative the negative of a metrix
func (s *SparseVector) Negative() Matrix {
	result := s.Copy()
	Negative(context.Background(), s, nil, result)
	return result
}

// Transpose swaps the rows and columns
func (s *SparseVector) Transpose() Matrix {
	result := newMatrix(s.Columns(), s.Rows(), nil)
	Transpose(context.Background(), s, nil, result)
	return result
}

// Equal the two metrics are equal
func (s *SparseVector) Equal(m Matrix) bool {
	return Equal(context.Background(), s, m)
}

// NotEqual the two metrix are not equal
func (s *SparseVector) NotEqual(m Matrix) bool {
	return NotEqual(context.Background(), s, m)
}
// Size of the vector
func (s *SparseVector) Size() int {
	return s.l
}

// Values the number of non-zero elements in the Vector
func (s *SparseVector) Values() int {
	return len(s.values)
}

// Clear removes all elements from a vector
func (s *SparseVector) Clear() {
	s.values = make([]float32, 0)
	s.indices = make([]int, 0)
}
// Enumerate iterates through all non-zero elements, order is not guaranteed
func (s *SparseVector) Enumerate() Enumerate {
	return s.iterator()
}

// iterator builds an iterator positioned before the first stored value.
func (s *SparseVector) iterator() *sparseVectorIterator {
	return &sparseVectorIterator{
		matrix: s,
		size:   len(s.values),
		last:   0,
	}
}
// sparseVectorIterator walks the stored (non-zero) entries of a SparseVector.
type sparseVectorIterator struct {
	matrix *SparseVector
	size int // number of stored entries at iterator creation
	last int // position of the next entry to visit
	old int // position of the most recently visited entry
}
// HasNext reports whether the iterator has any more values.
func (s *sparseVectorIterator) HasNext() bool {
	// Direct comparison instead of the previous if/return-true/return-false.
	return s.last < s.size
}

// next advances the iterator, remembering the previous position in old.
func (s *sparseVectorIterator) next() {
	s.old = s.last
	s.last++
}

// Next moves the iterator and returns the row, column and value.
func (s *sparseVectorIterator) Next() (int, int, float32) {
	s.next()
	return s.matrix.indices[s.old], 0, s.matrix.values[s.old]
}
// Map replace each element with the result of applying a function to its value
func (s *SparseVector) Map() Map {
	return &sparseVectorMap{s.iterator()}
}

// sparseVectorMap wraps the iterator to support in-place mutation.
type sparseVectorMap struct {
	*sparseVectorIterator
}

// HasNext checks the iterator has any more values
func (s *sparseVectorMap) HasNext() bool {
	return s.sparseVectorIterator.HasNext()
}
// Map moves the iterator and uses a higher order function to change the
// element's current value; returning zero deletes the element.
func (s *sparseVectorMap) Map(f func(int, int, float32) float32) {
	s.next()
	value := f(s.matrix.indices[s.old], 0, s.matrix.values[s.old])
	if value != 0 {
		s.matrix.values[s.old] = value
		return
	}
	s.matrix.remove(s.old)
	// Removal shifts later entries left; step the iterator back and shrink
	// its bound so iteration neither skips the shifted element nor indexes
	// past the now-shorter values slice (the previous code did both).
	s.last = s.old
	s.size--
}
// Element of the mask for each tuple that exists in the matrix for which the value of the tuple cast to Boolean is true
func (s *SparseVector) Element(r, c int) bool {
return s.AtVec(r) > 0
} | f32/sparseVector.go | 0.800068 | 0.640917 | sparseVector.go | starcoder |
package template
import (
"fmt"
"strings"
)
// Data is the template data used to render the Moq template.
type Data struct {
	PkgName string // package name emitted at the top of the generated file
	Mocks []MockData
	StubImpl bool // when true, unconfigured methods return zero values instead of panicking
	SyncPkg string // import path of the sync package used for locking
}

// MockData is the data used to generate a mock for some interface.
type MockData struct {
	InterfaceName string
	MockName string
	Methods []MethodData
}

// MethodData is the data which represents a method on some interface.
type MethodData struct {
	Name string
	Params []ParamData
	Returns []ParamData
}
// ArgList is the string representation of method parameters, ex:
// 's string, n int, foo bar.Baz'.
func (m MethodData) ArgList() string {
	args := make([]string, 0, len(m.Params))
	for _, p := range m.Params {
		args = append(args, p.MethodArg())
	}
	return strings.Join(args, ", ")
}

// ArgCallList is the string representation of method call parameters,
// ex: 's, n, foo'. In case of a last variadic parameter, it will be of
// the format 's, n, foos...'
func (m MethodData) ArgCallList() string {
	args := make([]string, 0, len(m.Params))
	for _, p := range m.Params {
		args = append(args, p.CallName())
	}
	return strings.Join(args, ", ")
}
// ReturnArgTypeList is the string representation of method return
// types, ex: 'bar.Baz', '(string, error)'.
func (m MethodData) ReturnArgTypeList() string {
	types := make([]string, 0, len(m.Returns))
	for _, p := range m.Returns {
		types = append(types, p.TypeString())
	}
	joined := strings.Join(types, ", ")
	// Multiple return values need surrounding parentheses.
	if len(m.Returns) > 1 {
		return "(" + joined + ")"
	}
	return joined
}

// ReturnArgNameList is the string representation of values being
// returned from the method, ex: 'foo', 's, err'.
func (m MethodData) ReturnArgNameList() string {
	names := make([]string, 0, len(m.Returns))
	for _, p := range m.Returns {
		names = append(names, p.Name)
	}
	return strings.Join(names, ", ")
}
// ParamData is the data which represents a parameter to some method of
// an interface.
type ParamData struct {
	Name string
	Type string
	Pointer bool
	Variadic bool
}

// MethodArg is the representation of the parameter in the function
// signature, ex: 'name a.Type'.
func (p ParamData) MethodArg() string {
	typ := p.TypeString()
	if p.Variadic {
		// TypeString yields "[]T" for variadics; render it as "...T".
		typ = "..." + typ[2:]
	}
	return fmt.Sprintf("%s %s", p.Name, typ)
}

// CallName returns the string representation of the parameter to be
// used for a method call. For a variadic paramter, it will be of the
// format 'foos...'.
func (p ParamData) CallName() string {
	if p.Variadic {
		return p.Name + "..."
	}
	return p.Name
}

// TypeString returns the variable type with the package qualifier in the
// format 'pkg.Type'.
func (p ParamData) TypeString() string {
	prefix := ""
	if p.Variadic {
		prefix = "[]"
	}
	if p.Pointer {
		prefix += "*"
	}
	return prefix + p.Type
}
package tune
// Channels represents the available channels keyed by station
// and channel identifier.
var Channels = map[string]map[int]*Channel{
"classicalradio.com": map[int]*Channel{
373: {"Classical Relaxation", "http://listen.classicalradio.com/premium_high/classicalrelaxation.pls"},
372: {"Classical Piano Trios", "http://listen.classicalradio.com/premium_high/classicalpianotrios.pls"},
381: {"Operas", "http://listen.classicalradio.com/premium_high/operas.pls"},
427: {"Flute Works", "http://listen.classicalradio.com/premium_high/fluteworks.pls"},
425: {"Classical Guitar Works", "http://listen.classicalradio.com/premium_high/classicalguitarworks.pls"},
362: {"Bach", "http://listen.classicalradio.com/premium_high/bach.pls"},
429: {"Schubert", "http://listen.classicalradio.com/premium_high/schubert.pls"},
426: {"Gregorian Chant", "http://listen.classicalradio.com/premium_high/gregorianchant.pls"},
366: {"Brahms", "http://listen.classicalradio.com/premium_high/brahms.pls"},
369: {"Chopin", "http://listen.classicalradio.com/premium_high/chopin.pls"},
391: {"Solo Piano", "http://listen.classicalradio.com/premium_high/solopiano.pls"},
380: {"Mozart", "http://listen.classicalradio.com/premium_high/mozart.pls"},
364: {"Baroque Period", "http://listen.classicalradio.com/premium_high/baroqueperiod.pls"},
365: {"Beethoven", "http://listen.classicalradio.com/premium_high/beethoven.pls"},
370: {"Choral Works", "http://listen.classicalradio.com/premium_high/choralworks.pls"},
360: {"20th Century", "http://listen.classicalradio.com/premium_high/20thcentury.pls"},
361: {"21st Century", "http://listen.classicalradio.com/premium_high/21stcentury.pls"},
363: {"Ballets", "http://listen.classicalradio.com/premium_high/ballets.pls"},
367: {"Cello Works", "http://listen.classicalradio.com/premium_high/celloworks.pls"},
368: {"Chamber Works", "http://listen.classicalradio.com/premium_high/chamberworks.pls"},
371: {"Classical Period", "http://listen.classicalradio.com/premium_high/classicalperiod.pls"},
428: {"Opera Highlights", "http://listen.classicalradio.com/premium_high/operahighlights.pls"},
374: {"Concertos", "http://listen.classicalradio.com/premium_high/concertos.pls"},
383: {"Organ Works", "http://listen.classicalradio.com/premium_high/organworks.pls"},
375: {"Contemporary Period", "http://listen.classicalradio.com/premium_high/contemporaryperiod.pls"},
386: {"Easy Classical", "http://listen.classicalradio.com/premium_high/easyclassical.pls"},
376: {"Handel", "http://listen.classicalradio.com/premium_high/handel.pls"},
378: {"Haydn", "http://listen.classicalradio.com/premium_high/haydn.pls"},
377: {"Harpsichord Works", "http://listen.classicalradio.com/premium_high/harpsichordworks.pls"},
379: {"Medieval Period", "http://listen.classicalradio.com/premium_high/medievalperiod.pls"},
382: {"Orchestral Works", "http://listen.classicalradio.com/premium_high/orchestralworks.pls"},
384: {"Overtures", "http://listen.classicalradio.com/premium_high/overtures.pls"},
387: {"Renaissance Period", "http://listen.classicalradio.com/premium_high/renaissanceperiod.pls"},
388: {"Romantic Period", "http://listen.classicalradio.com/premium_high/romanticperiod.pls"},
385: {"Piano Works", "http://listen.classicalradio.com/premium_high/pianoworks.pls"},
389: {"Sacred Works", "http://listen.classicalradio.com/premium_high/sacredworks.pls"},
390: {"Solo Instruments", "http://listen.classicalradio.com/premium_high/soloinstruments.pls"},
392: {"Sonatas", "http://listen.classicalradio.com/premium_high/sonatas.pls"},
393: {"Songs & Lieder", "http://listen.classicalradio.com/premium_high/songsnlieders.pls"},
394: {"String Works", "http://listen.classicalradio.com/premium_high/stringworks.pls"},
395: {"Symphonies", "http://listen.classicalradio.com/premium_high/symphonies.pls"},
396: {"Tchaikovsky", "http://listen.classicalradio.com/premium_high/tchaikovsky.pls"},
397: {"Violin Works", "http://listen.classicalradio.com/premium_high/violinworks.pls"},
398: {"Vivaldi", "http://listen.classicalradio.com/premium_high/vivaldi.pls"},
399: {"Wind Works", "http://listen.classicalradio.com/premium_high/windworks.pls"},
},
"di.fm": map[int]*Channel{
424: {"Future Bass", "http://listen.di.fm/premium_high/futurebass.pls"},
2: {"Vocal Trance", "http://listen.di.fm/premium_high/vocaltrance.pls"},
400: {"Chill & Tropical House", "http://listen.di.fm/premium_high/chillntropicalhouse.pls"},
1: {"Trance", "http://listen.di.fm/premium_high/trance.pls"},
11: {"Lounge", "http://listen.di.fm/premium_high/lounge.pls"},
3: {"Chillout", "http://listen.di.fm/premium_high/chillout.pls"},
142: {"Vocal Chillout", "http://listen.di.fm/premium_high/vocalchillout.pls"},
224: {"ChillHop", "http://listen.di.fm/premium_high/chillhop.pls"},
275: {"Chillstep", "http://listen.di.fm/premium_high/chillstep.pls"},
351: {"Indie Dance", "http://listen.di.fm/premium_high/indiedance.pls"},
402: {"Melodic Progressive", "http://listen.di.fm/premium_high/melodicprogressive.pls"},
7: {"Progressive", "http://listen.di.fm/premium_high/progressive.pls"},
4: {"House", "http://listen.di.fm/premium_high/house.pls"},
210: {"Mainstage", "http://listen.di.fm/premium_high/mainstage.pls"},
59: {"Minimal", "http://listen.di.fm/premium_high/minimal.pls"},
5: {"Hard Dance", "http://listen.di.fm/premium_high/harddance.pls"},
6: {"EuroDance", "http://listen.di.fm/premium_high/eurodance.pls"},
278: {"Vocal Lounge", "http://listen.di.fm/premium_high/vocallounge.pls"},
215: {"UMF Radio", "http://listen.di.fm/premium_high/umfradio.pls"},
66: {"Tech House", "http://listen.di.fm/premium_high/techhouse.pls"},
293: {"Jungle", "http://listen.di.fm/premium_high/jungle.pls"},
292: {"Future Garage", "http://listen.di.fm/premium_high/futuregarage.pls"},
290: {"Bass & Jackin' House", "http://listen.di.fm/premium_high/bassnjackinhouse.pls"},
56: {"Electro House", "http://listen.di.fm/premium_high/electro.pls"},
209: {"Big Room House", "http://listen.di.fm/premium_high/bigroomhouse.pls"},
294: {"Nightcore", "http://listen.di.fm/premium_high/nightcore.pls"},
230: {"Trap", "http://listen.di.fm/premium_high/trap.pls"},
67: {"PsyChill", "http://listen.di.fm/premium_high/psychill.pls"},
8: {"Goa-Psy Trance", "http://listen.di.fm/premium_high/goapsy.pls"},
178: {"Progressive Psy", "http://listen.di.fm/premium_high/progressivepsy.pls"},
289: {"Bassline", "http://listen.di.fm/premium_high/bassline.pls"},
9: {"Hardcore", "http://listen.di.fm/premium_high/hardcore.pls"},
180: {"Downtempo Lounge", "http://listen.di.fm/premium_high/downtempolounge.pls"},
10: {"DJ Mixes", "http://listen.di.fm/premium_high/djmixes.pls"},
213: {"Russian Club Hits", "http://listen.di.fm/premium_high/russianclubhits.pls"},
403: {"Atmospheric Breaks", "http://listen.di.fm/premium_high/atmosphericbreaks.pls"},
12: {"Ambient", "http://listen.di.fm/premium_high/ambient.pls"},
285: {"Psybient", "http://listen.di.fm/premium_high/psybient.pls"},
13: {"Drum and Bass", "http://listen.di.fm/premium_high/drumandbass.pls"},
295: {"Nu Disco", "http://listen.di.fm/premium_high/nudisco.pls"},
198: {"Glitch Hop", "http://listen.di.fm/premium_high/glitchhop.pls"},
349: {"Jazz House", "http://listen.di.fm/premium_high/jazzhouse.pls"},
325: {"Big Beat", "http://listen.di.fm/premium_high/bigbeat.pls"},
348: {"Dub", "http://listen.di.fm/premium_high/dub.pls"},
208: {"EcLectronica", "http://listen.di.fm/premium_high/eclectronica.pls"},
14: {"Oldschool Techno & Trance ", "http://listen.di.fm/premium_high/classictechno.pls"},
175: {"Epic Trance", "http://listen.di.fm/premium_high/epictrance.pls"},
353: {"Detroit House & Techno", "http://listen.di.fm/premium_high/detroithousentechno.pls"},
324: {"00s Club Hits", "http://listen.di.fm/premium_high/00sclubhits.pls"},
355: {"Dub Techno", "http://listen.di.fm/premium_high/dubtechno.pls"},
15: {"Breaks", "http://listen.di.fm/premium_high/breaks.pls"},
404: {"Indie Beats", "http://listen.di.fm/premium_high/indiebeats.pls"},
16: {"Gabber", "http://listen.di.fm/premium_high/gabber.pls"},
347: {"Electronics", "http://listen.di.fm/premium_high/electronics.pls"},
352: {"Liquid Trap", "http://listen.di.fm/premium_high/liquidtrap.pls"},
326: {"EBM", "http://listen.di.fm/premium_high/ebm.pls"},
276: {"Hard Techno", "http://listen.di.fm/premium_high/hardtechno.pls"},
291: {"Drumstep", "http://listen.di.fm/premium_high/drumstep.pls"},
36: {"Techno", "http://listen.di.fm/premium_high/techno.pls"},
327: {"Electro Swing", "http://listen.di.fm/premium_high/electroswing.pls"},
280: {"Electronic Pioneers", "http://listen.di.fm/premium_high/electronicpioneers.pls"},
47: {"Soulful House", "http://listen.di.fm/premium_high/soulfulhouse.pls"},
174: {"Deep House", "http://listen.di.fm/premium_high/deephouse.pls"},
182: {"Deep Tech", "http://listen.di.fm/premium_high/deeptech.pls"},
57: {"Tribal House", "http://listen.di.fm/premium_high/tribalhouse.pls"},
58: {"Funky House", "http://listen.di.fm/premium_high/funkyhouse.pls"},
137: {"Deep Nu-Disco", "http://listen.di.fm/premium_high/deepnudisco.pls"},
288: {"Underground Techno", "http://listen.di.fm/premium_high/undergroundtechno.pls"},
296: {"Oldschool Rave", "http://listen.di.fm/premium_high/oldschoolrave.pls"},
104: {"Oldschool House", "http://listen.di.fm/premium_high/oldschoolhouse.pls"},
64: {"Space Dreams", "http://listen.di.fm/premium_high/spacemusic.pls"},
60: {"Hardstyle", "http://listen.di.fm/premium_high/hardstyle.pls"},
68: {"Chillout Dreams", "http://listen.di.fm/premium_high/chilloutdreams.pls"},
105: {"Liquid DnB", "http://listen.di.fm/premium_high/liquiddnb.pls"},
181: {"Dark DnB", "http://listen.di.fm/premium_high/darkdnb.pls"},
69: {"Classic EuroDance", "http://listen.di.fm/premium_high/classiceurodance.pls"},
346: {"Dark PsyTrance", "http://listen.di.fm/premium_high/darkpsytrance.pls"},
176: {"Hands Up", "http://listen.di.fm/premium_high/handsup.pls"},
70: {"Club Sounds", "http://listen.di.fm/premium_high/club.pls"},
90: {"Classic Trance", "http://listen.di.fm/premium_high/classictrance.pls"},
125: {"Classic Vocal Trance", "http://listen.di.fm/premium_high/classicvocaltrance.pls"},
177: {"Club Dubstep", "http://listen.di.fm/premium_high/clubdubstep.pls"},
91: {"Dubstep", "http://listen.di.fm/premium_high/dubstep.pls"},
184: {"Liquid Dubstep", "http://listen.di.fm/premium_high/liquiddubstep.pls"},
286: {"Electropop", "http://listen.di.fm/premium_high/electropop.pls"},
92: {"Disco House", "http://listen.di.fm/premium_high/discohouse.pls"},
183: {"Classic EuroDisco", "http://listen.di.fm/premium_high/classiceurodisco.pls"},
350: {"IDM", "http://listen.di.fm/premium_high/idm.pls"},
53: {"Future Synthpop", "http://listen.di.fm/premium_high/futuresynthpop.pls"},
117: {"Latin House", "http://listen.di.fm/premium_high/latinhouse.pls"},
124: {"Oldschool Acid", "http://listen.di.fm/premium_high/oldschoolacid.pls"},
},
"jazzradio.com": map[int]*Channel{
413: {"Modern Big Band", "http://listen.jazzradio.com/premium_high/modernbigband.pls"},
328: {"Jazz Ballads", "http://listen.jazzradio.com/premium_high/jazzballads.pls"},
115: {"Current Jazz", "http://listen.jazzradio.com/premium_high/currentjazz.pls"},
84: {"Smooth Jazz", "http://listen.jazzradio.com/premium_high/smoothjazz.pls"},
116: {"Smooth Jazz 24'7", "http://listen.jazzradio.com/premium_high/smoothjazz247.pls"},
112: {"Paris Café", "http://listen.jazzradio.com/premium_high/pariscafe.pls"},
103: {"Trumpet Jazz", "http://listen.jazzradio.com/premium_high/trumpetjazz.pls"},
199: {"Mellow Smooth Jazz", "http://listen.jazzradio.com/premium_high/mellowsmoothjazz.pls"},
412: {"Mellow Piano Jazz", "http://listen.jazzradio.com/premium_high/mellowpianojazz.pls"},
82: {"Mellow Jazz", "http://listen.jazzradio.com/premium_high/mellowjazz.pls"},
102: {"Saxophone Jazz", "http://listen.jazzradio.com/premium_high/saxophonejazz.pls"},
78: {"Cool Jazz", "http://listen.jazzradio.com/premium_high/cooljazz.pls"},
75: {"Classic Jazz", "http://listen.jazzradio.com/premium_high/classicjazz.pls"},
113: {"Gypsy Jazz", "http://listen.jazzradio.com/premium_high/gypsyjazz.pls"},
301: {"Dave Koz & Friends", "http://listen.jazzradio.com/premium_high/davekoz.pls"},
170: {"Smooth Lounge", "http://listen.jazzradio.com/premium_high/smoothlounge.pls"},
73: {"Straight-Ahead", "http://listen.jazzradio.com/premium_high/straightahead.pls"},
83: {"Piano Jazz", "http://listen.jazzradio.com/premium_high/pianojazz.pls"},
134: {"Bass Jazz", "http://listen.jazzradio.com/premium_high/bassjazz.pls"},
81: {"Guitar Jazz", "http://listen.jazzradio.com/premium_high/guitarjazz.pls"},
300: {"Flamenco Jazz", "http://listen.jazzradio.com/premium_high/flamencojazz.pls"},
135: {"Vibraphone Jazz", "http://listen.jazzradio.com/premium_high/vibraphonejazz.pls"},
74: {"Bebop", "http://listen.jazzradio.com/premium_high/bebop.pls"},
77: {"Hard Bop", "http://listen.jazzradio.com/premium_high/hardbop.pls"},
97: {"<NAME>", "http://listen.jazzradio.com/premium_high/pianotrios.pls"},
85: {"Bossa Nova", "http://listen.jazzradio.com/premium_high/bossanova.pls"},
196: {"Smooth Bossa Nova", "http://listen.jazzradio.com/premium_high/smoothbossanova.pls"},
87: {"Fusion Lounge", "http://listen.jazzradio.com/premium_high/fusionlounge.pls"},
79: {"Contemporary Vocals", "http://listen.jazzradio.com/premium_high/vocaljazz.pls"},
99: {"Smooth Vocals", "http://listen.jazzradio.com/premium_high/smoothvocals.pls"},
96: {"Vocal Legends", "http://listen.jazzradio.com/premium_high/vocallegends.pls"},
86: {"Smooth Uptempo", "http://listen.jazzradio.com/premium_high/smoothuptempo.pls"},
76: {"Swing & Big Band", "http://listen.jazzradio.com/premium_high/swingnbigband.pls"},
80: {"Latin Jazz", "http://listen.jazzradio.com/premium_high/latinjazz.pls"},
98: {"Timeless Classics", "http://listen.jazzradio.com/premium_high/timelessclassics.pls"},
95: {"Sinatra Style", "http://listen.jazzradio.com/premium_high/sinatrastyle.pls"},
200: {"Blues Rock", "http://listen.jazzradio.com/premium_high/bluesrock.pls"},
89: {"Blues", "http://listen.jazzradio.com/premium_high/blues.pls"},
},
"radiotunes.com": map[int]*Channel{
61: {"Love Music", "http://listen.radiotunes.com/premium_high/lovemusic.pls"},
21: {"Top Hits", "http://listen.radiotunes.com/premium_high/tophits.pls"},
20: {"Smooth Jazz", "http://listen.radiotunes.com/premium_high/smoothjazz.pls"},
22: {"80s Hits", "http://listen.radiotunes.com/premium_high/the80s.pls"},
52: {"Solo Piano", "http://listen.radiotunes.com/premium_high/solopiano.pls"},
18: {"New Age", "http://listen.radiotunes.com/premium_high/newage.pls"},
111: {"Vocal Smooth Jazz", "http://listen.radiotunes.com/premium_high/vocalsmoothjazz.pls"},
120: {"Relaxation", "http://listen.radiotunes.com/premium_high/relaxation.pls"},
118: {"Smooth Jazz 24'7", "http://listen.radiotunes.com/premium_high/smoothjazz247.pls"},
46: {"Classic Rock", "http://listen.radiotunes.com/premium_high/classicrock.pls"},
132: {"Soft Rock", "http://listen.radiotunes.com/premium_high/softrock.pls"},
173: {"Smooth Lounge", "http://listen.radiotunes.com/premium_high/smoothlounge.pls"},
197: {"Café de Paris", "http://listen.radiotunes.com/premium_high/cafedeparis.pls"},
422: {"Chill & Tropical House", "http://listen.radiotunes.com/premium_high/chillntropicalhouse.pls"},
24: {"70s Hits", "http://listen.radiotunes.com/premium_high/hit70s.pls"},
297: {"Mellow Smooth Jazz", "http://listen.radiotunes.com/premium_high/mellowsmoothjazz.pls"},
39: {"Oldies", "http://listen.radiotunes.com/premium_high/oldies.pls"},
109: {"Dreamscapes", "http://listen.radiotunes.com/premium_high/dreamscapes.pls"},
17: {"Mostly Classical", "http://listen.radiotunes.com/premium_high/classical.pls"},
141: {"80s Rock Hits", "http://listen.radiotunes.com/premium_high/80srock.pls"},
334: {"Lounge", "http://listen.radiotunes.com/premium_high/lounge.pls"},
228: {"80s Dance", "http://listen.radiotunes.com/premium_high/80sdance.pls"},
41: {"Uptempo Smooth Jazz", "http://listen.radiotunes.com/premium_high/uptemposmoothjazz.pls"},
358: {"00s Hits", "http://listen.radiotunes.com/premium_high/hit00s.pls"},
188: {"Mellow Jazz", "http://listen.radiotunes.com/premium_high/mellowjazz.pls"},
48: {"DaTempo Lounge", "http://listen.radiotunes.com/premium_high/datempolounge.pls"},
121: {"Vocal New Age", "http://listen.radiotunes.com/premium_high/vocalnewage.pls"},
49: {"Alternative Rock", "http://listen.radiotunes.com/premium_high/altrock.pls"},
407: {"Smooth Beats", "http://listen.radiotunes.com/premium_high/smoothbeats.pls"},
423: {"Indie Dance", "http://listen.radiotunes.com/premium_high/indiedance.pls"},
23: {"Roots Reggae", "http://listen.radiotunes.com/premium_high/rootsreggae.pls"},
71: {"Dance Hits", "http://listen.radiotunes.com/premium_high/dancehits.pls"},
25: {"Country", "http://listen.radiotunes.com/premium_high/country.pls"},
119: {"Nature", "http://listen.radiotunes.com/premium_high/nature.pls"},
54: {"Bossa Nova", "http://listen.radiotunes.com/premium_high/bossanova.pls"},
299: {"00s R&B", "http://listen.radiotunes.com/premium_high/00srnb.pls"},
303: {"Meditation", "http://listen.radiotunes.com/premium_high/meditation.pls"},
186: {"90s Hits", "http://listen.radiotunes.com/premium_high/hit90s.pls"},
229: {"90s R&B", "http://listen.radiotunes.com/premium_high/90srnb.pls"},
214: {"60s Hits", "http://listen.radiotunes.com/premium_high/hit60s.pls"},
19: {"Classical Guitar", "http://listen.radiotunes.com/premium_high/guitar.pls"},
273: {"Old School Funk & Soul", "http://listen.radiotunes.com/premium_high/oldschoolfunknsoul.pls"},
128: {"Modern Blues", "http://listen.radiotunes.com/premium_high/modernblues.pls"},
27: {"Salsa", "http://listen.radiotunes.com/premium_high/salsa.pls"},
420: {"Romántica Latina", "http://listen.radiotunes.com/premium_high/romanticalatina.pls"},
187: {"Smooth Bossa Nova", "http://listen.radiotunes.com/premium_high/smoothbossanova.pls"},
298: {"D<NAME> & Friends", "http://listen.radiotunes.com/premium_high/davekoz.pls"},
417: {"Latin Pop Hits", "http://listen.radiotunes.com/premium_high/latinpophits.pls"},
406: {"Sleep Relaxation", "http://listen.radiotunes.com/premium_high/sleeprelaxation.pls"},
55: {"Piano Jazz", "http://listen.radiotunes.com/premium_high/pianojazz.pls"},
227: {"60s Rock", "http://listen.radiotunes.com/premium_high/60srock.pls"},
414: {"Bolero", "http://listen.radiotunes.com/premium_high/bolero.pls"},
50: {"Movie Soundtracks", "http://listen.radiotunes.com/premium_high/soundtracks.pls"},
108: {"Romantica", "http://listen.radiotunes.com/premium_high/romantica.pls"},
335: {"Vocal Chillout", "http://listen.radiotunes.com/premium_high/vocalchillout.pls"},
43: {"World", "http://listen.radiotunes.com/premium_high/world.pls"},
330: {"Chillout", "http://listen.radiotunes.com/premium_high/chillout.pls"},
336: {"Vocal Lounge", "http://listen.radiotunes.com/premium_high/vocallounge.pls"},
343: {"Classical Period", "http://listen.radiotunes.com/premium_high/classicalperiod.pls"},
331: {"Downtempo Lounge", "http://listen.radiotunes.com/premium_high/downtempolounge.pls"},
305: {"Urban Pop Hits", "http://listen.radiotunes.com/premium_high/urbanpophits.pls"},
93: {"American Songbook", "http://listen.radiotunes.com/premium_high/americansongbook.pls"},
51: {"Contemporary Christian", "http://listen.radiotunes.com/premium_high/christian.pls"},
354: {"80s Alt & New Wave", "http://listen.radiotunes.com/premium_high/80saltnnewwave.pls"},
129: {"Metal", "http://listen.radiotunes.com/premium_high/metal.pls"},
306: {"Relaxing Ambient Piano", "http://listen.radiotunes.com/premium_high/relaxingambientpiano.pls"},
139: {"Pop Rock", "http://listen.radiotunes.com/premium_high/poprock.pls"},
356: {"Blues Rock", "http://listen.radiotunes.com/premium_high/bluesrock.pls"},
130: {"Hard Rock", "http://listen.radiotunes.com/premium_high/hardrock.pls"},
322: {"Slow R&B", "http://listen.radiotunes.com/premium_high/slowjams.pls"},
342: {"Baroque Period", "http://listen.radiotunes.com/premium_high/baroque.pls"},
344: {"Mozart", "http://listen.radiotunes.com/premium_high/mozart.pls"},
26: {"Jazz Classics", "http://listen.radiotunes.com/premium_high/jazzclassics.pls"},
72: {"Bebop Jazz", "http://listen.radiotunes.com/premium_high/bebop.pls"},
42: {"Urban Hits", "http://listen.radiotunes.com/premium_high/urbanjamz.pls"},
38: {"Classic Hip-Hop", "http://listen.radiotunes.com/premium_high/classicrap.pls"},
140: {"Club Bollywood", "http://listen.radiotunes.com/premium_high/clubbollywood.pls"},
415: {"Cuban Lounge", "http://listen.radiotunes.com/premium_high/cubanlounge.pls"},
94: {"Classical Piano Trios", "http://listen.radiotunes.com/premium_high/classicalpianotrios.pls"},
329: {"Ambient", "http://listen.radiotunes.com/premium_high/ambient.pls"},
307: {"Disco Party", "http://listen.radiotunes.com/premium_high/discoparty.pls"},
37: {"Indie Rock", "http://listen.radiotunes.com/premium_high/indierock.pls"},
131: {"Modern Rock", "http://listen.radiotunes.com/premium_high/modernrock.pls"},
418: {"Latin Rock", "http://listen.radiotunes.com/premium_high/latinrock.pls"},
332: {"EDM Fest", "http://listen.radiotunes.com/premium_high/edmfest.pls"},
333: {"EuroDance", "http://listen.radiotunes.com/premium_high/eurodance.pls"},
202: {"Classic Motown", "http://listen.radiotunes.com/premium_high/classicmotown.pls"},
345: {"Romantic Period", "http://listen.radiotunes.com/premium_high/romantic.pls"},
110: {"Jpop", "http://listen.radiotunes.com/premium_high/jpop.pls"},
304: {"Reggaeton", "http://listen.radiotunes.com/premium_high/reggaeton.pls"},
416: {"Flamenco", "http://listen.radiotunes.com/premium_high/flamenco.pls"},
421: {"Tango", "http://listen.radiotunes.com/premium_high/tango.pls"},
419: {"Norteña", "http://listen.radiotunes.com/premium_high/nortena.pls"},
},
"rockradio.com": map[int]*Channel{
408: {"00s Rock", "http://listen.rockradio.com/premium_high/00srock.pls"},
409: {"70s Rock", "http://listen.rockradio.com/premium_high/70srock.pls"},
410: {"Alternative Rock", "http://listen.rockradio.com/premium_high/alternativerock.pls"},
143: {"Classic Rock", "http://listen.rockradio.com/premium_high/classicrock.pls"},
201: {"Blues Rock", "http://listen.rockradio.com/premium_high/bluesrock.pls"},
189: {"Classic Metal", "http://listen.rockradio.com/premium_high/classicmetal.pls"},
154: {"80s Rock", "http://listen.rockradio.com/premium_high/80srock.pls"},
226: {"60s Rock", "http://listen.rockradio.com/premium_high/60srock.pls"},
164: {"Symphonic Metal", "http://listen.rockradio.com/premium_high/symphonicmetal.pls"},
150: {"Hard Rock", "http://listen.rockradio.com/premium_high/hardrock.pls"},
152: {"Soft Rock", "http://listen.rockradio.com/premium_high/softrock.pls"},
272: {"Industrial", "http://listen.rockradio.com/premium_high/industrial.pls"},
166: {"90s Rock", "http://listen.rockradio.com/premium_high/90srock.pls"},
160: {"Modern Rock", "http://listen.rockradio.com/premium_high/modernrock.pls"},
192: {"Thrash Metal", "http://listen.rockradio.com/premium_high/thrashmetal.pls"},
169: {"Hair Bands", "http://listen.rockradio.com/premium_high/hairbands.pls"},
195: {"90s Alternative", "http://listen.rockradio.com/premium_high/alternative90s.pls"},
167: {"Classic Hard Rock", "http://listen.rockradio.com/premium_high/classichardrock.pls"},
153: {"Pop Rock", "http://listen.rockradio.com/premium_high/poprock.pls"},
194: {"80s Alternative", "http://listen.rockradio.com/premium_high/alternative80s.pls"},
191: {"Progressive Rock", "http://listen.rockradio.com/premium_high/progressiverock.pls"},
193: {"Rock Ballads", "http://listen.rockradio.com/premium_high/rockballads.pls"},
161: {"Punk Rock", "http://listen.rockradio.com/premium_high/punkrock.pls"},
148: {"Metal", "http://listen.rockradio.com/premium_high/metal.pls"},
149: {"Heavy Metal", "http://listen.rockradio.com/premium_high/heavymetal.pls"},
222: {"Melodic Death Metal", "http://listen.rockradio.com/premium_high/melodicdeathmetal.pls"},
287: {"Grunge", "http://listen.rockradio.com/premium_high/grunge.pls"},
155: {"Death Metal", "http://listen.rockradio.com/premium_high/deathmetal.pls"},
163: {"Power Metal", "http://listen.rockradio.com/premium_high/powermetal.pls"},
165: {"Nu Metal", "http://listen.rockradio.com/premium_high/numetal.pls"},
158: {"Metalcore", "http://listen.rockradio.com/premium_high/metalcore.pls"},
144: {"Indie Rock", "http://listen.rockradio.com/premium_high/indierock.pls"},
159: {"Screamo-Emo", "http://listen.rockradio.com/premium_high/screamoemo.pls"},
157: {"Black Metal", "http://listen.rockradio.com/premium_high/blackmetal.pls"},
},
} | channels.go | 0.541651 | 0.498352 | channels.go | starcoder |
package agg
import (
"fmt"
"math"
"github.com/emer/etable/etable"
)
// QuantilesIdx returns the given quantile(s) of non-Null, non-NaN elements in given
// IdxView indexed view of an etable.Table, for given column index.
// Column must be a 1d Column -- returns nil for n-dimensional columns.
// qs are 0-1 values, 0 = min, 1 = max, .5 = median, etc. Uses linear interpolation.
// Because this requires a sort, it is more efficient to get as many quantiles
// as needed in one pass.
// Returns nil if the view contains no usable (non-Null) rows.
func QuantilesIdx(ix *etable.IdxView, colIdx int, qs []float64) []float64 {
	nq := len(qs)
	if nq == 0 {
		return nil
	}
	col := ix.Table.Cols[colIdx]
	if col.NumDims() > 1 { // only valid for 1D
		return nil
	}
	rvs := make([]float64, nq)
	six := ix.Clone() // leave original indexes intact
	six.Filter(func(et *etable.Table, row int) bool { // get rid of nulls in this column
		return !col.IsNull1D(row)
	})
	six.SortCol(colIdx, true)
	if len(six.Idxs) == 0 {
		// BUG FIX: with zero usable rows, sz would be -1 and the index
		// lookups below would panic. Quantiles are undefined here.
		return nil
	}
	sz := len(six.Idxs) - 1 // length of our own index list
	fsz := float64(sz)
	for i, q := range qs {
		val := 0.0
		qi := q * fsz
		lwi := math.Floor(qi)
		lwii := int(lwi)
		if lwii >= sz {
			val = col.FloatVal1D(six.Idxs[sz])
		} else if lwii < 0 {
			val = col.FloatVal1D(six.Idxs[0])
		} else {
			// Linear interpolation between the two bracketing values.
			phi := qi - lwi
			lwv := col.FloatVal1D(six.Idxs[lwii])
			hiv := col.FloatVal1D(six.Idxs[lwii+1])
			val = (1-phi)*lwv + phi*hiv
		}
		rvs[i] = val
	}
	return rvs
}
// Quantiles returns the given quantile(s) of non-Null, non-NaN elements in given
// IdxView indexed view of an etable.Table, for given column name.
// If name not found, nil is returned -- use Try version for error message.
// Column must be a 1d Column -- returns nil for n-dimensional columns.
// qs are 0-1 values, 0 = min, 1 = max, .5 = median, etc. Uses linear interpolation.
// Because this requires a sort, it is more efficient to get as many quantiles
// as needed in one pass.
func Quantiles(ix *etable.IdxView, colNm string, qs []float64) []float64 {
	ci := ix.Table.ColIdx(colNm)
	if ci < 0 { // ColIdx returns -1 when the column name is unknown
		return nil
	}
	return QuantilesIdx(ix, ci, qs)
}
// QuantilesTry returns the given quantile(s) of non-Null, non-NaN elements in given
// IdxView indexed view of an etable.Table, for given column name
// If name not found, error message is returned.
// Column must be a 1d Column -- returns nil for n-dimensional columns.
// qs are 0-1 values, 0 = min, 1 = max, .5 = median, etc. Uses linear interpolation.
// Because this requires a sort, it is more efficient to get as many quantiles
// as needed in one pass.
func QuantilesTry(ix *etable.IdxView, colNm string, qs []float64) ([]float64, error) {
colIdx, err := ix.Table.ColIdxTry(colNm)
if err != nil {
return nil, err
}
rv := QuantilesIdx(ix, colIdx, qs)
if rv == nil {
return nil, fmt.Errorf("etable agg.QuantilesTry: either qs: %v empty or column: %v not 1D", qs, colNm)
}
return rv, nil
} | agg/quantiles.go | 0.723895 | 0.423339 | quantiles.go | starcoder |
package proc
import (
"fmt"
"math"
"github.com/akualab/dsp"
narray "github.com/akualab/narray/na64"
)
// defaultBufSize is the frame cache size used by the processors created in
// this package (passed to dsp.NewProc).
const defaultBufSize = 1000

// Value is a multidimensional array that satisfies the framer interface.
type Value *narray.NArray
// Scale returns a processor whose output is the input frame with every
// element multiplied by alpha.
func Scale(alpha float64) dsp.Processer {
	return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		frame, err := dsp.Processers(in).Get(idx)
		if err != nil {
			return nil, err
		}
		// nil destination tells narray.Scale to allocate the result.
		return narray.Scale(nil, frame.(*narray.NArray), alpha), nil
	})
}
// AddScaled adds frames from all inputs and scales the added values.
// Will panic if input frame sizes don't match.
func AddScaled(size int, alpha float64) dsp.Processer {
	return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		sum := narray.New(size)
		// Accumulate frame idx from each input in order.
		for _, p := range in {
			frame, err := p.(dsp.Framer).Get(idx)
			if err != nil {
				return nil, err
			}
			narray.Add(sum, sum, frame.(*narray.NArray))
		}
		return narray.Scale(sum, sum, alpha), nil
	})
}
// Sub subtracts in1 from in0. The inputs can be of type Framer of OneValuer.
// (The method uses reflection to get the type. For higher performance, implement a custom processor.)
// Will panic if input frame sizes don't match.
func Sub() dsp.Processer {
	return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		if len(in) != 2 {
			return nil, fmt.Errorf("proc Sub needs 2 inputs got %d", len(in))
		}
		minuend, err := dsp.Get(in[0], idx)
		if err != nil {
			return nil, err
		}
		subtrahend, err := dsp.Get(in[1], idx)
		if err != nil {
			return nil, err
		}
		return narray.Sub(nil, minuend.(*narray.NArray), subtrahend.(*narray.NArray)), nil
	})
}
// Join stacks multiple input vectors into a single vector. Output vector size equals sum of input vector sizes.
// Blocks until all input vectors are available.
func Join() dsp.Processer {
	return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		framers, err := dsp.Processers(in).CheckInputs(len(in))
		if err != nil {
			return nil, err
		}
		// Concatenate the raw data of frame idx from every input, in order.
		stacked := []float64{}
		for _, f := range framers {
			frame, err := f.Get(idx)
			if err != nil {
				return nil, err
			}
			stacked = append(stacked, frame.(*narray.NArray).Data...)
		}
		return narray.NewArray(stacked, len(stacked)), nil
	})
}
// SpectralEnergy computes the real FFT energy of the input frame.
// FFT size is 2^(logSize+1) and the size of the output vector is 2^logSize.
// See dsp.RealFT and dsp.DFTEnergy for details.
func SpectralEnergy(logSize int) dsp.Processer {
	outSize := 1 << uint(logSize)
	dftSize := 2 * outSize
	return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		frame, err := dsp.Processers(in).Get(idx)
		if err != nil {
			return nil, err
		}
		// Copy into a fresh zero-padded buffer of the full DFT size.
		// TODO: do not allocate every time. use slice pool?
		dft := make([]float64, dftSize)
		copy(dft, frame.(*narray.NArray).Data)
		RealFT(dft, dftSize, true)
		egy := DFTEnergy(dft)
		return narray.NewArray(egy, len(egy)), nil
	})
}
// Filterbank computes filterbank energies using the provided indices and coefficients.
// indices[i] is the first input bin of filter i and coeff[i] holds that
// filter's weights; output element i is the weighted sum for filter i.
func Filterbank(indices []int, coeff [][]float64) dsp.Processer {
	nf := len(indices) // num filterbanks
	return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		vec, err := dsp.Processers(in).Get(idx)
		if err != nil {
			return nil, err
		}
		// Hoisted: one type assertion/field access instead of one per product.
		data := vec.(*narray.NArray).Data
		fb := make([]float64, nf)
		for i := 0; i < nf; i++ {
			for k, c := range coeff[i] {
				fb[i] += c * data[indices[i]+k]
			}
		}
		return narray.NewArray(fb, len(fb)), nil
	})
}
// Log returns the natural logarithm of the input.
func Log() dsp.Processer {
	return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		frame, err := dsp.Processers(in).Get(idx)
		if err != nil {
			return nil, err
		}
		// Element-wise log; nil destination allocates the result array.
		return narray.Log(nil, frame.(*narray.NArray)), nil
	})
}
// Sum returns the sum of the elements of the input frame.
func Sum() dsp.Processer {
	return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		frame, err := dsp.Processers(in).Get(idx)
		if err != nil {
			return nil, err
		}
		// The output is a single-element vector holding the total.
		total := narray.New(1)
		total.Set(frame.(*narray.NArray).Sum(), 0)
		return total, nil
	})
}
/*
MaxNorm returns a norm value as follows:

define: y[n] = norm[n-1] * alpha where alpha < 1
define: norm(v) as sqrt(v . v) where "." is the dot product.

max[n] = max(y[n], norm(x[n])

The max value is computed in the range {0...idx}
*/
func MaxNorm(bufSize int, alpha float64) dsp.Processer {
	return dsp.NewProc(bufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
		max := 0.0
		norm := 0.0
		for i := 0; i <= idx; i++ {
			y := norm * alpha
			// BUG FIX: fetch frame i (was Get(idx)), so the recursion
			// actually runs over the range {0...idx} as documented.
			vec, err := dsp.Processers(in).Get(i)
			if err != nil {
				return nil, err
			}
			na := vec.(*narray.NArray)
			norm = math.Sqrt(narray.Dot(na, na))
			max = math.Max(y, norm)
		}
		res := narray.New(1)
		res.Set(max, 0)
		return res, nil
	})
}
// DCT returns the Discrete Cosine Transform of the input vector.
func DCT(inSize, outSize int) dsp.Processer {
dct := GenerateDCT(outSize+1, inSize)
return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
input, err := dsp.Processers(in).Get(idx)
if err != nil {
return nil, err
}
size := input.(*narray.NArray).Shape[0]
if inSize != size {
return nil, fmt.Errorf("mismatch in size [%d] and input frame size [%d]", inSize, size)
}
v := make([]float64, outSize, outSize)
for i := 1; i <= outSize; i++ {
for j := 0; j < inSize; j++ {
v[i-1] += input.(*narray.NArray).Data[j] * dct[i][j]
}
}
return narray.NewArray(v, len(v)), nil
})
}
/*
MAProc computes the average for the last M samples.
for i >= M:
i
AVG[i] = 1/M * sum X[j]
j=i-M+1
for 0 < i < M
i
AVG[i] = 1/(i+1) * sum X[j]
j=0
Where AVG is the output vector and X is the input vector.
Will panic if output size is different from input size.
If param avg in not nil, it will be used as the initial avg
for i < M.
*/
type MAProc struct {
dim, bufSize int
winSize int
*dsp.Proc
}
// NewMAProc creates a new MA processor.
func NewMAProc(dim, winSize, bufSize int) *MAProc {
ma := &MAProc{
dim: dim,
bufSize: bufSize,
winSize: winSize,
Proc: dsp.NewProc(bufSize, nil),
}
return ma
}
// Get implements the dsp.dsp.Processer interface.
func (ma *MAProc) Get(idx int) (dsp.Value, error) {
val, ok := ma.GetCache(idx)
if ok {
return val, nil
}
c := 1.0 / float64(ma.winSize)
start := idx - ma.winSize + 1
if idx < ma.winSize {
c = 1.0 / float64(idx+1)
start = 0
}
sum := narray.New(ma.dim)
// TODO: no need to add every time, use a circular buffer.
for j := start; j <= idx; j++ {
v, e := ma.Framer(0).Get(j)
if e != nil {
return nil, e
}
narray.Add(sum, sum, v.(*narray.NArray))
}
narray.Scale(sum, sum, c)
ma.SetCache(idx, sum)
return sum, nil
}
/*
DiffProc computes a weighted difference between samples as follows:
for delta < i < N-delta-1:
delta-1
diff[i] = sum c_j * { x[i+j+1] - x[i-j-1] }
j=0
where x is the input data stream, i is the frame index. and N
is the number of frames. For other frame indices replace delta with:
for i <= delta : delta' = i AND for i >= N-delta-1: delta' = N-1-i
Param "dim" must match the size of the input vectors.
Param "coeff" is the slice of coefficients.
*/
type DiffProc struct {
dim int
delta int
buf []dsp.Value
coeff []float64
cacheSize int
*dsp.Proc
}
// NewDiffProc returns a new diff processor.
func NewDiffProc(dim, bufSize int, coeff []float64) *DiffProc {
delta := len(coeff)
dp := &DiffProc{
delta: delta,
dim: dim,
coeff: coeff,
Proc: dsp.NewProc(bufSize, nil),
}
return dp
}
// Get implements the dsp.dsp.Processer interface.
func (dp *DiffProc) Get(idx int) (dsp.Value, error) {
if idx < 0 {
return nil, dsp.ErrOOB
}
val, ok := dp.GetCache(idx)
if ok {
return val, nil
}
res := narray.New(dp.dim)
for j := 0; j < dp.delta; j++ {
plus, ep := dp.Framer(0).Get(idx + j + 1)
if ep != nil {
return nil, ep
}
minus, em := dp.Framer(0).Get(idx - j - 1)
if em == dsp.ErrOOB {
// Repeat next frame.
v, em := dp.Get(idx + 1)
if em != nil {
return nil, em
}
res = v.(*narray.NArray)
break
}
if em != nil {
return nil, em
}
narray.AddScaled(res, plus.(*narray.NArray), dp.coeff[j])
narray.AddScaled(res, minus.(*narray.NArray), -dp.coeff[j])
}
dp.SetCache(idx, res)
return res, nil
}
// MaxXCorrIndex returns the lag that maximizes the cross-correlation between two inputs.
// The param lagLimit is the highest lag value to be explored.
// Input vectors may have different lengths.
// xcor[i] = x[n] * y[n-i]
// Returns the value of i that maximizes xcorr[i] and the max correlation value in a two-dimensional vector.
// value[0]=lag, value[1]=xcorr
func MaxXCorrIndex(lagLimit int) dsp.Processer {
return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
if len(in) != 2 {
return nil, fmt.Errorf("proc Corr needs 2 inputs got %d", len(in))
}
if idx < 0 {
return nil, fmt.Errorf("got negative index: %d", idx)
}
vec0, e0 := in[0].(dsp.Framer).Get(idx)
if e0 != nil {
return nil, e0
}
vec1, e1 := in[1].(dsp.Framer).Get(idx)
if e1 != nil {
return nil, e1
}
maxLag := 0
maxCorr := -math.MaxFloat64
n0 := len(vec0.(*narray.NArray).Data)
n1 := len(vec1.(*narray.NArray).Data)
for lag := 0; lag < lagLimit; lag++ {
end := n0
if n1+lag < end {
end = len(vec1.(*narray.NArray).Data) + lag
}
if lag > end {
break
}
sum := 0.0
for i := lag; i < end; i++ {
sum += vec0.(*narray.NArray).Data[i] * vec1.(*narray.NArray).Data[i-lag]
}
if sum > maxCorr {
maxCorr = sum
maxLag = lag
}
}
return narray.NewArray([]float64{float64(maxLag), maxCorr}, 2), nil
})
}
// MaxWin returns the elementwise max vector of the input stream.
func MaxWin() dsp.Processer {
return dsp.NewOneProc(func(in ...dsp.Processer) (dsp.Value, error) {
var max *narray.NArray
var i int
for {
vec, err := dsp.Processers(in).Get(i)
if err == dsp.ErrOOB {
return max, nil
}
if err != nil {
return nil, err
}
if i == 0 {
max = narray.New(vec.(*narray.NArray).Shape[0])
max.SetValue(-math.MaxFloat64)
}
narray.MaxArray(max, vec.(*narray.NArray), max)
i++
}
})
}
// Mean returns the mean vector of the input stream.
// N-1
// mean = sum in_frame[i] where mean and in_frame are vectors.
// i=0
func Mean() dsp.Processer {
return dsp.NewOneProc(func(in ...dsp.Processer) (dsp.Value, error) {
var mean *narray.NArray
var i int
for {
vec, err := dsp.Processers(in).Get(i)
if err == dsp.ErrOOB {
return narray.Scale(mean, mean, 1/float64(i)), nil
}
if err != nil {
return nil, err
}
if i == 0 {
mean = narray.New(vec.(*narray.NArray).Shape[0])
}
narray.Add(mean, mean, vec.(*narray.NArray))
i++
}
})
}
// MSE returns the mean squared error of two inputs.
func MSE() dsp.Processer {
return dsp.NewProc(defaultBufSize, func(idx int, in ...dsp.Processer) (dsp.Value, error) {
framers, err := dsp.Processers(in).CheckInputs(2)
if err != nil {
return nil, err
}
vec0, e0 := framers[0].Get(idx)
if e0 != nil {
return nil, e0
}
vec1, e1 := framers[1].Get(idx)
if e1 != nil {
return nil, e1
}
n := float64(vec0.(*narray.NArray).Shape[0])
mse := narray.Sub(nil, vec0.(*narray.NArray), vec1.(*narray.NArray))
narray.Mul(mse, mse, mse)
narray.Scale(mse, mse, 1.0/n)
return mse, nil
})
} | proc/proc.go | 0.729809 | 0.461563 | proc.go | starcoder |
package main
import "fmt"
const (
RED = 0
BLACK = 1
)
type interval struct {
low int
high int
}
type i_node struct {
i interval
m int
color int
parent *i_node
left_child *i_node
right_child *i_node
}
type i_tree struct {
root_node *i_node
size int
}
var nil_node = __nil_node()
func assert(result bool) {
if !result {
panic(fmt.Sprintf("Assert failed!"))
}
}
func overlap(ix interval, iy interval) bool {
if ix.high <= iy.low || iy.high <= ix.low {
return false
} else {
return true
}
}
func __nil_node() *i_node {
node := &i_node{}
node.color = BLACK
node.parent = node
node.left_child = node
node.right_child = node
return node
}
func new_node() *i_node {
node := &i_node{
color: RED,
left_child: nil_node,
right_child: nil_node,
parent: nil_node,
}
return node
}
func (tree *i_tree) left_rotate(x *i_node) {
y := x.right_child // Set y
x.right_child = y.left_child // Turn y's left subtree into x's right subtree
if y.left_child != nil_node {
y.left_child.parent = x
}
y.parent = x.parent // Link x's parent to y
if x.parent == nil_node {
tree.root_node = y
} else if x == x.parent.left_child {
x.parent.left_child = y
} else {
x.parent.right_child = y
}
y.left_child = x // Put x on y's left
x.parent = y
}
func (tree *i_tree) right_rotate(y *i_node) {
x := y.left_child // Set x
y.left_child = x.right_child // Turn x's right subtree into y's left subtree
if x.right_child != nil_node {
x.right_child.parent = y
}
x.parent = y.parent // Link y's parent to x
if y.parent == nil_node {
tree.root_node = x
} else if y == y.parent.left_child {
y.parent.left_child = x
} else {
y.parent.right_child = x
}
x.right_child = y // Put y on x's right
y.parent = x
}
func (tree *i_tree) insert_fixup(z *i_node) {
for z.parent.color == RED {
if z.parent == z.parent.parent.left_child {
y := z.parent.parent.right_child
if y.color == RED {
z.parent.color = BLACK // CASE 1
y.color = BLACK // CASE 1
z.parent.parent.color = RED // CASE 1
z = z.parent.parent // CASE 1
} else {
if z == z.parent.right_child {
z = z.parent // CASE 2
tree.left_rotate(z) // CASE 2
}
z.parent.color = BLACK // CASE 3
z.parent.parent.color = RED // CASE 3
tree.right_rotate(z.parent.parent) // CASE 3
}
} else {
y := z.parent.parent.left_child
if y.color == RED {
z.parent.color = BLACK // CASE 1
y.color = BLACK // CASE 1
z.parent.parent.color = RED // CASE 1
z = z.parent.parent // CASE 1
} else {
if z == z.parent.left_child {
z = z.parent // CASE 2
tree.right_rotate(z) // CASE 2
}
z.parent.color = BLACK // CASE 3
z.parent.parent.color = RED // CASE 3
tree.left_rotate(z.parent.parent) // CASE 3
}
}
}
tree.root_node.color = BLACK
}
func (tree *i_tree) __insert(i interval) {
x := tree.root_node
y := nil_node // trailing pointer of x
z := new_node()
tree.size++
z.i = i
for x != nil_node {
y = x
if z.i.low < x.i.low {
x = x.left_child
} else {
x = x.right_child
}
}
z.parent = y
if y == nil_node {
tree.root_node = z
} else if z.i.low < y.i.low {
y.left_child = z
} else {
y.right_child = z
}
tree.insert_fixup(z)
}
func (x *i_node) minimum() *i_node {
for x.left_child != nil_node {
x = x.left_child
}
return x
}
func (x *i_node) maximum() *i_node {
for x.right_child != nil_node {
x = x.right_child
}
return x
}
func (x *i_node) predecessor() *i_node {
if x.left_child != nil_node {
return x.left_child.maximum()
}
y := x.parent
for y != nil_node && x == y.left_child {
x = y
y = y.parent
}
return y
}
func (x *i_node) successor() *i_node {
if x.right_child != nil_node {
return x.right_child.minimum()
}
y := x.parent
for y != nil_node && x == y.right_child {
x = y
y = y.parent
}
return y
}
func (tree *i_tree) transplant(u *i_node, v *i_node) {
if u.parent == nil_node {
tree.root_node = v
} else if u == u.parent.left_child {
u.parent.left_child = v
} else {
u.parent.right_child = v
}
v.parent = u.parent
}
func (tree *i_tree) delete_fixup(x *i_node) {
for x != tree.root_node && x.color == BLACK {
if x == x.parent.left_child {
w := x.parent.right_child
if w.color == RED {
w.color = BLACK // CASE 1
x.parent.color = RED // CASE 1
tree.left_rotate(x.parent) // CASE 1
w = x.parent.right_child // CASE 1
}
if w.left_child.color == BLACK && w.right_child.color == BLACK {
w.color = RED // CASE 2
x = x.parent // CASE 2
} else {
if w.right_child.color == BLACK {
w.left_child.color = BLACK // CASE 3
w.color = RED // CASE 3
tree.right_rotate(w) // CASE 3
w = x.parent.right_child // CASE 3
}
w.color = x.parent.color // CASE 4
x.parent.color = BLACK // CASE 4
w.right_child.color = BLACK // CASE 4
tree.left_rotate(x.parent) // CASE 4
x = tree.root_node // CASE 4
}
} else {
w := x.parent.left_child
if w.color == RED {
w.color = BLACK // CASE 1
x.parent.color = RED // CASE 1
tree.right_rotate(x.parent) // CASE 1
w = x.parent.left_child // CASE 1
}
if w.right_child.color == BLACK && w.left_child.color == BLACK {
w.color = RED // CASE 2
x = x.parent // CASE 2
} else {
if w.left_child.color == BLACK {
w.right_child.color = BLACK // CASE 3
w.color = RED // CASE 3
tree.left_rotate(w) // CASE 3
w = x.parent.left_child // CASE 3
}
w.color = x.parent.color // CASE 4
x.parent.color = BLACK // CASE 4
w.left_child.color = BLACK // CASE 4
tree.right_rotate(x.parent) // CASE 4
x = tree.root_node // CASE 4
}
}
}
}
func (tree *i_tree) __delete(z *i_node) {
x := nil_node
y := z
y_original_color := y.color
tree.size--
if z.left_child == nil_node {
x = z.right_child
tree.transplant(z, z.right_child)
} else if z.right_child == nil_node {
x = z.left_child
tree.transplant(z, z.left_child)
} else {
y = z.right_child.minimum()
y_original_color = y.color
x = y.right_child
if y.parent == z {
if x == nil_node {
x.parent = y // This line is very important
} else {
assert(x.parent == y)
}
} else {
tree.transplant(y, y.right_child)
y.right_child = z.right_child
y.right_child.parent = y
}
tree.transplant(z, y)
y.left_child = z.left_child
y.left_child.parent = y
y.color = z.color
}
if y_original_color == BLACK {
tree.delete_fixup(x)
}
}
func new_tree() *i_tree {
tree := &i_tree{
root_node: nil_node,
size: 0,
}
return tree
}
func (tree *i_tree) search(i interval) *i_node {
x := tree.root_node
for x != nil_node && !overlap(i, x.i) {
if x.left_child != nil_node && x.left_child.m >= i.low {
x = x.left_child
} else {
x = x.right_child
}
}
return x
}
func (tree *i_tree) insert(i interval) {
node := tree.search(i)
if node == nil_node {
tree.__insert(i)
}
}
func (tree *i_tree) delete(i interval) {
node := tree.search(i)
if node != nil_node {
tree.__delete(node)
}
}
type iterator struct {
node *i_node
}
func (tree *i_tree) iterator() iterator {
return iterator{
node: tree.root_node.minimum(),
}
}
func (i *iterator) has_next() bool {
return i.node != nil_node
}
func (i *iterator) get_next() *i_node {
n := i.node
i.node = i.node.successor()
return n
}
type reverse_iterator struct {
node *i_node
}
func (tree *i_tree) reverse_iterator() reverse_iterator {
return reverse_iterator{
node: tree.root_node.maximum(),
}
}
func (i *reverse_iterator) has_next() bool {
return i.node != nil_node
}
func (i *reverse_iterator) get_next() *i_node {
n := i.node
i.node = i.node.predecessor()
return n
}
func main() {
tree := new_tree()
tree.insert(interval{1, 2})
tree.insert(interval{3, 4})
for i := tree.iterator(); i.has_next(); {
fmt.Println(i.get_next().i)
}
} | interval_tree.go | 0.58059 | 0.486332 | interval_tree.go | starcoder |
package msgpack
import (
"bytes"
"fmt"
"io"
"math"
"sync"
"time"
"github.com/segmentio/objconv/objutil"
)
// Emitter implements a MessagePack emitter that satisfies the objconv.Emitter
// interface.
type Emitter struct {
w io.Writer
b [240]byte
// This stack is used to cache arrays that are emitted in streaming mode,
// where the length of the array is not known before outputing all the
// elements.
stack []*context
// sback is used as the initial backing array for the stack slice to avoid
// dynamic memory allocations for the most common use cases.
sback [8]*context
}
type context struct {
b bytes.Buffer // buffer where the array elements are cached
w io.Writer // the previous writer where b will be flushed
n int // the number of elements written to the array
}
func NewEmitter(w io.Writer) *Emitter {
e := &Emitter{w: w}
e.stack = e.sback[:0]
return e
}
func (e *Emitter) Reset(w io.Writer) {
e.w = w
e.stack = e.stack[:0]
}
func (e *Emitter) EmitNil() (err error) {
e.b[0] = Nil
_, err = e.w.Write(e.b[:1])
return
}
func (e *Emitter) EmitBool(v bool) (err error) {
if v {
e.b[0] = True
} else {
e.b[0] = False
}
_, err = e.w.Write(e.b[:1])
return
}
func (e *Emitter) EmitInt(v int64, _ int) (err error) {
n := 0
if v >= 0 {
switch {
case v <= objutil.Int8Max:
e.b[0] = byte(v) | PositiveFixintTag
n = 1
case v <= objutil.Int16Max:
e.b[0] = Int16
putUint16(e.b[1:], uint16(v))
n = 3
case v <= objutil.Int32Max:
e.b[0] = Int32
putUint32(e.b[1:], uint32(v))
n = 5
default:
e.b[0] = Int64
putUint64(e.b[1:], uint64(v))
n = 9
}
} else {
switch {
case v >= -31:
e.b[0] = byte(v) | NegativeFixintTag
n = 1
case v >= objutil.Int8Min:
e.b[0] = Int8
e.b[1] = byte(v)
n = 2
case v >= objutil.Int16Min:
e.b[0] = Int16
putUint16(e.b[1:], uint16(v))
n = 3
case v >= objutil.Int32Min:
e.b[0] = Int32
putUint32(e.b[1:], uint32(v))
n = 5
default:
e.b[0] = Int64
putUint64(e.b[1:], uint64(v))
n = 9
}
}
_, err = e.w.Write(e.b[:n])
return
}
func (e *Emitter) EmitUint(v uint64, _ int) (err error) {
n := 0
switch {
case v <= objutil.Uint8Max:
e.b[0] = Uint8
e.b[1] = byte(v)
n = 2
case v <= objutil.Uint16Max:
e.b[0] = Uint16
putUint16(e.b[1:], uint16(v))
n = 3
case v <= objutil.Uint32Max:
e.b[0] = Uint32
putUint32(e.b[1:], uint32(v))
n = 5
default:
e.b[0] = Uint64
putUint64(e.b[1:], v)
n = 9
}
_, err = e.w.Write(e.b[:n])
return
}
func (e *Emitter) EmitFloat(v float64, bitSize int) (err error) {
switch bitSize {
case 32:
e.b[0] = Float32
putUint32(e.b[1:], math.Float32bits(float32(v)))
_, err = e.w.Write(e.b[:5])
default:
e.b[0] = Float64
putUint64(e.b[1:], math.Float64bits(v))
_, err = e.w.Write(e.b[:9])
}
return
}
func (e *Emitter) EmitString(v string) (err error) {
n := len(v)
switch {
case n <= 31:
e.b[0] = byte(n) | FixstrTag
n = 1
case n <= objutil.Uint8Max:
e.b[0] = Str8
e.b[1] = byte(n)
n = 2
case n <= objutil.Uint16Max:
e.b[0] = Str16
putUint16(e.b[1:], uint16(n))
n = 3
case n <= objutil.Uint32Max:
e.b[0] = Str32
putUint32(e.b[1:], uint32(n))
n = 5
default:
err = fmt.Errorf("objconv/msgpack: string of length %d is too long to be encoded", n)
return
}
for {
n1 := len(v)
n2 := len(e.b[n:])
if n1 > n2 {
n1 = n2
}
copy(e.b[n:], v[:n1])
if _, err = e.w.Write(e.b[:n+n1]); err != nil {
return
}
v = v[n1:]
n = 0
if len(v) == 0 {
return
}
}
}
func (e *Emitter) EmitBytes(v []byte) (err error) {
n := len(v)
switch {
case n <= objutil.Uint8Max:
e.b[0] = Bin8
e.b[1] = byte(n)
n = 2
case n <= objutil.Uint16Max:
e.b[0] = Bin16
putUint16(e.b[1:], uint16(n))
n = 3
case n <= objutil.Uint32Max:
e.b[0] = Bin32
putUint32(e.b[1:], uint32(n))
n = 5
default:
err = fmt.Errorf("objconv/msgpack: byte slice of length %d is too long to be encoded", n)
return
}
if _, err = e.w.Write(e.b[:n]); err != nil {
return
}
_, err = e.w.Write(v)
return
}
func (e *Emitter) EmitTime(v time.Time) (err error) {
const int34Max = 17179869183
x := ExtTime
n := 0
s := v.Unix()
ns := v.Nanosecond()
if ns == 0 && s >= 0 && s <= objutil.Uint32Max {
e.b[0] = Fixext4
e.b[1] = byte(x)
putUint32(e.b[2:], uint32(s))
n = 6
} else if s >= 0 && s <= int34Max {
e.b[0] = Fixext8
e.b[1] = byte(x)
putUint64(e.b[2:], uint64(s)|(uint64(ns)<<34))
n = 10
} else {
e.b[0] = Ext8
e.b[1] = 12
e.b[2] = byte(x)
putUint32(e.b[3:], uint32(ns))
putUint64(e.b[7:], uint64(s))
n = 15
}
_, err = e.w.Write(e.b[:n])
return
}
func (e *Emitter) EmitDuration(v time.Duration) (err error) {
return e.EmitString(string(objutil.AppendDuration(e.b[:0], v)))
}
func (e *Emitter) EmitError(v error) (err error) {
return e.EmitString(v.Error())
}
func (e *Emitter) EmitArrayBegin(n int) (err error) {
var c *context
if n < 0 {
c = contextPool.Get().(*context)
c.b.Truncate(0)
c.n = 0
c.w = e.w
e.w = &c.b
} else {
err = e.emitArray(n)
}
e.stack = append(e.stack, c)
return
}
func (e *Emitter) EmitArrayEnd() (err error) {
i := len(e.stack) - 1
c := e.stack[i]
e.stack = e.stack[:i]
if c != nil {
e.w = c.w
if c.b.Len() != 0 {
c.n++
}
if err = e.emitArray(c.n); err == nil {
_, err = c.b.WriteTo(c.w)
}
contextPool.Put(c)
}
return
}
func (e *Emitter) EmitArrayNext() (err error) {
if c := e.stack[len(e.stack)-1]; c != nil {
c.n++
}
return
}
func (e *Emitter) EmitMapBegin(n int) (err error) {
if n < 0 {
err = fmt.Errorf("objconv/msgpack: encoding maps of unknown length is not supported (n = %d)", n)
return
}
switch {
case n <= 15:
e.b[0] = byte(n) | FixmapTag
n = 1
case n <= objutil.Uint16Max:
e.b[0] = Map16
putUint16(e.b[1:], uint16(n))
n = 3
case n <= objutil.Uint32Max:
e.b[0] = Map32
putUint32(e.b[1:], uint32(n))
n = 5
default:
err = fmt.Errorf("objconv/msgpack: map of length %d is too long to be encoded", n)
return
}
_, err = e.w.Write(e.b[:n])
return
}
func (e *Emitter) EmitMapEnd() (err error) {
return
}
func (e *Emitter) EmitMapValue() (err error) {
return
}
func (e *Emitter) EmitMapNext() (err error) {
return
}
func (e *Emitter) emitArray(n int) (err error) {
switch {
case n <= 15:
e.b[0] = byte(n) | FixarrayTag
n = 1
case n <= objutil.Uint16Max:
e.b[0] = Array16
putUint16(e.b[1:], uint16(n))
n = 3
case n <= objutil.Uint32Max:
e.b[0] = Array32
putUint32(e.b[1:], uint32(n))
n = 5
default:
err = fmt.Errorf("objconv/msgpack: array of length %d is too long to be encoded", n)
return
}
_, err = e.w.Write(e.b[:n])
return
}
var contextPool = sync.Pool{
New: func() interface{} { return &context{} },
} | msgpack/emit.go | 0.678007 | 0.45744 | emit.go | starcoder |
package main
import (
"errors"
"math"
"sort"
onlinestats "github.com/dgryski/go-onlinestats"
"github.com/montanaflynn/stats"
)
type rank struct {
X float64
Y float64
Xrank float64
Yrank float64
}
type Float64Data []float64
func (f Float64Data) Len() int { return len(f) }
func (f Float64Data) Get(i int) float64 { return f[i] }
func Spearman2(data1, data2 Float64Data) (float64, error) {
//defer Measure(time.Now())
r, _ := onlinestats.Spearman(data1, data2)
return r, nil
}
func Spearman(data1, data2 Float64Data) (float64, error) {
//defer Measure(time.Now())
if data1.Len() < 3 || data2.Len() != data1.Len() {
return math.NaN(), errors.New("invalid size of data")
}
ranks := []rank{}
for index := 0; index < data1.Len(); index++ {
x := data1.Get(index)
y := data2.Get(index)
ranks = append(ranks, rank{
X: x,
Y: y,
})
}
sort.Slice(ranks, func(i int, j int) bool {
return ranks[i].X < ranks[j].X
})
for position := 0; position < len(ranks); position++ {
ranks[position].Xrank = float64(position) + 1
duplicateValues := []int{position}
for nested, p := range ranks {
if ranks[position].X == p.X {
if position != nested {
duplicateValues = append(duplicateValues, nested)
}
}
}
sum := 0
for _, val := range duplicateValues {
sum += val
}
avg := float64((sum + len(duplicateValues))) / float64(len(duplicateValues))
ranks[position].Xrank = avg
for index := 1; index < len(duplicateValues); index++ {
ranks[duplicateValues[index]].Xrank = avg
}
position += len(duplicateValues) - 1
}
sort.Slice(ranks, func(i int, j int) bool {
return ranks[i].Y < ranks[j].Y
})
for position := 0; position < len(ranks); position++ {
ranks[position].Yrank = float64(position) + 1
duplicateValues := []int{position}
for nested, p := range ranks {
if ranks[position].Y == p.Y {
if position != nested {
duplicateValues = append(duplicateValues, nested)
}
}
}
sum := 0
for _, val := range duplicateValues {
sum += val
}
// fmt.Println(sum + len(duplicateValues))
avg := float64((sum + len(duplicateValues))) / float64(len(duplicateValues))
ranks[position].Yrank = avg
for index := 1; index < len(duplicateValues); index++ {
ranks[duplicateValues[index]].Yrank = avg
}
position += len(duplicateValues) - 1
}
xRanked := []float64{}
yRanked := []float64{}
for _, rank := range ranks {
xRanked = append(xRanked, rank.Xrank)
yRanked = append(yRanked, rank.Yrank)
}
return stats.Pearson(xRanked, yRanked)
} | correlation.go | 0.569613 | 0.414662 | correlation.go | starcoder |
package math
func SumInt(x, y int) int {
return x + y
}
func SumInt8(x, y int8) int16 {
return int16(x) + int16(y)
}
func SumInt16(x, y int16) int32 {
return int32(x) + int32(y)
}
func SumInt32(x, y int32) int64 {
return int64(x) + int64(y)
}
func SumInt64(x, y int64) int64 {
return x + y
}
func SumUint(x, y uint) uint {
return x + y
}
func SumUint8(x, y uint8) uint16 {
return uint16(x) + uint16(y)
}
func SumUint16(x, y uint16) uint32 {
return uint32(x) + uint32(y)
}
func SumUint32(x, y uint32) uint64 {
return uint64(x) + uint64(y)
}
func SumUint64(x, y uint64) uint64 {
return x + y
}
func SumFloat32(x, y float32) float32 {
return x + y
}
func SumFloat64(x, y float64) float64 {
return x + y
}
func SumArrayInt(x []int) int {
var sum int
for i := 0; i < len(x); i++ {
sum += x[i]
}
return sum
}
func SumArrayInt8(x []int8) int {
var sum int
for i := 0; i < len(x); i++ {
sum += int(x[i])
}
return sum
}
func SumArrayInt16(x []int16) int {
var sum int
for i := 0; i < len(x); i++ {
sum += int(x[i])
}
return sum
}
func SumArrayInt32(x []int32) int {
var sum int
for i := 0; i < len(x); i++ {
sum += int(x[i])
}
return sum
}
func SumArrayInt64(x []int64) int {
var sum int
for i := 0; i < len(x); i++ {
sum += int(x[i])
}
return sum
}
func SumArrayUint(x []uint) uint {
var sum uint
for i := 0; i < len(x); i++ {
sum += x[i]
}
return sum
}
func SumArrayUint8(x []uint8) uint {
var sum uint
for i := 0; i < len(x); i++ {
sum += uint(x[i])
}
return sum
}
func SumArrayUint16(x []uint16) uint {
var sum uint
for i := 0; i < len(x); i++ {
sum += uint(x[i])
}
return sum
}
func SumArrayUint32(x []uint32) uint {
var sum uint
for i := 0; i < len(x); i++ {
sum += uint(x[i])
}
return sum
}
func SumArrayUint64(x []uint64) uint {
var sum uint
for i := 0; i < len(x); i++ {
sum += uint(x[i])
}
return sum
}
func SumArrayFloat32(x []float32) float32 {
var sum float32
for i := 0; i < len(x); i++ {
sum += x[i]
}
return sum
}
func SumArrayFloat64(x []float64) float64 {
var sum float64
for i := 0; i < len(x); i++ {
sum += x[i]
}
return sum
} | math/sum.go | 0.683314 | 0.540924 | sum.go | starcoder |
package style
import (
"RenG/src/config"
"RenG/src/core"
"RenG/src/lang/ast"
"RenG/src/lang/evaluator"
"RenG/src/lang/object"
"fmt"
"strconv"
)
func StyleEval(node ast.Node, texture *core.SDL_Texture, env *object.Environment) object.Object {
switch node := node.(type) {
case *ast.BlockStatement:
return evalBlockStatements(node, texture, env)
case *ast.ExpressionStatement:
return StyleEval(node.Expression, texture, env)
case *ast.PrefixExpression:
if rightValue, ok := node.Right.(*ast.Identifier); ok {
return evalAssignPrefixExpression(node.Operator, rightValue, env)
} else {
right := StyleEval(node.Right, texture, env)
if isError(right) {
return right
}
return evalPrefixExpression(node.Operator, right)
}
case *ast.InfixExpression:
if leftValue, ok := node.Left.(*ast.Identifier); ok && isAssign(node.Operator) {
right := StyleEval(node.Right, texture, env)
if isError(right) {
return right
}
return evalAssignInfixExpression(node.Operator, leftValue, right, env)
} else {
left := StyleEval(node.Left, texture, env)
if isError(left) {
return left
}
right := StyleEval(node.Right, texture, env)
if isError(right) {
return right
}
return evalInfixExpression(node.Operator, left, right)
}
case *ast.IfExpression:
return evalIfExpression(node, texture, env)
case *ast.ForExpression:
return evalForExpression(node, texture, env)
case *ast.WhileExpression:
return evalWhileExpression(node, texture, env)
case *ast.CallFunctionExpression:
function := StyleEval(node.Function, texture, env)
if isError(function) {
return function
}
args := evalExpressions(node.Arguments, texture, env)
if len(args) == 1 && isError(args[0]) {
return args[0]
}
return applyFunction(function, texture, args)
case *ast.IndexExpression:
left := StyleEval(node.Left, texture, env)
if isError(left) {
return left
}
index := StyleEval(node.Index, texture, env)
if isError(index) {
return index
}
return evalIndexExpression(left, index)
case *ast.Identifier:
return evalIdentifier(node, env)
case *ast.Boolean:
return &object.Boolean{Value: node.Value}
case *ast.IntegerLiteral:
return &object.Integer{Value: node.Value}
case *ast.FloatLiteral:
return &object.Float{Value: node.Value}
case *ast.StringLiteral:
return evalStringLiteral(node, texture, env)
case *ast.ArrayLiteral:
elements := evalExpressions(node.Elements, texture, env)
if len(elements) == 1 && isError(elements[0]) {
return elements[0]
}
return &object.Array{Elements: elements}
case *ast.ColorExpression:
colorObj := StyleEval(node.Value, texture, env)
if isError(colorObj) {
return colorObj
}
if color, ok := colorObj.(*object.String); ok {
hex := make([]int64, 3)
switch color.Value[:1] {
case "#":
hex[0], _ = strconv.ParseInt(color.Value[1:3], 16, 32)
hex[1], _ = strconv.ParseInt(color.Value[3:5], 16, 32)
hex[2], _ = strconv.ParseInt(color.Value[5:7], 16, 32)
default:
return newError("Color support only hex code")
}
config.MainFont.ChangeTextColor(texture, config.Renderer, texture.TextTexture.Text, core.CreateColor(int(hex[0]), int(hex[1]), int(hex[2])))
}
}
return nil
}
func evalBlockStatements(block *ast.BlockStatement, texture *core.SDL_Texture, env *object.Environment) object.Object {
var result object.Object
for _, statement := range block.Statements {
result = StyleEval(statement, texture, env)
if result != nil {
rt := result.Type()
if rt == object.ERROR_OBJ {
return result
}
}
}
return result
}
func evalExpressions(exps []ast.Expression, texture *core.SDL_Texture, env *object.Environment) []object.Object {
var result []object.Object
for _, e := range exps {
evaluated := StyleEval(e, texture, env)
if isError(evaluated) {
return []object.Object{evaluated}
}
result = append(result, evaluated)
}
return result
}
func evalStringLiteral(str *ast.StringLiteral, texture *core.SDL_Texture, env *object.Environment) *object.String {
result := &object.String{Value: str.Value}
// TODO : 최적화하기
// 일단 고쳤지만 여러 최적화가 필요할듯
var (
index = 0
expIndex = 0
)
for stringIndex := 0; stringIndex < len(str.Values); stringIndex++ {
for isCurrentExp(index, str) {
val := StyleEval(str.Exp[expIndex].Exp, texture, env)
switch value := val.(type) {
case *object.Integer:
result.Value += strconv.Itoa(int(value.Value))
case *object.Float:
result.Value += fmt.Sprintf("%f", value.Value)
case *object.Boolean:
result.Value += strconv.FormatBool(value.Value)
case *object.String:
result.Value += value.Value
default:
result.Value = "ErrorType"
}
expIndex++
index++
}
result.Value += str.Values[stringIndex].Str
index++
}
return result
}
func evalIndexExpression(left, index object.Object) object.Object {
switch {
case left.Type() == object.ARRAY_OBJ && index.Type() == object.INTEGER_OBJ:
return evalArrayIndexExpression(left, index)
default:
return newError("index operator not supported : %s", left.Type())
}
}
func evalArrayIndexExpression(array, index object.Object) object.Object {
arrayObject := array.(*object.Array)
idx := index.(*object.Integer).Value
max := int64(len(arrayObject.Elements) - 1)
if idx < 0 || idx > max {
return NULL
}
return arrayObject.Elements[idx]
}
func evalIfExpression(ie *ast.IfExpression, texture *core.SDL_Texture, env *object.Environment) object.Object {
condition := StyleEval(ie.Condition, texture, env)
if isError(condition) {
return condition
}
if isTruthy(condition) {
return StyleEval(ie.Consequence, texture, env)
}
for _, ee := range ie.Elif {
if ee != nil {
elifCondition := StyleEval(ee.Condition, texture, env)
if isError(elifCondition) {
return elifCondition
}
if isTruthy(elifCondition) {
return StyleEval(ee.Consequence, texture, env)
}
}
}
if ie.Alternative != nil {
return StyleEval(ie.Alternative, texture, env)
} else {
return NULL
}
}
func evalForExpression(node *ast.ForExpression, texture *core.SDL_Texture, env *object.Environment) object.Object {
var define, condition, result, run object.Object
define = StyleEval(node.Define, texture, env)
if isError(define) {
return define
}
condition = StyleEval(node.Condition, texture, env)
if isError(condition) {
return condition
}
for isTruthy(condition) {
result = StyleEval(node.Body, texture, env)
if isError(result) {
return result
}
run = StyleEval(node.Run, texture, env)
if isError(run) {
return run
}
condition = StyleEval(node.Condition, texture, env)
if isError(condition) {
return condition
}
}
return nil
}
func evalWhileExpression(node *ast.WhileExpression, texture *core.SDL_Texture, env *object.Environment) object.Object {
condition := StyleEval(node.Condition, texture, env)
if isError(condition) {
return condition
}
for isTruthy(condition) {
result := StyleEval(node.Body, texture, env)
if isError(result) {
return result
}
condition = StyleEval(node.Condition, texture, env)
if isError(condition) {
return condition
}
}
return nil
}
func evalIdentifier(node *ast.Identifier, env *object.Environment) object.Object {
if val, ok := env.Get(node.Value); ok {
return val
}
if builtin, ok := evaluator.FunctionBuiltins[node.Value]; ok {
return builtin
}
return newError("identifier not found: " + node.Value)
} | src/reng/style/eval.go | 0.525369 | 0.472257 | eval.go | starcoder |
package main
import "fmt"
// A representation of the state of the game: where the boat is, and how
// many missionaries/cannibals stand on each bank. The struct is comparable,
// so positions can be used directly as map keys (see dfs's visited set).
type position struct {
	boatOnWestBank bool // true is west bank, false is east bank
	westMissionaries int // west bank missionaries
	westCannibals int // west bank cannibals
	eastMissionaries int // east bank missionaries
	eastCannibals int // east bank cannibals
}
// valid reports whether pos is a legal game state: all 3 missionaries and
// all 3 cannibals must be accounted for, and no bank may have its
// missionaries outnumbered by cannibals. A bank holding zero missionaries
// is always safe regardless of cannibal count.
func valid(pos position) bool {
	if pos.westMissionaries+pos.eastMissionaries != 3 {
		return false
	}
	if pos.westCannibals+pos.eastCannibals != 3 {
		return false
	}
	if pos.eastMissionaries == 0 || pos.westMissionaries == 0 {
		return true
	}
	return pos.westMissionaries >= pos.westCannibals && pos.eastMissionaries >= pos.eastCannibals
}
// successors returns every legal position reachable from p with a single
// boat crossing. The boat always carries one or two people; the five
// candidate loads are tried in a fixed order. Crossings the current bank
// cannot staff leave the state unchanged and are filtered out, as are
// states rejected by valid. Returns an empty slice when no move is legal.
func (p position) successors() []position {
	moves := []struct{ cannibals, missionaries int }{
		{1, 0}, {2, 0}, {1, 1}, {0, 2}, {0, 1},
	}
	all := []position{}
	for _, m := range moves {
		next := p
		if p.boatOnWestBank {
			// only cross if the west bank can supply the load
			if m.cannibals <= p.westCannibals && m.missionaries <= p.westMissionaries {
				if m.cannibals > 0 {
					next.westCannibals -= m.cannibals
					next.eastCannibals += m.cannibals
					next.boatOnWestBank = false
				}
				if m.missionaries > 0 {
					next.westMissionaries -= m.missionaries
					next.eastMissionaries += m.missionaries
					next.boatOnWestBank = false
				}
			}
		} else if m.cannibals <= p.eastCannibals && m.missionaries <= p.eastMissionaries {
			if m.cannibals > 0 {
				next.westCannibals += m.cannibals
				next.eastCannibals -= m.cannibals
				next.boatOnWestBank = true
			}
			if m.missionaries > 0 {
				next.westMissionaries += m.missionaries
				next.eastMissionaries -= m.missionaries
				next.boatOnWestBank = true
			}
		}
		if valid(next) && next != p {
			all = append(all, next)
		}
	}
	return all
}
// A recursive depth-first search that goes through to find the goal and returns the path to get there
// Returns nil if no solution found.
//
// solution accumulates the path taken so far and MUST be seeded with the
// overall start position (solution[0] is read unconditionally below).
// visited is shared across all recursive calls and marks every expanded
// position so cycles are not re-entered.
// NOTE(review): solution is grown with append and shared between sibling
// recursive calls; because the search returns at the first goal found this
// appears safe here, but verify the slice-aliasing behavior before reusing
// the returned slice elsewhere.
func dfs(start position, goal position, solution []position, visited map[position]bool) []position {
	new_depth := start.successors()
	visited[start] = true
	// avoid duplicating the seeded start entry on the outermost call
	if start != solution[0] {
		solution = append(solution, start)
	}
	for _, new_pos := range new_depth {
		if visited[new_pos] == false {
			if new_pos == goal {
				solution = append(solution, new_pos)
				return solution
			} else {
				result := dfs(new_pos, goal, solution, visited)
				if result != nil {
					return result
				}
			}
		}
	}
	return nil
}
// main currently only demonstrates successors() from the initial state;
// the full dfs solve is present but commented out.
func main() {
	//start := position{boatOnWestBank: true, westMissionaries: 3, westCannibals: 3, eastMissionaries: 0, eastCannibals: 0}
	//goal := position{boatOnWestBank: false, westMissionaries: 0, westCannibals: 0, eastMissionaries: 3, eastCannibals: 3}
	//solution := dfs(start, goal, []position{start}, make(map[position]bool))
	//fmt.Println(solution)
	p := position{true, 3, 3, 0, 0}
	fmt.Println(p.successors())
} | missionaries.go | 0.602529 | 0.437343 | missionaries.go | starcoder |
package main
import (
"math"
"github.com/skandragon/dysonsphere/internal/cs"
"github.com/skandragon/dysonsphere/internal/cs/mathf"
"github.com/skandragon/dysonsphere/types"
)
// StarData holds all the statistics for a single star.
//
// Every field is derived deterministically from Seed (see makeBirthStar and
// createStar); the json tags allow a generated galaxy to be serialized.
type StarData struct {
	Index int32 `json:"index"`
	Level float32 `json:"level"`
	ID int32 `json:"id"`
	Seed int32 `json:"seed"`
	ResourceCoef float32 `json:"resource_coef"`
	Name string `json:"name"`
	Position *VectorLF3 `json:"position"`
	UPosition *VectorLF3 `json:"u_position"`
	Mass float32 `json:"mass"`
	Age float32 `json:"age"`
	Lifetime float32 `json:"lifetime"`
	Temperature float32 `json:"temperature"`
	Type types.StarType `json:"type"`
	Spectr types.SpectralType `json:"spectr"`
	Color float32 `json:"color"`
	ClassFactor float32 `json:"class_factor"`
	Luminosity float32 `json:"luminosity"`
	Radius float32 `json:"radius"`
	AcdiskRadius float32 `json:"acdisk_radius"`
	HabitableRadius float32 `json:"habitable_radius"`
	LightBalanceRadius float32 `json:"light_balance_radius"`
	OrbitScaler float32 `json:"orbit_scaler"`
	DysonRadius float32 `json:"dyson_radius"`
	PhysicsRadius float32 `json:"physics_radius"`
}
// Make the special starting star.
//
// All properties are derived deterministically from seed: the first child
// seed names the star, the second drives the property PRNG. The constants
// appear to be tuned game-design values — NOTE(review): confirm against the
// reference implementation before altering any of them.
func makeBirthStar(seed int32, universe UniqueStarnameChecker) *StarData {
	star := &StarData{
		Mass: 1,
		Lifetime: 50,
		Temperature: 8500,
		Luminosity: 1,
		HabitableRadius: 1,
		LightBalanceRadius: 1,
		DysonRadius: 10,
		OrbitScaler: 1,
		Index: 0,
		Level: 0,
		ID: 1,
		Seed: seed,
		ResourceCoef: 0.6,
		Position: VectorLF3Zero(),
		UPosition: VectorLF3Zero(),
	}
	random := cs.MakePRNGSequence(seed)
	seed2 := random.Next()
	seed3 := random.Next()
	random2 := cs.MakePRNGSequence(seed3)
	r := random2.NextDouble()
	r2 := random2.NextDouble()
	num := random2.NextDouble()
	rn := random2.NextDouble()
	rt := random2.NextDouble()
	num2 := random2.NextDouble()*0.2 + 0.9
	y := random2.NextDouble()*0.4 - 0.2
	num3 := math.Pow(2.0, y)
	// normal sample clamped so the birth star stays close to 1 solar mass
	num4 := randNormal(0, 0.08, r, r2)
	num4 = mathf.Clamp(num4, -0.2, 0.2)
	star.Mass = mathf.Pow(2, num4)
	d := 2.0 + 0.4*(1.0-float64(star.Mass))
	star.Lifetime = float32(10000 * math.Pow(0.1, math.Log10(float64(star.Mass)*0.5)/math.Log10(d)+1.0) * num2)
	star.Age = float32(num*0.4 + 0.3)
	// effective mass, reduced as the star ages
	num5 := (1 - mathf.Pow(mathf.Clamp01(star.Age), 20)*0.5) * star.Mass
	star.Temperature = float32(math.Pow(float64(num5), 0.56+0.14/(math.Log10(float64(num5)+4)/math.Log10(5.0)))*4450.0 + 1300.0)
	// num6 is the spectral class factor derived from temperature, clamped to [-4, 2]
	num6 := math.Log10((float64(star.Temperature)-1300.0)/4500.0)/math.Log10(2.6) - 0.5
	if num6 < 0.0 {
		num6 *= 4.0
	}
	if num6 > 2.0 {
		num6 = 2.0
	} else if num6 < -4.0 {
		num6 = -4.0
	}
	star.Spectr = types.SpectralType(mathf.RoundToInt(float32(num6) + 4))
	star.Color = mathf.Clamp01((float32(num6) + 3.5) * 0.2)
	star.ClassFactor = float32(num6)
	star.Luminosity = mathf.Pow(num5, 0.7)
	star.Radius = mathf.Pow(star.Mass, 0.4) * float32(num3)
	star.AcdiskRadius = 0
	p := float32(num6) + 2
	star.HabitableRadius = mathf.Pow(1.7, p) + 0.2*mathf.Min(1, star.OrbitScaler)
	star.LightBalanceRadius = mathf.Pow(1.7, p)
	star.OrbitScaler = mathf.Pow(1.35, p)
	if star.OrbitScaler < 1 {
		star.OrbitScaler = mathf.Lerp(star.OrbitScaler, 1, 0.6)
	}
	// apply end-of-life transformations (giant/dwarf/neutron/black hole)
	setStarAge(star, star.Age, rn, rt)
	star.DysonRadius = star.OrbitScaler * 0.28
	// keep the Dyson sphere outside 1.5x the star's physical radius
	if star.DysonRadius*40000.0 < (star.PhysicsRadius * 1.5) {
		star.DysonRadius = ((star.PhysicsRadius * 1.5) / 40000.0)
	}
	star.Name = randomStarName(seed2, star, universe)
	return star
}
// setStarAge applies end-of-life transformations to star based on age:
// age >= 1 collapses the star into a black hole (mass >= 18), neutron star
// (mass >= 7) or white dwarf; age in [0.96, 1) inflates it into a giant.
// Younger stars are left unchanged. rn and rt are uniform [0,1) samples
// used to jitter the derived quantities.
func setStarAge(star *StarData, age float32, rn float64, rt float64) {
	// jitter factors derived from the two random samples
	num := float32(rn*0.1 + 0.95)
	num2 := float32(rt*0.4 + 0.8)
	num3 := float32(rt*9.0 + 1.0)
	star.Age = age
	if age >= 1 {
		if star.Mass >= 18 {
			star.Type = types.StarTypeBlackHole
			star.Spectr = types.SpectralTypeX
			star.Mass *= 2.5 * num2
			star.Radius *= 1
			star.AcdiskRadius = star.Radius * 5
			star.Temperature = 0
			star.Luminosity *= 0.001 * num
			star.HabitableRadius = 0
			star.LightBalanceRadius *= 0.4 * num
		} else if star.Mass >= 7 {
			star.Type = types.StarTypeNeutronStar
			star.Spectr = types.SpectralTypeX
			star.Mass *= 0.2 * num
			star.Radius *= 0.15
			star.AcdiskRadius = star.Radius * 9
			star.Temperature = num3 * 10000000
			star.Luminosity *= 0.1 * num
			star.HabitableRadius = 0
			star.LightBalanceRadius *= 3 * num
			star.OrbitScaler *= 1.5 * num
		} else {
			star.Type = types.StarTypeWhiteDwarf
			star.Spectr = types.SpectralTypeX
			star.Mass *= 0.2 * num
			star.Radius *= 0.2
			star.AcdiskRadius = 0
			star.Temperature = num2 * 150000
			star.Luminosity *= 0.04 * num2
			star.HabitableRadius *= 0.15 * num2
			star.LightBalanceRadius *= 0.2 * num
		}
	} else if age >= 0.96 {
		// giant phase: radius grows sharply, damped logarithmically above 10
		num4 := mathf.Pow(5.0, mathf.Abs(mathf.Log10(star.Mass)-0.7)) * 5.0
		if num4 > 10 {
			num4 = float32((mathf.Log(num4*0.1) + 1) * 10)
		}
		num5 := float32(1 - mathf.Pow(star.Age, 30)*0.5)
		star.Type = types.StarTypeGiantStar
		star.Mass = num5 * star.Mass
		star.Radius = num4 * num2
		star.AcdiskRadius = 0
		star.Temperature = num5 * star.Temperature
		star.Luminosity = 1.6 * star.Luminosity
		star.HabitableRadius = 9 * star.HabitableRadius
		star.LightBalanceRadius = 3 * star.HabitableRadius
		star.OrbitScaler = 3.3 * star.OrbitScaler
	}
}
// createStar deterministically generates a star at pos from seed.
// needType/needSpectr force a particular star type or spectral class.
// Everything is driven by two child seeds: one for the name, one for the
// property PRNG, mirroring makeBirthStar.
//
// BUG FIX: the temperature formula used the constant 44.50 where the
// sibling makeBirthStar (and the same formula structure) uses 4450.0.
// With 44.50, (Temperature-1300)/4500 is tiny, so the spectral factor
// always clamped to -4 and every star received the same spectral class.
func createStar(galaxy *Universe, pos *VectorLF3, id int32, seed int32, needType types.StarType, needSpectr types.SpectralType) *StarData {
	star := &StarData{
		Mass: 1,
		Lifetime: 50,
		Temperature: 8500,
		Luminosity: 1,
		HabitableRadius: 1,
		LightBalanceRadius: 1,
		DysonRadius: 10,
		OrbitScaler: 1,
		ResourceCoef: 1,
		Index: id - 1,
		Seed: seed,
		ID: id,
		Position: pos,
	}
	if galaxy.StarCount > 1 {
		star.Level = float32(star.Index) / float32(galaxy.StarCount-1)
	}
	random := cs.MakePRNGSequence(seed)
	seed2 := random.Next()
	seed3 := random.Next()
	// resource coefficient grows with distance from the galactic center,
	// heavily damped by repeated log smoothing
	num := float32(pos.Magnitude())
	num2 := num / 32
	if num2 > 1 {
		num2 = mathf.Log(num2) + 1
		num2 = mathf.Log(num2) + 1
		num2 = mathf.Log(num2) + 1
		num2 = mathf.Log(num2) + 1
		num2 = mathf.Log(num2) + 1
	}
	star.ResourceCoef = mathf.Pow(7, num2) * 0.6
	random2 := cs.MakePRNGSequence(seed3)
	num3 := random2.NextDouble()
	num4 := random2.NextDouble()
	num5 := random2.NextDouble()
	rn := random2.NextDouble()
	rt := random2.NextDouble()
	num6 := (random2.NextDouble() - 0.5) * 0.2
	num7 := random2.NextDouble()*0.2 + 0.9
	num8 := random2.NextDouble()*0.4 - 0.2
	num9 := math.Pow(2.0, num8)
	// num10 is the mean of the mass distribution, shifted away from zero
	num10 := mathf.Lerp(-0.98, 0.88, star.Level)
	if num10 < 0 {
		num10 -= 0.65
	} else {
		num10 += 0.65
	}
	standardDeviation := float32(0.33)
	if needType == types.StarTypeGiantStar {
		if num8 <= -0.08 {
			num10 = 1.6
		} else {
			num10 = -1.5
		}
		standardDeviation = 0.3
	}
	num11 := randNormal(num10, standardDeviation, num3, num4)
	if needSpectr == types.SpectralTypeM {
		num11 = -3
	} else if needSpectr == types.SpectralTypeO {
		num11 = 3
	}
	if num11 > 0 {
		num11 *= 2
	}
	num11 = mathf.Clamp(num11, -2.4, 4.65) + float32(num6) + 1
	// forced exotic types get their own mass ranges
	if needType == types.StarTypeBlackHole {
		star.Mass = 18 + float32(num3*num4)*30
	} else if needType == types.StarTypeNeutronStar {
		star.Mass = 7 + float32(num3)*11
	} else if needType == types.StarTypeWhiteDwarf {
		star.Mass = 1 + float32(num4)*5
	} else {
		star.Mass = mathf.Pow(2, num11)
	}
	d := 5.0
	if star.Mass < 2 {
		d = 2 + 0.4*(1-float64(star.Mass))
	}
	star.Lifetime = float32(10000.0 * math.Pow(0.1, math.Log10(float64(star.Mass)*0.5)/math.Log10(d)+1) * num7)
	if needType == types.StarTypeGiantStar {
		star.Lifetime = float32(10000.0 * math.Pow(0.1, math.Log10(float64(star.Mass)*0.58)/math.Log10(d)+1) * num7)
		star.Age = float32(num5)*0.04 + 0.96
	} else if needType == types.StarTypeBlackHole || needType == types.StarTypeNeutronStar || needType == types.StarTypeWhiteDwarf {
		star.Age = float32(num5)*0.4 + 1
		if needType == types.StarTypeWhiteDwarf {
			star.Lifetime += 10000
		} else if needType == types.StarTypeNeutronStar {
			star.Lifetime += 1000
		}
	} else if star.Mass < 0.5 {
		star.Age = float32(num5)*0.12 + 0.02
	} else if star.Mass < 0.8 {
		star.Age = float32(num5)*0.4 + 0.1
	} else {
		star.Age = float32(num5)*0.7 + 0.2
	}
	// clamp lifetime*age with successive log damping, then re-derive lifetime
	num12 := star.Lifetime * star.Age
	if num12 > 5000 {
		num12 = (mathf.Log(num12/5000) + 1) * 5000
	}
	if num12 > 8000 {
		num13 := num12 / 8000
		num13 = mathf.Log(num13) + 1
		num13 = mathf.Log(num13) + 1
		num13 = mathf.Log(num13) + 1
		num12 = num13 * 8000
	}
	star.Lifetime = num12 / star.Age
	// effective mass, reduced as the star ages
	num14 := (1 - mathf.Pow(mathf.Clamp01(star.Age), 20)*0.5) * star.Mass
	// fixed: 4450.0 (was 44.50), matching makeBirthStar
	star.Temperature = float32(math.Pow(float64(num14), 0.56+0.14/(math.Log10(float64(num14+4))/math.Log10(5.0)))*4450.0 + 1300)
	num15 := math.Log10((float64(star.Temperature)-1300)/4500)/math.Log10(2.6) - 0.5
	if num15 < 0 {
		num15 *= 4
	}
	if num15 > 2 {
		num15 = 2
	} else if num15 < -4 {
		num15 = -4
	}
	star.Spectr = types.SpectralType(mathf.RoundToInt(float32(num15) + 4))
	star.Color = mathf.Clamp01((float32(num15) + 3.5) * 0.2)
	star.ClassFactor = float32(num15)
	star.Luminosity = mathf.Pow(num14, 0.7)
	star.Radius = float32(math.Pow(float64(star.Mass), 0.4) * num9)
	star.AcdiskRadius = 0
	p := float32(num15) + 2
	star.HabitableRadius = mathf.Pow(1.7, p) + 0.25*mathf.Min(1, star.OrbitScaler)
	star.LightBalanceRadius = mathf.Pow(1.7, p)
	star.OrbitScaler = mathf.Pow(1.35, p)
	if star.OrbitScaler < 1 {
		star.OrbitScaler = mathf.Lerp(star.OrbitScaler, 1, 0.6)
	}
	// apply end-of-life transformations (giant/dwarf/neutron/black hole)
	setStarAge(star, star.Age, rn, rt)
	star.DysonRadius = star.OrbitScaler * 0.28
	// keep the Dyson sphere outside 1.5x the star's physical radius
	if star.DysonRadius*40000 < star.PhysicsRadius*1.5 {
		star.DysonRadius = star.PhysicsRadius * 1.5 / 40000
	}
	star.UPosition = star.Position.Times(2400000)
	star.Name = randomStarName(seed2, star, galaxy)
	return star
}
// randNormal draws a sample from a normal distribution with the given mean
// and standard deviation, using the Box-Muller transform on the two
// uniform [0,1) inputs r1 and r2.
func randNormal(averageValue float32, standardDeviation float32, r1 float64, r2 float64) float32 {
	magnitude := mathf.Sqrt(-2.0 * mathf.Log(float32(1.0-r1)))
	angle := mathf.Sin(float32(6.283185307179586 * r2))
	return averageValue + standardDeviation*magnitude*angle
}
var (
	// orbitRadius holds 17 increasing radius values indexed by orbit slot,
	// with slot 0 reserved as zero. NOTE(review): presumed to be planetary
	// orbital distances — confirm against the code that consumes it.
	orbitRadius = []float64{
		0, 0.4, 0.7, 1, 1.4, 1.9, 2.5, 3.3, 4.3, 5.5, 6.9, 8.4, 10,
		11.7, 13.5, 15.4, 17.5,
	}
) | cmd/parsefile/stargen.go | 0.604983 | 0.447521 | stargen.go | starcoder |
package twistededwards
import (
"math/big"
"github.com/consensys/gnark/backend"
"github.com/consensys/gnark/frontend"
)
// Point point on a twisted Edwards curve in a Snark cs.
// X and Y are circuit variables; all arithmetic on them emits constraints
// into a frontend.ConstraintSystem rather than computing values directly.
type Point struct {
	X, Y frontend.Variable
}
// MustBeOnCurve checks if a point is on the twisted Edwards curve
// ax^2 + y^2 = 1 + d*x^2*y^2
// by constraining lhs = a*x^2 + y^2 to equal rhs = d*x^2*y^2 + 1.
func (p *Point) MustBeOnCurve(cs *frontend.ConstraintSystem, curve EdCurve) {
	one := big.NewInt(1)
	// a*x^2: (a*x) * (1*x)
	l1 := cs.LinearExpression(cs.Term(p.X, &curve.A))
	l2 := cs.LinearExpression(cs.Term(p.X, one))
	axx := cs.Mul(l1, l2)
	yy := cs.Mul(p.Y, p.Y)
	lhs := cs.Add(axx, yy)
	// d*x^2*y^2: (d*x) * (1*x) * y^2
	l1 = cs.LinearExpression(cs.Term(p.X, &curve.D))
	l2 = cs.LinearExpression(cs.Term(p.X, one))
	dxx := cs.Mul(l1, l2)
	dxxyy := cs.Mul(dxx, yy)
	rhs := cs.Add(dxxyy, one)
	cs.AssertIsEqual(lhs, rhs)
}
// AddFixedPoint Adds two points, among which is one fixed point (the base), on a twisted edwards curve (eg jubjub)
// p1, base, ecurve are respectively: the point to add, a known base point, and the parameters of the twisted edwards curve.
//
// Because (x, y) are compile-time constants, the products with the base
// coordinates fold into the linear-expression coefficients, which saves
// constraints compared to AddGeneric. Addition formula per
// https://eprint.iacr.org/2008/013.pdf:
//   x3 = (x1*y2 + x2*y1) / (1 + d*x1*x2*y1*y2)
//   y3 = (y1*y2 - a*x1*x2) / (1 - d*x1*x2*y1*y2)
func (p *Point) AddFixedPoint(cs *frontend.ConstraintSystem, p1 *Point, x, y interface{}, curve EdCurve) *Point {
	// https://eprint.iacr.org/2008/013.pdf
	var dxy, bxa big.Int
	bx := backend.FromInterface(x)
	by := backend.FromInterface(y)
	dxy.Mul(&bx, &by).Mul(&dxy, &curve.D) // d*x2*y2
	bxa.Mul(&bx, &curve.A).Neg(&bxa) // -a*x2
	n1 := cs.LinearExpression(cs.Term(p1.X, &by), cs.Term(p1.Y, &bx)) // x1y2+x2y1
	n2 := cs.LinearExpression(cs.Term(p1.Y, &by), cs.Term(p1.X, &bxa)) // y1y2-ax1x2
	ld := cs.LinearExpression(cs.Term(p1.X, &dxy)) // dx1x2y2
	_d := cs.Mul(ld, p1.Y) // dx1x2y2y1
	d1 := cs.Add(1, _d) // 1+dx1x2y2y1
	d2 := cs.Sub(1, _d) // 1-dx1x2y2y1
	p.X = cs.Div(n1, d1)
	p.Y = cs.Div(n2, d2)
	return p
}
// AddGeneric Adds two points on a twisted edwards curve (eg jubjub)
// p1, p2, c are respectively: the point to add, a known base point, and the parameters of the twisted edwards curve.
//
// Implements the unified addition formula from
// https://eprint.iacr.org/2008/013.pdf:
//   x3 = (x1*y2 + y1*x2) / (1 + d*x1*x2*y1*y2)
//   y3 = (y1*y2 - a*x1*x2) / (1 - d*x1*x2*y1*y2)
// The -a and -d coefficients are reduced mod the curve modulus so they can
// be used as linear-expression constants.
func (p *Point) AddGeneric(cs *frontend.ConstraintSystem, p1, p2 *Point, curve EdCurve) *Point {
	// https://eprint.iacr.org/2008/013.pdf
	res := Point{}
	one := big.NewInt(1)
	oneWire := cs.Constant(one)
	beta := cs.Mul(p1.X, p2.Y) // x1*y2
	gamma := cs.Mul(p1.Y, p2.X) // y1*x2
	delta := cs.Mul(p1.Y, p2.Y) // y1*y2
	epsilon := cs.Mul(p1.X, p2.X) // x1*x2
	tau := cs.Mul(delta, epsilon) // x1*x2*y1*y2
	num := cs.LinearExpression(
		cs.Term(beta, one),
		cs.Term(gamma, one),
	)
	den := cs.LinearExpression(
		cs.Term(oneWire, one),
		cs.Term(tau, &curve.D),
	)
	res.X = cs.Div(num, den)
	var minusa big.Int
	minusa.Neg(&curve.A).Mod(&minusa, &curve.Modulus)
	num = cs.LinearExpression(
		cs.Term(delta, one),
		cs.Term(epsilon, &minusa),
	)
	var minusd big.Int
	minusd.Neg(&curve.D).Mod(&minusd, &curve.Modulus)
	den = cs.LinearExpression(
		cs.Term(oneWire, one),
		cs.Term(tau, &minusd),
	)
	res.Y = cs.Div(num, den)
	p.X = res.X
	p.Y = res.Y
	return p
}
// Double sets p to 2*p1 on the twisted Edwards curve (via the unified
// addition formula, which is complete and so valid for doubling) and
// returns p.
func (p *Point) Double(cs *frontend.ConstraintSystem, p1 *Point, curve EdCurve) *Point {
	return p.AddGeneric(cs, p1, p1, curve)
}
// ScalarMulNonFixedBase computes p = [scalar]*p1 on a twisted Edwards
// curve, where p1 is itself a circuit point.
// p1: base point (as snark point)
// curve: parameters of the Edwards curve
// scalar: scalar as a SNARK constraint
// Standard left-to-right double-and-add over the 256 scalar bits.
func (p *Point) ScalarMulNonFixedBase(cs *frontend.ConstraintSystem, p1 *Point, scalar frontend.Variable, curve EdCurve) *Point {
	// decompose the scalar into bits, least significant first
	bits := cs.ToBinary(scalar, 256)
	// accumulator starts at the identity point (0, 1)
	acc := Point{
		cs.Constant(0),
		cs.Constant(1),
	}
	for i := len(bits) - 1; i >= 0; i-- {
		acc.Double(cs, &acc, curve)
		// always compute acc+p1, then keep it only when the bit is set
		var sum Point
		sum.AddGeneric(cs, &acc, p1, curve)
		acc.X = cs.Select(bits[i], sum.X, acc.X)
		acc.Y = cs.Select(bits[i], sum.Y, acc.Y)
	}
	p.X = acc.X
	p.Y = acc.Y
	return p
}
// ScalarMulFixedBase computes the scalar multiplication of a point on a twisted Edwards curve
// x, y: coordinates of the base point (compile-time constants)
// curve: parameters of the Edwards curve
// scalar: scalar as a SNARK constraint
// Standard left to right double and add; each conditional addition uses
// AddFixedPoint, which is cheaper than AddGeneric because the base
// coordinates fold into constraint coefficients.
func (p *Point) ScalarMulFixedBase(cs *frontend.ConstraintSystem, x, y interface{}, scalar frontend.Variable, curve EdCurve) *Point {
	// first unpack the scalar
	b := cs.ToBinary(scalar, 256)
	// accumulator starts at the identity point (0, 1)
	res := Point{
		cs.Constant(0),
		cs.Constant(1),
	}
	for i := len(b) - 1; i >= 0; i-- {
		res.Double(cs, &res, curve)
		tmp := Point{}
		tmp.AddFixedPoint(cs, &res, x, y, curve)
		// keep the addition result only when bit i of the scalar is set
		res.X = cs.Select(b[i], tmp.X, res.X)
		res.Y = cs.Select(b[i], tmp.Y, res.Y)
	}
	p.X = res.X
	p.Y = res.Y
	return p
} | std/algebra/twistededwards/point.go | 0.82478 | 0.462534 | point.go | starcoder |
package point
import (
"adventofcode2021/pkg/strutil"
"fmt"
"strings"
)
//go:generate go run ../gen/main.go -pkgName=point -typeName=Point -output=rotate.go
// Point is an integer coordinate in 3-D space (a scanner beacon reading).
type Point struct {
	X, Y, Z int
}
// Parse converts a comma-separated "x,y,z" string into a Point.
// It panics when the input does not contain exactly three fields or a
// field is not an integer (via MustAtoiSlice).
func Parse(input string) Point {
	parts := strings.Split(input, ",")
	if len(parts) != 3 {
		panic(fmt.Sprintf("Should 3 integers separated by comma. Got: %s", input))
	}
	nums := strutil.MustAtoiSlice(parts)
	return Point{X: nums[0], Y: nums[1], Z: nums[2]}
}
// Substract returns the component-wise difference p1 - p2 as a Vector
// (the displacement from p2 to p1). The name keeps the original spelling
// to preserve the public API.
func (p1 Point) Substract(p2 Point) Vector {
	return Vector{
		X: p1.X - p2.X,
		Y: p1.Y - p2.Y,
		Z: p1.Z - p2.Z,
	}
}
// Offset returns the point reached by translating p along v.
func (p Point) Offset(v Vector) Point {
	return Point{
		X: p.X + v.X,
		Y: p.Y + v.Y,
		Z: p.Z + v.Z,
	}
}
// Compare orders two points lexicographically by X, then Y, then Z,
// returning -1, 0, or 1.
func (p1 Point) Compare(p2 Point) int {
	cmpInt := func(a, b int) int {
		switch {
		case a < b:
			return -1
		case a > b:
			return 1
		}
		return 0
	}
	if c := cmpInt(p1.X, p2.X); c != 0 {
		return c
	}
	if c := cmpInt(p1.Y, p2.Y); c != 0 {
		return c
	}
	return cmpInt(p1.Z, p2.Z)
}
// String renders the point as "P(x,y,z)".
func (pt Point) String() string {
	return fmt.Sprintf("P(%d,%d,%d)", pt.X, pt.Y, pt.Z)
}
// Vector is an integer displacement between two Points.
type Vector struct {
	X, Y, Z int
}
// String renders the vector as "V(x,y,z)".
func (vec Vector) String() string {
	return fmt.Sprintf("V(%d,%d,%d)", vec.X, vec.Y, vec.Z)
}
// Line is a pair of Points; calcDistances stores them canonically with
// P1 <= P2 in Compare order.
type Line struct {
	P1, P2 Point
}
// calcDistances maps each pairwise difference vector to the lines between
// points that produce it. Each pair is canonicalized (endpoints ordered by
// Compare) before the vector hi-lo is computed, so equal geometry always
// yields the same key.
func calcDistances(ps []Point) map[Vector][]Line {
	dist := make(map[Vector][]Line)
	for i, a := range ps {
		for _, b := range ps[i+1:] {
			lo, hi := a, b
			if lo.Compare(hi) > 0 {
				lo, hi = hi, lo
			}
			v := hi.Substract(lo)
			dist[v] = append(dist[v], Line{lo, hi})
		}
	}
	return dist
}
// ParseScanners splits puzzle input into one point list per scanner.
// A "--- scanner" header starts a new (possibly empty) list; blank lines
// are skipped; any data line before the first header is ignored.
func ParseScanners(input string) [][]Point {
	var scanners [][]Point
	for _, line := range strutil.SplitLines(input) {
		switch {
		case strings.HasPrefix(line, "--- scanner"):
			scanners = append(scanners, []Point{})
		case line == "":
			// skip blank separators
		case len(scanners) > 0:
			last := len(scanners) - 1
			scanners[last] = append(scanners[last], Parse(line))
		}
	}
	return scanners
}
// FindOverlap tries all 24 axis-aligned rotations of scan2 to find at
// least 12 beacons shared with scan1. On success it returns the matched
// points from each scan (scanner-2 points reported in their original,
// un-rotated frame), the rotation index to apply to scan2, and the offset
// that maps rotated scanner-2 coordinates into scanner-1 space.
func FindOverlap(scan1 []Point, scan2 []Point) (
	found bool,
	over1 map[Point]struct{},
	over2 map[Point]struct{},
	rotation2 int,
	offset2 Vector,
) {
	for rotID := 0; rotID < 24; rotID++ {
		// rotate scanner 2 points
		rotScan2 := make([]Point, 0, len(scan2))
		for _, p := range scan2 {
			rotScan2 = append(rotScan2, p.Rotate(rotID))
		}
		dist1 := calcDistances(scan1)
		dist2 := calcDistances(rotScan2)
		// count how many pairwise difference vectors appear in both scans
		uniqueVec := make(map[Vector]int)
		for d := range dist1 {
			uniqueVec[d]++
		}
		for d := range dist2 {
			uniqueVec[d]++
		}
		count := 0
		var matchingVec []Vector
		for d, c := range uniqueVec {
			if c > 1 {
				count++
				matchingVec = append(matchingVec, d)
			}
		}
		// 12 shared points should produce 66 (12 * 11 / 2) matching
		// pairwise difference vectors between the two scans
		if count >= 66 {
			found = true
			rotation2 = rotID
			// Determine the 12 matching points
			over1 = make(map[Point]struct{})
			for _, d := range matchingVec {
				for _, l := range dist1[d] {
					over1[l.P1] = struct{}{}
					over1[l.P2] = struct{}{}
				}
			}
			// report scanner-2 matches in their original orientation
			over2 = make(map[Point]struct{})
			for _, d := range matchingVec {
				for _, l := range dist2[d] {
					over2[l.P1.Rotate(-rotID)] = struct{}{}
					over2[l.P2.Rotate(-rotID)] = struct{}{}
				}
			}
			// Determine scanner 2 offset vector from one matched pair
			p1 := dist1[matchingVec[0]][0].P1
			p2 := dist2[matchingVec[0]][0].P1
			offset2 = p1.Substract(p2)
			break
		}
	}
	return
} | day19/point/point.go | 0.547222 | 0.436262 | point.go | starcoder |
package main
import (
"fmt"
"math"
"strings"
)
/**
--- Day 5: Binary Boarding ---
You board your plane only to discover a new problem: you dropped your boarding pass! You aren't sure which seat is yours, and all of the flight attendants are busy with the flood of people that suddenly made it through passport control.
You write a quick program to use your phone's camera to scan all of the nearby boarding passes (your puzzle input); perhaps you can find your seat through process of elimination.
Instead of zones or groups, this airline uses binary space partitioning to seat people. A seat might be specified like FBFBBFFRLR, where F means "front", B means "back", L means "left", and R means "right".
The first 7 characters will either be F or B; these specify exactly one of the 128 rows on the plane (numbered 0 through 127). Each letter tells you which half of a region the given seat is in. Start with the whole list of rows; the first letter indicates whether the seat is in the front (0 through 63) or the back (64 through 127). The next letter indicates which half of that region the seat is in, and so on until you're left with exactly one row.
For example, consider just the first seven characters of FBFBBFFRLR:
Start by considering the whole range, rows 0 through 127.
F means to take the lower half, keeping rows 0 through 63.
B means to take the upper half, keeping rows 32 through 63.
F means to take the lower half, keeping rows 32 through 47.
B means to take the upper half, keeping rows 40 through 47.
B keeps rows 44 through 47.
F keeps rows 44 through 45.
The final F keeps the lower of the two, row 44.
The last three characters will be either L or R; these specify exactly one of the 8 columns of seats on the plane (numbered 0 through 7). The same process as above proceeds again, this time with only three steps. L means to keep the lower half, while R means to keep the upper half.
For example, consider just the last 3 characters of FBFBBFFRLR:
Start by considering the whole range, columns 0 through 7.
R means to take the upper half, keeping columns 4 through 7.
L means to take the lower half, keeping columns 4 through 5.
The final R keeps the upper of the two, column 5.
So, decoding FBFBBFFRLR reveals that it is the seat at row 44, column 5.
Every seat also has a unique seat ID: multiply the row by 8, then add the column. In this example, the seat has ID 44 * 8 + 5 = 357.
Here are some other boarding passes:
BFFFBBFRRR: row 70, column 7, seat ID 567.
FFFBBBFRRR: row 14, column 7, seat ID 119.
BBFFBBFRLL: row 102, column 4, seat ID 820.
As a sanity check, look through your list of boarding passes. What is the findHighest seat ID on a boarding pass?
*/
// day5_part1 decodes every boarding pass in day05.input, printing each
// seat's row, column and ID along with the running highest seat ID.
func day5_part1() {
	contents := getFilesContents("day05.input")
	rows := make([]int, 128)
	for i := range rows {
		rows[i] = i
	}
	cols := []int{0, 1, 2, 3, 4, 5, 6, 7}
	highestID := 0.0
	for _, pass := range strings.Split(contents, "\n") {
		row, col := findRow(pass, rows, cols)
		id := row*8 + col
		highestID = math.Max(float64(id), highestID)
		fmt.Println(row, col, id, highestID)
	}
}
// findRow decodes a boarding pass with binary space partitioning:
// B/F keep the upper/lower half of the remaining rows, R/L the
// upper/lower half of the remaining columns. It returns the single
// remaining row and column.
//
// Cleanup: the original round-tripped the halving through float64 and
// math.Floor, but integer division already truncates, and the trailing
// `else { continue }` was a no-op; both removed. Reslicing never mutates
// the caller's slices, so rows/cols can be reused across calls.
func findRow(str string, rows []int, cols []int) (int, int) {
	for _, c := range str {
		switch c {
		case 'B': // back: keep the upper half of the remaining rows
			rows = rows[len(rows)/2:]
		case 'F': // front: keep the lower half of the remaining rows
			rows = rows[:len(rows)/2]
		case 'R': // right: keep the upper half of the remaining columns
			cols = cols[len(cols)/2:]
		case 'L': // left: keep the lower half of the remaining columns
			cols = cols[:len(cols)/2]
		}
	}
	return rows[0], cols[0]
}
/**
--- Part Two ---
Ding! The "fasten seat belt" signs have turned on. Time to find your seat.
It's a completely full flight, so your seat should be the only missing boarding pass in your list. However, there's a catch: some of the seats at the very front and back of the plane don't exist on this aircraft, so they'll be missing from your list as well.
Your seat wasn't at the very front or back, though; the seats with IDs +1 and -1 from yours will be in your list.
What is the ID of your seat?
*/
// day5_part2 marks every seat that appears in the input in an 8x128
// occupancy grid (a[col][row] == 1), prints the grid, then prints each
// unoccupied seat away from the ends of the plane as a candidate.
// NOTE(review): the row > 6 && row < 119 bounds look hand-tuned to this
// particular input — confirm before reusing.
func day5_part2() {
	contents := getFilesContents("day05.input")
	passes := strings.Split(contents, "\n")
	rows := make([]int, 128)
	for i := range rows {
		rows[i] = i
	}
	cols := []int{0, 1, 2, 3, 4, 5, 6, 7}
	highestID := 0.0
	// occupancy grid indexed [column][row]
	a := make([][]uint8, 8)
	for i := range a {
		a[i] = make([]uint8, 128)
	}
	for _, pass := range passes {
		foundrow, foundcol := findRow(pass, rows, cols)
		id := foundrow*8 + foundcol
		a[foundcol][foundrow] = 1
		highestID = math.Max(float64(id), float64(highestID))
		// fmt.Println(foundrow, foundcol, foundrow*8+foundcol, highestID)
	}
	for _, b := range a {
		fmt.Println(b)
	}
	// report unoccupied seats away from the front/back of the plane
	for col, b := range a {
		for row, v := range b {
			if v == 0 && row > 6 && row < 119{
				fmt.Println("row", row, "col", col, "is free", row*8+col)
			}
		}
	}
} | day05.go | 0.697094 | 0.69643 | day05.go | starcoder |
package bta
import (
"go/types"
"log"
)
// Point represents a subject of control flow.
//
// Next returns the points control may flow to; Defs is the object this
// point defines; Uses lists the objects it reads; CouldBeTrue reports
// whether the point's condition could evaluate to true under division d.
// NOTE(review): semantics inferred from usage in this file — confirm
// against the package documentation.
type Point interface {
	Next() []Point
	Defs() types.Object
	Uses() []types.Object
	CouldBeTrue(d Division) bool
}
// Division describes the known-ness of a set of variables.
// During analysis entries only ever move from true to false (values are
// combined with logical AND), so true means "known on every path so far".
type Division map[types.Object]bool
// Graph stores the control flow structure under examination.
// One node exists per Point; edges and dependency lists are filled in by
// newGraph and calculateDependencies.
type Graph struct {
	point Point
	defs types.Object // object defined at this point, if any
	uses []types.Object // objects this point reads
	prev, next []*Graph // control-flow predecessors / successors
	loopDeps, dataDeps []*Graph // computed by calculateDependencies
}
// NewGraph builds the control-flow graph rooted at the starting Point p
// and precomputes its data and loop dependencies.
func NewGraph(p Point) *Graph {
	root := newGraph(map[Point]*Graph{}, p)
	calculateDependencies(root)
	return root
}
// Division calculates the pointwise division of the graph given an initial division.
//
// Every node starts with a copy of d; the loop then iterates to a fixed
// point, ANDing in values from data dependencies, from each node's own
// uses, and from the loop conditions guarding it. Since values only move
// from true to false, termination is guaranteed.
func (p *Graph) Division(d Division) map[Point]Division {
	res := map[Point]Division{p.point: d}
	nodes := p.graph()
	for _, p := range nodes {
		dd := Division{}
		for k, v := range d {
			dd[k] = v
		}
		res[p.point] = dd
	}
	changed := true
	// update ANDs x into res[p.point][v] and records whether anything moved
	update := func(p *Graph, v types.Object, x bool) {
		y := res[p.point][v]
		x = x && y
		if y != x {
			changed = true
		}
		res[p.point][v] = x
	}
	for changed {
		changed = false
		for _, p := range nodes {
			// inherit values from every data dependency
			for _, q := range p.dataDeps {
				for k, v := range res[q.point] {
					update(p, k, v)
				}
			}
			// a definition is only known if every used value is known
			for _, d := range p.uses {
				update(p, p.defs, res[p.point][d])
			}
			// loop-carried definitions also require the loop condition known
			for _, q := range p.loopDeps {
				log.Println(p, p.defs, q.point) // NOTE(review): looks like leftover debug output — consider removing
				update(p, p.defs, q.point.CouldBeTrue(res[q.point]))
			}
		}
	}
	return res
}
// newGraph recursively builds Graph nodes for p and everything reachable
// from it, memoizing through seen so shared and cyclic control flow
// converges on a single node per Point.
//
// BUG FIX: both guards originally tested `p != nil` — which is always
// true here — instead of the value just fetched. The wrong checks were
// harmless only because assigning nil matches the zero value anyway.
func newGraph(seen map[Point]*Graph, p Point) *Graph {
	if g := seen[p]; g != nil {
		return g
	}
	g := &Graph{point: p}
	seen[p] = g
	if d := p.Defs(); d != nil {
		g.defs = d
	}
	if u := p.Uses(); u != nil {
		g.uses = u
	}
	for _, q := range p.Next() {
		g.link(newGraph(seen, q))
	}
	return g
}
// link records a directed control-flow edge g -> succ in both adjacency lists.
func (g *Graph) link(succ *Graph) {
	g.next = append(g.next, succ)
	succ.prev = append(succ.prev, g)
}
// graph returns every node reachable from p via next edges, ensuring p
// itself is in the result.
func (p *Graph) graph() []*Graph {
	seen := map[*Graph]bool{}
	nodes := transitiveClosure(seen, p, func(g *Graph) []*Graph { return g.next })
	if !seen[p] {
		nodes = append(nodes, p)
	}
	return nodes
}
// successors returns all nodes reachable from p by following next edges
// (p itself included).
func (p *Graph) successors() []*Graph {
	follow := func(g *Graph) []*Graph { return g.next }
	return transitiveClosure(map[*Graph]bool{}, p, follow)
}
// precursors returns all nodes that can reach p, found by following prev
// edges (p itself included).
func (p *Graph) precursors() []*Graph {
	follow := func(g *Graph) []*Graph { return g.prev }
	return transitiveClosure(map[*Graph]bool{}, p, follow)
}
// transitiveClosure depth-first walks from p along the edges produced by
// f, collecting every node not already marked in seen (nodes are returned
// in visit order; already-seen nodes, including possibly p, are skipped).
func transitiveClosure(seen map[*Graph]bool, p *Graph, f func(*Graph) []*Graph) (res []*Graph) {
	var visit func(*Graph)
	visit = func(g *Graph) {
		if seen[g] {
			return
		}
		seen[g] = true
		res = append(res, g)
		for _, next := range f(g) {
			visit(next)
		}
	}
	visit(p)
	return res
}
// calculateDependencies fills in dataDeps (and, via
// calculateDependencyLoops, loopDeps) for every node: a node data-depends
// on each distinct precursor that defines something it uses.
func calculateDependencies(root *Graph) {
	for _, node := range root.graph() {
		if node.uses == nil {
			continue
		}
		for _, pre := range node.precursors() {
			if !node.dependsUpon(pre) {
				continue
			}
			if node != pre {
				node.dataDeps = append(node.dataDeps, pre)
			}
			calculateDependencyLoops(node, pre)
		}
	}
}
// calculateDependencyLoops handles a data dependency of p on precursor q
// that forms a cycle (q is also reachable from p). It records as loopDeps
// the branch points — precursors with more than one successor — that
// control re-entering the loop. If the cycle has no such branch point it
// can never exit, and the infiniteLoop sentinel is recorded instead.
func calculateDependencyLoops(p, q *Graph) {
	// no cycle back through q means nothing loop-carried to record
	if !reachable(p, q) {
		return
	}
	for _, r := range p.precursors() {
		// only branch points (multiple successors) can break the loop
		if len(r.next) <= 1 {
			continue
		}
		if reachable(p, r) {
			p.loopDeps = append(p.loopDeps, r)
		}
	}
	if len(p.loopDeps) == 0 {
		p.loopDeps = infiniteLoop
	}
}
// dependsUpon reports whether p uses the object that q defines.
func (p *Graph) dependsUpon(q *Graph) bool {
	for _, use := range p.uses {
		if use == q.defs {
			return true
		}
	}
	return false
}
// reachable reports whether q can be reached from p by following next
// edges; p counts as reachable from itself (successors includes p).
func reachable(p, q *Graph) bool {
	for _, node := range p.successors() {
		if node == q {
			return true
		}
	}
	return false
}
// infinitePoint is a sentinel Point whose condition can never be true,
// used to mark loops with no exit branch.
type infinitePoint struct{}

func (infinitePoint) Defs() types.Object { return nil }
func (infinitePoint) Uses() []types.Object { return nil }
func (infinitePoint) Next() []Point { return nil }
func (infinitePoint) CouldBeTrue(d Division) bool { return false }

// infiniteLoop is the shared sentinel dependency for never-exiting loops.
var infiniteLoop = []*Graph{&Graph{point: infinitePoint{}}} | bta/bta.go | 0.662469 | 0.501404 | bta.go | starcoder |
// Task
// In your choice of programming language, write a function that finds all customers who share all of their accounts.
// Example
// The following example indicates which customers own which accounts. For instance, the customer with id 1 owns the account with id 10 and the account with id 11.
// Cust Account
// 1 10
// 1 11
// 2 13
// 3 11
// 4 14
// 3 10
// 4 13
// Customers 1 and 3 share all of their accounts, 10 and 11, so they are a match.
// Customers 2 and 4 share account 13 but customer 4 also owns account 14, which customer 2 doesn't. They are not a match.
// SharedAccounts finds groups of customers who share ALL of their
// accounts: two customers match only when their account sets are
// identical. One AccountCustomer is returned per group of two or more
// matching customers, listing the shared accounts and the customers.
// Ordering follows first appearance in the input, so output is
// deterministic.
//
// REWRITE: the original body did not compile (append on a map value, an
// undefined hashOfCustList type, a []AccountCustomer result where the
// signature returns []*AccountCustomer, and no return statement).
func SharedAccounts(custAcc []*CustAccMapping) []*AccountCustomer {
	// collect each customer's account set, remembering first-seen order
	accountSet := map[int]map[int]bool{}
	accountOrder := map[int][]int{}
	var customers []int
	for _, m := range custAcc {
		set, seen := accountSet[m.Customer]
		if !seen {
			set = map[int]bool{}
			accountSet[m.Customer] = set
			customers = append(customers, m.Customer)
		}
		if !set[m.Account] {
			set[m.Account] = true
			accountOrder[m.Customer] = append(accountOrder[m.Customer], m.Account)
		}
	}
	sameSet := func(a, b map[int]bool) bool {
		if len(a) != len(b) {
			return false
		}
		for k := range a {
			if !b[k] {
				return false
			}
		}
		return true
	}
	// group customers whose account sets are identical
	grouped := map[int]bool{}
	var result []*AccountCustomer
	for i, c := range customers {
		if grouped[c] {
			continue
		}
		group := []int{c}
		for _, d := range customers[i+1:] {
			if !grouped[d] && sameSet(accountSet[c], accountSet[d]) {
				grouped[d] = true
				group = append(group, d)
			}
		}
		// a lone customer shares nothing; only groups of 2+ are matches
		if len(group) > 1 {
			result = append(result, &AccountCustomer{
				Accounts:  accountOrder[c],
				Customers: group,
			})
		}
	}
	return result
}
// hashIntList builds a deterministic, order-insensitive string key for a
// list of ints: the values sorted ascending, each followed by a comma
// (e.g. [3 1 2] -> "1,2,3,"). Two lists map to the same key iff they hold
// the same multiset of values. Sorting and digit conversion are done by
// hand to keep this file free of imports.
//
// FIX: the original declaration had an empty body (no return), which does
// not compile. NOTE(review): negation of math.MinInt overflows — harmless
// for the small ids used here, but worth confirming for other callers.
func hashIntList(custList []int) string {
	sorted := append([]int(nil), custList...)
	// simple insertion sort (input lists are tiny)
	for i := 1; i < len(sorted); i++ {
		for j := i; j > 0 && sorted[j] < sorted[j-1]; j-- {
			sorted[j], sorted[j-1] = sorted[j-1], sorted[j]
		}
	}
	var key []byte
	for _, n := range sorted {
		if n < 0 {
			key = append(key, '-')
			n = -n
		}
		start := len(key)
		if n == 0 {
			key = append(key, '0')
		}
		for n > 0 {
			key = append(key, byte('0'+n%10))
			n /= 10
		}
		// digits were emitted least-significant first; reverse in place
		for l, r := start, len(key)-1; l < r; l, r = l+1, r-1 {
			key[l], key[r] = key[r], key[l]
		}
		key = append(key, ',')
	}
	return string(key)
}
// AccountCustomer groups a set of accounts with the customers that share them.
type AccountCustomer struct {
	Accounts []int
	Customers []int
}

// CustAccMapping is one input row linking a customer to an account they own.
type CustAccMapping struct {
	Account int
	Customer int
} | AccountCust.go | 0.537041 | 0.477006 | AccountCust.go | starcoder |
package godash
import (
"errors"
"reflect"
)
// FindBy returns the first element of the slice that the provided validator function returns true for.
// The supplied function must accept an interface{} parameter and return bool.
// If the validator function does not return true for any values in the slice, nil is returned.
// A non-slice first argument yields an error.
func FindBy(slice interface{}, fn validator) (interface{}, error) {
	sliceVal := reflect.ValueOf(slice)
	if sliceVal.Type().Kind() != reflect.Slice {
		return nil, errors.New("godash: invalid parameter type. FindBy func expects parameter 1 to be a slice")
	}
	for i := 0; i < sliceVal.Len(); i++ {
		val := sliceVal.Index(i).Interface()
		// idiom fix: `fn(val)` instead of comparing the bool to true
		if fn(val) {
			return val, nil
		}
	}
	return nil, nil
}
// FindLastBy returns the last element of the slice that the provided validator function returns true for.
// The supplied function must accept an interface{} parameter and return bool.
// If the validator function does not return true for any values in the slice, nil is returned.
// A non-slice first argument yields an error.
func FindLastBy(slice interface{}, fn validator) (interface{}, error) {
	sliceVal := reflect.ValueOf(slice)
	if sliceVal.Type().Kind() != reflect.Slice {
		// fixed copy-pasted message that named FindBy instead of FindLastBy
		return nil, errors.New("godash: invalid parameter type. FindLastBy func expects parameter 1 to be a slice")
	}
	for i := sliceVal.Len() - 1; i != -1; i-- {
		val := sliceVal.Index(i).Interface()
		if fn(val) {
			return val, nil
		}
	}
	return nil, nil
}
// FindIndex returns the index of the first element in a slice that equals the provided value.
// If the value is not found in the slice, -1 is returned.
// An error (with index -1) is returned when parameter 1 is not a slice.
func FindIndex(slice interface{}, value interface{}) (int, error) {
	v := reflect.ValueOf(slice)
	if v.Kind() != reflect.Slice {
		return -1, errors.New("godash: invalid parameter type. FindIndex func expects parameter 1 to be a slice")
	}
	n := v.Len()
	for i := 0; i < n; i++ {
		// DeepEqual handles element types that are not directly comparable.
		if reflect.DeepEqual(v.Index(i).Interface(), value) {
			return i, nil
		}
	}
	return -1, nil
}
// FindIndexBy returns the index of the first element of a slice that the provided validator function returns true for.
// The supplied function must accept an interface{} parameter and return bool.
// If the validator function does not return true for any values in the slice, -1 is returned.
// An error (with index -1) is returned when parameter 1 is not a slice.
func FindIndexBy(slice interface{}, fn validator) (int, error) {
	sliceVal := reflect.ValueOf(slice)
	if sliceVal.Type().Kind() != reflect.Slice {
		return -1, errors.New("godash: invalid parameter type. FindIndexBy func expects parameter 1 to be a slice")
	}
	for i := 0; i < sliceVal.Len(); i++ {
		// Use the bool result directly; comparing with `== true` is redundant.
		if fn(sliceVal.Index(i).Interface()) {
			return i, nil
		}
	}
	return -1, nil
}
// FindLastIndex returns the index of the last element in a slice that equals the provided value.
// If the value is not found in the slice, -1 is returned.
// An error (with index -1) is returned when parameter 1 is not a slice.
func FindLastIndex(slice interface{}, value interface{}) (int, error) {
	v := reflect.ValueOf(slice)
	if v.Kind() != reflect.Slice {
		return -1, errors.New("godash: invalid parameter type. FindLastIndex func expects parameter 1 to be a slice")
	}
	// Walk backwards so the first hit is the last occurrence.
	for i := v.Len() - 1; i >= 0; i-- {
		if reflect.DeepEqual(v.Index(i).Interface(), value) {
			return i, nil
		}
	}
	return -1, nil
}
package pow
import (
"encoding/binary"
"math"
"github.com/DanielKrawisz/bmutil/hash"
)
// CalculateTarget calculates the target POW value. payloadLength includes the
// full length of the payload (including the width of the initial nonce field).
// ttl is the time difference (in seconds) between ExpiresTime and time.Now().
// Information about nonceTrials and extraBytes can be found at:
// https://bitmessage.org/wiki/Proof_of_work
func CalculateTarget(payloadLength, ttl uint64, data Data) Target {
	// The float64 conversions are applied per operand (not on the uint64
	// sum) and the result truncated back to uint64, mirroring the Python
	// reference implementation which casts types back to int after division.
	ttlTerm := uint64(float64(ttl) * (float64(payloadLength) + float64(data.ExtraBytes)) / math.Pow(2, 16))
	divisor := data.NonceTrialsPerByte * (payloadLength + data.ExtraBytes + ttlTerm)
	return Target(math.MaxUint64 / divisor)
}
// Check reports whether the given nonce and message satisfy the pow target:
// the leading 8 bytes of the double-SHA512 of nonce||message, read big-endian,
// must not exceed the target.
func Check(target Target, nonce Nonce, message []byte) bool {
	data := make([]byte, 8+len(message))
	copy(data[:8], nonce.Bytes())
	copy(data[8:], message)
	digest := hash.DoubleSha512(data)
	return binary.BigEndian.Uint64(digest[:8]) <= uint64(target)
}
// DoSequential does the PoW sequentially and returns the nonce value.
//
// The hash input buffer (nonce || initialHash) is allocated once and only the
// nonce prefix is rewritten each attempt; the previous implementation
// re-appended initialHash on every iteration (one allocation per attempt in a
// hot loop) and initialized a trialValue that was immediately overwritten.
func DoSequential(target Target, initialHash []byte) Nonce {
	data := make([]byte, 8+len(initialHash))
	copy(data[8:], initialHash)
	for nonce := uint64(1); ; nonce++ {
		binary.BigEndian.PutUint64(data[:8], nonce)
		resultHash := hash.DoubleSha512(data)
		if binary.BigEndian.Uint64(resultHash[:8]) <= uint64(target) {
			return Nonce(nonce)
		}
	}
}
// DoParallel does the POW using parallelCount number of goroutines and returns
// the nonce value.
func DoParallel(target Target, initialHash []byte, parallelCount int) Nonce {
done := make(chan bool)
nonceValue := make(chan Nonce, 1)
for i := 0; i < parallelCount; i++ {
go func(j int) {
nonce := uint64(j) + 1
nonceBytes := make([]byte, 8)
trialValue := uint64(math.MaxUint64)
for {
select {
case <-done: // some other goroutine already finished
return
default:
binary.BigEndian.PutUint64(nonceBytes, nonce)
resultHash := hash.DoubleSha512(append(nonceBytes, initialHash...))
trialValue = binary.BigEndian.Uint64(resultHash[:8])
if trialValue <= uint64(target) {
nonceValue <- Nonce(nonce)
close(done)
}
nonce += uint64(parallelCount) // increment by parallelCount
}
}
}(i)
}
return <-nonceValue
} | pow/pow.go | 0.719482 | 0.496948 | pow.go | starcoder |
package objects
import (
"github.com/ArcCS/Nevermore/permissions"
"strings"
"sync"
)
// CharInventory tracks the set of Characters currently present in a room.
// The embedded Mutex is available to guard Contents; because of it a
// CharInventory must not be copied after first use.
type CharInventory struct {
	ParentId int          // index into Rooms for the room this inventory belongs to
	Contents []*Character // characters currently present
	sync.Mutex
	Flags map[string]bool
}
// NewCharInventory returns a CharInventory for the given room, pre-populated
// with any characters supplied (each goes through Add so room notifications
// fire).
func NewCharInventory(roomID int, o ...*Character) *CharInventory {
	inv := &CharInventory{
		ParentId: roomID,
		Contents: make([]*Character, 0, len(o)),
	}
	for _, ch := range o {
		inv.Add(ch)
	}
	return inv
}
// Add appends the character to the room's contents, notifying the room via
// FirstPerson when it goes from empty to occupied.
func (i *CharInventory) Add(o *Character) {
	wasEmpty := len(i.Contents) == 0
	if wasEmpty {
		Rooms[i.ParentId].FirstPerson()
	}
	i.Contents = append(i.Contents, o)
}
// Remove deletes the given character (compared by pointer identity) from the
// contents. When the inventory ends up empty it notifies the room via
// LastPerson and resets Contents to a fresh backing slice.
func (i *CharInventory) Remove(o *Character) {
	for c, p := range i.Contents {
		if p == o {
			copy(i.Contents[c:], i.Contents[c+1:])
			i.Contents[len(i.Contents)-1] = nil // drop the reference for GC
			i.Contents = i.Contents[:len(i.Contents)-1]
			break
		}
	}
	// The previous version tested len(i.Contents) == 0 twice in a row; the
	// two identical checks are merged here with identical behavior (note
	// LastPerson also fires when Remove is called on an already-empty
	// inventory, as before).
	if len(i.Contents) == 0 {
		Rooms[i.ParentId].LastPerson()
		i.Contents = make([]*Character, 0, 10)
	}
}
// SearchAll returns the first character whose name contains alias
// (case-insensitive), ignoring visibility flags entirely. Returns nil when no
// character matches or the receiver is nil.
func (i *CharInventory) SearchAll(alias string) *Character {
	if i == nil {
		return nil
	}
	needle := strings.ToLower(alias)
	for _, ch := range i.Contents {
		if strings.Contains(strings.ToLower(ch.Name), needle) {
			return ch
		}
	}
	return nil
}
// Search returns the first character whose name contains alias
// (case-insensitive) and that is visible to the observer, or nil.
// A character is visible when it is not invisible, when the observer can
// detect invisibility and the target is not staff (builder/DM/GM), or when
// the observer is staff.
func (i *CharInventory) Search(alias string, observer *Character) *Character {
	if i == nil {
		return nil
	}
	for _, c := range i.Contents {
		// Boolean simplification of the previous `== false`/`== true`
		// chain: !A || (A && B && C) || O  ==  !A || (B && C) || O.
		targetIsStaff := c.Permission.HasAnyFlags(permissions.Builder, permissions.Dungeonmaster, permissions.Gamemaster)
		visible := !c.Flags["invisible"] ||
			(observer.Flags["detect_invisible"] && !targetIsStaff) ||
			observer.Permission.HasAnyFlags(permissions.Builder, permissions.Dungeonmaster, permissions.Gamemaster)
		if visible && strings.Contains(strings.ToLower(c.Name), strings.ToLower(alias)) {
			return c
		}
	}
	return nil
}
// MobSearch returns the first character whose name contains alias
// (case-insensitive) and that is visible to the observing mob, or nil.
// A character is visible when it is not invisible, or when the mob detects
// invisibility and the target is not staff (builder/DM/GM).
func (i *CharInventory) MobSearch(alias string, observer *Mob) *Character {
	if i == nil {
		return nil
	}
	for _, c := range i.Contents {
		targetIsStaff := c.Permission.HasAnyFlags(permissions.Builder, permissions.Dungeonmaster, permissions.Gamemaster)
		// Simplified from `== false || (== true && ...)` with identical truth table.
		visible := !c.Flags["invisible"] ||
			(observer.Flags["detect_invisible"] && !targetIsStaff)
		if visible && strings.Contains(strings.ToLower(c.Name), strings.ToLower(alias)) {
			return c
		}
	}
	return nil
}
// List returns the names of all characters visible to the observer, excluding
// the observer itself. Hidden characters are listed only for staff
// (builder/DM/GM); invisible characters are listed for staff or for observers
// with detect_invisible, unless the invisible target is itself staff.
func (i *CharInventory) List(observer *Character) []string {
	items := make([]string, 0)
	observerIsStaff := observer.Permission.HasAnyFlags(permissions.Builder, permissions.Dungeonmaster, permissions.Gamemaster)
	for _, c := range i.Contents {
		if strings.ToLower(c.Name) == strings.ToLower(observer.Name) {
			continue // never list the observer itself
		}
		// `hidden == false || (hidden == true && staff)` simplifies to this guard.
		if c.Flags["hidden"] && !observerIsStaff {
			continue
		}
		targetIsStaff := c.Permission.HasAnyFlags(permissions.Builder, permissions.Dungeonmaster, permissions.Gamemaster)
		if !c.Flags["invisible"] ||
			(observer.Flags["detect_invisible"] && !targetIsStaff) ||
			observerIsStaff {
			items = append(items, c.Name)
		}
	}
	return items
}
// ListChars returns the characters visible to the observer, excluding the
// observer itself. Visibility rules are identical to List: hidden characters
// only for staff; invisible characters for staff or detect_invisible
// observers, unless the invisible target is itself staff.
func (i *CharInventory) ListChars(observer *Character) []*Character {
	items := make([]*Character, 0)
	observerIsStaff := observer.Permission.HasAnyFlags(permissions.Builder, permissions.Dungeonmaster, permissions.Gamemaster)
	for _, c := range i.Contents {
		if strings.ToLower(c.Name) == strings.ToLower(observer.Name) {
			continue // never list the observer itself
		}
		if c.Flags["hidden"] && !observerIsStaff {
			continue
		}
		targetIsStaff := c.Permission.HasAnyFlags(permissions.Builder, permissions.Dungeonmaster, permissions.Gamemaster)
		if !c.Flags["invisible"] ||
			(observer.Flags["detect_invisible"] && !targetIsStaff) ||
			observerIsStaff {
			items = append(items, c)
		}
	}
	return items
}
// MobList returns the names of characters visible from a mob's point of view:
// hidden characters are never listed; invisible characters are listed only if
// the mob has detect_invisible.
func (i *CharInventory) MobList(observer *Mob) []string {
	items := make([]string, 0)
	for _, c := range i.Contents {
		if c.Flags["hidden"] {
			continue
		}
		// `!invisible || (invisible && detect)` reduces to `!invisible || detect`.
		if !c.Flags["invisible"] || observer.Flags["detect_invisible"] {
			items = append(items, c.Name)
		}
	}
	return items
}
// Free recursively calls Free on all of its contents when the CharInventory
// attribute is freed. Each slot is nilled out before the contained
// character's Free runs so the reference can be collected. Safe to call on a
// nil receiver.
func (i *CharInventory) Free() {
	if i == nil {
		return
	}
	for x, t := range i.Contents {
		// Drop our reference first, then free the character itself.
		i.Contents[x] = nil
		t.Free()
	}
}
package util
// Histogram represents an approximate distribution of some variable.
type Histogram interface {
	// Percentile returns an approximation of the given percentile of the
	// distribution.
	// Note: the argument passed to Percentile() is a number between
	// 0 and 1. For example 0.5 corresponds to the median and 0.9 to the
	// 90th percentile.
	// If the histogram is empty, Percentile() returns 0.0.
	Percentile(percentile float64) float64
	// AddSample adds a sample with a given value and weight.
	// Implementations may panic if weight is negative.
	AddSample(value float64, weight float64)
	// SubtractSample removes a sample with a given value and weight. Note
	// that the total weight of samples with a given value cannot be
	// negative (implementations clamp the subtraction).
	SubtractSample(value float64, weight float64)
	// IsEmpty returns true if the histogram is empty.
	IsEmpty() bool
}
// NewHistogram returns a new Histogram instance using given options.
// minBucket starts at the last bucket and maxBucket at 0 so that the first
// AddSample tightens both bounds onto the touched bucket.
func NewHistogram(options HistogramOptions) Histogram {
	// Named fields instead of the previous positional literal: the struct
	// has five fields of overlapping types and positional initialization is
	// easy to get wrong silently.
	return &histogram{
		options:      &options,
		bucketWeight: make([]float64, options.NumBuckets()),
		totalWeight:  0.0,
		minBucket:    options.NumBuckets() - 1,
		maxBucket:    0,
	}
}
// Simple bucket-based implementation of the Histogram interface. Each bucket
// holds the total weight of samples that belong to it.
// Percentile() returns the middle of the corresponding bucket.
// Resolution (bucket boundaries) of the histogram depends on the options.
// There's no interpolation within buckets (i.e. one sample falls to exactly one
// bucket).
// A bucket is considered empty if its weight is smaller than options.Epsilon().
type histogram struct {
	// Bucketing scheme.
	options *HistogramOptions
	// Cumulative weight of samples in each bucket.
	bucketWeight []float64
	// Total cumulative weight of samples in all buckets.
	totalWeight float64
	// Index of the first non-empty bucket if there's any. Otherwise index
	// of the last bucket.
	minBucket int
	// Index of the last non-empty bucket if there's any. Otherwise 0.
	maxBucket int
}
// AddSample records weight at the bucket containing value, updating the
// cached total and widening the non-empty bucket bounds as needed.
// Panics when weight is negative.
func (h *histogram) AddSample(value float64, weight float64) {
	if weight < 0.0 {
		panic("sample weight must be non-negative")
	}
	idx := (*h.options).FindBucket(value)
	h.bucketWeight[idx] += weight
	h.totalWeight += weight
	// Widen the tracked [minBucket, maxBucket] range to cover idx.
	if idx < h.minBucket {
		h.minBucket = idx
	}
	if idx > h.maxBucket {
		h.maxBucket = idx
	}
}
// SubtractSample removes up to weight from the bucket containing value,
// clamping so a bucket's weight cannot go (meaningfully) below zero, then
// walks the non-empty bucket bounds inward past buckets that became empty.
// Panics when weight is negative.
func (h *histogram) SubtractSample(value float64, weight float64) {
	if weight < 0.0 {
		panic("sample weight must be non-negative")
	}
	idx := (*h.options).FindBucket(value)
	eps := (*h.options).Epsilon()
	// Never subtract more than the bucket currently holds (within epsilon).
	if weight > h.bucketWeight[idx]-eps {
		weight = h.bucketWeight[idx]
	}
	h.totalWeight -= weight
	h.bucketWeight[idx] -= weight
	// Re-tighten the bounds; minBucket may not pass the last bucket and
	// maxBucket may not pass the first, matching their documented defaults.
	last := (*h.options).NumBuckets() - 1
	for h.bucketWeight[h.minBucket] < eps && h.minBucket < last {
		h.minBucket++
	}
	for h.bucketWeight[h.maxBucket] < eps && h.maxBucket > 0 {
		h.maxBucket--
	}
}
// Percentile returns an approximation of the requested percentile (0..1):
// the midpoint of the first bucket at which the cumulative weight reaches
// percentile * totalWeight. Returns 0.0 for an empty histogram.
func (h *histogram) Percentile(percentile float64) float64 {
	if h.IsEmpty() {
		return 0.0
	}
	partialSum := 0.0
	threshold := percentile * h.totalWeight
	bucket := h.minBucket
	// Scan from the lowest non-empty bucket. If the threshold is never
	// reached, the loop bound leaves bucket == maxBucket, which is the
	// correct answer for percentile 1.0 — maxBucket's weight need not be
	// added to decide that.
	for ; bucket < h.maxBucket; bucket++ {
		partialSum += h.bucketWeight[bucket]
		if partialSum >= threshold {
			break
		}
	}
	bucketStart := (*h.options).GetBucketStart(bucket)
	if bucket < (*h.options).NumBuckets()-1 {
		// Return the middle point between the bucket boundaries.
		bucketEnd := (*h.options).GetBucketStart(bucket + 1)
		return (bucketStart + bucketEnd) / 2.0
	}
	// Return the start of the last bucket (note that the last bucket
	// doesn't have an upper bound).
	return bucketStart
}
func (h *histogram) IsEmpty() bool {
return h.bucketWeight[h.minBucket] < (*h.options).Epsilon()
} | vertical-pod-autoscaler/recommender/util/histogram.go | 0.920397 | 0.811751 | histogram.go | starcoder |
package starkex
import (
"bytes"
"crypto/hmac"
"hash"
"math/big"
)
// rfc6979 implemented in Golang.
// copy from https://raw.githubusercontent.com/codahale/rfc6979/master/rfc6979.go
/*
Package rfc6979 is an implementation of RFC 6979's deterministic DSA.
Such signatures are compatible with standard Digital Signature Algorithm
(DSA) and Elliptic Curve Digital Signature Algorithm (ECDSA) digital
signatures and can be processed with unmodified verifiers, which need not be
aware of the procedure described therein. Deterministic signatures retain
the cryptographic security features associated with digital signatures but
can be more easily implemented in various environments, since they do not
need access to a source of high-quality randomness.
(https://tools.ietf.org/html/rfc6979)
Provides functions similar to crypto/dsa and crypto/ecdsa.
*/
// mac returns an HMAC of the given key and message.
func mac(alg func() hash.Hash, k, m, buf []byte) []byte {
h := hmac.New(alg, k)
h.Write(m)
return h.Sum(buf[:0])
}
// bits2int converts a big-endian byte string into an integer, keeping only
// the qlen leftmost bits.
// https://tools.ietf.org/html/rfc6979#section-2.3.2
func bits2int(in []byte, qlen int) *big.Int {
	result := new(big.Int).SetBytes(in)
	// Discard any bits beyond qlen by shifting them off the right.
	if excess := len(in)*8 - qlen; excess > 0 {
		result.Rsh(result, uint(excess))
	}
	return result
}
// https://tools.ietf.org/html/rfc6979#section-2.3.3
func int2octets(v *big.Int, rolen int) []byte {
out := v.Bytes()
// pad with zeros if it's too short
if len(out) < rolen {
out2 := make([]byte, rolen)
copy(out2[rolen-len(out):], out)
return out2
}
// drop most significant bytes if it's too long
if len(out) > rolen {
out2 := make([]byte, rolen)
copy(out2, out[len(out)-rolen:])
return out2
}
return out
}
// bits2octets converts a bit string to a rolen-byte octet string of its
// value reduced (at most once) modulo q.
// https://tools.ietf.org/html/rfc6979#section-2.3.4
func bits2octets(in []byte, q *big.Int, qlen, rolen int) []byte {
	z1 := bits2int(in, qlen)
	// A single subtraction suffices because z1 < 2*q by construction; keep
	// z1 itself when the subtraction would go negative.
	reduced := new(big.Int).Sub(z1, q)
	if reduced.Sign() < 0 {
		reduced = z1
	}
	return int2octets(reduced, rolen)
}
// generateSecret derives a deterministic per-signature secret k in [1, q-1]
// from the private key x and the message hash, using the HMAC-DRBG-style
// procedure of RFC 6979 section 3.2. extraEntropy, when non-empty, is mixed
// in as the optional additional data of section 3.6.
// `one` is a package-level *big.Int used for the lower-bound check (declared
// elsewhere in this package — presumably 1; confirm).
// https://tools.ietf.org/html/rfc6979#section-3.2
func generateSecret(q, x *big.Int, alg func() hash.Hash, hash []byte, extraEntropy []byte) *big.Int {
	qlen := q.BitLen()
	holen := alg().Size()
	rolen := (qlen + 7) >> 3
	// bx = int2octets(x) || bits2octets(h1), per step D/F of the RFC.
	bx := append(int2octets(x, rolen), bits2octets(hash, q, qlen, rolen)...)
	// extra_entropy - extra added data in binary form as per section-3.6 of rfc6979
	if len(extraEntropy) > 0 {
		bx = append(bx, extraEntropy...)
	}
	// Step B
	v := bytes.Repeat([]byte{0x01}, holen)
	// Step C
	k := bytes.Repeat([]byte{0x00}, holen)
	// Step D
	k = mac(alg, k, append(append(v, 0x00), bx...), k)
	// Step E
	v = mac(alg, k, v, v)
	// Step F
	k = mac(alg, k, append(append(v, 0x01), bx...), k)
	// Step G
	v = mac(alg, k, v, v)
	// Step H: generate candidates until one falls in [1, q-1].
	for {
		// Step H1
		var t []byte
		// Step H2
		for len(t) < qlen/8 {
			v = mac(alg, k, v, v)
			t = append(t, v...)
		}
		// Step H3
		secret := bits2int(t, qlen)
		if secret.Cmp(one) >= 0 && secret.Cmp(q) < 0 {
			return secret
		}
		// Candidate out of range: re-key and try again.
		k = mac(alg, k, append(v, 0x00), k)
		v = mac(alg, k, v, v)
	}
}
/*
Package dataplane implements packet send/receive functions
*/
package dataplane
import (
"github.com/opennetworkinglab/testvectors-runner/pkg/logger"
pm "github.com/stratum/testvectors/proto/portmap"
)
var log = logger.NewLogger()
// Match selects the packet comparison strategy used by verify.
type Match uint8

// Supported Match values for verify. (Exact semantics live in the concrete
// data planes; presumably Exact requires an exact packet match and In a
// containment check — confirm against the direct/loopback implementations.)
const (
	Exact = Match(0x1)
	In    = Match(0x2)
)
// dataPlane is the interface each data plane implementation (direct,
// loopback) satisfies for packet send/receive/verify operations. Each method
// reports success via its bool result.
type dataPlane interface {
	// capture starts packet capturing.
	capture() bool
	// send sends packets to a specific port.
	send(pkts [][]byte, port uint32) bool
	// verify verifies packets captured on ports.
	verify(pkts [][]byte, ports []uint32) bool
	// stop stops packet capturing.
	stop() bool
}

// dp is the package-wide data plane instance created by CreateDataPlane; it
// is nil until CreateDataPlane succeeds.
var dp dataPlane
// CreateDataPlane takes the dataplane mode, packet match type and portmap
// as arguments and creates the single package-wide dataplane instance used
// for packet sending/receiving/verification. Unknown modes or match types
// terminate the process via log.Fatalf.
func CreateDataPlane(mode string, matchType string, portmap *pm.PortMap) {
	var cmp Match
	switch matchType {
	case "exact":
		cmp = Exact
	case "in":
		cmp = In
	default:
		log.Fatalf("Unknown data plane match type: %s", matchType)
	}
	switch mode {
	case "direct":
		log.Infof("Creating direct data plane with match type: %s and port map: %s\n", matchType, portmap)
		dp = createDirectDataPlane(portmap, cmp)
	case "loopback":
		log.Infof("Creating loopback data plane with match type: %s and port map: %s\n", matchType, portmap)
		dp = createLoopbackDataPlane(portmap, cmp)
	default:
		log.Fatalf("Unknown data plane mode: %s", mode)
	}
}
// getPortMapEntryByPortNumber looks up the given portmap and returns the
// first entry that has the specified port number. If none of the entries
// match, or if the global data plane has not been created, it returns nil.
//
// NOTE(review): the dp == nil guard is unrelated to a pure portmap lookup —
// confirm whether callers depend on it before removing.
func getPortMapEntryByPortNumber(portmap *pm.PortMap, portNumber uint32) *pm.Entry {
	if dp == nil {
		log.Error("data plane does not exist")
		return nil
	}
	for _, entry := range portmap.GetEntries() {
		if entry.GetPortNumber() == portNumber {
			return entry
		}
	}
	return nil
}
// ProcessTrafficStimulus sends packets to a specific port via the global
// data plane; returns false when the data plane does not exist or the send
// fails.
func ProcessTrafficStimulus(pkts [][]byte, port uint32) bool {
	log.Debug("In ProcessTrafficStimulus")
	if dp != nil {
		return dp.send(pkts, port)
	}
	log.Error("data plane does not exist")
	return false
}
// ProcessTrafficExpectation verifies that the given packets arrived at the
// given ports; returns false when the data plane does not exist or
// verification fails.
func ProcessTrafficExpectation(pkts [][]byte, ports []uint32) bool {
	log.Debug("In ProcessTrafficExpectation")
	if dp != nil {
		return dp.verify(pkts, ports)
	}
	log.Error("data plane does not exist")
	return false
}
// Capture starts packet capturing on the global data plane; returns false
// when the data plane does not exist or capturing could not start.
func Capture() bool {
	log.Debug("In Capture")
	if dp != nil {
		return dp.capture()
	}
	log.Error("data plane does not exist")
	return false
}
//Stop stops packet capturing
func Stop() bool {
log.Debug("In Stop")
if dp == nil {
log.Error("data plane does not exist")
return false
}
return dp.stop()
} | pkg/framework/dataplane/dataplane_oper.go | 0.694303 | 0.490968 | dataplane_oper.go | starcoder |
package openapi
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
)
// FetchWorkflowStatisticsParams holds the optional query parameters for the
// method 'FetchWorkflowStatistics'. Nil fields are omitted from the request.
type FetchWorkflowStatisticsParams struct {
	// Only calculate statistics since this many minutes in the past. The default 15 minutes. This is helpful for displaying statistics for the last 15 minutes, 240 minutes (4 hours), and 480 minutes (8 hours) to see trends.
	Minutes *int `json:"Minutes,omitempty"`
	// Only calculate statistics from this date and time and later, specified in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format.
	StartDate *time.Time `json:"StartDate,omitempty"`
	// Only calculate statistics from this date and time and earlier, specified in GMT as an [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) date-time.
	EndDate *time.Time `json:"EndDate,omitempty"`
	// Only calculate real-time statistics on this TaskChannel. Can be the TaskChannel's SID or its `unique_name`, such as `voice`, `sms`, or `default`.
	TaskChannel *string `json:"TaskChannel,omitempty"`
	// A comma separated list of values that describes the thresholds, in seconds, to calculate statistics on. For each threshold specified, the number of Tasks canceled and reservations accepted above and below the specified thresholds in seconds are computed. For example, `5,30` would show splits of Tasks that were canceled or accepted before and after 5 seconds and before and after 30 seconds. This can be used to show short abandoned Tasks or Tasks that failed to meet an SLA.
	SplitByWaitTime *string `json:"SplitByWaitTime,omitempty"`
}
// SetMinutes sets the Minutes query parameter and returns params for chaining.
func (params *FetchWorkflowStatisticsParams) SetMinutes(Minutes int) *FetchWorkflowStatisticsParams {
	params.Minutes = &Minutes
	return params
}
// SetStartDate sets the StartDate query parameter and returns params for chaining.
func (params *FetchWorkflowStatisticsParams) SetStartDate(StartDate time.Time) *FetchWorkflowStatisticsParams {
	params.StartDate = &StartDate
	return params
}
// SetEndDate sets the EndDate query parameter and returns params for chaining.
func (params *FetchWorkflowStatisticsParams) SetEndDate(EndDate time.Time) *FetchWorkflowStatisticsParams {
	params.EndDate = &EndDate
	return params
}
// SetTaskChannel sets the TaskChannel query parameter and returns params for chaining.
func (params *FetchWorkflowStatisticsParams) SetTaskChannel(TaskChannel string) *FetchWorkflowStatisticsParams {
	params.TaskChannel = &TaskChannel
	return params
}
// SetSplitByWaitTime sets the SplitByWaitTime query parameter and returns params for chaining.
func (params *FetchWorkflowStatisticsParams) SetSplitByWaitTime(SplitByWaitTime string) *FetchWorkflowStatisticsParams {
	params.SplitByWaitTime = &SplitByWaitTime
	return params
}
func (c *ApiService) FetchWorkflowStatistics(WorkspaceSid string, WorkflowSid string, params *FetchWorkflowStatisticsParams) (*TaskrouterV1WorkflowStatistics, error) {
path := "/v1/Workspaces/{WorkspaceSid}/Workflows/{WorkflowSid}/Statistics"
path = strings.Replace(path, "{"+"WorkspaceSid"+"}", WorkspaceSid, -1)
path = strings.Replace(path, "{"+"WorkflowSid"+"}", WorkflowSid, -1)
data := url.Values{}
headers := make(map[string]interface{})
if params != nil && params.Minutes != nil {
data.Set("Minutes", fmt.Sprint(*params.Minutes))
}
if params != nil && params.StartDate != nil {
data.Set("StartDate", fmt.Sprint((*params.StartDate).Format(time.RFC3339)))
}
if params != nil && params.EndDate != nil {
data.Set("EndDate", fmt.Sprint((*params.EndDate).Format(time.RFC3339)))
}
if params != nil && params.TaskChannel != nil {
data.Set("TaskChannel", *params.TaskChannel)
}
if params != nil && params.SplitByWaitTime != nil {
data.Set("SplitByWaitTime", *params.SplitByWaitTime)
}
resp, err := c.requestHandler.Get(c.baseURL+path, data, headers)
if err != nil {
return nil, err
}
defer resp.Body.Close()
ps := &TaskrouterV1WorkflowStatistics{}
if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
return nil, err
}
return ps, err
} | rest/taskrouter/v1/workspaces_workflows_statistics.go | 0.79653 | 0.411939 | workspaces_workflows_statistics.go | starcoder |
package iso20022
// ReceivingPartiesAndAccount14 describes the chain of parties involved in the
// settlement of a transaction, including receipts and deliveries, book
// transfers, treasury deals, or other activities, resulting in the movement
// of a security or amount of money from one account to another. Optional
// parties are nil until populated via the Add*/Set* helpers below.
type ReceivingPartiesAndAccount14 struct {

	// Party that acts on behalf of the buyer of securities when the buyer does not have a direct relationship with the receiving agent.
	ReceiversCustodianDetails *PartyIdentificationAndAccount124 `xml:"RcvrsCtdnDtls,omitempty"`

	// Party that the receiver's custodian uses to effect the receipt of a security, when the receiver's custodian does not have a direct relationship with the receiving agent.
	ReceiversIntermediary1Details *PartyIdentificationAndAccount124 `xml:"RcvrsIntrmy1Dtls,omitempty"`

	// Party that interacts with the receiver’s intermediary.
	ReceiversIntermediary2Details *PartyIdentificationAndAccount124 `xml:"RcvrsIntrmy2Dtls,omitempty"`

	// Party that receives securities from the delivering agent via the place of settlement, for example, securities central depository.
	ReceivingAgentDetails *PartyIdentificationAndAccount123 `xml:"RcvgAgtDtls"`

	// Identifies the securities settlement system to be used.
	SecuritiesSettlementSystem *Max35Text `xml:"SctiesSttlmSys,omitempty"`

	// Place where settlement of the securities takes place.
	PlaceOfSettlementDetails *PartyIdentification97 `xml:"PlcOfSttlmDtls,omitempty"`
}
// AddReceiversCustodianDetails allocates ReceiversCustodianDetails and returns it for population.
func (r *ReceivingPartiesAndAccount14) AddReceiversCustodianDetails() *PartyIdentificationAndAccount124 {
	r.ReceiversCustodianDetails = new(PartyIdentificationAndAccount124)
	return r.ReceiversCustodianDetails
}
// AddReceiversIntermediary1Details allocates ReceiversIntermediary1Details and returns it for population.
func (r *ReceivingPartiesAndAccount14) AddReceiversIntermediary1Details() *PartyIdentificationAndAccount124 {
	r.ReceiversIntermediary1Details = new(PartyIdentificationAndAccount124)
	return r.ReceiversIntermediary1Details
}
// AddReceiversIntermediary2Details allocates ReceiversIntermediary2Details and returns it for population.
func (r *ReceivingPartiesAndAccount14) AddReceiversIntermediary2Details() *PartyIdentificationAndAccount124 {
	r.ReceiversIntermediary2Details = new(PartyIdentificationAndAccount124)
	return r.ReceiversIntermediary2Details
}
// AddReceivingAgentDetails allocates ReceivingAgentDetails and returns it for population.
func (r *ReceivingPartiesAndAccount14) AddReceivingAgentDetails() *PartyIdentificationAndAccount123 {
	r.ReceivingAgentDetails = new(PartyIdentificationAndAccount123)
	return r.ReceivingAgentDetails
}
// SetSecuritiesSettlementSystem stores the settlement system identifier as a Max35Text.
func (r *ReceivingPartiesAndAccount14) SetSecuritiesSettlementSystem(value string) {
	r.SecuritiesSettlementSystem = (*Max35Text)(&value)
}
// AddPlaceOfSettlementDetails allocates PlaceOfSettlementDetails and returns it for population.
func (r *ReceivingPartiesAndAccount14) AddPlaceOfSettlementDetails() *PartyIdentification97 {
	r.PlaceOfSettlementDetails = new(PartyIdentification97)
	return r.PlaceOfSettlementDetails
}
package yasup
import (
crypto "crypto/rand"
"math/big"
"math/rand"
)
// zeroValueUint64 is the zero value returned alongside an error by the pop helpers.
var zeroValueUint64 uint64
// Uint64Insert will append elem at the position i. Might return ErrIndexOutOfBounds.
func Uint64Insert(sl *[]uint64, elem uint64, i int) error {
	s := *sl
	if i < 0 || i > len(s) {
		return ErrIndexOutOfBounds
	}
	// Grow by one, shift the tail right, then drop elem into the gap.
	s = append(s, elem)
	copy(s[i+1:], s[i:])
	s[i] = elem
	*sl = s
	return nil
}
// Uint64Delete deletes the element at the position i. Might return ErrIndexOutOfBounds.
func Uint64Delete(sl *[]uint64, i int) error {
	s := *sl
	if i < 0 || i >= len(s) {
		return ErrIndexOutOfBounds
	}
	// Shift the tail left over position i, then truncate, reusing the
	// backing array just like the append form did.
	copy(s[i:], s[i+1:])
	*sl = s[:len(s)-1]
	return nil
}
// Uint64Contains will return true if elem is present in the slice and false otherwise.
func Uint64Contains(sl []uint64, elem uint64) bool {
	for _, v := range sl {
		if v == elem {
			return true
		}
	}
	return false
}
// Uint64Index returns the index of the first instance of elem, or -1 if elem is not present.
func Uint64Index(sl []uint64, elem uint64) int {
	for i, v := range sl {
		if v == elem {
			return i
		}
	}
	return -1
}
// Uint64LastIndex returns the index of the last instance of elem in the slice, or -1 if elem is not present.
func Uint64LastIndex(sl []uint64, elem uint64) int {
	// Scan forward, remembering the most recent hit.
	last := -1
	for i, v := range sl {
		if v == elem {
			last = i
		}
	}
	return last
}
// Uint64Count will return an int representing the amount of times that elem is present in the slice.
func Uint64Count(sl []uint64, elem uint64) int {
	count := 0
	for _, v := range sl {
		if v == elem {
			count++
		}
	}
	return count
}
// Uint64Push appends elem at the end of the slice. It is equivalent to
// Uint64Insert with index len(*sl), but appends directly instead of routing
// through Uint64Insert and silently discarding its (here impossible) error.
func Uint64Push(sl *[]uint64, elem uint64) {
	*sl = append(*sl, elem)
}
// Uint64FrontPush inserts elem at the front of the slice. It is equivalent
// to Uint64Insert with index 0, implemented directly so no error value is
// silently discarded.
func Uint64FrontPush(sl *[]uint64, elem uint64) {
	// Grow by one, shift everything right, then place elem at the front.
	*sl = append(*sl, 0)
	copy((*sl)[1:], *sl)
	(*sl)[0] = elem
}
// Uint64Pop removes and returns the last element of the slice. Might return
// ErrEmptySlice. The removal is a simple truncation instead of delegating to
// Uint64Delete, which had to run the generic shift logic for the last index.
func Uint64Pop(sl *[]uint64) (uint64, error) {
	if len(*sl) == 0 {
		return zeroValueUint64, ErrEmptySlice
	}
	last := len(*sl) - 1
	ret := (*sl)[last]
	*sl = (*sl)[:last]
	return ret, nil
}
// Uint64FrontPop removes and returns the first element of the slice. Might
// return ErrEmptySlice. (The comment previously carried the wrong name,
// Uint64Pop.)
func Uint64FrontPop(sl *[]uint64) (uint64, error) {
	if len(*sl) == 0 {
		return zeroValueUint64, ErrEmptySlice
	}
	ret := (*sl)[0]
	// Shift the remaining elements left in place, reusing the backing array
	// exactly as Uint64Delete(sl, 0) did.
	*sl = append((*sl)[:0], (*sl)[1:]...)
	return ret, nil
}
// Uint64Replace modifies the slice with the first n non-overlapping instances of old replaced by new. If n equals -1, there is no limit on the number of replacements.
func Uint64Replace(sl []uint64, old, new uint64, n int) (replacements int) {
	remaining := n
	for i := range sl {
		if remaining == 0 {
			break // replacement budget exhausted (-1 never reaches 0)
		}
		if sl[i] == old {
			sl[i] = new
			remaining--
		}
	}
	return n - remaining
}
// Uint64ReplaceAll is equivalent to Uint64Replace with n = -1 (no limit).
func Uint64ReplaceAll(sl []uint64, old, new uint64) (replacements int) {
	const unlimited = -1
	return Uint64Replace(sl, old, new, unlimited)
}
// Uint64Equals compares two uint64 slices. Returns true if their elements are equal.
func Uint64Equals(a, b []uint64) bool {
	if len(a) != len(b) {
		return false
	}
	for i, av := range a {
		if av != b[i] {
			return false
		}
	}
	return true
}
// Uint64FastShuffle will randomly swap the uint64 elements of a slice using
// math/rand (fast but not cryptographically secure).
func Uint64FastShuffle(sp []uint64) {
	swap := func(i, j int) {
		sp[i], sp[j] = sp[j], sp[i]
	}
	rand.Shuffle(len(sp), swap)
}
// Uint64SecureShuffle randomly permutes the uint64 elements of a slice using
// crypto/rand (resource intensive but cryptographically secure).
//
// This is a standard Fisher-Yates shuffle: position i receives a uniformly
// chosen element from [0, i]. The previous implementation always drew the
// swap index from [0, len-2], which both biased the permutation and panicked
// on single-element slices (crypto/rand.Int requires a positive max).
func Uint64SecureShuffle(sp []uint64) error {
	for i := len(sp) - 1; i > 0; i-- {
		bigJ, err := crypto.Int(crypto.Reader, big.NewInt(int64(i+1)))
		if err != nil {
			return err
		}
		j := bigJ.Int64()
		sp[i], sp[j] = sp[j], sp[i]
	}
	return nil
}
package storetest
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/uni-x/mattermost-server/model"
"github.com/uni-x/mattermost-server/store"
)
// TestRoleStore runs the full suite of role store tests against ss.
func TestRoleStore(t *testing.T, ss store.Store) {
	cases := []struct {
		name string
		fn   func(*testing.T, store.Store)
	}{
		{"Save", testRoleStoreSave},
		{"Get", testRoleStoreGet},
		{"GetByName", testRoleStoreGetByName},
		{"GetNames", testRoleStoreGetByNames},
		{"Delete", testRoleStoreDelete},
		{"PermanentDeleteAll", testRoleStorePermanentDeleteAll},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) { tc.fn(t, ss) })
	}
}
// testRoleStoreSave verifies creating a role, updating its permissions, and
// the two invalid-save cases (pre-set ID, duplicate name).
func testRoleStoreSave(t *testing.T, ss store.Store) {
	// Save a new role.
	r1 := &model.Role{
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}

	res1 := <-ss.Role().Save(r1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Role)
	// A stored role gets a 26-character ID assigned.
	assert.Len(t, d1.Id, 26)
	assert.Equal(t, r1.Name, d1.Name)
	assert.Equal(t, r1.DisplayName, d1.DisplayName)
	assert.Equal(t, r1.Description, d1.Description)
	assert.Equal(t, r1.Permissions, d1.Permissions)
	assert.Equal(t, r1.SchemeManaged, d1.SchemeManaged)

	// Change the role permissions and update.
	d1.Permissions = []string{
		"invite_user",
		"add_user_to_team",
		"delete_public_channel",
	}

	res2 := <-ss.Role().Save(d1)
	assert.Nil(t, res2.Err)
	d2 := res2.Data.(*model.Role)
	// The ID and descriptive fields survive the update; only Permissions changed.
	assert.Len(t, d2.Id, 26)
	assert.Equal(t, r1.Name, d2.Name)
	assert.Equal(t, r1.DisplayName, d2.DisplayName)
	assert.Equal(t, r1.Description, d2.Description)
	assert.Equal(t, d1.Permissions, d2.Permissions)
	assert.Equal(t, r1.SchemeManaged, d2.SchemeManaged)

	// Try saving one with an invalid ID set: saving a new role must not
	// accept a caller-provided ID.
	r3 := &model.Role{
		Id:          model.NewId(),
		Name:        model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}

	res3 := <-ss.Role().Save(r3)
	assert.NotNil(t, res3.Err)

	// Try saving one with a duplicate "name" field: role names are unique.
	r4 := &model.Role{
		Name:        r1.Name,
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}

	res4 := <-ss.Role().Save(r4)
	assert.NotNil(t, res4.Err)
}
// testRoleStoreGet verifies Role().Get returns a saved role by Id and errors
// for an unknown Id.
func testRoleStoreGet(t *testing.T, ss store.Store) {
	// Save a role to test with.
	r1 := &model.Role{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}
	res1 := <-ss.Role().Save(r1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Role)
	assert.Len(t, d1.Id, 26)
	// Get a valid role.
	res2 := <-ss.Role().Get(d1.Id)
	assert.Nil(t, res2.Err)
	// Fix: assert against the Get result (res2), not the Save result (res1);
	// the original compared the saved role with itself, so Get's payload was
	// never actually checked.
	d2 := res2.Data.(*model.Role)
	assert.Equal(t, d1.Id, d2.Id)
	assert.Equal(t, r1.Name, d2.Name)
	assert.Equal(t, r1.DisplayName, d2.DisplayName)
	assert.Equal(t, r1.Description, d2.Description)
	assert.Equal(t, r1.Permissions, d2.Permissions)
	assert.Equal(t, r1.SchemeManaged, d2.SchemeManaged)
	// Get an invalid role.
	res3 := <-ss.Role().Get(model.NewId())
	assert.NotNil(t, res3.Err)
}
// testRoleStoreGetByName verifies Role().GetByName returns a saved role by its
// unique Name and errors for an unknown name.
func testRoleStoreGetByName(t *testing.T, ss store.Store) {
	// Save a role to test with.
	r1 := &model.Role{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}
	res1 := <-ss.Role().Save(r1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Role)
	assert.Len(t, d1.Id, 26)
	// Get a valid role.
	res2 := <-ss.Role().GetByName(d1.Name)
	assert.Nil(t, res2.Err)
	// Fix: assert against the GetByName result (res2), not the Save result
	// (res1); the original compared the saved role with itself, so the lookup
	// payload was never actually checked.
	d2 := res2.Data.(*model.Role)
	assert.Equal(t, d1.Id, d2.Id)
	assert.Equal(t, r1.Name, d2.Name)
	assert.Equal(t, r1.DisplayName, d2.DisplayName)
	assert.Equal(t, r1.Description, d2.Description)
	assert.Equal(t, r1.Permissions, d2.Permissions)
	assert.Equal(t, r1.SchemeManaged, d2.SchemeManaged)
	// Get an invalid role.
	res3 := <-ss.Role().GetByName(model.NewId())
	assert.NotNil(t, res3.Err)
}
// testRoleStoreGetByNames exercises the batch lookup Role().GetByNames with
// all-valid, all-invalid, and mixed name sets. Unknown names are silently
// omitted from the result rather than producing an error.
func testRoleStoreGetByNames(t *testing.T, ss store.Store) {
	// Save some roles to test with.
	r1 := &model.Role{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}
	r2 := &model.Role{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"read_channel",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}
	r3 := &model.Role{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"delete_private_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}
	res1 := <-ss.Role().Save(r1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Role)
	assert.Len(t, d1.Id, 26)
	res2 := <-ss.Role().Save(r2)
	assert.Nil(t, res2.Err)
	d2 := res2.Data.(*model.Role)
	assert.Len(t, d2.Id, 26)
	res3 := <-ss.Role().Save(r3)
	assert.Nil(t, res3.Err)
	d3 := res3.Data.(*model.Role)
	assert.Len(t, d3.Id, 26)
	// Get two valid roles: both requested roles come back, and the
	// unrequested third must not.
	n4 := []string{r1.Name, r2.Name}
	res4 := <-ss.Role().GetByNames(n4)
	assert.Nil(t, res4.Err)
	roles4 := res4.Data.([]*model.Role)
	assert.Len(t, roles4, 2)
	assert.Contains(t, roles4, d1)
	assert.Contains(t, roles4, d2)
	assert.NotContains(t, roles4, d3)
	// Get two invalid roles: no error, just an empty result.
	n5 := []string{model.NewId(), model.NewId()}
	res5 := <-ss.Role().GetByNames(n5)
	assert.Nil(t, res5.Err)
	roles5 := res5.Data.([]*model.Role)
	assert.Len(t, roles5, 0)
	// Get one valid one and one invalid one: only the valid role is returned.
	n6 := []string{r1.Name, model.NewId()}
	res6 := <-ss.Role().GetByNames(n6)
	assert.Nil(t, res6.Err)
	roles6 := res6.Data.([]*model.Role)
	assert.Len(t, roles6, 1)
	assert.Contains(t, roles6, d1)
	assert.NotContains(t, roles6, d2)
	assert.NotContains(t, roles6, d3)
}
// testRoleStoreDelete verifies Role().Delete performs a soft delete: the role
// remains retrievable by Id and Name but has a non-zero DeleteAt timestamp.
// Deleting an unknown Id is an error.
func testRoleStoreDelete(t *testing.T, ss store.Store) {
	// Save a role to test with.
	r1 := &model.Role{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}
	res1 := <-ss.Role().Save(r1)
	assert.Nil(t, res1.Err)
	d1 := res1.Data.(*model.Role)
	assert.Len(t, d1.Id, 26)
	// Check the role is there.
	res2 := <-ss.Role().Get(d1.Id)
	assert.Nil(t, res2.Err)
	// Delete the role.
	res3 := <-ss.Role().Delete(d1.Id)
	assert.Nil(t, res3.Err)
	// Check the role is deleted there. Soft delete means Get still succeeds;
	// only DeleteAt changes.
	res4 := <-ss.Role().Get(d1.Id)
	assert.Nil(t, res4.Err)
	d2 := res4.Data.(*model.Role)
	assert.NotZero(t, d2.DeleteAt)
	res5 := <-ss.Role().GetByName(d1.Name)
	assert.Nil(t, res5.Err)
	d3 := res5.Data.(*model.Role)
	assert.NotZero(t, d3.DeleteAt)
	// Try and delete a role that does not exist.
	res6 := <-ss.Role().Delete(model.NewId())
	assert.NotNil(t, res6.Err)
}
// testRoleStorePermanentDeleteAll verifies Role().PermanentDeleteAll removes
// every role row: after the call, a batch lookup for previously-saved names
// returns an empty result (unlike Delete, which only soft-deletes).
func testRoleStorePermanentDeleteAll(t *testing.T, ss store.Store) {
	r1 := &model.Role{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"invite_user",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}
	r2 := &model.Role{
		Name: model.NewId(),
		DisplayName: model.NewId(),
		Description: model.NewId(),
		Permissions: []string{
			"read_channel",
			"create_public_channel",
			"add_user_to_team",
		},
		SchemeManaged: false,
	}
	// store.Must panics on a save failure, failing the test immediately.
	store.Must(ss.Role().Save(r1))
	store.Must(ss.Role().Save(r2))
	// Both roles must be visible before the purge...
	res1 := <-ss.Role().GetByNames([]string{r1.Name, r2.Name})
	assert.Nil(t, res1.Err)
	assert.Len(t, res1.Data.([]*model.Role), 2)
	res2 := <-ss.Role().PermanentDeleteAll()
	assert.Nil(t, res2.Err)
	// ...and gone afterwards.
	res3 := <-ss.Role().GetByNames([]string{r1.Name, r2.Name})
	assert.Nil(t, res3.Err)
	assert.Len(t, res3.Data.([]*model.Role), 0)
}
package msgoraph
// Bool returns a pointer to a freshly allocated bool holding v.
func Bool(v bool) *bool {
	b := v
	return &b
}

// BoolValue dereferences v, yielding false when v is nil.
func BoolValue(v *bool) bool {
	if v == nil {
		return false
	}
	return *v
}

// BoolSlice converts a slice of bools into a slice of pointers to the
// corresponding elements of v.
func BoolSlice(v []bool) []*bool {
	out := make([]*bool, 0, len(v))
	for i := range v {
		out = append(out, &v[i])
	}
	return out
}
// Float64 returns a pointer to a freshly allocated float64 holding v.
func Float64(v float64) *float64 {
	f := v
	return &f
}

// Float64Value dereferences v, yielding 0 when v is nil.
func Float64Value(v *float64) float64 {
	if v == nil {
		return 0
	}
	return *v
}

// Float64Slice converts a slice of float64s into a slice of pointers to the
// corresponding elements of v.
func Float64Slice(v []float64) []*float64 {
	out := make([]*float64, 0, len(v))
	for i := range v {
		out = append(out, &v[i])
	}
	return out
}
// Int returns a pointer to a freshly allocated int holding v.
func Int(v int) *int {
	n := v
	return &n
}

// IntValue dereferences v, yielding 0 when v is nil.
func IntValue(v *int) int {
	if v == nil {
		return 0
	}
	return *v
}

// IntSlice converts a slice of ints into a slice of pointers to the
// corresponding elements of v.
func IntSlice(v []int) []*int {
	out := make([]*int, 0, len(v))
	for i := range v {
		out = append(out, &v[i])
	}
	return out
}
// Int64 returns a pointer to a freshly allocated int64 holding v.
func Int64(v int64) *int64 {
	n := v
	return &n
}

// Int64Value dereferences v, yielding 0 when v is nil.
func Int64Value(v *int64) int64 {
	if v == nil {
		return 0
	}
	return *v
}

// Int64Slice converts a slice of int64s into a slice of pointers to the
// corresponding elements of v.
func Int64Slice(v []int64) []*int64 {
	out := make([]*int64, 0, len(v))
	for i := range v {
		out = append(out, &v[i])
	}
	return out
}
// String returns a pointer to a freshly allocated string holding v.
func String(v string) *string {
	s := v
	return &s
}

// StringValue dereferences v, yielding "" when v is nil.
func StringValue(v *string) string {
	if v == nil {
		return ""
	}
	return *v
}

// StringSlice converts a slice of strings into a slice of pointers to the
// corresponding elements of v.
func StringSlice(v []string) []*string {
	out := make([]*string, 0, len(v))
	for i := range v {
		out = append(out, &v[i])
	}
	return out
}
package sql
import (
"reflect"
"strings"
"gopkg.in/src-d/go-errors.v1"
)
var (
	// ErrUnexpectedType is thrown when a received type is not the expected
	ErrUnexpectedType = errors.NewKind("value at %d has unexpected type: %s")
)

// Schema is the definition of a table.
type Schema []*Column

// CheckRow checks the row conforms to the schema: it must have exactly one
// value per column, and each value must satisfy the column's Check.
func (s Schema) CheckRow(row Row) error {
	expected := len(s)
	got := len(row)
	if expected != got {
		return ErrUnexpectedRowLength.New(expected, got)
	}
	for idx, f := range s {
		v := row[idx]
		if f.Check(v) {
			continue
		}
		// Fix: reflect.TypeOf(nil) returns a nil reflect.Type, so calling
		// String() on it panicked whenever a nil value failed a column check.
		// Report "nil" for that case instead.
		typ := "nil"
		if v != nil {
			typ = reflect.TypeOf(v).String()
		}
		return ErrUnexpectedType.New(idx, typ)
	}
	return nil
}
// Copy returns a deep copy of this schema, making a copy of all columns.
func (s Schema) Copy() Schema {
	ns := make(Schema, len(s))
	for i, col := range s {
		nc := *col
		if nc.Default != nil {
			// Fix: the previous `nc.Default = &(*nc.Default)` is a no-op per
			// the Go spec (&*p yields p itself), so the Default was still
			// shared with the original column. Copy the pointed-to value into
			// a new allocation instead.
			d := *nc.Default
			nc.Default = &d
		}
		ns[i] = &nc
	}
	return ns
}
// Contains reports whether the schema holds a column with the given name and
// source (both matched case-insensitively, via IndexOf).
func (s Schema) Contains(column string, source string) bool {
	return s.IndexOf(column, source) != -1
}
// IndexOf returns the position of the column matching the given name and
// source (case-insensitive), or -1 when no column matches.
func (s Schema) IndexOf(column, source string) int {
	column, source = strings.ToLower(column), strings.ToLower(source)
	for i, col := range s {
		if strings.ToLower(col.Name) != column {
			continue
		}
		if strings.ToLower(col.Source) != source {
			continue
		}
		return i
	}
	return -1
}
// IndexOfColName returns the position of the column with the given name
// (case-insensitive), ignoring the column source, or -1 when absent. Only
// safe for schemas corresponding to a single table, where the source of the
// column is irrelevant.
func (s Schema) IndexOfColName(column string) int {
	target := strings.ToLower(column)
	for i := range s {
		if strings.ToLower(s[i].Name) == target {
			return i
		}
	}
	return -1
}
// Equals reports whether s2 has the same length as s and every column of s
// compares equal (per Column.Equals) to the column at the same position.
func (s Schema) Equals(s2 Schema) bool {
	if len(s) != len(s2) {
		return false
	}
	for i, col := range s {
		if !col.Equals(s2[i]) {
			return false
		}
	}
	return true
}
// HasAutoIncrement reports whether any column in the schema is marked
// auto-increment.
func (s Schema) HasAutoIncrement() bool {
	for i := range s {
		if s[i].AutoIncrement {
			return true
		}
	}
	return false
}
// IsKeyless reports whether no column of the schema participates in a
// primary key.
func IsKeyless(s Schema) bool {
	for i := range s {
		if s[i].PrimaryKey {
			return false
		}
	}
	return true
}
// PrimaryKeySchema defines table metadata for columns and primary key ordering
type PrimaryKeySchema struct {
	Schema
	PkOrdinals []int
}

// NewPrimaryKeySchema constructs a new PrimaryKeySchema. When no explicit
// ordinals are given, they default to the positions of the PrimaryKey columns
// in schema order (an empty, non-nil slice if there are none).
func NewPrimaryKeySchema(s Schema, pkOrds ...int) PrimaryKeySchema {
	ords := pkOrds
	if len(ords) == 0 {
		ords = make([]int, 0)
		for i := range s {
			if s[i].PrimaryKey {
				ords = append(ords, i)
			}
		}
	}
	return PrimaryKeySchema{Schema: s, PkOrdinals: ords}
}
// SchemaToPrimaryKeySchema adapts the schema given to a PrimaryKey schema using the primary keys of the table given, if
// present. The resulting PrimaryKeySchema may have an empty key set if the table has no primary keys. Matching for
// ordinals is performed by column name.
func SchemaToPrimaryKeySchema(table Table, sch Schema) PrimaryKeySchema {
	// Collect the table's primary-key columns, if it exposes them.
	var pks []*Column
	if pkt, ok := table.(PrimaryKeyTable); ok {
		schema := pkt.PrimaryKeySchema()
		for _, ordinal := range schema.PkOrdinals {
			pks = append(pks, schema.Schema[ordinal])
		}
	}
	// Map each PK column to its position in sch by (name, source).
	// NOTE(review): IndexOf returns -1 for a PK column absent from sch, and
	// that -1 is passed through as an ordinal — presumably callers guarantee
	// every PK column exists in sch; confirm before relying on this.
	ords := make([]int, len(pks))
	for i, pk := range pks {
		ords[i] = sch.IndexOf(pk.Name, pk.Source)
	}
	return NewPrimaryKeySchema(sch, ords...)
}
package gokmeans
import (
"math/rand"
"time"
)
// Node represents an observation of floating point values
type Node []float64

// Train takes an array of Nodes (observations), and produces as many centroids as specified by
// clusterCount. It will stop adjusting centroids after maxRounds is reached. If there are less
// observations than the number of centroids requested, or the observations do not all share the
// same dimensionality, then Train will return (false, nil).
func Train(Nodes []Node, clusterCount int, maxRounds int) (bool, []Node) {
	if len(Nodes) < clusterCount {
		return false, nil
	}
	// Check to make sure everything is consistent, dimension-wise
	stdLen := 0
	for i, node := range Nodes {
		curLen := len(node)
		if i > 0 && curLen != stdLen {
			return false, nil
		}
		stdLen = curLen
	}
	centroids := make([]Node, clusterCount)
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Pick centroid starting points from Nodes.
	for i := 0; i < clusterCount; i++ {
		srcIndex := r.Intn(len(Nodes))
		centroids[i] = make(Node, len(Nodes[srcIndex]))
		// Fix: the original drew a *second* random index here
		// (copy(centroids[i], Nodes[r.Intn(len(Nodes))])), so the centroid was
		// sized for one node but initialized from a different one. Copy the
		// node that was actually selected.
		copy(centroids[i], Nodes[srcIndex])
	}
	return Train2(Nodes, clusterCount, maxRounds, centroids)
}
// Train2 runs k-means starting from the caller-provided initial centroids,
// iterating until either maxRounds passes complete or no centroid moved in a
// pass. It returns (true, centroids); the centroids slice is mutated in place.
func Train2(Nodes []Node, clusterCount int, maxRounds int, centroids []Node) (bool, []Node) {
	// Train centroids
	movement := true
	for i := 0; i < maxRounds && movement; i++ {
		movement = false
		// Assign every observation to its nearest centroid.
		groups := make(map[int][]Node)
		for _, Node := range Nodes {
			near := Nearest(Node, centroids)
			groups[near] = append(groups[near], Node)
		}
		// Recenter each non-empty cluster on the mean of its members; a
		// centroid with no members keeps its previous position.
		for key, group := range groups {
			newNode := meanNode(group)
			if !equal(centroids[key], newNode) {
				centroids[key] = newNode
				movement = true
			}
		}
	}
	return true, centroids
}
// equal reports whether two nodes have the same length and identical values
// at every position.
func equal(node1, node2 Node) bool {
	if len(node1) != len(node2) {
		return false
	}
	for i := range node1 {
		if node1[i] != node2[i] {
			return false
		}
	}
	return true
}
// Nearest returns the index of the centroid in nodes closest (by squared
// Euclidean distance) to in. Ties go to the earliest index.
func Nearest(in Node, nodes []Node) int {
	// Fix: the original spawned one goroutine plus a channel send per
	// candidate node just to compute a float reduction (and its closure took
	// an unused `cl` parameter). The sequential loop computes the identical
	// result without scheduler and channel overhead.
	mindex := 0
	curdist := distance(in, nodes[0])
	for i := 1; i < len(nodes); i++ {
		if d := distance(in, nodes[i]); d < curdist {
			curdist = d
			mindex = i
		}
	}
	return mindex
}
// distance returns the squared Euclidean distance between two nodes.
// node2 must have at least as many dimensions as node1.
func distance(node1 Node, node2 Node) float64 {
	// Fix: the original launched one goroutine (plus a channel send) per
	// dimension to compute a single multiply — pathological overhead for a
	// float reduction. Summation order (index 0..n-1) is unchanged, so the
	// floating-point result is identical.
	sum := 0.0
	for i := range node1 {
		diff := node1[i] - node2[i]
		sum += diff * diff
	}
	return sum
}
// meanNode computes the per-dimension average of the given nodes, used to
// recenter a centroid within its cluster. The dimensionality is taken from
// the first node; values must be non-empty.
func meanNode(values []Node) Node {
	mean := make(Node, len(values[0]))
	for _, value := range values {
		for j := range mean {
			mean[j] += value[j]
		}
	}
	n := float64(len(values))
	for j := range mean {
		mean[j] /= n
	}
	return mean
}
// wait stops a function from continuing until the provided channel has processed as
// many items as there are dimensions in the provided Node.
func wait(c chan int, values Node) {
count := len(values)
<-c
for respCnt := 1; respCnt < count; respCnt++ {
<-c
}
} | gokmeans.go | 0.801081 | 0.600013 | gokmeans.go | starcoder |
package routertestsuite
import (
"bytes"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"github.com/ambientkit/ambient"
"github.com/stretchr/testify/assert"
)
// TestSuite performs standard tests.
type TestSuite struct{}

// New returns a router test suite.
func New() *TestSuite {
	return &TestSuite{}
}
// Run all the tests. The mux argument is a factory, not a router: each
// sub-test calls mux() so it gets a fresh, isolated AppRouter instance.
func (ts *TestSuite) Run(t *testing.T, mux func() ambient.AppRouter) {
	ts.TestParams(t, mux())
	ts.TestInstance(t, mux())
	ts.TestPostForm(t, mux())
	ts.TestPostJSON(t, mux())
	ts.TestGet(t, mux())
	ts.TestDelete(t, mux())
	ts.TestHead(t, mux())
	ts.TestOptions(t, mux())
	ts.TestPatch(t, mux())
	ts.TestPut(t, mux())
	ts.Test404(t, mux())
	ts.Test500NoError(t, mux())
	ts.Test500WithError(t, mux())
	ts.Test400(t, mux())
	ts.TestNotFound(t, mux())
	ts.TestBadRequest(t, mux())
}
// defaultServeHTTP is the default ServeHTTP function that receives the status and error from
// the function call. An ambient.Error carries its own HTTP status; any other
// non-nil error is reported as a plain 500.
var defaultServeHTTP = func(w http.ResponseWriter, r *http.Request, err error) {
	if err != nil {
		switch e := err.(type) {
		case ambient.Error:
			// Use the status embedded in the typed error.
			http.Error(w, e.Error(), e.Status())
		default:
			http.Error(w, http.StatusText(http.StatusInternalServerError),
				http.StatusInternalServerError)
		}
	}
}
// TestParams verifies that a named route segment ({name}) is captured and
// retrievable via mux.Param inside the handler.
func (ts *TestSuite) TestParams(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	outParam := ""
	mux.Get("/user/{name}", func(w http.ResponseWriter, r *http.Request) (err error) {
		outParam = mux.Param(r, "name")
		return nil
	})
	r := httptest.NewRequest("GET", "/user/john", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	resp := w.Result()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Equal(t, "john", outParam)
}

// TestInstance .
// NOTE(review): this is byte-for-byte identical to TestParams and does not
// exercise anything "instance"-specific — likely a copy-paste placeholder;
// confirm intent or give it distinct coverage.
func (ts *TestSuite) TestInstance(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	outParam := ""
	mux.Get("/user/{name}", func(w http.ResponseWriter, r *http.Request) (err error) {
		outParam = mux.Param(r, "name")
		return nil
	})
	r := httptest.NewRequest("GET", "/user/john", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	resp := w.Result()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Equal(t, "john", outParam)
}
// TestPostForm verifies a POST handler can read a urlencoded form field.
func (ts *TestSuite) TestPostForm(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	form := url.Values{}
	form.Add("username", "jsmith")
	outParam := ""
	mux.Post("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		r.ParseForm()
		outParam = r.FormValue("username")
		return nil
	})
	r := httptest.NewRequest("POST", "/user", strings.NewReader(form.Encode()))
	// Content-Type is required for ParseForm to read the body as form data.
	r.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	resp := w.Result()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Equal(t, "jsmith", outParam)
}

// TestPostJSON verifies a POST handler receives the raw JSON body unchanged.
func (ts *TestSuite) TestPostJSON(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	j, err := json.Marshal(map[string]interface{}{
		"username": "jsmith",
	})
	assert.Nil(t, err)
	outParam := ""
	mux.Post("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		b, err := ioutil.ReadAll(r.Body)
		assert.Nil(t, err)
		r.Body.Close()
		outParam = string(b)
		assert.Equal(t, `{"username":"jsmith"}`, string(b))
		return nil
	})
	r := httptest.NewRequest("POST", "/user", bytes.NewBuffer(j))
	r.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	resp := w.Result()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Equal(t, `{"username":"jsmith"}`, outParam)
}
// TestGet verifies a route registered with Get is dispatched for GET requests.
func (ts *TestSuite) TestGet(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	called := false
	mux.Get("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return nil
	})
	r := httptest.NewRequest("GET", "/user", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, true, called)
}

// TestDelete verifies a route registered with Delete is dispatched for DELETE.
func (ts *TestSuite) TestDelete(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	called := false
	mux.Delete("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return nil
	})
	r := httptest.NewRequest("DELETE", "/user", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, true, called)
}

// TestHead verifies a route registered with Head is dispatched for HEAD.
func (ts *TestSuite) TestHead(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	called := false
	mux.Head("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return nil
	})
	r := httptest.NewRequest("HEAD", "/user", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, true, called)
}

// TestOptions verifies a route registered with Options is dispatched for OPTIONS.
func (ts *TestSuite) TestOptions(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	called := false
	mux.Options("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return nil
	})
	r := httptest.NewRequest("OPTIONS", "/user", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, true, called)
}

// TestPatch verifies a route registered with Patch is dispatched for PATCH.
func (ts *TestSuite) TestPatch(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	called := false
	mux.Patch("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return nil
	})
	r := httptest.NewRequest("PATCH", "/user", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, true, called)
}

// TestPut verifies a route registered with Put is dispatched for PUT.
func (ts *TestSuite) TestPut(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	called := false
	mux.Put("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return nil
	})
	r := httptest.NewRequest("PUT", "/user", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, true, called)
}
// Test404 verifies an unmatched path yields 404 and does not invoke any
// registered handler.
func (ts *TestSuite) Test404(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	called := false
	mux.Get("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return nil
	})
	r := httptest.NewRequest("GET", "/badroute", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, false, called)
	assert.Equal(t, http.StatusNotFound, w.Code)
}
// Test500NoError verifies that a handler returning an ambient.StatusError
// with a nil inner error produces a 500 response.
func (ts *TestSuite) Test500NoError(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	// Fix: was initialized to true, which made the final assertion vacuous —
	// it passed even if the handler never ran.
	called := false
	mux.Get("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return ambient.StatusError{Code: http.StatusInternalServerError, Err: nil}
	})
	r := httptest.NewRequest("GET", "/user", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, true, called)
	assert.Equal(t, http.StatusInternalServerError, w.Code)
}

// Test500WithError verifies that a handler returning an ambient.StatusError
// wrapping a concrete error produces a 500 whose body is that error's message.
func (ts *TestSuite) Test500WithError(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	// Fix: was initialized to true, which made the final assertion vacuous —
	// it passed even if the handler never ran.
	called := false
	specificError := errors.New("specific error")
	mux.Get("/user", func(w http.ResponseWriter, r *http.Request) (err error) {
		called = true
		return ambient.StatusError{Code: http.StatusInternalServerError, Err: specificError}
	})
	r := httptest.NewRequest("GET", "/user", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, true, called)
	assert.Equal(t, http.StatusInternalServerError, w.Code)
	// http.Error appends a trailing newline to the message.
	assert.Equal(t, specificError.Error()+"\n", w.Body.String())
}
// Test400 verifies a custom not-found handler installed via SetNotFound is
// used for unmatched routes.
func (ts *TestSuite) Test400(t *testing.T, mux ambient.AppRouter) {
	notFound := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	},
	)
	mux.SetServeHTTP(defaultServeHTTP)
	mux.SetNotFound(notFound)
	r := httptest.NewRequest("GET", "/unknown", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, http.StatusNotFound, w.Code)
}

// TestNotFound verifies mux.Error writes a 404 response directly.
func (ts *TestSuite) TestNotFound(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	r := httptest.NewRequest("GET", "/unknown", nil)
	w := httptest.NewRecorder()
	mux.Error(http.StatusNotFound, w, r)
	assert.Equal(t, http.StatusNotFound, w.Code)
}

// TestBadRequest verifies mux.Error writes a 400 response directly.
func (ts *TestSuite) TestBadRequest(t *testing.T, mux ambient.AppRouter) {
	mux.SetServeHTTP(defaultServeHTTP)
	r := httptest.NewRequest("GET", "/unknown", nil)
	w := httptest.NewRecorder()
	mux.Error(http.StatusBadRequest, w, r)
	assert.Equal(t, http.StatusBadRequest, w.Code)
}
package renderer
import (
"reflect"
"github.com/go-gl/gl/v4.6-core/gl"
)
// shaderDataType enumerates the scalar types a vertex attribute can carry.
type shaderDataType int

const (
	ShaderDataTypeFloat32 shaderDataType = iota
	ShaderDataTypeInt32
	ShaderDataTypeUint32
	ShaderDataTypeBool
)

// size returns the size in bytes of one scalar of this type, used to compute
// vertex strides and attribute offsets.
// NOTE(review): unknown values return 0 here, while openGLType panics for the
// same case — consider making the two consistent; a silent 0 would corrupt
// the computed stride.
func (dataType shaderDataType) size() uint32 {
	switch dataType {
	case ShaderDataTypeFloat32:
		return 4
	case ShaderDataTypeInt32:
		return 4
	case ShaderDataTypeUint32:
		return 4
	case ShaderDataTypeBool:
		return 1
	}
	return 0
}
// openGLType maps this scalar type to its OpenGL enum value, panicking on an
// unrecognized type.
func (dataType shaderDataType) openGLType() uint32 {
	switch dataType {
	case ShaderDataTypeFloat32:
		return gl.FLOAT
	case ShaderDataTypeInt32:
		return gl.INT
	case ShaderDataTypeUint32:
		return gl.UNSIGNED_INT
	case ShaderDataTypeBool:
		return gl.BOOL
	default:
		panic("unknown shader data type")
	}
}
// VertexBuffer pairs an OpenGL vertex buffer object with the vertex array
// object describing its attribute layout.
type VertexBuffer struct {
	vertexBuffer, vertexArray uint32
}

// LayoutElement describes one interleaved vertex attribute: its scalar type
// and how many scalars it holds.
type LayoutElement struct {
	ShaderDataType shaderDataType
	Count          uint32
}

// NewVertexBuffer uploads data to a new GL buffer and configures a VAO whose
// attributes follow layout, in order. data must be a slice whose element size
// matches the combined layout stride. The GL call order matters: the VBO must
// be bound before the VAO attribute pointers are set.
func NewVertexBuffer(data interface{}, layout []LayoutElement) *VertexBuffer {
	vertexBuffer := &VertexBuffer{}
	gl.GenBuffers(1, &vertexBuffer.vertexBuffer)
	gl.BindBuffer(gl.ARRAY_BUFFER, vertexBuffer.vertexBuffer)
	// Total byte size = element count * element size, taken via reflection
	// since data is an untyped slice.
	gl.BufferData(gl.ARRAY_BUFFER, reflect.ValueOf(data).Len()*int(reflect.TypeOf(data).Elem().Size()), gl.Ptr(data), gl.STATIC_DRAW)
	gl.GenVertexArrays(1, &vertexBuffer.vertexArray)
	gl.BindVertexArray(vertexBuffer.vertexArray)
	// Stride is the byte width of one full vertex across all attributes.
	stride := int32(0)
	for _, element := range layout {
		stride += int32(element.ShaderDataType.size() * element.Count)
	}
	// Register each attribute at its running byte offset within the vertex.
	offset := uint32(0)
	for i, element := range layout {
		gl.EnableVertexAttribArray(uint32(i))
		gl.VertexAttribPointerWithOffset(uint32(i), int32(element.Count), element.ShaderDataType.openGLType(), false, stride, uintptr(offset))
		offset += element.Count * element.ShaderDataType.size()
	}
	return vertexBuffer
}
// Bind makes this buffer's VAO current; the VAO already records the VBO
// binding and attribute layout, so rebinding the VBO itself is unnecessary.
func (vertexBuffer *VertexBuffer) Bind() {
	// gl.BindBuffer(gl.ARRAY_BUFFER, vertexBuffer.vertexBuffer)
	gl.BindVertexArray(vertexBuffer.vertexArray)
}

// Delete releases the underlying GL buffer and vertex array objects.
func (vertexBuffer *VertexBuffer) Delete() {
	gl.DeleteBuffers(1, &vertexBuffer.vertexBuffer)
	gl.DeleteVertexArrays(1, &vertexBuffer.vertexArray)
}
package example
import (
"image"
"time"
"github.com/DrJosh9000/ichigo/engine"
"github.com/DrJosh9000/ichigo/geom"
)
// Level1 creates the level_1 scene.
// Level1 creates the level_1 scene: a half-speed parallax space backdrop
// behind a depth-sorted container holding the hex-prism floor and the player.
func Level1() *engine.Scene {
	return &engine.Scene{
		ID: "level_1",
		// Scene bounds extend 32px past the 320x240 play area on every side.
		Bounds: engine.Bounds(image.Rect(-32, -32, 320+32, 240+32)),
		Child: engine.MakeContainer(
			engine.DummyLoad{
				Duration: 2 * time.Second,
			},
			&engine.Parallax{
				CameraID: "game_camera",
				Child: &engine.Billboard{
					ID:  "bg_image",
					Pos: geom.Pt3(-160, -20, -100),
					Src: engine.ImageRef{Path: "assets/space.png"},
				},
				// Background scrolls at half the camera speed.
				Factor: 0.5,
			}, // Parallax
			&engine.DrawDAG{
				// Draw-order dependency graph bucketed into 16px chunks.
				ChunkSize: 16,
				Child: engine.MakeContainer(
					level1PrismMap(),
					level1Awakeman(),
				), // Container
			}, // DrawDAG
		), // Container
	} // Scene
}
// level1PrismMap builds the hexagonal-prism floor for level 1.
//
// The original spelled out ~190 map entries by hand; they follow an exact
// pattern, so the floor is now generated: rows z = -6..13 of prisms at y = 0,
// with x running from max(0, -2z-1) up to min(12, 26-2z) — i.e. the top-left
// edge steps in by two columns per row for z < 0 and the bottom edge steps in
// by two columns per row for z > 7. One extra prism at (6, -1, 5) sits a level
// above the floor and uses sheet cell 1.
func level1PrismMap() *engine.PrismMap {
	floor := make(map[geom.Int3]*engine.Prism)
	for z := -6; z <= 13; z++ {
		xmin, xmax := 0, 12
		if z < 0 {
			xmin = -2*z - 1
		}
		if z > 7 {
			xmax = 26 - 2*z
		}
		for x := xmin; x <= xmax; x++ {
			floor[geom.Pt3(x, 0, z)] = &engine.Prism{}
		}
	}
	// The single raised prism, drawn with a different sheet cell.
	floor[geom.Pt3(6, -1, 5)] = &engine.Prism{Cell: 1}

	return &engine.PrismMap{
		ID: "hexagons",
		PosToWorld: geom.IntMatrix3x4{
			// For each tile in the X direction, go right by 24 and
			// forward by 8, etc
			0: [4]int{24, 0, 0, 0},
			1: [4]int{0, 16, 0, 0},
			2: [4]int{8, 0, 16, 0},
		},
		PrismSize: geom.Int3{X: 32, Y: 16, Z: 16},
		PrismTop: []image.Point{
			{X: 8, Y: 0},
			{X: 0, Y: 8},
			{X: 8, Y: 16},
			{X: 23, Y: 16},
			{X: 31, Y: 8},
			{X: 23, Y: 0},
		},
		Sheet: engine.Sheet{
			CellSize: image.Pt(32, 32),
			Src:      engine.ImageRef{Path: "assets/hexprism32.png"},
		},
		Map: floor,
	} // PrismMap
}
// level1Awakeman constructs the player character for level 1: a 10x16 sprite
// with idle/walk/run animations, dropped in from above the floor.
func level1Awakeman() *Awakeman {
	return &Awakeman{
		CameraID: "game_camera",
		ToastID:  "toast",
		Sprite: engine.Sprite{
			Actor: engine.Actor{
				CollisionDomain: "level_1",
				// Spawn above the floor (negative y is up) so the player falls in.
				Pos: geom.Pt3(100, -64, 100),
				// Collision box relative to Pos: 8 wide, 16 tall, 2 deep.
				Bounds: geom.Box{
					Min: geom.Pt3(-4, -15, -1),
					Max: geom.Pt3(4, 1, 1),
				},
			},
			// Shift the drawn image so Pos sits at the sprite's feet.
			DrawOffset: image.Pt(-5, -15),
			Sheet: engine.Sheet{
				// Animations reference cells of the 10x16 sprite sheet;
				// Duration values are in ticks.
				AnimDefs: map[string]*engine.AnimDef{
					"idle_left": {Steps: []engine.AnimStep{
						{Cell: 1, Duration: 60},
					}},
					"idle_right": {Steps: []engine.AnimStep{
						{Cell: 0, Duration: 60},
					}},
					"run_left": {Steps: []engine.AnimStep{
						{Cell: 14, Duration: 3},
						{Cell: 15, Duration: 5},
						{Cell: 16, Duration: 3},
						{Cell: 17, Duration: 3},
					}},
					"run_right": {Steps: []engine.AnimStep{
						{Cell: 10, Duration: 3},
						{Cell: 11, Duration: 5},
						{Cell: 12, Duration: 3},
						{Cell: 13, Duration: 3},
					}},
					"run_vert": {Steps: []engine.AnimStep{
						{Cell: 18, Duration: 3},
						{Cell: 19, Duration: 5},
						{Cell: 20, Duration: 3},
						{Cell: 21, Duration: 3},
						{Cell: 22, Duration: 3},
						{Cell: 23, Duration: 5},
						{Cell: 24, Duration: 3},
						{Cell: 25, Duration: 3},
					}},
					"walk_left": {Steps: []engine.AnimStep{
						{Cell: 2, Duration: 6},
						{Cell: 3, Duration: 6},
						{Cell: 4, Duration: 6},
						{Cell: 5, Duration: 6},
					}},
					"walk_right": {Steps: []engine.AnimStep{
						{Cell: 6, Duration: 6},
						{Cell: 7, Duration: 6},
						{Cell: 8, Duration: 6},
						{Cell: 9, Duration: 6},
					}},
				},
				CellSize: image.Pt(10, 16),
				Src:      engine.ImageRef{Path: "assets/aw.png"},
			}, // Sheet
		}, // Sprite
	} // Awakeman
}
package assertion
import (
"fmt"
"reflect"
"github.com/cloudfoundry/bosh-init/internal/github.com/onsi/gomega/types"
)
type Assertion struct {
actualInput interface{}
fail types.GomegaFailHandler
offset int
extra []interface{}
}
func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion {
return &Assertion{
actualInput: actualInput,
fail: fail,
offset: offset,
extra: extra,
}
}
func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
// buildDescription renders the optional description arguments into a
// printf-style message terminated by a newline, or returns "" when no
// description was supplied.
func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
	if len(optionalDescription) == 0 {
		return ""
	}
	format := optionalDescription[0].(string)
	return fmt.Sprintf(format, optionalDescription[1:]...) + "\n"
}
// match evaluates matcher against the assertion's actual input and reports
// whether the outcome equals desiredMatch; on a matcher error or a mismatch
// it invokes the registered fail handler and returns false.
func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
	matches, err := matcher.Match(assertion.actualInput)
	description := assertion.buildDescription(optionalDescription...)
	if err != nil {
		// The matcher itself failed (e.g. it could not handle the actual
		// value); report its error rather than a match failure.
		assertion.fail(description+err.Error(), 2+assertion.offset)
		return false
	}
	if matches != desiredMatch {
		var message string
		if desiredMatch {
			message = matcher.FailureMessage(assertion.actualInput)
		} else {
			message = matcher.NegatedFailureMessage(assertion.actualInput)
		}
		// 2+offset: presumably skips this frame plus the public entry point
		// so the failure is attributed to the caller's call site -- confirm
		// against the fail handler's call-depth convention.
		assertion.fail(description+message, 2+assertion.offset)
		return false
	}
	return true
}
func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
success, message := vetExtras(assertion.extra)
if success {
return true
}
description := assertion.buildDescription(optionalDescription...)
assertion.fail(description+message, 2+assertion.offset)
return false
}
// vetExtras checks that every extra value captured alongside the actual
// input is nil or its type's zero value; the first offending extra is
// reported in the returned message.
func vetExtras(extras []interface{}) (bool, string) {
	for i, extra := range extras {
		if extra != nil {
			zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
			if !reflect.DeepEqual(zeroValue, extra) {
				// i+1: presumably because the extras follow the actual value
				// in the caller's argument list -- confirm at the call site.
				message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
				return false, message
			}
		}
	}
	return true, ""
} | internal/github.com/onsi/gomega/internal/assertion/assertion.go | 0.663996 | 0.461381 | assertion.go | starcoder
package vile
import (
"bytes"
)
/*
* The good example of "rest" is JavaScript
* JavaScript e.g
*
const sum = (...num) => {
console.log(num.reduce((previous, current) => {
return previous + current
}))
}
sum(1, 2, 3, 4, 5)
*
*/
// Cons creates a new list cell whose head is car and whose tail is cdr.
func Cons(car *Object, cdr *Object) *Object {
	return &Object{
		Type: ListType,
		car:  car,
		cdr:  cdr,
	}
}
// Car - return the first object in a list; Null is returned for the empty list.
func Car(lst *Object) *Object {
	if lst == EmptyList {
		return Null
	}
	return lst.car
}

// Cdr - return the rest of the list; the empty list is its own tail.
func Cdr(lst *Object) *Object {
	if lst == EmptyList {
		return lst
	}
	return lst.cdr
}
// Caar - return the Car of the Car of the list
func Caar(lst *Object) *Object {
return Car(Car(lst))
}
// Cadr - return the Car of the Cdr of the list
func Cadr(lst *Object) *Object {
return Car(Cdr(lst))
}
// Cdar - return the Cdr of the Car of the list, i.e. the tail of the
// list's first element.
func Cdar(lst *Object) *Object {
	// Fix: the previous implementation computed Car(Cdr(lst)) (which is
	// Cadr), contradicting both the function name and its doc comment.
	return Cdr(Car(lst))
}
// Cddr - return the Cdr of the Cdr of the list
func Cddr(lst *Object) *Object {
return Cdr(Cdr(lst))
}
// Cadar - return the Car of the Cdr of the Car of the list
func Cadar(lst *Object) *Object {
return Car(Cdr(Car(lst)))
}
// Caddr - return the Car of the Cdr of the Cdr of the list
func Caddr(lst *Object) *Object {
return Car(Cdr(Cdr(lst)))
}
// Cdddr - return the Cdr of the Cdr of the Cdr of the list
func Cdddr(lst *Object) *Object {
return Cdr(Cdr(Cdr(lst)))
}
// Cadddr - return the Car of the Cdr of the Cdr of the Cdr of the list
func Cadddr(lst *Object) *Object {
return Car(Cdr(Cdr(Cdr(lst))))
}
// Cddddr - return the Cdr of the Cdr of the Cdr of the Cdr of the list
func Cddddr(lst *Object) *Object {
return Cdr(Cdr(Cdr(Cdr(lst))))
}
var QuoteSymbol = Intern("quote")
var QuasiquoteSymbol = Intern("quasiquote")
var UnquoteSymbol = Intern("unquote")
var UnquoteSymbolSplicing = Intern("unquote-splicing")
// EmptyList - the value of (), terminates linked lists
var EmptyList = initEmpty()
func initEmpty() *Object {
return &Object{Type: ListType} //car and cdr are both nil
}
// ListEqual reports whether the two lists have the same length and
// pairwise Equal elements.
func ListEqual(lst *Object, a *Object) bool {
	p, q := lst, a
	for p != EmptyList {
		if q == EmptyList {
			// a is shorter than lst.
			return false
		}
		if !Equal(p.car, q.car) {
			return false
		}
		p, q = p.cdr, q.cdr
	}
	// Equal only when q is also exhausted (both are the EmptyList sentinel).
	return p == q
}
// listToString renders a list in reader syntax; two-element lists headed
// by a quoting symbol are printed with the shorthand prefix instead of
// the parenthesized form.
func listToString(lst *Object) string {
	var buf bytes.Buffer
	// Shorthand only applies to lists of exactly two elements, e.g.
	// (quote x) => 'x.
	if lst != EmptyList && lst.cdr != EmptyList && Cddr(lst) == EmptyList {
		if lst.car == QuoteSymbol {
			buf.WriteString("'")
			buf.WriteString(Cadr(lst).String())
			return buf.String()
		} else if lst.car == QuasiquoteSymbol {
			buf.WriteString("`")
			buf.WriteString(Cadr(lst).String())
			return buf.String()
		} else if lst.car == UnquoteSymbol {
			buf.WriteString("~")
			buf.WriteString(Cadr(lst).String())
			return buf.String()
		} else if lst.car == UnquoteSymbolSplicing {
			// NOTE(review): unquote-splicing prints the same "~" prefix as
			// unquote; if the reader expects "~@" this output does not
			// round-trip -- confirm against the reader.
			buf.WriteString("~")
			buf.WriteString(Cadr(lst).String())
			return buf.String()
		}
	}
	// General case: space-separated elements in parentheses.
	buf.WriteString("(")
	delim := ""
	for lst != EmptyList {
		buf.WriteString(delim)
		delim = " "
		buf.WriteString(lst.car.String())
		lst = lst.cdr
	}
	buf.WriteString(")")
	return buf.String()
}
// ListLength returns the number of elements in lst.
func ListLength(lst *Object) int {
	n := 0
	for node := lst; node != EmptyList; node = node.cdr {
		n++
	}
	return n
}
// MakeList returns a list containing val repeated count times; a count of
// zero (or less) yields the empty list.
func MakeList(count int, val *Object) *Object {
	lst := EmptyList
	for ; count > 0; count-- {
		lst = Cons(val, lst)
	}
	return lst
}
// ListFromValues builds a list from the slice, preserving element order.
func ListFromValues(values []*Object) *Object {
	p := EmptyList
	// Walk backwards so each Cons prepends and the list reads front-to-back.
	for i := len(values) - 1; i >= 0; i-- {
		v := values[i]
		p = Cons(v, p)
	}
	return p
}

// List builds a list from its arguments, in order.
func List(values ...*Object) *Object {
	return ListFromValues(values)
}
func listToVector(lst *Object) *Object {
var elems []*Object
for lst != EmptyList {
elems = append(elems, lst.car)
lst = lst.cdr
}
return VectorFromElementsNoCopy(elems)
}
// ToList - convert the argument to a List, if possible. Lists are returned
// unchanged; vectors, structs, and strings are converted element-wise. Any
// other type yields an ArgumentErrorKey error.
func ToList(obj *Object) (*Object, error) {
	switch obj.Type {
	case ListType:
		return obj, nil
	case VectorType:
		return ListFromValues(obj.elements), nil
	case StructType:
		return structToList(obj)
	case StringType:
		return stringToList(obj), nil
	}
	return nil, Error(ArgumentErrorKey, "to-list cannot accept ", obj.Type)
}
// ReverseList returns a fresh list holding the elements of lst in
// reverse order; lst itself is not modified.
func ReverseList(lst *Object) *Object {
	out := EmptyList
	for node := lst; node != EmptyList; node = node.cdr {
		out = Cons(node.car, out)
	}
	return out
}
// Flatten returns a list with every nested list (or vector) in lst spliced
// into a single flat list of atoms.
func Flatten(lst *Object) *Object {
	result := EmptyList
	tail := EmptyList // last cell of result, used for O(1) splicing
	for lst != EmptyList {
		item := lst.car
		switch item.Type {
		case ListType:
			item = Flatten(item)
		case VectorType:
			// Vectors are converted to lists first, then flattened.
			litem, _ := ToList(item)
			item = Flatten(litem)
		default:
			// Atoms become single-element lists so splicing is uniform.
			item = List(item)
		}
		// Fix: skip empty flattened items. Previously an empty first item
		// left tail pointing at the EmptyList sentinel, and the advance
		// loop below then dereferenced its nil cdr and panicked.
		if item == EmptyList {
			lst = lst.cdr
			continue
		}
		if tail == EmptyList {
			result = item
			tail = result
		} else {
			// Splice item onto the end of the accumulated result.
			tail.cdr = item
		}
		// Advance tail to the last cell of the spliced item.
		for tail.cdr != EmptyList {
			tail = tail.cdr
		}
		lst = lst.cdr
	}
	return result
}
func Concat(seq1 *Object, seq2 *Object) (*Object, error) {
rev := ReverseList(seq1)
if rev == EmptyList {
return seq2, nil
}
lst := seq2
for rev != EmptyList {
lst = Cons(rev.car, lst)
rev = rev.cdr
}
return lst, nil
} | src/list.go | 0.536313 | 0.471527 | list.go | starcoder |
package rng
import (
"fmt"
"math"
)
// GammaGenerator is a random number generator for gamma distribution.
// The zero value is invalid, use NewGammaGenerator to create a generator
type GammaGenerator struct {
uniform *UniformGenerator
}
// NewGammaGenerator returns a gamma distribution generator
// it is recommended using time.Now().UnixNano() as the seed, for example:
// grng := rng.NewGammaGenerator(time.Now().UnixNano())
func NewGammaGenerator(seed int64) *GammaGenerator {
urng := NewUniformGenerator(seed)
return &GammaGenerator{urng}
}
// Gamma returns a random number of gamma distribution; alpha (shape) and
// beta (scale) must both be strictly positive or the call panics.
func (grng GammaGenerator) Gamma(alpha, beta float64) float64 {
	if !(alpha > 0.0) || !(beta > 0.0) {
		// Negated comparisons so NaN parameters are also rejected.
		panic(fmt.Sprintf("Invalid parameter alpha %.2f beta %.2f", alpha, beta))
	}
	return grng.gamma(alpha, beta)
}
// gamma draws a single gamma(alpha, beta) variate, dispatching on alpha.
// inspired by random.py
func (grng GammaGenerator) gamma(alpha, beta float64) float64 {
	var MAGIC_CONST float64 = 4 * math.Exp(-0.5) / math.Sqrt(2.0)
	if alpha > 1.0 {
		// Use Cheng (1977), "The generation of Gamma variables with
		// non-integral shape parameters", Applied Statistics, 26, No. 1,
		// p71-74 (acceptance-rejection sampling).
		ainv := math.Sqrt(2.0*alpha - 1.0)
		bbb := alpha - math.Log(4.0)
		ccc := alpha + ainv
		for {
			u1 := grng.uniform.Float64()
			// Reject u1 too close to 0 or 1 so Log(u1/(1-u1)) stays finite.
			if !(1e-7 < u1 && u1 < .9999999) {
				continue
			}
			u2 := 1.0 - grng.uniform.Float64()
			v := math.Log(u1/(1.0-u1)) / ainv
			x := alpha * math.Exp(v)
			z := u1 * u1 * u2
			r := bbb + ccc*v - x
			// Cheap squeeze test first, exact acceptance test second.
			if r+MAGIC_CONST-4.5*z >= 0.0 || r >= math.Log(z) {
				return x * beta
			}
		}
	} else if alpha == 1.0 {
		// gamma(1, beta) is exponential with mean beta: inverse-transform
		// sampling of -ln(U) * beta, with U bounded away from zero.
		u := grng.uniform.Float64()
		for u <= 1e-7 {
			u = grng.uniform.Float64()
		}
		return -math.Log(u) * beta
	} else { // alpha between 0.0 and 1.0 (exclusive)
		// Uses Algorithm of Statistical Computing - Kennedy & Gentle
		// (acceptance-rejection for shape < 1 -- TODO confirm the exact
		// algorithm reference).
		var x float64
		for {
			u := grng.uniform.Float64()
			b := (math.E + alpha) / math.E
			p := b * u
			if p <= 1.0 {
				x = math.Pow(p, 1.0/alpha)
			} else {
				x = -math.Log((b - p) / alpha)
			}
			u1 := grng.uniform.Float64()
			if p > 1.0 {
				if u1 <= math.Pow(x, alpha-1.0) {
					break
				}
			} else if u1 <= math.Exp(-x) {
				break
			}
		}
		return x * beta
	}
} | vendor/github.com/leesper/go_rng/gamma.go | 0.774114 | 0.456834 | gamma.go | starcoder
package tokenattributes
import (
"fmt"
"github.com/jtejido/golucene/core/util"
)
/*
Determines the position of this token relative to the previous Token
in a TokenStream, used in phrase searching.
The default value is one.
Some common uses for this are:
- Set it to zero to put multiple terms in the same position. This
is useful if, e.g., a word has multiple stems. Searches for phrases
including either stem will match. In this case, all but the first
stem's increment should be set to zero: the increment of the first
instance should be one. Repeating a token with an increment of zero
can also be used to boost the scores of matches on that token.
- Set it to values greater than one to inhibit exact phrase matches.
If, for example, one does not want phrases to match across removed
stop words, then one could build a stop word filter that removes
stop words and also sets the incremeent to the number of stop words
removed before each non-stop word. Then axact phrase queries will
only match when the terms occur with no intervening stop words.
*/
type PositionIncrementAttribute interface {
// Set the position increment. The deafult value is one.
SetPositionIncrement(int)
// Returns the position increment of this token.
PositionIncrement() int
}
/* Default implementation of ositionIncrementAttribute */
type PositionIncrementAttributeImpl struct {
positionIncrement int
}
func newPositionIncrementAttributeImpl() util.AttributeImpl {
return &PositionIncrementAttributeImpl{
positionIncrement: 1,
}
}
func (a *PositionIncrementAttributeImpl) Interfaces() []string {
return []string{"PositionIncrementAttribute"}
}
func (a *PositionIncrementAttributeImpl) SetPositionIncrement(positionIncrement int) {
assert2(positionIncrement >= 0, "Increment must be zero or greater: got %v", positionIncrement)
a.positionIncrement = positionIncrement
}
func assert2(ok bool, msg string, args ...interface{}) {
if !ok {
panic(fmt.Sprintf(msg, args...))
}
}
func (a *PositionIncrementAttributeImpl) PositionIncrement() int {
return a.positionIncrement
}
func (a *PositionIncrementAttributeImpl) Clear() {
a.positionIncrement = 1
}
func (a *PositionIncrementAttributeImpl) Clone() util.AttributeImpl {
return &PositionIncrementAttributeImpl{
positionIncrement: a.positionIncrement,
}
}
func (a *PositionIncrementAttributeImpl) CopyTo(target util.AttributeImpl) {
target.(PositionIncrementAttribute).SetPositionIncrement(a.positionIncrement)
} | core/analysis/tokenattributes/position.go | 0.762778 | 0.40928 | position.go | starcoder |
package sqinn
// value types, same as in sqinn/src/handler.h
// Value types for binding query parameters and retrieving column values.
const (
// ValNull represents the NULL value (Go nil)
ValNull byte = 0
// ValInt represents a Go int
ValInt byte = 1
// ValInt64 represents a Go int64
ValInt64 byte = 2
// ValDouble represents a Go float64
ValDouble byte = 6 // the IEEE variant
// ValText represents a Go string
ValText byte = 4
// ValBlob represents a Go []byte
ValBlob byte = 5
)
// An IntValue is a nullable int. The zero value represents NULL.
type IntValue struct {
	// Set reports whether a value is present; false means NULL.
	Set bool
	// Value is the int payload, meaningful only when Set is true.
	Value int
}

// IsNull reports whether the value is NULL.
func (v IntValue) IsNull() bool { return !v.Set }

// An Int64Value is a nullable int64. The zero value represents NULL.
type Int64Value struct {
	// Set reports whether a value is present; false means NULL.
	Set bool
	// Value is the int64 payload, meaningful only when Set is true.
	Value int64
}

// IsNull reports whether the value is NULL.
func (v Int64Value) IsNull() bool { return !v.Set }

// A DoubleValue is a nullable float64. The zero value represents NULL.
type DoubleValue struct {
	// Set reports whether a value is present; false means NULL.
	Set bool
	// Value is the float64 payload, meaningful only when Set is true.
	Value float64
}

// IsNull reports whether the value is NULL.
func (v DoubleValue) IsNull() bool { return !v.Set }

// A StringValue is a nullable string. The zero value represents NULL.
type StringValue struct {
	// Set reports whether a value is present; false means NULL.
	Set bool
	// Value is the string payload, meaningful only when Set is true.
	Value string
}

// IsNull reports whether the value is NULL.
func (v StringValue) IsNull() bool { return !v.Set }

// A BlobValue is a nullable []byte. The zero value represents NULL.
type BlobValue struct {
	// Set reports whether a value is present; false means NULL.
	Set bool
	// Value is the []byte payload, meaningful only when Set is true.
	Value []byte
}

// IsNull reports whether the value is NULL.
func (v BlobValue) IsNull() bool { return !v.Set }

// An AnyValue can hold a value of any supported type; only the field the
// producer filled in is meaningful.
type AnyValue struct {
	Int    IntValue    // a nullable Go int
	Int64  Int64Value  // a nullable Go int64
	Double DoubleValue // a nullable Go float64
	String StringValue // a nullable Go string
	Blob   BlobValue   // a nullable Go []byte
}

// AsInt returns the int value, or 0 when it is NULL or not an int.
func (a AnyValue) AsInt() int { return a.Int.Value }

// AsInt64 returns the int64 value, or 0 when it is NULL or not an int64.
func (a AnyValue) AsInt64() int64 { return a.Int64.Value }

// AsDouble returns the float64 value, or 0.0 when it is NULL or not a double.
func (a AnyValue) AsDouble() float64 { return a.Double.Value }

// AsString returns the string value, or "" when it is NULL or not a string.
func (a AnyValue) AsString() string { return a.String.Value }

// AsBlob returns the []byte value, or nil when it is NULL or not a blob.
func (a AnyValue) AsBlob() []byte { return a.Blob.Value }
// A Row represents a query result row and holds a slice of values, one value
// per requested column.
type Row struct {
Values []AnyValue
} | sqinn/values.go | 0.833155 | 0.46794 | values.go | starcoder |
package shape
import "github.com/gregoryv/draw/xy"
// Aligner type aligns multiple shapes
type Aligner struct{}
// HAlignCenter aligns shape[1:] to shape[0] center coordinates horizontally
func (Aligner) HAlignCenter(shapes ...Shape) { hAlign(Center, shapes...) }
// HAlignTop aligns shape[1:] to shape[0] top coordinates horizontally
func (Aligner) HAlignTop(shapes ...Shape) { hAlign(Top, shapes...) }
// HAlignBottom aligns shape[1:] to shape[0] bottom coordinates horizontally
func (Aligner) HAlignBottom(shapes ...Shape) { hAlign(Bottom, shapes...) }
func hAlign(adjust Alignment, objects ...Shape) {
first := objects[0]
_, y := first.Position()
for _, shape := range objects[1:] {
switch adjust {
case Top:
shape.SetY(y)
case Bottom:
shape.SetY(y + first.Height() - shape.Height())
case Center:
diff := (first.Height() - shape.Height()) / 2
shape.SetY(y + diff)
}
}
}
// VAlignCenter aligns shape[1:] to shape[0] center coordinates vertically
func (Aligner) VAlignCenter(shapes ...Shape) { vAlign(Center, shapes...) }
// VAlignLeft aligns shape[1:] to shape[0] left coordinates vertically
func (Aligner) VAlignLeft(shapes ...Shape) { vAlign(Left, shapes...) }
// VAlignRight aligns shape[1:] to shape[0] right coordinates vertically
func (Aligner) VAlignRight(shapes ...Shape) { vAlign(Right, shapes...) }
// vAlign aligns objects[1:] vertically relative to objects[0] according
// to adjust (Left, Right or Center).
func vAlign(adjust Alignment, objects ...Shape) {
	first := objects[0]
	x, _ := first.Position()
	for _, shape := range objects[1:] {
		switch adjust {
		case Left:
			shape.SetX(x)
		case Right:
			shape.SetX(x + first.Width() - shape.Width())
		case Center:
			if first.Direction() == DirectionLeft {
				// NOTE(review): for a left-pointing reference the shape is
				// offset left by (first.Width()+shape.Width())/2 -- confirm
				// this is the intended geometry.
				shape.SetX(x - (first.Width()+shape.Width())/2)
			} else {
				shape.SetX(x + (first.Width()-shape.Width())/2)
			}
		}
	}
}
type Alignment int
const (
Top Alignment = iota
Left
Right
Bottom
Center
)
// NewDirection returns the compass direction of the vector from `from` to
// `to`: a pure axis direction when the points share a coordinate, a
// combined diagonal flag otherwise.
// NOTE(review): identical points match no case and fall through to the
// default, yielding DirectionUpRight -- confirm this is intended.
func NewDirection(from, to xy.Point) Direction {
	switch {
	case from.LeftOf(to) && from.Y == to.Y:
		return DirectionRight
	case from.LeftOf(to) && from.Above(to):
		return DirectionDownRight
	case from.Above(to) && from.X == to.X:
		return DirectionDown
	case from.RightOf(to) && from.Above(to):
		return DirectionDownLeft
	case from.RightOf(to) && from.Y == to.Y:
		return DirectionLeft
	case from.Below(to) && from.RightOf(to):
		return DirectionUpLeft
	case from.Below(to) && from.X == to.X:
		return DirectionUp
	default: // from.LeftOf(to) && from.Below(to):
		return DirectionUpRight
	}
}
type Direction uint
const (
DirectionRight Direction = (1 << iota)
DirectionLeft
DirectionUp
DirectionDown
DirectionDownRight = DirectionDown | DirectionRight
DirectionDownLeft = DirectionDown | DirectionLeft
DirectionUpLeft = DirectionUp | DirectionLeft
DirectionUpRight = DirectionUp | DirectionRight
)
// Method
func (d Direction) Is(dir Direction) bool {
return (d & dir) == dir
} | shape/align.go | 0.645343 | 0.589628 | align.go | starcoder |
package core
import (
"bytes"
"errors"
"log"
"math"
"os/exec"
"strconv"
"strings"
)
// ScriptSimilarityEstimator utilizes a script to analyze the data based on some external
// algorithm and utilizes various norms to measure the differences between the
// analysis outputs.
type ScriptSimilarityEstimator struct {
AbstractDatasetSimilarityEstimator
analysisScript string // the analysis script to be executed
simType ScriptSimilarityEstimatorType // similarity type - cosine, manhattan, euclidean
inverseIndex map[string]int // inverse index that maps datasets to ints
datasetCoordinates [][]float64 // holds the dataset coordinates
}
// ScriptSimilarityEstimatorType reflects the type of the ScriptSimilarityEstimator
type ScriptSimilarityEstimatorType uint8
const (
scriptSimilarityTypeManhattan ScriptSimilarityEstimatorType = iota
scriptSimilarityTypeEuclidean ScriptSimilarityEstimatorType = iota + 1
scriptSimilarityTypeCosine ScriptSimilarityEstimatorType = iota + 2
)
// Compute method constructs the Similarity Matrix
func (e *ScriptSimilarityEstimator) Compute() error {
return datasetSimilarityEstimatorCompute(e)
}
// Similarity returns the similarity between the two datasets
func (e *ScriptSimilarityEstimator) Similarity(a, b *Dataset) float64 {
var coordsA, coordsB []float64
if id, ok := e.inverseIndex[a.Path()]; ok {
coordsA = e.datasetCoordinates[id]
} else {
coordsA = e.analyzeDataset(a.Path())
}
if id, ok := e.inverseIndex[b.Path()]; ok {
coordsB = e.datasetCoordinates[id]
} else {
coordsB = e.analyzeDataset(b.Path())
}
if e.simType == scriptSimilarityTypeCosine {
val, err := e.cosine(coordsA, coordsB)
if err != nil {
log.Println(err)
}
return val
}
normDegree := 2 // default is EUCLIDEAN distance
if e.simType == scriptSimilarityTypeManhattan {
normDegree = 1
}
val, err := e.norm(coordsA, coordsB, normDegree)
if err != nil {
log.Println(err)
}
return DistanceToSimilarity(val)
}
// Configure sets a number of configuration parameters to the struct. Use this
// method before the execution of the computation
func (e *ScriptSimilarityEstimator) Configure(conf map[string]string) {
if val, ok := conf["concurrency"]; ok {
conv, err := strconv.ParseInt(val, 10, 32)
if err != nil {
log.Println(err)
} else {
e.concurrency = int(conv)
}
} else {
e.concurrency = 1
}
if val, ok := conf["script"]; ok {
e.analysisScript = val
} else {
log.Println("Analysis script not defined - exiting")
}
if val, ok := conf["type"]; ok {
if val == "cosine" {
e.simType = scriptSimilarityTypeCosine
} else if val == "manhattan" {
e.simType = scriptSimilarityTypeManhattan
} else if val == "euclidean" {
e.simType = scriptSimilarityTypeEuclidean
} else {
log.Println("Similarity Type not known, valid values: [cosine manhattan euclidean]")
}
} else {
e.simType = scriptSimilarityTypeEuclidean
}
// execute analysis for each dataset
log.Println("Analyzing datasets")
e.datasetCoordinates = e.analyzeDatasets()
e.inverseIndex = make(map[string]int)
for i, d := range e.datasets {
e.inverseIndex[d.Path()] = i
}
}
// Options returns a list of options that the user can set
func (e *ScriptSimilarityEstimator) Options() map[string]string {
return map[string]string{
"concurrency": "max number of threads to run in parallel",
"script": "path of the analysis script to be executed",
"type": "the type of the similarity - one of: [cosine manhattan euclidean]",
}
}
// Serialize returns a byte array that represents the struct is a serialized version
func (e *ScriptSimilarityEstimator) Serialize() []byte {
buffer := new(bytes.Buffer)
buffer.Write(getBytesInt(int(SimilarityTypeScript)))
buffer.Write(
datasetSimilarityEstimatorSerialize(
e.AbstractDatasetSimilarityEstimator))
// buffer.Write(getBytesInt(e.concurrency))
buffer.Write(getBytesInt(int(e.simType)))
buffer.WriteString(e.analysisScript + "\n")
// write number of coordinates per dataset
buffer.Write(getBytesInt(len(e.datasetCoordinates[0])))
for _, arr := range e.datasetCoordinates {
for _, v := range arr {
buffer.Write(getBytesFloat(v))
}
}
return buffer.Bytes()
}
// Deserialize parses a byte array and forms a ScriptSimilarityEstimator object
func (e *ScriptSimilarityEstimator) Deserialize(b []byte) {
buffer := bytes.NewBuffer(b)
tempInt := make([]byte, 4)
buffer.Read(tempInt) // consume estimator type
buffer.Read(tempInt)
absEstBytes := make([]byte, getIntBytes(tempInt))
buffer.Read(absEstBytes)
e.AbstractDatasetSimilarityEstimator =
*datasetSimilarityEstimatorDeserialize(absEstBytes)
buffer.Read(tempInt)
e.simType = ScriptSimilarityEstimatorType(getIntBytes(tempInt))
line, _ := buffer.ReadString('\n')
e.analysisScript = strings.TrimSpace(line)
e.inverseIndex = make(map[string]int)
for i, d := range e.datasets {
e.inverseIndex[d.Path()] = i
}
tempFloat := make([]byte, 8)
buffer.Read(tempInt)
count := getIntBytes(tempInt)
e.datasetCoordinates = make([][]float64, len(e.datasets))
for i := range e.datasets {
e.datasetCoordinates[i] = make([]float64, count)
for j := range e.datasetCoordinates[i] {
buffer.Read(tempFloat)
e.datasetCoordinates[i][j] = getFloatBytes(tempFloat)
}
}
}
func (e *ScriptSimilarityEstimator) analyzeDatasets() [][]float64 {
c, done := make(chan bool, e.concurrency), make(chan bool)
coords := make([][]float64, len(e.datasets))
for i := 0; i < e.concurrency; i++ {
c <- true
}
for i, d := range e.datasets {
go func(c, done chan bool, i int, path string) {
<-c
coords[i] = e.analyzeDataset(path)
c <- true
done <- true
}(c, done, i, d.Path())
}
for i := 0; i < len(e.datasets); i++ {
<-done
}
return coords
}
// analyzeDataset executed the analysis script into the specified dataset
func (e *ScriptSimilarityEstimator) analyzeDataset(path string) []float64 {
log.Println("Analyzing", path)
cmd := exec.Command(e.analysisScript, path)
out, err := cmd.Output()
if err != nil {
log.Println(err)
}
results := make([]float64, 0)
for _, sv := range strings.Split(string(out), "\t") {
conv, err := strconv.ParseFloat(strings.TrimSpace((sv)), 64)
if err == nil {
results = append(results, conv)
} else {
log.Println(err)
}
}
log.Println("Tuple read:", results)
return results
}
// norm computes the Minkowski distance of the given degree between the
// two coordinate vectors; it returns an error when their lengths differ.
func (e *ScriptSimilarityEstimator) norm(a, b []float64, normDegree int) (float64, error) {
	if len(a) != len(b) {
		return -1, errors.New("arrays have different sizes")
	}
	degree := float64(normDegree)
	total := 0.0
	for i, av := range a {
		total += math.Pow(math.Abs(av-b[i]), degree)
	}
	return math.Pow(total, 1.0/degree), nil
}
// cosine calculates the cosine similarity between two vectors
func (e *ScriptSimilarityEstimator) cosine(a, b []float64) (float64, error) {
if len(a) != len(b) {
return -1, errors.New("arrays have different sizes")
}
nomin, sumA, sumB := 0.0, 0.0, 0.0
for i := range a {
nomin += a[i] * b[i]
sumA += a[i] * a[i]
sumB += b[i] * b[i]
}
denom := math.Sqrt(sumA) * math.Sqrt(sumB)
if denom == 0.0 {
return -1, errors.New("Zero denominator to cosine similarity")
}
return nomin / denom, nil
} | core/similarityscript.go | 0.634656 | 0.472927 | similarityscript.go | starcoder |
package models
import (
"fmt"
"strings"
"time"
"entgo.io/ent/dialect/sql"
"github.com/adnaan/authn/models/session"
)
// Session is the model entity for the Session schema.
type Session struct {
config `json:"-"`
// ID of the ent.
ID string `json:"id,omitempty"`
// Data holds the value of the "data" field.
Data string `json:"data,omitempty"`
// CreatedAt holds the value of the "created_at" field.
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// ExpiresAt holds the value of the "expires_at" field.
ExpiresAt *time.Time `json:"expires_at,omitempty"`
}
// scanValues returns the types for scanning values from sql.Rows.
func (*Session) scanValues(columns []string) ([]interface{}, error) {
values := make([]interface{}, len(columns))
for i := range columns {
switch columns[i] {
case session.FieldID, session.FieldData:
values[i] = &sql.NullString{}
case session.FieldCreatedAt, session.FieldUpdatedAt, session.FieldExpiresAt:
values[i] = &sql.NullTime{}
default:
return nil, fmt.Errorf("unexpected column %q for type Session", columns[i])
}
}
return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Session fields.
func (s *Session) assignValues(columns []string, values []interface{}) error {
if m, n := len(values), len(columns); m < n {
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
}
for i := range columns {
switch columns[i] {
case session.FieldID:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field id", values[i])
} else if value.Valid {
s.ID = value.String
}
case session.FieldData:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field data", values[i])
} else if value.Valid {
s.Data = value.String
}
case session.FieldCreatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field created_at", values[i])
} else if value.Valid {
s.CreatedAt = value.Time
}
case session.FieldUpdatedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
} else if value.Valid {
s.UpdatedAt = value.Time
}
case session.FieldExpiresAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field expires_at", values[i])
} else if value.Valid {
s.ExpiresAt = new(time.Time)
*s.ExpiresAt = value.Time
}
}
}
return nil
}
// Update returns a builder for updating this Session.
// Note that you need to call Session.Unwrap() before calling this method if this Session
// was returned from a transaction, and the transaction was committed or rolled back.
func (s *Session) Update() *SessionUpdateOne {
return (&SessionClient{config: s.config}).UpdateOne(s)
}
// Unwrap unwraps the Session entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (s *Session) Unwrap() *Session {
tx, ok := s.config.driver.(*txDriver)
if !ok {
panic("models: Session is not a transactional entity")
}
s.config.driver = tx.drv
return s
}
// String implements the fmt.Stringer.
func (s *Session) String() string {
var builder strings.Builder
builder.WriteString("Session(")
builder.WriteString(fmt.Sprintf("id=%v", s.ID))
builder.WriteString(", data=")
builder.WriteString(s.Data)
builder.WriteString(", created_at=")
builder.WriteString(s.CreatedAt.Format(time.ANSIC))
builder.WriteString(", updated_at=")
builder.WriteString(s.UpdatedAt.Format(time.ANSIC))
if v := s.ExpiresAt; v != nil {
builder.WriteString(", expires_at=")
builder.WriteString(v.Format(time.ANSIC))
}
builder.WriteByte(')')
return builder.String()
}
// Sessions is a parsable slice of Session.
type Sessions []*Session
func (s Sessions) config(cfg config) {
for _i := range s {
s[_i].config = cfg
}
} | models/session.go | 0.631367 | 0.404978 | session.go | starcoder |
package maze
import "math/rand"
const numNeighbors = 4
// Cell is a cell in a maze.
type Cell struct {
Row, Col int
North, South, East, West *Cell
links map[*Cell]bool
}
// NewCell returns a new Cell put in (row, col).
func NewCell(row, col int) *Cell {
return &Cell{
Row: row,
Col: col,
links: make(map[*Cell]bool),
}
}
// Link links c and cell bidirectionally.
func (c *Cell) Link(cell *Cell) {
c.LinkDi(cell, true)
}
// LinkDi links c and cell bidirectionally if bidi is true.
// Otherwise, it only links c to cell.
func (c *Cell) LinkDi(cell *Cell, bidi bool) {
c.links[cell] = true
if bidi {
cell.LinkDi(c, false)
}
}
// Unlink unlinks c and cell bidirectionally.
func (c *Cell) Unlink(cell *Cell) {
c.UnlinkDi(cell, true)
}
// UnlinkDi unlinks c and cell bidirectionally if bidi is true.
// Otherwise, it only unlinks c to cell.
func (c *Cell) UnlinkDi(cell *Cell, bidi bool) {
delete(c.links, cell)
if bidi {
cell.UnlinkDi(c, false)
}
}
// Links returns a slice of every cell linked with c; the order is
// unspecified (map iteration order).
func (c *Cell) Links() []*Cell {
	linked := make([]*Cell, 0, len(c.links))
	for cell := range c.links {
		linked = append(linked, cell)
	}
	return linked
}
// IsLinked reports whether cell is linked with c.
func (c *Cell) IsLinked(cell *Cell) bool {
	// links only ever stores true (see LinkDi), and a missing key reads as
	// the zero value false, so the map value itself is the answer.
	return c.links[cell]
}
// Neighbors returns all the neighbors of c.
func (c *Cell) Neighbors() []*Cell {
var nb []*Cell
if c.North != nil {
nb = append(nb, c.North)
}
if c.South != nil {
nb = append(nb, c.South)
}
if c.East != nil {
nb = append(nb, c.East)
}
if c.West != nil {
nb = append(nb, c.West)
}
return nb
}
// Distances returns a Distances object that holds distances from `c`,
// computed by a breadth-first traversal over cell links.
func (c *Cell) Distances() *Distances {
	d := NewDistances(c)
	f := []*Cell{c} // current BFS frontier
	for len(f) > 0 {
		nf := []*Cell{} // next frontier
		for _, cell := range f {
			for _, linked := range cell.Links() {
				// Skip cells already assigned a distance; Get presumably
				// returns a negative value for unvisited cells -- confirm
				// against the Distances implementation.
				if n := d.Get(linked); n >= 0 {
					continue
				}
				d.Set(linked, d.Get(cell)+1)
				nf = append(nf, linked)
			}
		}
		f = nf
	}
	return d
}
// Shuffle shuffles cells.
func Shuffle(cells []*Cell) []*Cell {
l := len(cells)
ids := rand.Perm(l)
shfl := make([]*Cell, l)
for i, v := range ids {
shfl[i] = cells[v]
}
return shfl
} | go/maze/cell.go | 0.80213 | 0.450843 | cell.go | starcoder |
package vips
// #cgo pkg-config: vips
// #include "bridge.h"
import "C"
import (
"bytes"
"errors"
"io"
"io/ioutil"
"math"
"os"
)
// InputParams are options when importing an image from file or buffer
type InputParams struct {
Reader io.Reader
Image *ImageRef
}
// TransformParams are parameters for the transformation
type TransformParams struct {
PadStrategy Extend
ResizeStrategy ResizeStrategy
CropAnchor Anchor
ReductionSampler Kernel
EnlargementInterpolator Interpolator
ZoomX int
ZoomY int
Invert bool
Rotate Angle
AutoRotate bool
AutoRotateRemoveAngle bool
BlurSigma float64
Flip FlipDirection
Width Scalar
Height Scalar
MaxWidth int
MaxHeight int
CropOffsetX Scalar
CropOffsetY Scalar
MaxScale float64
Label *LabelParams
}
// Transform handles single image transformations
type Transform struct {
input *InputParams
tx *TransformParams
export *ExportParams
targetWidth int
targetHeight int
cropOffsetX int
cropOffsetY int
source []byte
}
// NewTransform constructs a new transform for execution
func NewTransform() *Transform {
return &Transform{
input: &InputParams{},
tx: &TransformParams{
ResizeStrategy: ResizeStrategyAuto,
CropAnchor: AnchorAuto,
ReductionSampler: KernelLanczos3,
EnlargementInterpolator: InterpolateBicubic,
},
export: &ExportParams{
Format: ImageTypeUnknown,
Quality: 90,
Interpretation: InterpretationSRGB,
},
}
}
// Image sets the image to operate on
func (t *Transform) Image(image *ImageRef) *Transform {
t.input.Image = image
return t
}
// LoadFile loads a file into the transform
func (t *Transform) LoadFile(file string) *Transform {
t.input.Reader = LazyOpen(file)
return t
}
// LoadBuffer loads a buffer into the transform
func (t *Transform) LoadBuffer(buf []byte) *Transform {
t.input.Reader = bytes.NewBuffer(buf)
return t
}
// Load loads a buffer into the transform
func (t *Transform) Load(reader io.Reader) *Transform {
t.input.Reader = reader
return t
}
// Output outputs the transform to a buffer and closes it
func (t *Transform) Output(writer io.Writer) *Transform {
t.export.Writer = writer
return t
}
// OutputBytes outputs the transform to a buffer and closes it
func (t *Transform) OutputBytes() *Transform {
t.export.Writer = nil
return t
}
// OutputFile outputs the transform to a file and closes it
func (t *Transform) OutputFile(file string) *Transform {
t.export.Writer = LazyCreate(file)
return t
}
// Zoom an image by repeating pixels. This is fast nearest-neighbour zoom.
func (t *Transform) Zoom(x, y int) *Transform {
t.tx.ZoomX = x
t.tx.ZoomY = y
return t
}
// Anchor sets the anchor for cropping
func (t *Transform) Anchor(anchor Anchor) *Transform {
t.tx.CropAnchor = anchor
return t
}
// CropOffsetX sets the target offset from the crop position
func (t *Transform) CropOffsetX(x int) *Transform {
t.tx.CropOffsetX.SetInt(x)
return t
}
// CropOffsetY sets the target offset from the crop position
func (t *Transform) CropOffsetY(y int) *Transform {
t.tx.CropOffsetY.SetInt(y)
return t
}
// CropRelativeOffsetX sets the target offset from the crop position
func (t *Transform) CropRelativeOffsetX(x float64) *Transform {
t.tx.CropOffsetX.SetScale(x)
return t
}
// CropRelativeOffsetY sets the target offset from the crop position
func (t *Transform) CropRelativeOffsetY(y float64) *Transform {
t.tx.CropOffsetY.SetScale(y)
return t
}
// Kernel sets the sampling kernel for the transform when down-scaling. Defaults to lancosz3
func (t *Transform) Kernel(kernel Kernel) *Transform {
t.tx.ReductionSampler = kernel
return t
}
// Interpolator sets the resampling interpolator when upscaling, defaults to bicubic
func (t *Transform) Interpolator(interp Interpolator) *Transform {
t.tx.EnlargementInterpolator = interp
return t
}
// ResizeStrategy sets the strategy when resizing an image
func (t *Transform) ResizeStrategy(strategy ResizeStrategy) *Transform {
t.tx.ResizeStrategy = strategy
return t
}
// PadStrategy sets the strategy when the image must be padded to maintain aspect ratoi
func (t *Transform) PadStrategy(strategy Extend) *Transform {
t.tx.PadStrategy = strategy
return t
}
// Invert inverts the image color
func (t *Transform) Invert() *Transform {
t.tx.Invert = true
return t
}
// Flip flips the image horizontally or vertically
func (t *Transform) Flip(flip FlipDirection) *Transform {
t.tx.Flip = flip
return t
}
// GaussBlur applies a gaussian blur to the image
func (t *Transform) GaussBlur(sigma float64) *Transform {
t.tx.BlurSigma = sigma
return t
}
// Rotate rotates image by a multiple of 90 degrees
func (t *Transform) Rotate(angle Angle) *Transform {
t.tx.Rotate = angle
return t
}
// AutoRotate rotates image based on image metadata
func (t *Transform) AutoRotate() *Transform {
t.tx.AutoRotate = true
return t
}
// AutoRotateRemoveAngle requests that the rotation angle metadata be
// stripped from the image during post-processing.
func (t *Transform) AutoRotateRemoveAngle() *Transform {
	t.tx.AutoRotateRemoveAngle = true
	return t
}
// Embed this image appropriately if resized according to a new aspect ratio
func (t *Transform) Embed(extend Extend) *Transform {
t.tx.ResizeStrategy = ResizeStrategyEmbed
t.tx.PadStrategy = extend
return t
}
// Crop sets the resize strategy to crop and records which anchor the
// crop window should be taken from. Width and height must be equal to
// or less than the image size.
func (t *Transform) Crop(anchor Anchor) *Transform {
	t.tx.ResizeStrategy = ResizeStrategyCrop
	// Fix: the anchor argument was previously accepted but silently ignored.
	t.tx.CropAnchor = anchor
	return t
}
// Stretch an image without maintaining aspect ratio
func (t *Transform) Stretch() *Transform {
t.tx.ResizeStrategy = ResizeStrategyStretch
return t
}
// Fill an image maintaining aspect ratio filling and overflowing to MaxWidth x MaxHeight
func (t *Transform) Fill() *Transform {
t.tx.ResizeStrategy = ResizeStrategyFill
return t
}
// ScaleWidth scales the image by its width proportionally
func (t *Transform) ScaleWidth(scale float64) *Transform {
t.tx.Width.SetScale(scale)
return t
}
// ScaleHeight scales the height of the image proportionally
func (t *Transform) ScaleHeight(scale float64) *Transform {
t.tx.Height.SetScale(scale)
return t
}
// Scale the image
func (t *Transform) Scale(scale float64) *Transform {
t.tx.Width.SetScale(scale)
t.tx.Height.SetScale(scale)
return t
}
// MaxScale sets the max scale factor that this image can be enlarged or reduced by
func (t *Transform) MaxScale(max float64) *Transform {
t.tx.MaxScale = max
return t
}
// ResizeWidth resizes the image to the given width, maintaining aspect ratio
func (t *Transform) ResizeWidth(width int) *Transform {
t.tx.Width.SetInt(width)
return t
}
// ResizeHeight resizes the image to the given height, maintaining aspect ratio
func (t *Transform) ResizeHeight(height int) *Transform {
t.tx.Height.SetInt(height)
return t
}
// Resize resizes the image to the given width and height
func (t *Transform) Resize(width, height int) *Transform {
t.tx.Width.SetInt(width)
t.tx.Height.SetInt(height)
return t
}
func (t *Transform) MaxWidth(width int) *Transform {
t.tx.MaxWidth = width
return t
}
func (t *Transform) MaxHeight(height int) *Transform {
t.tx.MaxHeight = height
return t
}
func (t *Transform) MaxSize(width, height int) *Transform {
t.tx.MaxWidth = width
t.tx.MaxHeight = height
return t
}
func (t *Transform) Label(lp *LabelParams) *Transform {
if lp.Text == "" {
t.tx.Label = nil
return t
}
label := *lp
// Defaults
if label.Width.IsZero() {
label.Width.SetScale(1)
}
if label.Height.IsZero() {
label.Height.SetScale(1)
}
if label.Font == "" {
label.Font = DefaultFont
}
if label.Opacity == 0 {
label.Opacity = 1
}
t.tx.Label = &label
return t
}
// Format sets the image format of the input image when exporting. Defaults to JPEG
func (t *Transform) Format(format ImageType) *Transform {
t.export.Format = format
return t
}
// Quality sets the quality value for image formats that support it
func (t *Transform) Quality(quality int) *Transform {
t.export.Quality = quality
return t
}
// Compression sets the compression value for image formats that support it
func (t *Transform) Compression(compression int) *Transform {
t.export.Compression = compression
return t
}
// Lossless uses lossless compression for image formats that support both lossy and lossless e.g. webp
func (t *Transform) Lossless() *Transform {
t.export.Lossless = true
return t
}
// StripMetadata strips metadata from the image
func (t *Transform) StripMetadata() *Transform {
t.export.StripMetadata = true
return t
}
// StripProfile strips ICC profile from the image
func (t *Transform) StripProfile() *Transform {
t.export.StripProfile = true
return t
}
// BackgroundColor sets the background color of the image when a transparent
// image is flattened
func (t *Transform) BackgroundColor(color Color) *Transform {
t.export.BackgroundColor = &color
return t
}
// Interpretation sets interpretation for image
func (t *Transform) Interpretation(interpretation Interpretation) *Transform {
t.export.Interpretation = interpretation
return t
}
// Interlaced uses interlaced for image that support it
func (t *Transform) Interlaced() *Transform {
t.export.Interlaced = true
return t
}
// Apply loads the image, applies the transform, and exports it according
// to the parameters specified
func (t *Transform) Apply() ([]byte, ImageType, error) {
defer ShutdownThread()
defer func() {
t.source = nil
}()
startupIfNeeded()
input, imageType, err := t.importImage()
if err != nil {
return nil, ImageTypeUnknown, err
}
if input == nil {
return nil, ImageTypeUnknown, errors.New("vips: image not found")
}
transformed, err := t.transform(input, imageType)
if err != nil {
return nil, ImageTypeUnknown, err
}
defer C.g_object_unref(C.gpointer(transformed))
return t.exportImage(transformed, imageType)
}
// ApplyMemory loads the image, applies the transform, and returns the transformed ImageRef
func (t *Transform) ApplyMemory() (*ImageRef, error) {
defer ShutdownThread()
defer func() {
t.source = nil
}()
startupIfNeeded()
input, imageType, err := t.importImage()
if err != nil {
return nil, err
}
if input == nil {
return nil, errors.New("vips: image not found")
}
transformed, err := t.transform(input, imageType)
if err != nil {
return nil, err
}
return NewImageRef(transformed, imageType), nil
}
// importImage obtains the source VipsImage for the transform: a copy of
// an explicitly supplied ImageRef takes precedence; otherwise the
// configured reader is drained into t.source and decoded.
// It returns the image, its detected format, and any error.
func (t *Transform) importImage() (*C.VipsImage, ImageType, error) {
	if t.input.Image != nil {
		img, err := vipsCopyImage(t.input.Image.image)
		return img, t.input.Image.Format(), err
	}
	if t.input.Reader == nil {
		// Kept as a panic for backward compatibility: a nil reader is a
		// programming error, not a runtime condition.
		panic("no input source specified")
	}
	var err error
	t.source, err = ioutil.ReadAll(t.input.Reader)
	if err != nil {
		// Fix: the original returned a nil error here, silently swallowing
		// read failures and surfacing a confusing "image not found" later.
		return nil, ImageTypeUnknown, err
	}
	return vipsLoadFromBuffer(t.source)
}
func (t *Transform) exportImage(image *C.VipsImage, imageType ImageType) ([]byte, ImageType, error) {
if t.export.Format == ImageTypeUnknown {
t.export.Format = imageType
}
buf, format, err := vipsExportBuffer(image, t.export)
if err != nil {
return nil, ImageTypeUnknown, err
}
if t.export.Writer != nil {
_, err = t.export.Writer.Write(buf)
if err != nil {
return buf, format, err
}
}
return buf, format, err
}
// Blackboard is an object that tracks transient data during a transformation
type Blackboard struct {
*TransformParams
image *C.VipsImage
imageType ImageType
aspectRatio float64
targetWidth int
targetHeight int
targetScale float64
cropOffsetX int
cropOffsetY int
}
// NewBlackboard creates a new blackboard object meant for transformation data
func NewBlackboard(image *C.VipsImage, imageType ImageType, p *TransformParams) *Blackboard {
bb := &Blackboard{
TransformParams: p,
image: image,
imageType: imageType,
}
imageWidth := int(image.Xsize)
imageHeight := int(image.Ysize)
bb.aspectRatio = ratio(imageWidth, imageHeight)
bb.cropOffsetX = p.CropOffsetX.GetRounded(imageWidth)
bb.cropOffsetY = p.CropOffsetY.GetRounded(imageHeight)
if p.Width.Value == 0 && p.Height.Value == 0 && p.MaxWidth == 0 && p.MaxHeight == 0 {
return bb
}
bb.targetWidth = p.Width.GetRounded(imageWidth)
bb.targetHeight = p.Height.GetRounded(imageHeight)
if p.ResizeStrategy == ResizeStrategyFill {
// fill is basically auto, but uses Max sizes to determine final size
// just remove the MaxWidth or MaxHeight accordingly
if p.MaxWidth > 0 && p.MaxHeight > 0 {
if bb.aspectRatio > ratio(p.MaxWidth, p.MaxHeight) {
p.MaxWidth = 0
} else {
p.MaxHeight = 0
}
}
}
if p.MaxWidth > 0 && (bb.targetWidth > p.MaxWidth || imageWidth > p.MaxWidth) {
bb.targetWidth = p.MaxWidth
}
if p.MaxHeight > 0 && (bb.targetHeight > p.MaxHeight || imageHeight > p.MaxHeight) {
bb.targetHeight = p.MaxHeight
}
if bb.MaxScale > 0 {
if bb.targetWidth > 0 && ratio(bb.targetWidth, imageWidth) > bb.MaxScale {
bb.targetWidth = int(float64(imageWidth) * bb.MaxScale)
}
if bb.targetHeight > 0 && ratio(bb.targetHeight, imageHeight) > bb.MaxScale {
bb.targetHeight = int(float64(imageHeight) * bb.MaxScale)
}
}
switch {
case bb.targetWidth > 0 && bb.targetHeight > 0:
// Nothing to do
case bb.targetWidth > 0:
bb.targetHeight = roundFloat(ratio(bb.targetWidth, imageWidth) * float64(imageHeight))
case bb.targetHeight > 0:
bb.targetWidth = roundFloat(ratio(bb.targetHeight, imageHeight) * float64(imageWidth))
}
if p.Width.Relative && p.Height.Relative {
sx, sy := p.Width.Value, p.Height.Value
if sx == 0 {
sx = sy
} else if sy == 0 {
sy = sx
}
if sx == sy {
bb.targetScale = sx
}
}
if bb.MaxScale != 0 && bb.targetScale > bb.MaxScale {
bb.targetScale = bb.MaxScale
}
return bb
}
// Width returns the width of the in-flight image
func (bb *Blackboard) Width() int {
return int(bb.image.Xsize)
}
// Height returns the height of the in-flight image
func (bb *Blackboard) Height() int {
return int(bb.image.Ysize)
}
func (t *Transform) transform(image *C.VipsImage, imageType ImageType) (*C.VipsImage, error) {
bb := NewBlackboard(image, imageType, t.tx)
if err := resize(bb); err != nil {
return image, err
}
if err := postProcess(bb); err != nil {
return image, err
}
return bb.image, nil
}
// resize scales bb.image according to the blackboard's target scale and
// target dimensions, then crops or pads as dictated by the resize strategy.
func resize(bb *Blackboard) error {
	var err error
	kernel := bb.ReductionSampler

	// Apply a uniform relative scale first, if one was requested.
	if bb.targetScale != 0 {
		bb.image, err = vipsResize(bb.image, bb.targetScale, bb.targetScale, kernel)
		if err != nil {
			return err
		}
	}

	// Without explicit target dimensions there is nothing more to do.
	if bb.targetHeight == 0 && bb.targetWidth == 0 {
		return nil
	}

	shrinkX := ratio(bb.Width(), bb.targetWidth)
	shrinkY := ratio(bb.Height(), bb.targetHeight)

	cropMode := bb.ResizeStrategy == ResizeStrategyCrop
	stretchMode := bb.ResizeStrategy == ResizeStrategyStretch
	embedMode := bb.ResizeStrategy == ResizeStrategyEmbed

	if !stretchMode {
		// Preserve aspect ratio by using one shrink factor for both axes:
		// crop keeps the smaller factor (the image overflows the target and
		// is trimmed), other strategies keep the larger one (the image fits
		// inside the target). Fix: the original branched on the sign of the
		// factors with two byte-identical branches; collapsed into one.
		if cropMode {
			shrinkX = math.Min(shrinkX, shrinkY)
		} else {
			shrinkX = math.Max(shrinkX, shrinkY)
		}
		shrinkY = shrinkX
	}

	if shrinkX != 1 || shrinkY != 1 {
		bb.image, err = vipsResize(bb.image, 1.0/shrinkX, 1.0/shrinkY, kernel)
		if err != nil {
			return err
		}
		// If stretching then we're done.
		if stretchMode {
			return nil
		}
	}

	// Crop if necessary.
	if cropMode {
		if err := maybeCrop(bb); err != nil {
			return err
		}
	}
	// Pad if necessary.
	if embedMode {
		if err := maybeEmbed(bb); err != nil {
			return err
		}
	}
	return nil
}
// maybeCrop extracts the target-sized window from the in-flight image
// when the image is larger than the target on either axis. The window
// position is determined by the explicit crop offsets if set, otherwise
// by the configured anchor; it is then clamped to the image bounds, and
// the blackboard's target dimensions are shrunk to the clamped window.
func maybeCrop(bb *Blackboard) error {
	var err error
	imageW, imageH := bb.Width(), bb.Height()
	// Nothing to crop when the image already fits within the target.
	if bb.targetWidth >= imageW && bb.targetHeight >= imageH {
		return nil
	}
	width := minInt(bb.targetWidth, imageW)
	height := minInt(bb.targetHeight, imageH)
	left, top := 0, 0
	// Top-left corner of a centered crop window (rounded up).
	middleX := (imageW - bb.targetWidth + 1) >> 1
	middleY := (imageH - bb.targetHeight + 1) >> 1
	if bb.cropOffsetX != 0 || bb.cropOffsetY != 0 {
		// Explicit offsets are applied relative to the centered position,
		// clamped so the window stays inside the image.
		if bb.cropOffsetX >= 0 {
			left = middleX + minInt(bb.cropOffsetX, middleX)
		} else {
			// NOTE(review): for a negative offset and non-negative middleX,
			// maxInt(bb.cropOffsetX, middleX) always yields middleX, so left
			// becomes 0 regardless of the offset magnitude — confirm whether
			// middleX + maxInt(bb.cropOffsetX, -middleX) was intended.
			left = middleX - maxInt(bb.cropOffsetX, middleX)
		}
		if bb.cropOffsetY >= 0 {
			top = middleY + minInt(bb.cropOffsetY, middleY)
		} else {
			// NOTE(review): same concern as the X branch above.
			top = middleY - maxInt(bb.cropOffsetY, middleY)
		}
	} else {
		// Anchor-based placement; the unset axis defaults to 0 (top/left).
		switch bb.CropAnchor {
		case AnchorTop:
			left = middleX
		case AnchorBottom:
			left = middleX
			top = imageH - bb.targetHeight
		case AnchorRight:
			left = imageW - bb.targetWidth
			top = middleY
		case AnchorLeft:
			top = middleY
		case AnchorTopRight:
			left = imageW - bb.targetWidth
		case AnchorTopLeft:
			// (0, 0) — nothing to adjust.
		case AnchorBottomRight:
			left = imageW - bb.targetWidth
			top = imageH - bb.targetHeight
		case AnchorBottomLeft:
			top = imageH - bb.targetHeight
		default:
			// AnchorAuto/AnchorCenter: center the window.
			left = middleX
			top = middleY
		}
	}
	// Clamp the window to the image bounds and record the actual size.
	left = maxInt(left, 0)
	top = maxInt(top, 0)
	if left+width > imageW {
		width = imageW - left
		bb.targetWidth = width
	}
	if top+height > imageH {
		height = imageH - top
		bb.targetHeight = height
	}
	bb.image, err = vipsExtractArea(bb.image, left, top, width, height)
	return err
}
// maybeEmbed pads the in-flight image (centered) up to the target
// dimensions when it is smaller than the target on either axis, using
// the blackboard's pad strategy.
func maybeEmbed(bb *Blackboard) error {
	imageW, imageH := bb.Width(), bb.Height()
	if bb.targetWidth <= imageW && bb.targetHeight <= imageH {
		// Already at least as large as the target on both axes.
		return nil
	}
	left, top := 0, 0
	width, height := imageW, imageH
	if bb.targetWidth > imageW {
		width = bb.targetWidth
		left = (bb.targetWidth - imageW) / 2
	}
	if bb.targetHeight > imageH {
		height = bb.targetHeight
		top = (bb.targetHeight - imageH) / 2
	}
	var err error
	bb.image, err = vipsEmbed(bb.image, left, top, width, height, bb.PadStrategy)
	return err
}
// postProcess applies the non-resize pixel operations to the in-flight
// image in a fixed order: zoom, flip, invert, blur, fixed rotation,
// metadata-based auto-rotation, angle-metadata removal, and labeling.
// Each step replaces bb.image and the chain aborts on the first error.
func postProcess(bb *Blackboard) error {
	var err error
	if bb.ZoomX > 0 || bb.ZoomY > 0 {
		bb.image, err = vipsZoom(bb.image, bb.ZoomX, bb.ZoomY)
		if err != nil {
			return err
		}
	}
	if bb.Flip != FlipNone {
		var err error
		switch bb.Flip {
		case FlipHorizontal:
			bb.image, err = vipsFlip(bb.image, DirectionHorizontal)
		case FlipVertical:
			bb.image, err = vipsFlip(bb.image, DirectionVertical)
		case FlipBoth:
			// Both directions are applied sequentially.
			bb.image, err = vipsFlip(bb.image, DirectionHorizontal)
			if err == nil {
				bb.image, err = vipsFlip(bb.image, DirectionVertical)
			}
		}
		if err != nil {
			return err
		}
	}
	if bb.Invert {
		bb.image, err = vipsInvert(bb.image)
		if err != nil {
			return err
		}
	}
	if bb.BlurSigma > 0 {
		bb.image, err = vipsGaussianBlur(bb.image, bb.BlurSigma)
		if err != nil {
			return err
		}
	}
	if bb.Rotate > 0 {
		bb.image, err = vipsRotate(bb.image, bb.Rotate)
		if err != nil {
			return err
		}
	}
	if bb.AutoRotate {
		bb.image, err = vipsAutoRotate(bb.image)
		if err != nil {
			return err
		}
	}
	if bb.AutoRotateRemoveAngle {
		// NOTE(review): any result of this call is ignored — confirm that
		// vipsAutoRotateRemoveAngle mutates in place and cannot fail.
		vipsAutoRotateRemoveAngle(bb.image)
	}
	if bb.Label != nil {
		bb.image, err = vipsLabel(bb.image, *bb.Label)
		if err != nil {
			return err
		}
	}
	return nil
}
// minInt returns the smaller of a and b. Fix: the original round-tripped
// through math.Min on float64, which is lossy for ints beyond 2^53;
// a direct comparison is exact and cheaper.
func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// maxInt returns the larger of a and b. Fix: the original round-tripped
// through math.Max on float64, which is lossy for ints beyond 2^53;
// a direct comparison is exact and cheaper.
func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// ratio returns x/y as a float64, defining ratio(n, n) == 1 so that the
// 0/0 case yields 1 rather than NaN.
func ratio(x, y int) float64 {
	if x != y {
		return float64(x) / float64(y)
	}
	return 1
}
// roundFloat rounds f to the nearest integer, with halves rounded away
// from zero — exactly math.Round's semantics, which replaces the
// original hand-rolled floor/ceil pair.
func roundFloat(f float64) int {
	return int(math.Round(f))
}
// LazyFile is a lazy reader or writer
// TODO(d): Move this to AF
type LazyFile struct {
name string
file *os.File
}
func LazyOpen(file string) io.Reader {
return &LazyFile{name: file}
}
func LazyCreate(file string) io.Writer {
return &LazyFile{name: file}
}
func (r *LazyFile) Read(p []byte) (n int, err error) {
if r.file == nil {
f, err := os.Open(r.name)
if err != nil {
return 0, err
}
r.file = f
}
return r.file.Read(p)
}
// Close closes the underlying file, if one was opened, and resets the
// lazy state so a subsequent Read/Write reopens it.
// Fix: the original discarded the error from the underlying Close,
// which can hide write-flush failures; it is now returned.
func (r *LazyFile) Close() error {
	if r.file == nil {
		return nil
	}
	err := r.file.Close()
	r.file = nil
	return err
}
func (r *LazyFile) Write(p []byte) (n int, err error) {
if r.file == nil {
f, err := os.Create(r.name)
if err != nil {
return 0, err
}
r.file = f
}
return r.file.Write(p)
}
type Scalar struct {
Value float64
Relative bool
}
func ValueOf(value float64) Scalar {
return Scalar{value, false}
}
func ScaleOf(value float64) Scalar {
return Scalar{value, true}
}
func (s *Scalar) IsZero() bool {
return s.Value == 0 && !s.Relative
}
func (s *Scalar) SetInt(value int) {
s.Set(float64(value))
}
func (s *Scalar) Set(value float64) {
s.Value = value
s.Relative = false
}
func (s *Scalar) SetScale(f float64) {
s.Value = f
s.Relative = true
}
func (s *Scalar) Get(base int) float64 {
if s.Relative {
return s.Value * float64(base)
}
return s.Value
}
func (s *Scalar) GetRounded(base int) int {
return roundFloat(s.Get(base))
} | pkg/vips/transform.go | 0.747063 | 0.413536 | transform.go | starcoder |
package dates
import (
"math"
"time"
)
// DateFormat represents the parsing format for a date string
// TimeFormat represents the parsing format for a time string
// DateTimeFormat represents the parsing format for a date time string
const (
DateFormat = "2006-01-02"
TimeFormat = "15:04:05"
DateTimeFormat = "2006-01-02 15:04:05"
)
// DateStringToTime parses a "2006-01-02" date string into a Time value.
func DateStringToTime(date string) (time.Time, error) {
	return time.Parse(DateFormat, date)
}
// DateTimeStringToTime parses a "2006-01-02 15:04:05" date-time string
// into a Time value.
func DateTimeStringToTime(date string) (time.Time, error) {
	return time.Parse(DateTimeFormat, date)
}
// GetDateThisMorning returns today's date with the time set to midnight
// (00:00:00.0 UTC, the start of the day).
func GetDateThisMorning() time.Time {
	return SetTimeToBeginDay(time.Now())
}
// SetTimeToNoon returns the given date with the time set to 12:00:00.0 UTC
// (noon). The previous comment incorrectly said 00:00:00.0.
func SetTimeToNoon(date time.Time) time.Time {
	return time.Date(date.Year(), date.Month(), date.Day(), 12, 0, 0, 0, time.UTC)
}
// SetTimeToBeginDay sets the given date to 00:00:00.0
func SetTimeToBeginDay(date time.Time) time.Time {
return time.Date(date.Year(), date.Month(), date.Day(), 0, 0, 0, 0, time.UTC)
}
// SetTimeToEndDay returns the given date with the time set to 23:59:59 UTC.
// NOTE(review): the final argument is 99 *nanoseconds*, not .99 seconds as
// the original comment implied — confirm whether 999999999ns was intended.
func SetTimeToEndDay(date time.Time) time.Time {
	return time.Date(date.Year(), date.Month(), date.Day(), 23, 59, 59, 99, time.UTC)
}
// TimeStringFromTime formats a Time value as a "15:04:05" time string.
func TimeStringFromTime(date time.Time) string {
	return date.Format(TimeFormat)
}
// DateStringFromTime formats a Time value as a "2006-01-02" date string.
// (The previous comment incorrectly said "time string".)
func DateStringFromTime(date time.Time) string {
	return date.Format(DateFormat)
}
// DateTimeStringFromTime formats a Time value as a
// "2006-01-02 15:04:05" date-time string.
func DateTimeStringFromTime(date time.Time) string {
	return date.Format(DateTimeFormat)
}
// DifferenceInDays returns the difference between the given dates,
// rounded to the nearest whole number of days.
func DifferenceInDays(startDate time.Time, endDate time.Time) int {
	seconds := endDate.Sub(startDate).Seconds()
	return roundFloatToInt(seconds / 86400)
}
// DifferenceInHours returns the difference between the given dates,
// rounded to the nearest whole number of hours.
func DifferenceInHours(startDate time.Time, endDate time.Time) int {
	hours := endDate.Sub(startDate).Hours()
	return roundFloatToInt(hours)
}
// roundFloatToInt rounds input to the nearest integer, with halves
// rounded away from zero — exactly math.Round's semantics, which
// replaces the original floor/ceil + math.Modf implementation.
func roundFloatToInt(input float64) int {
	return int(math.Round(input))
}
// IsSameOrBefore determines whether the first date is equal or before the second date
func IsSameOrBefore(date time.Time, comparison time.Time) bool {
return date.Equal(comparison) || date.Before(comparison)
}
// IsSameOrAfter determines whether the first date is equal or after the second date
func IsSameOrAfter(date time.Time, comparison time.Time) bool {
return date.Equal(comparison) || date.After(comparison)
} | dates.go | 0.890889 | 0.555978 | dates.go | starcoder |
package dst
// Geometric distribution (type 0).
// The probability distribution of the number Y = X − 1 of failures before the first success, supported on the set { 0, 1, 2, 3, ... }
// Parameters:
// ρ ∈ (0, 1] probability of success in each trial
// Support:
// k ∈ {0, ... , n}
// GeometricPMF returns the PMF of the Geometric distribution (type 0):
// P(K = k) = ρ·(1-ρ)^k, the probability of exactly k failures before
// the first success.
func GeometricPMF(ρ float64) func(k int64) float64 {
	return func(k int64) float64 { return ρ * pow(1-ρ, float64(k)) }
}
// GeometricLnPMF returns the natural logarithm of the PMF of the
// Geometric distribution: ln(ρ·(1-ρ)^k) = ln(ρ) + k·ln(1-ρ).
// Fix: the original computed log(1-ρ) + k·log(ρ), i.e. the two
// arguments were swapped, which is inconsistent with GeometricPMF.
func GeometricLnPMF(ρ float64) func(k int64) float64 {
	return func(k int64) float64 { return log(ρ) + float64(k)*log(1-ρ) }
}
// GeometricPMFAt returns the value of PMF of Geometric distribution at k.
func GeometricPMFAt(ρ float64, k int64) float64 {
pmf := GeometricPMF(ρ)
return pmf(k)
}
// GeometricCDF returns the value of CDF of the Geometric distribution, at k.
func GeometricCDF(ρ float64) func(k int64) float64 {
return func(k int64) float64 {
if k < 0 {
return NaN
}
return 1 - pow(1-ρ, float64(k+1))
}
}
// GeometricCDFAt returns the value of CDF of the Geometric distribution, at x.
func GeometricCDFAt(ρ float64, k int64) float64 {
cdf := GeometricCDF(ρ)
return cdf(k)
}
/* Not tested, looking strange, commented out, waiting for revision
// GeometricNext returns random number drawn from the Geometric distribution.
//GeometricNext(ρ) => # of GeometricNext(ρ) failures before one success
func GeometricNext(ρ float64) int64 {
if GeometricNext(ρ) == 1 {
return 1 + GeometricNext(ρ)
}
return 0
}
// Geometric returns the random number generator with Geometric distribution.
func Geometric(ρ float64) func() int64 { return func() int64 { return GeometricNext(ρ) } }
*/
// GeometricMean returns the mean of the Geometric distribution, (1-ρ)/ρ.
func GeometricMean(ρ float64) float64 {
	q := 1 - ρ
	return q / ρ
}
/* to be implemented
// GeometricMedian returns the median of the Geometric distribution.
func GeometricMedian(ρ float64) float64 {
return floor(float64(n)*p)
}
*/
// GeometricMode returns the mode of the Geometric distribution.
func GeometricMode(ρ float64) float64 {
return 0
}
// GeometricVar returns the variance of the Geometric distribution,
// (1-ρ)/ρ².
func GeometricVar(ρ float64) float64 {
	q := 1 - ρ
	return q / (ρ * ρ)
}
// GeometricStd returns the standard deviation of the Geometric distribution.
func GeometricStd(ρ float64) float64 {
return sqrt(1-ρ) / ρ
}
// GeometricSkew returns the skewness of the Geometric distribution.
func GeometricSkew(ρ float64) float64 {
return (2 - ρ) / sqrt(1-ρ)
}
// GeometricExKurt returns the excess kurtosis of the Geometric distribution.
func GeometricExKurt(ρ float64) float64 {
return 6 + (ρ*ρ)/(1-ρ)
}
// GeometricMGF returns the moment-generating function of the Geometric distribution.
func GeometricMGF(ρ, t float64) float64 {
return ρ / (1 - (1-ρ)*exp(t))
} | dst/geom0.go | 0.894083 | 0.816553 | geom0.go | starcoder |
package txtproc
import (
"context"
"github.com/opentracing/opentracing-go"
)
// ReplacerData maps a string to be compared against input text to the
// string that should replace it. Extend it with any other information
// you need to carry along, for example a primary key.
// (The previous comment referred to it by an obsolete name, "BadWordsData".)
type ReplacerData struct {
	StringToCompare   string
	StringReplacement string
}
// ReplacerDataSeeder is a collection of string (bad words) that need to be replaced.
// This is like an `map[string]string` where string to compare and string replacement can be fetch using `Get` method.
// By defining the interface (rather than using Go map type), we can implement using the best approach we can imagine.
// For example, by always querying into Redis rather than
// This can be implemented using database, in-memory, or anything.
// By having this interface, we can just load/query what we need in that time, not pre-loading all data from database
// to Go map (but you can still do it).
type ReplacerDataSeeder interface {
// Get data from database (in-memory, or SQL or noSql) by sequence.
// This function will be called `Total()/PerBatch()` times,
// and the `batch` will be contain value range between 1 - `Total()/PerBatch()`
// You can treat `batch` as the offset limit, for example:
// Total = 100, PerBatch = 10, it will iterate between 1 to (100/10 = 10).
// More example, when Total = 100, PerBatch = 100, it will iterate once only.
// It your responsibility to tune how many data fetched per batch, as it will only matter with query speed.
// You can query SQL something like this:
// offset = (batch - 1) * PerBatch()
// SELECT * FROM bad_words LIMIT :PerBatch() OFFSET :offset ORDER BY id DESC;
// Function will return `strToCompare` as the compared string, and `replacement` as the replacement string.
// It is up to you whether you want to return partial replacement like 'f*ck' or full replacement like '****'
Get(ctx context.Context, batch int64) (dataReplacer []ReplacerData, err error)
// Total returns the number of total data that need to be checked in collection of bad words.
Total(ctx context.Context) int64
// PerBatch will return how many data retrieved per `Get`.
PerBatch(ctx context.Context) int64
}
// replacerDataDefault will be used when no `ReplacerDataSeeder` passed in `WordReplacerConfig`
type replacerDataDefault struct{}
// Get will not return any string
func (r replacerDataDefault) Get(ctx context.Context, _ int64) (dataReplacer []ReplacerData, err error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "replacerDataDefault.Get")
defer func() {
ctx.Done()
span.Finish()
}()
dataReplacer = []ReplacerData{}
return
}
// Total returns 0 for the default seeder, so no batches are ever
// fetched (Total()/PerBatch() = 0/1 = 0 iterations).
// (The previous comment incorrectly claimed it returns 1.)
func (r replacerDataDefault) Total(ctx context.Context) int64 {
	span, ctx := opentracing.StartSpanFromContext(ctx, "replacerDataDefault.Total")
	defer func() {
		ctx.Done()
		span.Finish()
	}()
	return 0
}
// PerBatch return 1 since Total only return 1. So the iteration will be 0 (total/per batch = 0/1 = 0).
func (r replacerDataDefault) PerBatch(ctx context.Context) int64 {
span, ctx := opentracing.StartSpanFromContext(ctx, "replacerDataDefault.PerBatch")
defer func() {
ctx.Done()
span.Finish()
}()
return 1
}
// newReplacerDataDefault default value for replace the data
func newReplacerDataDefault() ReplacerDataSeeder {
return &replacerDataDefault{}
} | replacer_data.go | 0.808105 | 0.410815 | replacer_data.go | starcoder |
package ekliptic
import (
"crypto/elliptic"
"math/big"
)
// Curve satisfies crypto/elliptic.Curve using the secp256k1 curve paramters.
type Curve struct {
params *elliptic.CurveParams
}
// Params returns the parameters for the curve. Satisfies elliptic.Curve.
func (c *Curve) Params() *elliptic.CurveParams {
if c.params == nil {
c.params = &elliptic.CurveParams{
P: new(big.Int).Set(Secp256k1_P),
N: new(big.Int).Set(Secp256k1_CurveOrder),
B: new(big.Int).Set(Secp256k1_B),
Gx: new(big.Int).Set(Secp256k1_GeneratorX),
Gy: new(big.Int).Set(Secp256k1_GeneratorY),
BitSize: 256,
Name: "secp256k1",
}
}
return c.params
}
// IsOnCurve reports whether the given (x,y) lies on the curve. Satisfies elliptic.Curve.
// Note: The elliptic.Curve interface requires that infinity is NOT on the
// curve, so the point at infinity — represented here as (0,0) — is
// rejected explicitly before delegating to IsOnCurveAffine.
func (_ *Curve) IsOnCurve(x, y *big.Int) bool {
	if equal(x, zero) && equal(y, zero) {
		return false
	}
	return IsOnCurveAffine(x, y)
}
// Add returns the sum of (x1,y1) and (x2,y2). Satisfies elliptic.Curve.
func (_ *Curve) Add(x1, y1, x2, y2 *big.Int) (x3, y3 *big.Int) {
	x3, y3 = new(big.Int), new(big.Int)
	AddAffine(x1, y1, x2, y2, x3, y3)
	return x3, y3
}
// Double returns 2*(x,y). Satisfies elliptic.Curve.
func (_ *Curve) Double(x1, y1 *big.Int) (x3, y3 *big.Int) {
x3 = new(big.Int)
y3 = new(big.Int)
DoubleAffine(x1, y1, x3, y3)
return
}
// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
// Satisfies elliptic.Curve.
func (_ *Curve) ScalarMult(x1, y1 *big.Int, k []byte) (x2, y2 *big.Int) {
x2 = new(big.Int)
y2 = new(big.Int)
kBig := new(big.Int).SetBytes(k)
if equal(x1, Secp256k1_GeneratorX) && equal(y1, Secp256k1_GeneratorY) {
MultiplyBasePoint(kBig, x2, y2)
} else {
MultiplyAffine(x1, y1, kBig, x2, y2, nil)
}
return
}
// ScalarBaseMult returns k*G, where G is the base point of the group
// and k is an integer in big-endian form. Satisfies elliptic.Curve.
func (_ *Curve) ScalarBaseMult(k []byte) (x2, y2 *big.Int) {
x2 = new(big.Int)
y2 = new(big.Int)
kBig := new(big.Int).SetBytes(k)
MultiplyBasePoint(kBig, x2, y2)
return
} | curve.go | 0.81309 | 0.541894 | curve.go | starcoder |
package main
import (
"math"
"github.com/seqsense/pcgol/mat"
"github.com/seqsense/pcgol/pc"
)
const (
selectBitmaskCropped = 0x00000001
selectBitmaskSelected = 0x00000002
selectBitmaskNearCursor = 0x00000004
selectBitmaskOnScreen = 0x00000008
selectBitmaskExclude = 0x80000000
selectBitmaskSegmentSelected = 0x00000010
)
const (
pointSelectRange = 0.1
rectSelectRange = 0.2
)
// selectPointOrtho unprojects the screen pixel (x,y) back into world space
// under an orthographic projection. If depth is non-nil, the returned point
// is placed at the same projected depth as that reference point; otherwise
// depth 0 in NDC is used.
func selectPointOrtho(modelViewMatrix, projectionMatrix *mat.Mat4, x, y, width, height int, depth *mat.Vec3) *mat.Vec3 {
	a := projectionMatrix.Mul(*modelViewMatrix)
	var d float32
	if depth != nil {
		// Project the reference point to obtain its NDC depth.
		dp := a.Transform(*depth)
		d = dp[2]
	}
	// Pixel coordinates to normalized device coordinates ([-1,1], y flipped).
	pos := mat.NewVec3(
		float32(x)*2/float32(width)-1,
		1-float32(y)*2/float32(height), d)
	target := a.Inv().Transform(pos)
	return &target
}
// screenPosVec converts a pixel coordinate to normalized device coordinates
// (on the near plane, z = -1) and returns it together with the inverse of
// the combined projection*modelView matrix, ready for unprojection.
func screenPosVec(x, y, width, height int, projectionMatrix, modelViewMatrix *mat.Mat4) (*mat.Vec3, *mat.Mat4) {
	ndcX := float32(x)*2/float32(width) - 1
	ndcY := 1 - float32(y)*2/float32(height)
	ndc := mat.NewVec3(ndcX, ndcY, -1)
	unproject := projectionMatrix.Mul(*modelViewMatrix).Inv()
	return &ndc, &unproject
}
// perspectiveOriginDirFromPosVec derives a picking ray for a perspective
// camera: the ray origin is the camera position in world space and the
// direction points through the given NDC position (as produced by
// screenPosVec, together with its inverse matrix a).
func perspectiveOriginDirFromPosVec(pos *mat.Vec3, a *mat.Mat4, modelViewMatrix *mat.Mat4) (*mat.Vec3, *mat.Vec3) {
	target := a.Transform(*pos)
	// Camera position: the inverse view transform applied to the view-space origin.
	origin := modelViewMatrix.InvAffine().TransformAffine(mat.NewVec3(0, 0, 0))
	dir := target.Sub(origin).Normalized()
	return &origin, &dir
}
// perspectiveOriginDir is a convenience wrapper combining screenPosVec and
// perspectiveOriginDirFromPosVec: pixel coordinates in, picking ray out.
func perspectiveOriginDir(x, y, width, height int, projectionMatrix, modelViewMatrix *mat.Mat4) (*mat.Vec3, *mat.Vec3) {
	pos, a := screenPosVec(x, y, width, height, projectionMatrix, modelViewMatrix)
	return perspectiveOriginDirFromPosVec(pos, a, modelViewMatrix)
}
// selectPoint finds the point in pp closest to the cursor pixel (x,y) and
// reports whether one was found. Under a perspective projection the metric
// is the squared distance of the point to the picking ray, with a small
// distance-proportional penalty (distSq/10000) that prefers nearer points;
// under an orthographic projection it is the squared point-to-line distance
// to the unprojected cursor line. selMask, when non-nil, restricts the
// candidates to unclipped points flagged near-cursor and on-screen.
func selectPoint(pp *pc.PointCloud, selMask []uint32, projectionType ProjectionType, modelViewMatrix, projectionMatrix *mat.Mat4, x, y, width, height int, rangeMax float32) (*mat.Vec3, bool) {
	pos, a := screenPosVec(x, y, width, height, projectionMatrix, modelViewMatrix)
	it, err := pp.Vec3Iterator()
	if err != nil {
		return nil, false
	}
	var selected *mat.Vec3
	rangeMaxSq := rangeMax * rangeMax
	switch projectionType {
	case ProjectionPerspective:
		origin, dir := perspectiveOriginDirFromPosVec(pos, a, modelViewMatrix)
		vMin := float32(1000 * 1000)
		if selMask != nil {
			// Masked search: only consider points pre-flagged as near the
			// cursor and on screen, and not cropped.
			n := pp.Points
			for i := 0; i < n; i++ {
				if selMask[i]&(selectBitmaskCropped|selectBitmaskNearCursor|selectBitmaskOnScreen) != selectBitmaskNearCursor|selectBitmaskOnScreen {
					continue
				}
				p := it.Vec3At(i)
				pRel := origin.Sub(p)
				// Squared perpendicular distance to the ray plus a depth bias.
				dot, distSq := pRel.Dot(*dir), pRel.NormSq()
				v := (distSq - dot*dot) + distSq/10000
				if v < vMin {
					vMin, selected = v, &p
				}
			}
		} else {
			// Full search for select box drag check
			for ; it.IsValid(); it.Incr() {
				p := it.Vec3()
				pRel := origin.Sub(p)
				dot := pRel.Dot(*dir)
				// dot < 0 keeps only points in front of the camera;
				// distSq > 1.0 skips points closer than 1 unit — presumably
				// to avoid grabbing points right at the camera (TODO confirm).
				if dot < 0 {
					distSq := pRel.NormSq()
					dSq := distSq - dot*dot
					v := dSq + distSq/10000
					if v < vMin && dSq < rangeMaxSq && distSq > 1.0 {
						vMin, selected = v, &p
					}
				}
			}
		}
	case ProjectionOrthographic:
		// Build the cursor line from two unprojected depths (z=0 and z=1).
		o1 := a.TransformAffine(mat.NewVec3(pos[0], pos[1], 0))
		o2 := a.TransformAffine(mat.NewVec3(pos[0], pos[1], 1))
		oDiff := o2.Sub(o1)
		oDiffNormSq := oDiff.NormSq()
		dSqMin := rangeMaxSq
		for i := 0; it.IsValid(); func() {
			it.Incr()
			i++
		}() {
			if selMask != nil {
				if selMask[i]&selectBitmaskCropped != 0 {
					continue
				}
			}
			p := it.Vec3()
			// Squared distance from p to the cursor line via the cross product.
			dSq := oDiff.CrossNormSq(p.Sub(o1)) / oDiffNormSq
			if dSq < dSqMin {
				dSqMin = dSq
				selected = &p
			}
		}
	}
	if selected != nil {
		return selected, true
	}
	return nil, false
}
// rectFrom3 builds a rectangle from three points: p0 and p1 define one edge,
// and p2 determines the rectangle's extent perpendicular to that edge
// (p2 itself is projected onto the edge to find the perpendicular offset).
func rectFrom3(p0, p1, p2 mat.Vec3) [4]mat.Vec3 {
	base := p1.Sub(p0)
	// Project p2 onto the p0->p1 edge, then take the perpendicular component.
	proj := p0.Add(
		base.Mul(base.Dot(p2.Sub(p0)) / base.NormSq()))
	perp := p2.Sub(proj)
	return [4]mat.Vec3{p0, p1, p1.Add(perp), p0.Add(perp)}
}
// boxFrom4 builds a box (8 corners) from four points: p0..p2 define the base
// rectangle via rectFrom3, and p3 determines the box height along the base
// rectangle's normal.
func boxFrom4(p0, p1, p2, p3 mat.Vec3) [8]mat.Vec3 {
	pp := rectFrom3(p0, p1, p2)
	v0n, v1n := pp[1].Sub(p0).Normalized(), pp[3].Sub(p0).Normalized()
	v2n := v0n.Cross(v1n)
	// m maps world space into the rectangle's local frame with p0 at the origin,
	// so the local z of p3 is the signed box height.
	m := (mat.Mat4{
		v0n[0], v0n[1], v0n[2], 0,
		v1n[0], v1n[1], v1n[2], 0,
		v2n[0], v2n[1], v2n[2], 0,
		0, 0, 0, 1,
	}).InvAffine().MulAffine(mat.Translate(-p0[0], -p0[1], -p0[2]))
	z := m.TransformAffineZ(p3)
	v3 := v2n.Mul(z)
	return [8]mat.Vec3{
		pp[0], pp[1], pp[2], pp[3],
		pp[0].Add(v3), pp[1].Add(v3), pp[2].Add(v3), pp[3].Add(v3),
	}
}
// boxFromRect expands an axis-aligned min/max pair into the box's 8 corners.
func boxFromRect(min, max mat.Vec3) [8]mat.Vec3 {
	return [8]mat.Vec3{
		min,
		{min[0], max[1], min[2]},
		{max[0], max[1], min[2]},
		{max[0], min[1], min[2]},
		{min[0], min[1], max[2]},
		{min[0], max[1], max[2]},
		max,
		{max[0], min[1], max[2]},
	}
}
// dragTranslation returns the translation matrix moving point s to point e.
func dragTranslation(s, e mat.Vec3) mat.Mat4 {
	diff := e.Sub(s)
	return mat.Translate(diff[0], diff[1], diff[2])
}
// dragRotation returns the rotation induced by dragging from s to e around
// the centroid of rect, rotating about the camera's view direction. With
// fewer than two rect points an identity transform is returned.
func dragRotation(s, e mat.Vec3, rect []mat.Vec3, modelView *mat.Mat4) mat.Mat4 {
	if len(rect) <= 1 {
		// Identity: Translate(0,0,0).
		return mat.Translate(0, 0, 0)
	}
	// Centroid of the rect points is the rotation pivot.
	var center mat.Vec3
	for _, p := range rect {
		center = center.Add(p)
	}
	center = center.Mul(1 / float32(len(rect)))
	// Transform to view coordinate
	vCenter := modelView.Transform(center)
	vS := modelView.Transform(s)
	vE := modelView.Transform(e)
	// Flatten both drag endpoints onto the pivot's depth plane.
	vS[2], vE[2] = vCenter[2], vCenter[2]
	// Get view direction
	viewInv := modelView.Inv()
	camera := viewInv.Transform(mat.Vec3{})
	cameraFront := viewInv.Transform(mat.Vec3{0, 0, 1})
	dir := cameraFront.Sub(camera)
	// Calculate angle of dragged point around the center of the rect
	vSRel := vS.Sub(vCenter).Normalized()
	vERel := vE.Sub(vCenter).Normalized()
	ang0 := float32(math.Atan2(float64(vSRel[1]), float64(vSRel[0])))
	ang1 := float32(math.Atan2(float64(vERel[1]), float64(vERel[0])))
	// Rotate about the view axis through the centroid: T(c) * R * T(-c).
	return mat.Translate(center[0], center[1], center[2]).
		Mul(mat.Rotate(dir[0], dir[1], dir[2], ang1-ang0)).
		Mul(mat.Translate(-center[0], -center[1], -center[2]))
}
// cursorsToTrans builds an affine matrix whose columns are the three edge
// vectors from curs[0] to curs[1..3], with curs[0] as the translation —
// i.e. the transform mapping the unit axes onto the cursor frame.
func cursorsToTrans(curs []mat.Vec3) mat.Mat4 {
	o := curs[0]
	x := curs[1].Sub(o)
	y := curs[2].Sub(o)
	z := curs[3].Sub(o)
	return mat.Mat4{
		x[0], x[1], x[2], 0,
		y[0], y[1], y[2], 0,
		z[0], z[1], z[2], 0,
		o[0], o[1], o[2], 1,
	}
} | select.go | 0.575469 | 0.424293 | select.go | starcoder
package aoc2015
/*
The elves are running low on wrapping paper, and so they need to submit an order for more. They have a list of the dimensions (length l, width w, and height h) of each present, and only want to order exactly as much as they need.
Fortunately, every present is a box (a perfect right rectangular prism), which makes calculating the required wrapping paper for each gift a little easier: find the surface area of the box, which is 2*l*w + 2*w*h + 2*h*l. The elves also need a little extra paper for each present: the area of the smallest side.
For example:
A present with dimensions 2x3x4 requires 2*6 + 2*12 + 2*8 = 52 square feet of wrapping paper plus 6 square feet of slack, for a total of 58 square feet.
A present with dimensions 1x1x10 requires 2*1 + 2*10 + 2*10 = 42 square feet of wrapping paper plus 1 square foot of slack, for a total of 43 square feet.
All numbers in the elves' list are in feet. How many total square feet of wrapping paper should they order?
*/
import (
"fmt"
"sort"
"strconv"
"strings"
goutils "github.com/simonski/goutils"
)
// Y2015D02 is the entrypoint for Advent of Code 2015 day 2; it runs both parts.
func (app *Application) Y2015D02() {
	app.Y2015D02P1()
	// Bug fix: part 2 was never executed — P1 was previously invoked twice.
	app.Y2015D02P2()
}
// Y2015D02P1 solves part 1: total square feet of wrapping paper needed,
// summing each present's surface area plus smallest-face slack.
func (app *Application) Y2015D02P1() {
	lines := strings.Split(DAY_2015_02_DATA, "\n")
	total := 0
	for _, line := range lines {
		p := NewPresent(line)
		total += p.Area()
	}
	fmt.Printf("Area: %v\n", total)
}
/*
--- Part Two ---
The elves are also running low on ribbon. Ribbon is all the same width, so they only have to worry about the length they need to order, which they would again like to be exact.
The ribbon required to wrap a present is the shortest distance around its sides, or the smallest perimeter of any one face. Each present also requires a bow made out of ribbon as well; the feet of ribbon required for the perfect bow is equal to the cubic feet of volume of the present. Don't ask how they tie the bow, though; they'll never tell.
For example:
A present with dimensions 2x3x4 requires 2+2+3+3 = 10 feet of ribbon to wrap the present plus 2*3*4 = 24 feet of ribbon for the bow, for a total of 34 feet.
A present with dimensions 1x1x10 requires 1+1+1+1 = 4 feet of ribbon to wrap the present plus 1*1*10 = 10 feet of ribbon for the bow, for a total of 14 feet.
How many total feet of ribbon should they order?
*/
// Y2015D02P2 solves part 2: total ribbon length, summing each present's
// smallest face perimeter (wrap) and its volume (bow).
func (app *Application) Y2015D02P2() {
	lines := strings.Split(DAY_2015_02_DATA, "\n")
	volume := 0
	perimeter := 0
	for _, line := range lines {
		p := NewPresent(line)
		perimeter += p.Perimeter()
		volume += p.Volume()
	}
	total := perimeter + volume
	fmt.Printf("Volume %v Perimeter %v, total %v\n", volume, perimeter, total)
}
// Present models a gift box with integer side lengths, in feet.
type Present struct {
	l int // length
	w int // width
	h int // height
}

// Area returns the wrapping paper needed for the present: the full surface
// area of the box (2lw + 2wh + 2hl) plus slack equal to the area of the
// smallest face.
func (p *Present) Area() int {
	l, w, h := p.l, p.w, p.h
	surface := 2*l*w + 2*w*h + 2*h*l
	// Slack is the smallest of the three distinct face areas.
	slack := goutils.Min(l*w, goutils.Min(w*h, h*l))
	return surface + slack
}

// Volume returns the cubic volume of the present (ribbon needed for the bow).
func (p *Present) Volume() int {
	return p.l * p.h * p.w
}

// Perimeter returns the smallest face perimeter of the box: twice the sum of
// the two shortest sides (ribbon needed to wrap the present).
// Bug fix: a stray debug fmt.Printf of the sorted sides was removed.
func (p *Present) Perimeter() int {
	sides := []int{p.l, p.h, p.w}
	sort.Ints(sides)
	return 2 * (sides[0] + sides[1])
}
// NewPresent parses a "LxWxH" dimension string into a Present.
// NOTE(review): strconv.Atoi errors are silently ignored, so malformed input
// yields zero-valued dimensions — confirm this is acceptable for puzzle data.
func NewPresent(line string) *Present {
	splits := strings.Split(strings.TrimSpace(line), "x")
	l, _ := strconv.Atoi(splits[0])
	w, _ := strconv.Atoi(splits[1])
	h, _ := strconv.Atoi(splits[2])
	p := Present{l: l, w: w, h: h}
	return &p
} | app/aoc2015/aoc2015_02.go | 0.712632 | 0.690063 | aoc2015_02.go | starcoder
package bloomfilter
import (
"hash/fnv"
"math"
"github.com/russmack/bitarray-go"
)
// Hash32Fn is a function type for 32 bit hashing functions.
type Hash32Fn func(string) uint32
// BloomFilter is the public struct.
type BloomFilter struct {
	filter *bitarraygo.BitArray // underlying bit array of switches
	size uint32 // logical number of switches; all indices are reduced mod size
	hashFuncs []Hash32Fn // hash functions applied on Add and Exists
	totalFlipped uint32 // count of flips performed, used by GetFalsePositiveProbability
}
// setTrue switches on the bit at index i.
func (b *BloomFilter) setTrue(i uint32) {
	b.filter.Set(uint64(i), true)
}
// get reports whether the bit at index i is switched on.
func (b *BloomFilter) get(i uint32) bool {
	return b.filter.Get(uint64(i))
}
// NewBloomFilter creates a new BloomFilter with the specified number of
// switches, using a fixed pair of hash functions (FNV-1 and FNV-1a) when
// adding elements to the set and when checking for existence.
func NewBloomFilter(filterSize uint32) *BloomFilter {
	b := BloomFilter{}
	// Allocate only as many bits as the filter can address: every index is
	// reduced modulo filterSize in getIndex32, so the previous fixed
	// 4294967295-bit allocation (~512MB) was wasted.
	b.filter = bitarraygo.NewBitArray(uint64(filterSize))
	b.size = filterSize
	// TODO: inject preferred choice of hashers.
	b.hashFuncs = []Hash32Fn{hashFnv1, hashFnv1a}
	return &b
}
// hashFnv1 reduces a string to a 32-bit integer using the standard
// library's FNV-1 hash.
func hashFnv1(s string) uint32 {
	hasher := fnv.New32()
	_, _ = hasher.Write([]byte(s)) // fnv's Write never returns an error
	return hasher.Sum32()
}
// hashFnv1a reduces a string to a 32-bit integer using the standard
// library's FNV-1a hash.
func hashFnv1a(s string) uint32 {
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(s)) // fnv's Write never returns an error
	return hasher.Sum32()
}
// getIndex32 hashes a string, then reduces that hash to an index within the bounds of the filter.
func (b *BloomFilter) getIndex32(s string, hashFn Hash32Fn) uint32 {
	h := hashFn(s)
	// Modulo keeps the index strictly below b.size.
	return h % b.size
}
// Add inserts a string into the Bloom filter by switching on the bit at the
// index produced by each configured hash function.
func (b *BloomFilter) Add(s string) {
	for _, hashFn := range b.hashFuncs {
		idx := b.getIndex32(s, hashFn)
		// Only count bits that actually flip from off to on. Previously
		// totalFlipped was incremented unconditionally, so repeated or
		// colliding adds inflated the false-positive estimate reported by
		// GetFalsePositiveProbability.
		if !b.get(idx) {
			b.setTrue(idx)
			b.totalFlipped++
		}
	}
}
// Exists reports whether the given string may be in the Bloom filter.
// A false result is definitive; a true result may be a false positive.
func (b *BloomFilter) Exists(s string) bool {
	// Every hash function's bit must be set; bail out on the first miss
	// instead of collecting all results and AND-ing them afterwards.
	for _, hashFn := range b.hashFuncs {
		if !b.get(b.getIndex32(s, hashFn)) {
			return false
		}
	}
	return true
}
// False positive probability:
// The number of true cells in the filter,
// divided by the length of the filter,
// to the power of the number of hash functions,
// rounded to the nearest whole percent.
// NOTE(review): this is a simplified estimate; the classic Bloom filter
// formula is (1 - e^(-kn/m))^k — confirm the simplification is intentional.
func (b *BloomFilter) GetFalsePositiveProbability() float64 {
	x := float64(b.totalFlipped) / float64(b.size)
	y := float64(len(b.hashFuncs))
	z := math.Pow(x, y) * 100
	// Round half up to the nearest integer percentage.
	p := math.Floor(z + .5)
	return p
} | bloomfilter.go | 0.563858 | 0.45847 | bloomfilter.go | starcoder
package bitesized
import (
"math"
"time"
"github.com/jinzhu/now"
)
// Interval defines which time intervals to track events. Ex: `Month` interval
// turns on bit for that user in the specified month's bit array. Multiple
// intervals can be selected.
type Interval int
const (
	All Interval = iota
	TenMinutes
	ThirtyMinutes
	Hour
	Day
	Biweekly
	Week
	Bimonthly
	Month
	Quarter
	Year
)
// handleMinuteInterval returns the Redis-style key for the sub-hour bucket
// containing t: the hour is divided into cycleLength-minute cycles and the
// key is keyName plus the formatted start time of t's cycle.
func handleMinuteInterval(t time.Time, n *now.Now, cycleLength int, keyName string) string {
	layout := keyName + ":2006-01-02-15:04"
	offset := t.Sub(n.BeginningOfHour())
	// Which cycle within the hour t falls into (0-based).
	cycle := int(math.Floor(offset.Minutes() / float64(cycleLength)))
	return n.BeginningOfHour().Add(time.Duration(cycle*cycleLength) * time.Minute).Format(layout)
}
// nearestInterval returns the bucket key for time t under the given
// interval: a prefix naming the interval plus the formatted start of the
// bucket containing t (all in UTC). Falls through to an hour bucket for
// any unhandled interval value.
func nearestInterval(t time.Time, interval Interval) string {
	n := now.New(t.UTC())
	switch interval {
	case All:
		return "all"
	case TenMinutes:
		return handleMinuteInterval(t, n, 10, "ten_minutes")
	case ThirtyMinutes:
		return handleMinuteInterval(t, n, 30, "thirty_minutes")
	case Day:
		layout := "day:2006-01-02"
		return n.BeginningOfDay().Format(layout)
	case Biweekly:
		// Half-week buckets: the week is split at 84 hours (3.5 days).
		layout := "biweekly:2006-01-02"
		date := n.BeginningOfWeek()
		if offset := t.Sub(n.BeginningOfWeek()); offset.Hours() > 84 {
			date = date.Add(84 * time.Hour)
		}
		return date.Format(layout)
	case Week:
		layout := "week:2006-01-02"
		return n.BeginningOfWeek().Format(layout)
	case Bimonthly:
		// Half-month buckets: the month is split at its midpoint.
		layout := "bimonthly:2006-01-02"
		monthMiddle := n.EndOfMonth().Sub(n.BeginningOfMonth()) / 2
		date := n.BeginningOfMonth()
		if offset := t.Sub(n.BeginningOfMonth()); offset > monthMiddle {
			date = date.Add(monthMiddle)
		}
		return date.Format(layout)
	case Month:
		layout := "month:2006-01"
		return n.BeginningOfMonth().Format(layout)
	case Quarter:
		layout := "quarter:2006-01"
		return n.BeginningOfQuarter().Format(layout)
	case Year:
		layout := "year:2006"
		return n.BeginningOfYear().Format(layout)
	}
	// Default (Hour and any unknown interval): hour bucket.
	layout := "hour:2006-01-02-15:04"
	return n.BeginningOfHour().Format(layout)
}
// getDuration returns the length of the bucket for interval i at time t.
// NOTE(review): TenMinutes/ThirtyMinutes/Biweekly/Bimonthly/Quarter all fall
// through to the one-hour default, and Year is a fixed 365 days regardless
// of leap years — confirm whether these are intentional.
func getDuration(t time.Time, i Interval) time.Duration {
	switch i {
	case Day:
		return 24 * time.Hour
	case Week:
		return 7 * 24 * time.Hour
	case Month:
		// Month length depends on the month/year of t.
		noOfDays := daysIn(t.Month(), t.Year())
		return time.Duration(noOfDays) * 24 * time.Hour
	case Year:
		return 365 * 24 * time.Hour
	}
	return time.Hour
}
// daysIn returns the number of days in month m of the given year, using the
// "day zero of the next month" normalization trick of time.Date.
func daysIn(m time.Month, year int) int {
	return time.Date(year, m+1, 0, 0, 0, 0, 0, time.UTC).Day()
} | interval.go | 0.798423 | 0.444685 | interval.go | starcoder
package tuple2
import "fmt"
// T2 is a tuple of two elements.
type T2[A, B any] struct {
	T A
	V B
}
// T3 is a tuple of three elements, represented as a T2 plus one element.
type T3[A, B, C any] struct {
	T T2[A, B]
	V C
}
// T4 is a tuple of four elements, represented as a T3 plus one element.
type T4[A, B, C, D any] struct {
	T T3[A, B, C]
	V D
}
// New2 returns a new T2 holding a and b.
func New2[A, B any](a A, b B) T2[A, B] {
	return T2[A, B]{T: a, V: b}
}
// New3 returns a new T3 holding a, b and c.
func New3[A, B, C any](a A, b B, c C) T3[A, B, C] {
	return T3[A, B, C]{T: New2(a, b), V: c}
}
// New4 returns a new T4 holding a, b, c and d.
func New4[A, B, C, D any](a A, b B, c C, d D) T4[A, B, C, D] {
	return T4[A, B, C, D]{T: New3(a, b, c), V: d}
}
// Join2 returns a T2 consisting of the elements t and v.
// Join2 is identical to New2.
func Join2[A, B any](t A, v B) T2[A, B] {
	return T2[A, B]{T: t, V: v}
}
// Join3 returns a T3 consisting of the T2 t and the value v.
func Join3[A, B, C any](t T2[A, B], v C) T3[A, B, C] {
	return T3[A, B, C]{T: t, V: v}
}
// Join4 returns a T4 consisting of the T3 t and the value v.
func Join4[A, B, C, D any](t T3[A, B, C], v D) T4[A, B, C, D] {
	return T4[A, B, C, D]{T: t, V: v}
}
// V0 returns the first element of its receiver tuple.
func (t T2[A, B]) V0() A { return t.T }
// V1 returns the second element of its receiver tuple.
func (t T2[A, B]) V1() B { return t.V }
// V0, V1 and V2 return the first, second and third elements of a T3.
func (t T3[A, B, C]) V0() A { return t.T.T }
func (t T3[A, B, C]) V1() B { return t.T.V }
func (t T3[A, B, C]) V2() C { return t.V }
// V0 through V3 return the four elements of a T4.
func (t T4[A, B, C, D]) V0() A { return t.T.T.T }
func (t T4[A, B, C, D]) V1() B { return t.T.T.V }
func (t T4[A, B, C, D]) V2() C { return t.T.V }
func (t T4[A, B, C, D]) V3() D { return t.V }
// Spread returns the elements of its receiver as separate return values.
func (t T2[A, B]) Spread() (A, B) { return t.V0(), t.V1() }
// Spread returns the elements of its receiver as separate return values.
func (t T3[A, B, C]) Spread() (A, B, C) { return t.V0(), t.V1(), t.V2() }
// Spread returns the elements of its receiver as separate return values.
func (t T4[A, B, C, D]) Spread() (A, B, C, D) { return t.V0(), t.V1(), t.V2(), t.V3() }
// String renders the tuple as "<a, b[, ...]>" using default formatting.
func (t T2[A, B]) String() string { return fmt.Sprintf("<%v, %v>", t.V0(), t.V1()) }
func (t T3[A, B, C]) String() string { return fmt.Sprintf("<%v, %v, %v>", t.V0(), t.V1(), t.V2()) }
func (t T4[A, B, C, D]) String() string {
	return fmt.Sprintf("<%v, %v, %v, %v>",
		t.V0(), t.V1(), t.V2(), t.V3())
}
// Nth returns the nth element ,0-based, of its receiver.
// It panics for an out-of-range index.
func (t T2[A, B]) Nth(i int) interface{} {
	switch i {
	case 0:
		return t.T
	case 1:
		return t.V
	default:
		panic("bad index")
	}
}
// Nth returns the nth element ,0-based, of its receiver.
// Indices below the last element are delegated to the inner tuple.
func (t T3[A, B, C]) Nth(i int) interface{} {
	if i == 2 {
		return t.V
	}
	return t.T.Nth(i)
}
// Nth returns the nth element ,0-based, of its receiver.
// Indices below the last element are delegated to the inner tuple.
func (t T4[A, B, C, D]) Nth(i int) interface{} {
	if i == 3 {
		return t.V
	}
	return t.T.Nth(i)
} | tuple2/tuple.go | 0.755727 | 0.746231 | tuple.go | starcoder
package sort
import "github.com/nickelchen/gorithms/tree"
// QuickSort sorts numbers in place with a recursive quicksort that uses the
// first element as pivot and a three-way style partition.
// worst: O(N^2); best: O(NlogN)
func QuickSort(numbers []int) {
	if len(numbers) < 2 {
		return
	}
	pivot := numbers[0]
	lo, hi := 0, len(numbers)-1
	cur := 1
	for cur <= hi {
		if numbers[cur] > pivot {
			// Larger element: park it at the tail and re-examine the
			// value swapped into position cur.
			numbers[cur], numbers[hi] = numbers[hi], numbers[cur]
			hi--
		} else {
			// Element <= pivot: shift it (and the pivot region) leftwards.
			numbers[cur], numbers[lo] = numbers[lo], numbers[cur]
			lo++
			cur++
		}
	}
	QuickSort(numbers[:lo])
	QuickSort(numbers[lo+1:])
}
// InsertSort sorts numbers in place using insertion sort: each element is
// sunk leftwards until it is no smaller than its left neighbor.
// worst: O(N^2); best: O(N)
func InsertSort(numbers []int) {
	for i := 1; i < len(numbers); i++ {
		for j := i; j > 0 && numbers[j-1] >= numbers[j]; j-- {
			numbers[j-1], numbers[j] = numbers[j], numbers[j-1]
		}
	}
}
// ShellSort sorts numbers in place using Shell sort with Knuth's gap
// sequence 1, 4, 13, 40, 121, ...
// O(N^(3/2)) by Knuth,1973, steps are: 1, 4, 13, 40, 121, ...
func ShellSort(numbers []int) {
	tail := len(numbers) - 1
	// Find the largest Knuth gap below len/3.
	step := 1
	for step < len(numbers)/3 {
		step = 3*step + 1
	}
	for step > 0 {
		// do insert sort for each step length
		for i := step - 1; i < tail; i++ {
			for j := i + 1; j >= step; j -= step {
				if numbers[j-step] < numbers[j] {
					break
				} else {
					numbers[j-step], numbers[j] = numbers[j], numbers[j-step]
				}
			}
		}
		// decrease step. then do insert sort again.
		step = step / 3
	}
}
// SelectSort sorts numbers in place using selection sort: on each pass the
// smallest remaining element is swapped into the next output position.
// worst: O(N^2); best: O(N^2)
func SelectSort(numbers []int) {
	for i := range numbers {
		// Locate the minimum of the unsorted suffix; ties pick the
		// right-most occurrence (as the original <= comparison did).
		minIdx := i
		for j := i + 1; j < len(numbers); j++ {
			if numbers[j] <= numbers[minIdx] {
				minIdx = j
			}
		}
		numbers[i], numbers[minIdx] = numbers[minIdx], numbers[i]
	}
}
// BubbleSort sorts numbers in place using bubble sort, stopping early when
// a full pass performs no swaps.
// worst: O(N^2); best: O(N)
func BubbleSort(numbers []int) {
	for end := len(numbers) - 1; end > 0; end-- {
		clean := true
		for j := 0; j < end; j++ {
			// Note: >= matches the original — equal neighbors are swapped,
			// which also marks the pass as dirty.
			if numbers[j] >= numbers[j+1] {
				numbers[j], numbers[j+1] = numbers[j+1], numbers[j]
				clean = false
			}
		}
		if clean {
			return
		}
	}
}
// MergeSort returns a sorted slice containing the elements of numbers,
// using top-down merge sort. The input is only read, never written.
// worst: O(NlogN); best: O(N)
func MergeSort(numbers []int) []int {
	if len(numbers) < 2 {
		return numbers
	}
	half := len(numbers) / 2
	left := MergeSort(numbers[:half])
	right := MergeSort(numbers[half:])
	return merge(left, right)
}
// merge combines two individually sorted slices into one sorted slice.
// Ties take the element from s2 first, matching the original comparison.
func merge(s1, s2 []int) []int {
	res := make([]int, 0, len(s1)+len(s2))
	i, j := 0, 0
	for i < len(s1) && j < len(s2) {
		if s1[i] < s2[j] {
			res = append(res, s1[i])
			i++
		} else {
			res = append(res, s2[j])
			j++
		}
	}
	// At most one of these appends anything: the unexhausted remainder.
	res = append(res, s1[i:]...)
	res = append(res, s2[j:]...)
	return res
}
// HeapSort returns a sorted slice by inserting every element into a binary
// max-heap and repeatedly removing the maximum, then reversing the
// descending output. The input slice is not modified.
// NOTE(review): the original "TBD" marker suggests this was unfinished —
// confirm tree.BinaryHeap semantics before relying on it.
func HeapSort(numbers []int) []int {
	// TBD
	// construct the heap.
	heap := tree.NewBinaryHeap()
	for _, v := range numbers {
		// Values are stored as runes in the generic heap node.
		node := tree.Node{Value: rune(v)}
		heap.Insert(&node)
	}
	var res []int
	// Draining max-first yields a descending sequence.
	m := heap.RemoveMax()
	for m != nil {
		res = append(res, int(m.Value))
		m = heap.RemoveMax()
	}
	reverse(res)
	return res
}
// reverse flips the order of numbers in place.
func reverse(numbers []int) {
	tail := len(numbers) - 1
	for i := 0; i < len(numbers)/2; i++ {
		numbers[i], numbers[tail-i] = numbers[tail-i], numbers[i]
	}
} | sort/sort.go | 0.563618 | 0.41324 | sort.go | starcoder
package interp
import (
"math"
"encoding/gob"
"github.com/ungerik/go3d/float64/bezier2"
"github.com/ungerik/go3d/float64/vec2"
)
// init registers the concrete interpolator types with encoding/gob so that
// values held behind the Interp/Function interfaces can be (de)serialized.
func init() {
	gob.Register(Lerp{})
	gob.Register(Bezier{})
	gob.Register(Equation{})
	gob.Register(SinFunc{})
}
// Linear is a shared plain linear interpolator.
var Linear *Lerp = &Lerp{}
// EaseOut is a cubic bezier interpolator whose control points are pulled
// toward (1,0), front-loading the change.
var EaseOut *Bezier = &Bezier{
	bezier2.T{
		vec2.T{0.0, 0.0},
		vec2.T{1.0, 0.0},
		vec2.T{1.0, 0.0},
		vec2.T{1.0, 1.0},
	},
}
// EaseIn is a cubic bezier interpolator whose control points are pulled
// toward (0,1), back-loading the change.
var EaseIn *Bezier = &Bezier{
	bezier2.T{
		vec2.T{0.0, 0.0},
		vec2.T{0.0, 1.0},
		vec2.T{0.0, 1.0},
		vec2.T{1.0, 1.0},
	},
}
// Sinusoid interpolates with sin(pi*t): 0 at both endpoints, peaking at t=0.5.
var Sinusoid *Equation = &Equation{
	Func: SinFunc{},
}
// Interp is the common interface for all interpolators: each method blends
// from the first argument to the second by parameter t.
type Interp interface {
	Uint8(uint8, uint8, float64) uint8
	Float64(float64, float64, float64) float64
	Vec2(vec2.T, vec2.T, float64) vec2.T
}
// Lerp performs straight linear interpolation between its endpoints.
type Lerp struct {
}
// Float64 linearly interpolates between a and b at parameter t
// (t=0 yields a, t=1 yields b; t outside [0,1] extrapolates).
func (i *Lerp) Float64(a, b float64, t float64) float64 {
	m := b - a // Slope = Rise over run | Note: Run = (1 - 0)
	y := (m * t) + a
	return y
}
// Uint8 interpolates in float64 and truncates the result back to uint8.
func (i *Lerp) Uint8(a, b uint8, t float64) uint8 {
	return uint8(i.Float64(float64(a), float64(b), t))
}
// Vec2 interpolates each vector component independently.
func (i *Lerp) Vec2(a, b vec2.T, t float64) vec2.T {
	ret := vec2.T{
		i.Float64(a[0], b[0], t),
		i.Float64(a[1], b[1], t),
	}
	return ret
}
// Bezier eases the interpolation parameter through a 2D bezier curve: the
// curve is evaluated at parameter t and its y component is used as the
// eased t for a linear interpolation.
// NOTE(review): Point(t) samples by curve parameter, not by solving x=t —
// confirm this approximation is intended for the CSS-style easing curves.
type Bezier struct {
	bezier2.T
}
// Float64 interpolates a to b with the bezier-eased parameter.
func (i *Bezier) Float64(a, b float64, t float64) float64 {
	iValue := i.T.Point(t)
	return Linear.Float64(a, b, iValue[1])
}
// Uint8 interpolates a to b with the bezier-eased parameter.
func (i *Bezier) Uint8(a, b uint8, t float64) uint8 {
	iValue := i.T.Point(t)
	return Linear.Uint8(a, b, iValue[1])
}
// Vec2 interpolates a to b component-wise with the bezier-eased parameter.
func (i *Bezier) Vec2(a, b vec2.T, t float64) vec2.T {
	iValue := i.T.Point(t)
	return Linear.Vec2(a, b, iValue[1])
}
// Equation eases the interpolation parameter through an arbitrary function
// before linearly interpolating.
type Equation struct {
	Func Function
}
// Float64 interpolates a to b with the function-eased parameter.
func (i *Equation) Float64(a, b float64, t float64) float64 {
	iValue := i.Func.Interp(t)
	return Linear.Float64(a, b, iValue)
}
// Uint8 interpolates a to b with the function-eased parameter.
func (i *Equation) Uint8(a, b uint8, t float64) uint8 {
	iValue := i.Func.Interp(t)
	return Linear.Uint8(a, b, iValue)
}
// Vec2 interpolates a to b component-wise with the function-eased parameter.
func (i *Equation) Vec2(a, b vec2.T, t float64) vec2.T {
	iValue := i.Func.Interp(t)
	return Linear.Vec2(a, b, iValue)
}
// Function maps an interpolation parameter t to an eased parameter.
type Function interface {
	Interp(t float64) float64
}
// SinFunc eases with sin(pi*t): 0 at t=0 and t=1, 1 at t=0.5.
type SinFunc struct {}
func (s SinFunc) Interp(t float64) float64 {
	return math.Sin(t * math.Pi)
} | interp/interp.go | 0.790773 | 0.438424 | interp.go | starcoder
package promql
import (
"fmt"
"math"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metricsql"
"github.com/VictoriaMetrics/metricsql/binaryop"
)
// binaryOpFuncs maps a lowercased binary operator token to its
// implementation. Arithmetic and comparison operators are built from the
// metricsql binaryop primitives; `and`/`or`/`unless` are logical set ops.
var binaryOpFuncs = map[string]binaryOpFunc{
	"+": newBinaryOpArithFunc(binaryop.Plus),
	"-": newBinaryOpArithFunc(binaryop.Minus),
	"*": newBinaryOpArithFunc(binaryop.Mul),
	"/": newBinaryOpArithFunc(binaryop.Div),
	"%": newBinaryOpArithFunc(binaryop.Mod),
	"^": newBinaryOpArithFunc(binaryop.Pow),
	// cmp ops
	"==": newBinaryOpCmpFunc(binaryop.Eq),
	"!=": newBinaryOpCmpFunc(binaryop.Neq),
	">": newBinaryOpCmpFunc(binaryop.Gt),
	"<": newBinaryOpCmpFunc(binaryop.Lt),
	">=": newBinaryOpCmpFunc(binaryop.Gte),
	"<=": newBinaryOpCmpFunc(binaryop.Lte),
	// logical set ops
	"and": binaryOpAnd,
	"or": binaryOpOr,
	"unless": binaryOpUnless,
	// New ops
	"if": newBinaryOpArithFunc(binaryop.If),
	"ifnot": newBinaryOpArithFunc(binaryop.Ifnot),
	"default": newBinaryOpArithFunc(binaryop.Default),
}
// getBinaryOpFunc returns the implementation for op, or nil when the
// operator is unknown. The lookup is case-insensitive.
func getBinaryOpFunc(op string) binaryOpFunc {
	op = strings.ToLower(op)
	return binaryOpFuncs[op]
}
// binaryOpFuncArg bundles a parsed binary expression with the evaluated
// time series for its left and right operands.
type binaryOpFuncArg struct {
	be *metricsql.BinaryOpExpr
	left []*timeseries
	right []*timeseries
}
// binaryOpFunc computes the result time series for one binary operation.
type binaryOpFunc func(bfa *binaryOpFuncArg) ([]*timeseries, error)
// newBinaryOpCmpFunc wraps a comparison predicate into a binaryOpFunc.
// Without the `bool` modifier a true comparison keeps the left value and a
// false one yields NaN; with `bool` the result is 1/0, except that a NaN
// left operand stays NaN.
func newBinaryOpCmpFunc(cf func(left, right float64) bool) binaryOpFunc {
	cfe := func(left, right float64, isBool bool) float64 {
		if !isBool {
			if cf(left, right) {
				return left
			}
			return nan
		}
		if cf(left, right) {
			return 1
		}
		if math.IsNaN(left) {
			return nan
		}
		return 0
	}
	return newBinaryOpFunc(cfe)
}
// newBinaryOpArithFunc wraps an arithmetic function into a binaryOpFunc;
// the `bool` modifier is ignored for arithmetic operators.
func newBinaryOpArithFunc(af func(left, right float64) float64) binaryOpFunc {
	afe := func(left, right float64, isBool bool) float64 {
		return af(left, right)
	}
	return newBinaryOpFunc(afe)
}
// newBinaryOpFunc builds a binaryOpFunc that aligns the left and right
// operand series via adjustBinaryOpTags and then applies bf pointwise,
// writing the results into the destination series.
func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOpFunc {
	return func(bfa *binaryOpFuncArg) ([]*timeseries, error) {
		isBool := bfa.be.Bool
		left, right, dst, err := adjustBinaryOpTags(bfa.be, bfa.left, bfa.right)
		if err != nil {
			return nil, err
		}
		if len(left) != len(right) || len(left) != len(dst) {
			logger.Panicf("BUG: len(left) must match len(right) and len(dst); got %d vs %d vs %d", len(left), len(right), len(dst))
		}
		for i, tsLeft := range left {
			leftValues := tsLeft.Values
			rightValues := right[i].Values
			dstValues := dst[i].Values
			if len(leftValues) != len(rightValues) || len(leftValues) != len(dstValues) {
				logger.Panicf("BUG: len(leftVaues) must match len(rightValues) and len(dstValues); got %d vs %d vs %d",
					len(leftValues), len(rightValues), len(dstValues))
			}
			for j, a := range leftValues {
				b := rightValues[j]
				dstValues[j] = bf(a, b, isBool)
			}
		}
		// Do not remove time series containing only NaNs, since then the `(foo op bar) default N`
		// won't work as expected if `(foo op bar)` results to NaN series.
		return dst, nil
	}
}
// adjustBinaryOpTags pairs up left and right operand series for a binary
// operation, honoring the on/ignoring group modifier and the
// group_left/group_right join modifier. It returns three equally-sized
// slices: the matched left series, the matched right series, and the
// destination series (the side whose labels the result keeps).
func adjustBinaryOpTags(be *metricsql.BinaryOpExpr, left, right []*timeseries) ([]*timeseries, []*timeseries, []*timeseries, error) {
	if len(be.GroupModifier.Op) == 0 && len(be.JoinModifier.Op) == 0 {
		if isScalar(left) {
			// Fast path: `scalar op vector` — pair the single scalar series
			// with every right-hand series.
			rvsLeft := make([]*timeseries, len(right))
			tsLeft := left[0]
			for i, tsRight := range right {
				resetMetricGroupIfRequired(be, tsRight)
				rvsLeft[i] = tsLeft
			}
			return rvsLeft, right, right, nil
		}
		if isScalar(right) {
			// Fast path: `vector op scalar`
			rvsRight := make([]*timeseries, len(left))
			tsRight := right[0]
			for i, tsLeft := range left {
				resetMetricGroupIfRequired(be, tsLeft)
				rvsRight[i] = tsRight
			}
			return left, rvsRight, left, nil
		}
	}
	// Slow path: `vector op vector` or `a op {on|ignoring} {group_left|group_right} b`
	var rvsLeft, rvsRight []*timeseries
	mLeft, mRight := createTimeseriesMapByTagSet(be, left, right)
	joinOp := strings.ToLower(be.JoinModifier.Op)
	groupOp := strings.ToLower(be.GroupModifier.Op)
	if len(groupOp) == 0 {
		// No group modifier behaves like `ignoring ()`: match on all tags.
		groupOp = "ignoring"
	}
	groupTags := be.GroupModifier.Args
	for k, tssLeft := range mLeft {
		tssRight := mRight[k]
		if len(tssRight) == 0 {
			continue
		}
		switch joinOp {
		case "group_left":
			var err error
			rvsLeft, rvsRight, err = groupJoin("right", be, rvsLeft, rvsRight, tssLeft, tssRight)
			if err != nil {
				return nil, nil, nil, err
			}
		case "group_right":
			// Symmetric to group_left with the sides swapped.
			var err error
			rvsRight, rvsLeft, err = groupJoin("left", be, rvsRight, rvsLeft, tssRight, tssLeft)
			if err != nil {
				return nil, nil, nil, err
			}
		case "":
			// One-to-one matching: each tag-set key must map to exactly one
			// series on either side (after merging non-overlapping dups).
			if err := ensureSingleTimeseries("left", be, tssLeft); err != nil {
				return nil, nil, nil, err
			}
			if err := ensureSingleTimeseries("right", be, tssRight); err != nil {
				return nil, nil, nil, err
			}
			tsLeft := tssLeft[0]
			resetMetricGroupIfRequired(be, tsLeft)
			switch groupOp {
			case "on":
				tsLeft.MetricName.RemoveTagsOn(groupTags)
			case "ignoring":
				tsLeft.MetricName.RemoveTagsIgnoring(groupTags)
			default:
				logger.Panicf("BUG: unexpected binary op modifier %q", groupOp)
			}
			rvsLeft = append(rvsLeft, tsLeft)
			rvsRight = append(rvsRight, tssRight[0])
		default:
			logger.Panicf("BUG: unexpected join modifier %q", joinOp)
		}
	}
	// The result keeps the labels of the "many" side of the join.
	dst := rvsLeft
	if joinOp == "group_right" {
		dst = rvsRight
	}
	return rvsLeft, rvsRight, dst, nil
}
// ensureSingleTimeseries collapses tss down to a single series by merging
// non-overlapping duplicates, returning an error naming the offending side
// when two series genuinely overlap.
func ensureSingleTimeseries(side string, be *metricsql.BinaryOpExpr, tss []*timeseries) error {
	if len(tss) == 0 {
		logger.Panicf("BUG: tss must contain at least one value")
	}
	for len(tss) > 1 {
		if !mergeNonOverlappingTimeseries(tss[0], tss[len(tss)-1]) {
			return fmt.Errorf(`duplicate time series on the %s side of %s %s: %s and %s`, side, be.Op, be.GroupModifier.AppendString(nil),
				stringMetricTags(&tss[0].MetricName), stringMetricTags(&tss[len(tss)-1].MetricName))
		}
		tss = tss[:len(tss)-1]
	}
	return nil
}
// groupJoin implements the many-to-one join for group_left/group_right:
// each series on the "many" side (tssLeft) is paired with its match on the
// "one" side (tssRight), copying the join-modifier tags across.
// singleTimeseriesSide names the "one" side for error messages.
func groupJoin(singleTimeseriesSide string, be *metricsql.BinaryOpExpr, rvsLeft, rvsRight, tssLeft, tssRight []*timeseries) ([]*timeseries, []*timeseries, error) {
	joinTags := be.JoinModifier.Args
	var m map[string]*timeseries
	for _, tsLeft := range tssLeft {
		resetMetricGroupIfRequired(be, tsLeft)
		if len(tssRight) == 1 {
			// Easy case - right part contains only a single matching time series.
			tsLeft.MetricName.SetTags(joinTags, &tssRight[0].MetricName)
			rvsLeft = append(rvsLeft, tsLeft)
			rvsRight = append(rvsRight, tssRight[0])
			continue
		}
		// Hard case - right part contains multiple matching time series.
		// Verify it doesn't result in duplicate MetricName values after adding missing tags.
		if m == nil {
			m = make(map[string]*timeseries, len(tssRight))
		} else {
			for k := range m {
				delete(m, k)
			}
		}
		bb := bbPool.Get()
		for _, tsRight := range tssRight {
			var tsCopy timeseries
			tsCopy.CopyFromShallowTimestamps(tsLeft)
			tsCopy.MetricName.SetTags(joinTags, &tsRight.MetricName)
			bb.B = marshalMetricTagsSorted(bb.B[:0], &tsCopy.MetricName)
			if tsExisting := m[string(bb.B)]; tsExisting != nil {
				// Try merging tsExisting with tsRight if they don't overlap.
				if mergeNonOverlappingTimeseries(tsExisting, tsRight) {
					continue
				}
				return nil, nil, fmt.Errorf("duplicate time series on the %s side of `%s %s %s`: %s and %s",
					singleTimeseriesSide, be.Op, be.GroupModifier.AppendString(nil), be.JoinModifier.AppendString(nil),
					stringMetricTags(&tsExisting.MetricName), stringMetricTags(&tsRight.MetricName))
			}
			m[string(bb.B)] = tsRight
			rvsLeft = append(rvsLeft, &tsCopy)
			rvsRight = append(rvsRight, tsRight)
		}
		bbPool.Put(bb)
	}
	return rvsLeft, rvsRight, nil
}
// mergeNonOverlappingTimeseries copies src's non-NaN values into dst when
// the two series (almost) don't overlap, reporting whether the merge was
// performed. dst is mutated on success.
func mergeNonOverlappingTimeseries(dst, src *timeseries) bool {
	// Verify whether the time series can be merged.
	srcValues := src.Values
	dstValues := dst.Values
	overlaps := 0
	// Bounds-check hint: dstValues is at least as long as srcValues.
	_ = dstValues[len(srcValues)-1]
	for i, v := range srcValues {
		if math.IsNaN(v) {
			continue
		}
		if !math.IsNaN(dstValues[i]) {
			overlaps++
		}
	}
	// Allow up to two overlapping datapoints, which can appear due to staleness algorithm,
	// which can add a few datapoints in the end of time series.
	if overlaps > 2 {
		return false
	}
	// Do not merge time series with too small number of datapoints.
	// This can be the case during evaluation of instant queries (alerting or recording rules).
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1141
	if len(srcValues) <= 2 && len(dstValues) <= 2 {
		return false
	}
	// Time series can be merged. Merge them.
	for i, v := range srcValues {
		if math.IsNaN(v) {
			continue
		}
		dstValues[i] = v
	}
	return true
}
// resetMetricGroupIfRequired clears the metric name of ts unless the
// operator keeps it: non-boolean comparisons (Prometheus behavior) and the
// `default`/`if`/`ifnot` operators preserve the metric group.
func resetMetricGroupIfRequired(be *metricsql.BinaryOpExpr, ts *timeseries) {
	if metricsql.IsBinaryOpCmp(be.Op) && !be.Bool {
		// Do not reset MetricGroup for non-boolean `compare` binary ops like Prometheus does.
		return
	}
	switch be.Op {
	case "default", "if", "ifnot":
		// Do not reset MetricGroup for these ops.
		return
	}
	ts.MetricName.ResetMetricGroup()
}
// binaryOpAnd implements the `and` set op: keep left series that have a
// matching right series, masked to the timestamps where the right side has
// non-NaN values.
func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) {
	mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
	var rvs []*timeseries
	for k, tssRight := range mRight {
		tssLeft := mLeft[k]
		if tssLeft == nil {
			continue
		}
		// Add gaps to tssLeft if there are gaps at valuesRight.
		valuesRight := tssRight[0].Values
		for _, tsLeft := range tssLeft {
			valuesLeft := tsLeft.Values
			for i, v := range valuesRight {
				if math.IsNaN(v) {
					valuesLeft[i] = nan
				}
			}
		}
		tssLeft = removeNaNs(tssLeft)
		rvs = append(rvs, tssLeft...)
	}
	return rvs, nil
}
// binaryOpOr implements the `or` set op: all left series, plus right series
// whose tag set has no left match; where both sides match, left-side gaps
// are filled from the right side.
func binaryOpOr(bfa *binaryOpFuncArg) ([]*timeseries, error) {
	mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
	var rvs []*timeseries
	for _, tss := range mLeft {
		rvs = append(rvs, tss...)
	}
	for k, tssRight := range mRight {
		tssLeft := mLeft[k]
		if tssLeft == nil {
			rvs = append(rvs, tssRight...)
			continue
		}
		// Fill gaps in tssLeft with values from tssRight as Prometheus does.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/552
		valuesRight := tssRight[0].Values
		for _, tsLeft := range tssLeft {
			valuesLeft := tsLeft.Values
			for i, v := range valuesLeft {
				if math.IsNaN(v) {
					valuesLeft[i] = valuesRight[i]
				}
			}
		}
	}
	return rvs, nil
}
// binaryOpUnless implements the `unless` set op: keep left series without a
// right match entirely, and mask matched left series to the timestamps
// where the right side is NaN.
func binaryOpUnless(bfa *binaryOpFuncArg) ([]*timeseries, error) {
	mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
	var rvs []*timeseries
	for k, tssLeft := range mLeft {
		tssRight := mRight[k]
		if tssRight == nil {
			rvs = append(rvs, tssLeft...)
			continue
		}
		// Add gaps to tssLeft if the are no gaps at valuesRight.
		valuesRight := tssRight[0].Values
		for _, tsLeft := range tssLeft {
			valuesLeft := tsLeft.Values
			for i, v := range valuesRight {
				if !math.IsNaN(v) {
					valuesLeft[i] = nan
				}
			}
		}
		tssLeft = removeNaNs(tssLeft)
		rvs = append(rvs, tssLeft...)
	}
	return rvs, nil
}
// createTimeseriesMapByTagSet groups both operand sides of a binary expression
// by their label sets, honoring the `on`/`ignoring` group modifier.
// The returned maps are keyed by the marshaled, sorted tag set, so series with
// identical keys on both sides are join candidates.
func createTimeseriesMapByTagSet(be *metricsql.BinaryOpExpr, left, right []*timeseries) (map[string][]*timeseries, map[string][]*timeseries) {
	groupTags := be.GroupModifier.Args
	groupOp := strings.ToLower(be.GroupModifier.Op)
	if len(groupOp) == 0 {
		// Absent modifier behaves like `ignoring ()`, i.e. grouping by the full tag set.
		groupOp = "ignoring"
	}
	getTagsMap := func(arg []*timeseries) map[string][]*timeseries {
		bb := bbPool.Get()
		m := make(map[string][]*timeseries, len(arg))
		mn := storage.GetMetricName()
		for _, ts := range arg {
			// Work on a pooled copy so the series' own MetricName stays intact.
			mn.CopyFrom(&ts.MetricName)
			// The metric name never takes part in the join key.
			mn.ResetMetricGroup()
			switch groupOp {
			case "on":
				mn.RemoveTagsOn(groupTags)
			case "ignoring":
				mn.RemoveTagsIgnoring(groupTags)
			default:
				logger.Panicf("BUG: unexpected binary op modifier %q", groupOp)
			}
			bb.B = marshalMetricTagsSorted(bb.B[:0], mn)
			m[string(bb.B)] = append(m[string(bb.B)], ts)
		}
		storage.PutMetricName(mn)
		bbPool.Put(bb)
		return m
	}
	mLeft := getTagsMap(left)
	mRight := getTagsMap(right)
	return mLeft, mRight
}
func isScalar(arg []*timeseries) bool {
if len(arg) != 1 {
return false
}
mn := &arg[0].MetricName
if len(mn.MetricGroup) > 0 {
return false
}
return len(mn.Tags) == 0
} | app/vmselect/promql/binary_op.go | 0.542621 | 0.414543 | binary_op.go | starcoder |
package terrain
import (
"sync"
"time"
perlin "github.com/aquilax/go-perlin"
"github.com/brandonnelson3/GoRender/gfx"
"github.com/brandonnelson3/GoRender/gfx/shaders"
"github.com/go-gl/gl/v4.5-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
const (
	// cellsize is the edge length of one terrain cell in world units (quads per edge).
	cellsize = int32(128)
	// cellsizep1 is the number of vertices along one cell edge.
	cellsizep1 = cellsize + 1
	// cellsizep1p2 adds a one-vertex border on each side, used for normal computation.
	cellsizep1p2 = cellsizep1 + 2
	// worldSize is how many cells are kept loaded in each direction around the camera.
	worldSize   = 6
	worldSizem1 = worldSize - 1
)

var (
	// halfCell shifts a world position to the center of a cell; cell ids refer
	// to the lower-left corner (see generate).
	halfCell = mgl32.Vec3{float32(cellsize) / 2.0, 0, float32(cellsize) / 2.0}
)
// cellId identifies a terrain cell by its integer grid coordinates.
type cellId struct {
	x, z int32
}

// Equal reports whether both ids refer to the same grid cell.
func (lhs *cellId) Equal(rhs cellId) bool {
	if lhs.x != rhs.x {
		return false
	}
	return lhs.z == rhs.z
}
// cell is one square terrain patch: its grid id, the CPU-side mesh produced by
// GenerateCell, and the GPU handles once the mesh has been uploaded.
type cell struct {
	id cellId
	// OpenGL object handles; vao == 0 doubles as the "not yet uploaded" marker.
	vao, vbo, veb uint32
	numIndices    int32
	// CPU-side mesh data, uploaded lazily by Update.
	verts   []gfx.Vertex
	indices []uint32
}

// Update uploads the mesh to the GPU on the first call (vao == 0); subsequent
// calls are no-ops. GL calls here require a current GL context.
func (c *cell) Update(colorShader *shaders.ColorShader) {
	if c.vao == 0 {
		gl.GenVertexArrays(1, &c.vao)
		gl.BindVertexArray(c.vao)
		gl.GenBuffers(1, &c.vbo)
		gl.BindBuffer(gl.ARRAY_BUFFER, c.vbo)
		// 8 float32s per vertex (vec3 position, vec3 normal, vec2 uv), 4 bytes each.
		gl.BufferData(gl.ARRAY_BUFFER, len(c.verts)*8*4, gl.Ptr(c.verts), gl.STATIC_DRAW)
		gfx.BindVertexAttributes(colorShader.Program())
		gl.GenBuffers(1, &c.veb)
		gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, c.veb)
		gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(c.indices)*4, gl.Ptr(c.indices), gl.STATIC_DRAW)
		gl.BindVertexArray(0)
	}
}

// Render draws the cell with the color shader, translating the model matrix to
// the cell's world position. Does nothing until the mesh has been uploaded.
func (c *cell) Render(colorShader *shaders.ColorShader) {
	if c.vao != 0 {
		gl.BindVertexArray(c.vao)
		colorShader.Model.Set(mgl32.Translate3D(float32(c.id.x*cellsize), 0, float32(c.id.z*cellsize)))
		gl.DrawElements(gl.TRIANGLES, c.numIndices, gl.UNSIGNED_INT, nil)
		gl.BindVertexArray(0)
	}
}

// RenderDepth draws the cell for the depth pass, mirroring Render but setting
// the depth shader's model uniform instead.
func (c *cell) RenderDepth(depthShader *shaders.DepthShader) {
	if c.vao != 0 {
		gl.BindVertexArray(c.vao)
		depthShader.Model.Set(mgl32.Translate3D(float32(c.id.x*cellsize), 0, float32(c.id.z*cellsize)))
		gl.DrawElements(gl.TRIANGLES, c.numIndices, gl.UNSIGNED_INT, nil)
		gl.BindVertexArray(0)
	}
}
// Terrain manages the set of loaded terrain cells around the camera.
type Terrain struct {
	mu      sync.Mutex // guards data; cells are inserted from generator goroutines
	data    map[cellId]*cell
	noise   *perlin.Perlin // height-field noise source
	diffuse uint32         // shared diffuse texture handle
}

// NewTerrain loads the shared diffuse texture (panicking if it cannot be read)
// and spawns one background generator goroutine per (x, z) offset in the
// (2*worldSize+1)^2 neighborhood around the camera.
// NOTE(review): the generator goroutines loop forever - there is no shutdown
// mechanism if the Terrain is discarded.
func NewTerrain() *Terrain {
	diffuseTexture, err := gfx.LoadTexture("assets/sand.png")
	if err != nil {
		panic(err)
	}
	t := &Terrain{
		data:    make(map[cellId]*cell),
		noise:   perlin.NewPerlin(2, 2, 3, int64(0)),
		diffuse: diffuseTexture,
	}
	for x := int32(-worldSize); x <= worldSize; x++ {
		for z := int32(-worldSize); z <= worldSize; z++ {
			go t.generate(x, z)
		}
	}
	return t
}
// calculateNormal returns the (unnormalized) face normal of the triangle
// spanned by the three positions, with pos1 as the shared corner.
func calculateNormal(pos1, pos2, pos3 mgl32.Vec3) mgl32.Vec3 {
	edge1 := pos2.Sub(pos1)
	edge2 := pos3.Sub(pos1)
	return edge1.Cross(edge2)
}
// calculateIndice maps 2D vertex coordinates within a cell to a flat
// vertex-buffer index (cellsizep1 vertices per row).
func calculateIndice(x, z uint32) uint32 {
	stride := uint32(cellsizep1)
	return z*stride + x
}
// isCellInWorld reports whether cell lies inside the rectangle of cells that
// should stay loaded around centroidCell: [centroid-worldSizem1, centroid+worldSize]
// on both axes.
func isCellInWorld(cell, centroidCell cellId) bool {
	inX := cell.x >= centroidCell.x-worldSizem1 && cell.x <= centroidCell.x+worldSize
	inZ := cell.z >= centroidCell.z-worldSizem1 && cell.z <= centroidCell.z+worldSize
	return inX && inZ
}
// GenerateCell builds the CPU-side mesh (positions, vertex normals, UVs and
// triangle indices) for the terrain cell identified by id. Heights are sampled
// from Perlin noise with a one-vertex border around the cell so that every
// vertex normal can be computed from all of its neighbors. GPU upload happens
// later in cell.Update.
//
// Fix: in the 8-triangle branch, n3 previously reused `u` as in
// calculateNormal(ur, u, v), breaking the consecutive-neighbor pattern of
// n1..n8 (ul-u, u-ur, ur-r, r-dr, dr-d, d-dl, dl-l, l-ul); it now uses r.
func (t *Terrain) GenerateCell(id cellId) *cell {
	// Sample heights for the cell plus a one-vertex border on every side.
	var grid [cellsizep1p2][cellsizep1p2]mgl32.Vec3
	for x := int32(0); x < cellsizep1p2; x++ {
		for z := int32(0); z < cellsizep1p2; z++ {
			h := float32((t.noise.Noise2D(float64(id.x*cellsize+x)/100.0, float64(id.z*cellsize+z)/100.0)+1)/2.0) * 50
			grid[x][z] = mgl32.Vec3{float32(x), h, float32(z)}
		}
	}
	// Build one vertex per interior grid point, averaging the normals of the
	// surrounding triangle fan. The diagonal direction alternates per vertex
	// parity, matching the index pattern below.
	// NOTE(review): verts are appended x-outer/z-inner while calculateIndice is
	// z-major (z*cellsizep1+x); on this square grid that transposes vertex
	// lookup - verify it matches GetHeight's orientation.
	var verts []gfx.Vertex
	for x := int32(1); x <= cellsizep1; x++ {
		for z := int32(1); z <= cellsizep1; z++ {
			n := mgl32.Vec3{}
			v := grid[x][z]
			u := grid[x][z+1]
			d := grid[x][z-1]
			l := grid[x-1][z]
			r := grid[x+1][z]
			if x%2 == 0 && z%2 == 0 || x%2 == 1 && z%2 == 1 {
				// Vertex shared by 4 triangles:
				//  / | \
				// / 1 | 2 \
				// ----V----
				// \ 3 | 4 /
				//  \ | /
				n1 := calculateNormal(l, u, v)
				n2 := calculateNormal(u, r, v)
				n3 := calculateNormal(r, d, v)
				n4 := calculateNormal(d, l, v)
				n = n1.Add(n2).Add(n3).Add(n4)
			} else {
				// Vertex shared by 8 triangles:
				// \ 1 | 2 /
				// 8 \ | / 3
				// ----V----
				// 7 / | \ 4
				// / 6 | 5 \
				ul := grid[x-1][z+1]
				ur := grid[x+1][z+1]
				dl := grid[x-1][z-1]
				dr := grid[x+1][z-1]
				n1 := calculateNormal(ul, u, v)
				n2 := calculateNormal(u, ur, v)
				n3 := calculateNormal(ur, r, v)
				n4 := calculateNormal(r, dr, v)
				n5 := calculateNormal(dr, d, v)
				n6 := calculateNormal(d, dl, v)
				n7 := calculateNormal(dl, l, v)
				n8 := calculateNormal(l, ul, v)
				n = n1.Add(n2).Add(n3).Add(n4).Add(n5).Add(n6).Add(n7).Add(n8)
			}
			verts = append(verts, gfx.Vertex{grid[x][z], n.Normalize(), mgl32.Vec2{float32(x) / 5.0, float32(z) / 5.0}})
		}
	}
	// Two triangles per quad; the diagonal alternates with quad parity so it
	// agrees with the 4- vs 8-triangle normal fans above.
	var indices []uint32
	for z := uint32(0); z < uint32(cellsize); z++ {
		for x := uint32(0); x < uint32(cellsize); x++ {
			i1 := calculateIndice(x, z)
			i2 := calculateIndice(x+1, z)
			i3 := calculateIndice(x, z+1)
			i4 := calculateIndice(x+1, z+1)
			if x%2 == 0 && z%2 == 0 || x%2 == 1 && z%2 == 1 {
				// 1-----2
				// |   / |
				// | /   |
				// 3-----4
				indices = append(indices, i3, i1, i2, i2, i4, i3)
			} else {
				// 1-----2
				// | \   |
				// |   \ |
				// 3-----4
				indices = append(indices, i1, i2, i4, i4, i3, i1)
			}
		}
	}
	return &cell{
		id:         id,
		verts:      verts,
		indices:    indices,
		numIndices: int32(len(indices)),
	}
}
// generate runs forever on its own goroutine, keeping the cell at offset
// (x, z) from the camera's current cell present in t.data. It polls the camera
// position, and when its target cell changes and is not yet loaded, builds the
// mesh off the render thread and inserts it under the lock.
func (t *Terrain) generate(x, z int32) {
	// Sentinel far outside any reachable cell so the first iteration always runs.
	lastCell := cellId{-1000000000, -1000000000}
	for {
		// No point in checking more often then every 100ms.
		<-time.After(100 * time.Millisecond)
		// Positions are shifted by half a cell from cell positions since cell positions are in the lower left corner.
		// NOTE(review): gfx.FirstPerson is read concurrently from many goroutines - confirm GetPosition is safe for that.
		pos := gfx.FirstPerson.GetPosition().Sub(halfCell)
		// If this is the same cell as last iteration bail.
		thisCell := cellId{int32(pos.X())/cellsize + x, int32(pos.Z())/cellsize + z}
		if lastCell.Equal(thisCell) {
			continue
		}
		lastCell = thisCell
		// If this cell is already present in the world bail.
		t.mu.Lock()
		_, newOk := t.data[thisCell]
		t.mu.Unlock()
		if newOk {
			continue
		}
		// This is a new cell not currently present in the world. Generate then insert.
		c := t.GenerateCell(thisCell)
		t.mu.Lock()
		t.data[thisCell] = c
		t.mu.Unlock()
	}
}
// GetHeight returns the terrain height at world coordinates (x, z), derived
// from the same Perlin noise formula used by GenerateCell.
func (t *Terrain) GetHeight(x, z float32) float32 {
	noise := t.noise.Noise2D(float64(x)/100.0, float64(z)/100.0)
	return float32((noise+1)/2.0) * 50
}
// Update evicts cells that drifted out of the loaded neighborhood around the
// camera and lazily uploads any freshly generated cells to the GPU.
// Deleting map entries while ranging over the map is safe in Go.
// NOTE(review): evicted cells' GL buffers (vao/vbo/veb) are never deleted -
// looks like a GPU memory leak; verify.
func (t *Terrain) Update(colorShader *shaders.ColorShader) {
	pos := gfx.FirstPerson.GetPosition()
	pos = pos.Sub(halfCell)
	centroidCell := cellId{int32(pos.X()) / cellsize, int32(pos.Z()) / cellsize}
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, c := range t.data {
		if !isCellInWorld(c.id, centroidCell) {
			delete(t.data, c.id)
			continue
		}
		c.Update(colorShader)
	}
}

// Render draws every loaded cell with the color shader, binding the shared
// diffuse texture to texture unit 0 first.
func (t *Terrain) Render(colorShader *shaders.ColorShader) {
	colorShader.Diffuse.Set(gl.TEXTURE0, 0, t.diffuse)
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, c := range t.data {
		c.Render(colorShader)
	}
}

// RenderDepth draws every loaded cell for the depth pass.
func (t *Terrain) RenderDepth(depthShader *shaders.DepthShader) {
	depthShader.Diffuse.Set(gl.TEXTURE0, 0, t.diffuse)
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, c := range t.data {
		c.RenderDepth(depthShader)
	}
}
package neuro
import (
"math"
"math/rand"
)
// Node is a single neuron: one weight per input plus a bias term.
type Node struct {
	Weights []float64
	Bias    float64
}

// Layer is a fully connected set of nodes that all read the same inputs.
type Layer struct {
	Nodes []Node
}

// Network is a feed-forward stack of layers; each layer consumes the previous
// layer's outputs.
type Network struct {
	Layers []Layer
}
// Sigmoid is the logistic activation function, mapping t into (0, 1) with
// Sigmoid(0) == 0.5.
func Sigmoid(t float64) float64 {
	return 1.0 / (1.0 + math.Exp(-t))
}
// Calculate feeds the given inputs through the node and returns its
// sigmoid-activated output. Inputs beyond len(n.Weights) are ignored.
func (n *Node) Calculate(inputs ...float64) float64 {
	sum := n.Bias
	for i := 0; i < len(inputs) && i < len(n.Weights); i++ {
		sum += inputs[i] * n.Weights[i]
	}
	return Sigmoid(sum)
}
// Calculate evaluates every node in the layer against the same inputs and
// returns one output per node, in node order.
func (l *Layer) Calculate(inputs ...float64) []float64 {
	outputs := make([]float64, 0, len(l.Nodes))
	for _, node := range l.Nodes {
		outputs = append(outputs, node.Calculate(inputs...))
	}
	return outputs
}
// BackProp performs one gradient-descent update of the layer's weights and
// biases given the inputs it saw during the forward pass and the error signal
// for each node (train is the learning rate). It returns the error signal
// propagated back to the inputs, one entry per input, for the previous layer.
func (l *Layer) BackProp(train float64, input []float64, errors []float64) []float64 {
	newErrors := make([]float64, len(input))
	for l1 := range l.Nodes {
		// Recompute the node's pre-activation (net) value from the stored inputs.
		nodeNet := l.Nodes[l1].Bias
		for i, val := range input {
			if i >= len(l.Nodes[l1].Weights) {
				break
			}
			nodeNet = nodeNet + val*l.Nodes[l1].Weights[i]
		}
		// delta = sigmoid'(net) * error, using sigmoid'(x) = s(x)*(1-s(x)).
		nodeError := Sigmoid(nodeNet) * (1 - Sigmoid(nodeNet)) * errors[l1]
		for l2 := range l.Nodes[l1].Weights {
			// Propagate through the pre-update weight, then update the weight.
			newErrors[l2] += nodeError * l.Nodes[l1].Weights[l2]
			l.Nodes[l1].Weights[l2] += train * input[l2] * nodeError
		}
		// NOTE(review): the conventional bias update is train*nodeError (bias
		// input = 1); the extra Sigmoid(bias) factor here looks unusual - verify.
		l.Nodes[l1].Bias += train * Sigmoid(l.Nodes[l1].Bias) * nodeError
	}
	return newErrors
}
// Calculate runs a full forward pass, feeding each layer's outputs into the
// next layer, and returns the final layer's outputs.
func (n *Network) Calculate(input ...float64) []float64 {
	current := input
	for i := range n.Layers {
		current = n.Layers[i].Calculate(current...)
	}
	return current
}
// Train runs one training step on a single example: a forward pass that
// records each layer's inputs, then backpropagation from the output layer
// down. train is the learning rate and target the desired network output.
// The return value is currently always 0.
func (n *Network) Train(train float64, input []float64, target []float64) float64 {
	// Forward pass, remembering the input vector that fed each layer.
	ins := [][]float64{}
	curval := input
	for l1 := range n.Layers {
		ins = append(ins, curval)
		curval = n.Layers[l1].Calculate(curval...)
	}
	// Output-layer error: target minus actual output.
	errors := make([]float64, len(target))
	for l1 := range target {
		errors[l1] = target[l1] - curval[l1]
	}
	// Backward pass: iterate the layers in reverse by mirroring the index.
	for l1 := range n.Layers {
		l1 = len(n.Layers) - 1 - l1
		errors = n.Layers[l1].BackProp(train, ins[l1], errors)
	}
	return 0
}
// Generate builds a randomly initialized network. input is the number of
// network inputs; each following argument gives the node count of one layer.
// Weights and biases are drawn uniformly from [-1, 1).
func Generate(input int, layers ...int) *Network {
	n := &Network{Layers: make([]Layer, len(layers))}
	priorCount := input // fan-in of the layer currently being built
	for l1, c := range layers {
		n.Layers[l1].Nodes = make([]Node, c)
		for l2 := range n.Layers[l1].Nodes {
			// One weight per output of the previous layer (or per network input).
			n.Layers[l1].Nodes[l2].Weights = make([]float64, priorCount)
			for l3 := range n.Layers[l1].Nodes[l2].Weights {
				n.Layers[l1].Nodes[l2].Weights[l3] = rand.Float64()*2 - 1
			}
			n.Layers[l1].Nodes[l2].Bias = rand.Float64()*2 - 1
		}
		priorCount = c
	}
	return n
}
package operators
import (
"context"
"github.com/MontFerret/ferret/pkg/runtime/core"
"github.com/MontFerret/ferret/pkg/runtime/values"
)
type (
	// OperatorFunc computes a binary operation over two runtime values.
	OperatorFunc func(left, right core.Value) core.Value

	// baseOperator carries the state shared by all operator expressions:
	// the source location and the left/right operand expressions.
	baseOperator struct {
		src   core.SourceMap
		left  core.Expression
		right core.Expression
	}
)
// Exec is a stub; concrete operators must override it. It always fails with
// core.ErrInvalidOperation.
func (operator *baseOperator) Exec(_ context.Context, _ *core.Scope) (core.Value, error) {
	return values.None, core.ErrInvalidOperation
}

// Eval is a stub; concrete operators must override it. It always fails with
// core.ErrInvalidOperation.
func (operator *baseOperator) Eval(_ context.Context, _, _ core.Value) (core.Value, error) {
	return values.None, core.ErrInvalidOperation
}
// Equality and ordering operators. Each relies on core.Value.Compare, which
// defines a total ordering across runtime values, and returns values.True or
// values.False.

// Equal returns values.True when left and right compare as equal.
func Equal(left, right core.Value) core.Value {
	if left.Compare(right) != 0 {
		return values.False
	}
	return values.True
}

// NotEqual returns values.True when left and right compare as different.
func NotEqual(left, right core.Value) core.Value {
	if left.Compare(right) == 0 {
		return values.False
	}
	return values.True
}

// Less returns values.True when left sorts strictly before right.
func Less(left, right core.Value) core.Value {
	if left.Compare(right) >= 0 {
		return values.False
	}
	return values.True
}

// LessOrEqual returns values.True when left sorts before or equal to right.
func LessOrEqual(left, right core.Value) core.Value {
	if left.Compare(right) > 0 {
		return values.False
	}
	return values.True
}

// Greater returns values.True when left sorts strictly after right.
func Greater(left, right core.Value) core.Value {
	if left.Compare(right) <= 0 {
		return values.False
	}
	return values.True
}

// GreaterOrEqual returns values.True when left sorts after or equal to right.
func GreaterOrEqual(left, right core.Value) core.Value {
	if left.Compare(right) < 0 {
		return values.False
	}
	return values.True
}

// Not returns the boolean negation of left; the right operand is ignored.
func Not(left, _ core.Value) core.Value {
	if values.ToBoolean(left) == values.True {
		return values.False
	}
	return values.True
}
// Add sums two numeric values, mixing Int and Float operands as needed; for
// any other operand pair it concatenates the operands' string representations.
func Add(left, right core.Value) core.Value {
	switch left.Type() {
	case core.IntType:
		l := left.(values.Int)
		switch right.Type() {
		case core.IntType:
			return l + right.(values.Int)
		case core.FloatType:
			return values.Float(l) + right.(values.Float)
		}
	case core.FloatType:
		l := left.(values.Float)
		switch right.Type() {
		case core.FloatType:
			return l + right.(values.Float)
		case core.IntType:
			return l + values.Float(right.(values.Int))
		}
	}
	return values.NewString(left.String() + right.String())
}
// Subtract computes left - right for numeric operands, mixing Int and Float as
// needed. Non-numeric operand pairs yield values.ZeroInt.
func Subtract(left, right core.Value) core.Value {
	switch left.Type() {
	case core.IntType:
		l := left.(values.Int)
		switch right.Type() {
		case core.IntType:
			return l - right.(values.Int)
		case core.FloatType:
			return values.Float(l) - right.(values.Float)
		}
	case core.FloatType:
		l := left.(values.Float)
		switch right.Type() {
		case core.FloatType:
			return l - right.(values.Float)
		case core.IntType:
			return l - values.Float(right.(values.Int))
		}
	}
	return values.ZeroInt
}
// Multiply computes left * right for numeric operands, mixing Int and Float as
// needed. Non-numeric operand pairs yield values.ZeroInt.
func Multiply(left, right core.Value) core.Value {
	switch left.Type() {
	case core.IntType:
		l := left.(values.Int)
		switch right.Type() {
		case core.IntType:
			return l * right.(values.Int)
		case core.FloatType:
			return values.Float(l) * right.(values.Float)
		}
	case core.FloatType:
		l := left.(values.Float)
		switch right.Type() {
		case core.FloatType:
			return l * right.(values.Float)
		case core.IntType:
			return l * values.Float(right.(values.Int))
		}
	}
	return values.ZeroInt
}
// Divide computes left / right for numeric operands, mixing Int and Float as
// needed; Int/Int stays integer division. Non-numeric operand pairs yield
// values.ZeroInt.
func Divide(left, right core.Value) core.Value {
	switch left.Type() {
	case core.IntType:
		l := left.(values.Int)
		switch right.Type() {
		case core.IntType:
			return l / right.(values.Int)
		case core.FloatType:
			return values.Float(l) / right.(values.Float)
		}
	case core.FloatType:
		l := left.(values.Float)
		switch right.Type() {
		case core.FloatType:
			return l / right.(values.Float)
		case core.IntType:
			return l / values.Float(right.(values.Int))
		}
	}
	return values.ZeroInt
}
// Modulus computes the integer remainder of the operands; Float operands are
// converted to Int first. Non-numeric operand pairs yield values.ZeroInt.
func Modulus(left, right core.Value) core.Value {
	switch left.Type() {
	case core.IntType:
		l := left.(values.Int)
		switch right.Type() {
		case core.IntType:
			return l % right.(values.Int)
		case core.FloatType:
			return l % values.Int(right.(values.Float))
		}
	case core.FloatType:
		l := left.(values.Float)
		switch right.Type() {
		case core.FloatType:
			return values.Int(l) % values.Int(right.(values.Float))
		case core.IntType:
			return values.Int(l) % right.(values.Int)
		}
	}
	return values.ZeroInt
}
// Increment returns left + 1 for numeric values and values.None otherwise.
// The second operand is ignored.
func Increment(left, _ core.Value) core.Value {
	switch left.Type() {
	case core.IntType:
		return left.(values.Int) + 1
	case core.FloatType:
		return left.(values.Float) + 1
	}
	return values.None
}

// Decrement returns left - 1 for numeric values and values.None otherwise.
// The second operand is ignored.
func Decrement(left, _ core.Value) core.Value {
	switch left.Type() {
	case core.IntType:
		return left.(values.Int) - 1
	case core.FloatType:
		return left.(values.Float) - 1
	}
	return values.None
}

// Negative returns the arithmetic negation of a numeric value and
// values.ZeroInt for anything else. The second operand is ignored.
func Negative(value, _ core.Value) core.Value {
	if core.ValidateType(value, core.IntType, core.FloatType) != nil {
		return values.ZeroInt
	}
	if value.Type() == core.IntType {
		return -value.(values.Int)
	}
	return -value.(values.Float)
}

// Positive returns a numeric value unchanged (unary plus) and values.ZeroInt
// for anything else. The second operand is ignored.
func Positive(value, _ core.Value) core.Value {
	if core.ValidateType(value, core.IntType, core.FloatType) != nil {
		return values.ZeroInt
	}
	if value.Type() == core.IntType {
		return +value.(values.Int)
	}
	return +value.(values.Float)
}
// ToBoolean coerces the left operand to a boolean value; the right operand is
// ignored.
func ToBoolean(value, _ core.Value) core.Value {
	return values.ToBoolean(value)
}
package etensor
// Prjn2DShape returns the size of a 2D projection of the given tensor Shape,
// collapsing higher dimensions down to 2D (and 1D up to 2D).
// For any odd number of dimensions, the remaining outer-most dimension
// can either be multiplexed across the row or column, given the oddRow arg.
// Even multiples of inner-most dimensions are assumed to be row, then column.
// RowMajor and ColMajor layouts are handled appropriately.
// rowEx returns the number of "extra" (higher dimensional) rows
// and colEx returns the number of extra cols.
func Prjn2DShape(shp *Shape, oddRow bool) (rows, cols, rowEx, colEx int) {
	// Empty tensors project to a single 1x1 cell.
	if shp.Len() == 0 {
		return 1, 1, 0, 0
	}
	nd := shp.NumDims()
	switch nd {
	case 1:
		// 1D: the single dimension becomes the rows or the cols per oddRow.
		if oddRow {
			return shp.Dim(0), 1, 0, 0
		} else {
			return 1, shp.Dim(0), 0, 0
		}
	case 2:
		// 2D: direct mapping; ColMajor swaps which dim plays rows vs cols.
		if shp.IsRowMajor() {
			return shp.Dim(0), shp.Dim(1), 0, 0
		} else {
			return shp.Dim(1), shp.Dim(0), 0, 0
		}
	case 3:
		// 3D: the odd outer dimension is multiplexed across rows or cols.
		if oddRow {
			if shp.IsRowMajor() {
				return shp.Dim(0) * shp.Dim(1), shp.Dim(2), shp.Dim(0), 0
			} else {
				return shp.Dim(2) * shp.Dim(1), shp.Dim(0), shp.Dim(2), 0
			}
		} else {
			if shp.IsRowMajor() {
				return shp.Dim(1), shp.Dim(0) * shp.Dim(2), 0, shp.Dim(0)
			} else {
				return shp.Dim(1), shp.Dim(2) * shp.Dim(0), 0, shp.Dim(2)
			}
		}
	case 4:
		// 4D: outer 2D grid of inner 2D blocks; outer dims are the "extra" counts.
		if shp.IsRowMajor() {
			return shp.Dim(0) * shp.Dim(2), shp.Dim(1) * shp.Dim(3), shp.Dim(0), shp.Dim(1)
		} else {
			return shp.Dim(3) * shp.Dim(1), shp.Dim(2) * shp.Dim(0), shp.Dim(3), shp.Dim(2)
		}
	case 5:
		// 5D: like 4D with one more odd outer dimension multiplexed per oddRow.
		if oddRow {
			if shp.IsRowMajor() {
				return shp.Dim(0) * shp.Dim(1) * shp.Dim(3), shp.Dim(2) * shp.Dim(4), shp.Dim(0) * shp.Dim(1), 0
			} else {
				return shp.Dim(4) * shp.Dim(3) * shp.Dim(1), shp.Dim(2) * shp.Dim(0), shp.Dim(4) * shp.Dim(3), 0
			}
		} else {
			// NOTE(review): colEx here is Dim(0)*Dim(1) while cols is
			// Dim(0)*Dim(2)*Dim(4); Dim(0)*Dim(2) would match the pattern - verify.
			if shp.IsRowMajor() {
				return shp.Dim(1) * shp.Dim(3), shp.Dim(0) * shp.Dim(2) * shp.Dim(4), 0, shp.Dim(0) * shp.Dim(1)
			} else {
				return shp.Dim(3) * shp.Dim(1), shp.Dim(4) * shp.Dim(2) * shp.Dim(0), 0, shp.Dim(4) * shp.Dim(2)
			}
		}
	}
	// More than 5 dimensions is unsupported: collapse to a single cell.
	return 1, 1, 0, 0
}
// Prjn2DIdx returns the flat 1D index for given row, col coords for a 2D projection
// of the given tensor shape, collapsing higher dimensions down to 2D (and 1D up to 2D).
// For any odd number of dimensions, the remaining outer-most dimension
// can either be multiplexed across the row or column, given the oddRow arg.
// Even multiples of inner-most dimensions are assumed to be row, then column.
// RowMajor and ColMajor layouts are handled appropriately.
// This is the inverse mapping of the layout described by Prjn2DShape.
func Prjn2DIdx(shp *Shape, oddRow bool, row, col int) int {
	nd := shp.NumDims()
	switch nd {
	case 1:
		// 1D: the projection coordinate is the index itself.
		if oddRow {
			return row
		} else {
			return col
		}
	case 2:
		if shp.IsRowMajor() {
			return shp.Offset([]int{row, col})
		} else {
			return shp.Offset([]int{col, row})
		}
	case 3:
		// 3D: split the multiplexed coordinate into outer group (yy/xx) and
		// within-group position (y/x).
		if oddRow {
			ny := shp.Dim(1)
			yy := row / ny
			y := row % ny
			if shp.IsRowMajor() {
				return shp.Offset([]int{yy, y, col})
			} else {
				return shp.Offset([]int{col, y, yy})
			}
		} else {
			nx := shp.Dim(2)
			xx := col / nx
			x := col % nx
			if shp.IsRowMajor() {
				return shp.Offset([]int{xx, row, x})
			} else {
				return shp.Offset([]int{x, row, xx})
			}
		}
	case 4:
		// 4D: both row and col decompose into outer block (yy/xx) and inner
		// position (y/x).
		if shp.IsRowMajor() {
			ny := shp.Dim(2)
			yy := row / ny
			y := row % ny
			nx := shp.Dim(3)
			xx := col / nx
			x := col % nx
			return shp.Offset([]int{yy, xx, y, x})
		} else {
			ny := shp.Dim(1)
			yy := row / ny
			y := row % ny
			nx := shp.Dim(0)
			xx := col / nx
			x := col % nx
			return shp.Offset([]int{x, y, xx, yy})
		}
	case 5:
		// 5D: the row decomposes into three levels (yyy/yy/y); the col into two.
		// todo: oddRows version!
		if shp.IsRowMajor() {
			nyy := shp.Dim(1)
			ny := shp.Dim(3)
			yyy := row / (nyy * ny)
			yy := row % (nyy * ny)
			y := yy % ny
			yy = yy / ny
			nx := shp.Dim(4)
			xx := col / nx
			x := col % nx
			return shp.Offset([]int{yyy, yy, xx, y, x})
		} else {
			nyy := shp.Dim(3)
			ny := shp.Dim(1)
			yyy := row / (nyy * ny)
			yy := row % (nyy * ny)
			y := yy % ny
			yy = yy / ny
			nx := shp.Dim(0)
			xx := col / nx
			x := col % nx
			return shp.Offset([]int{x, y, xx, yy, yyy})
		}
	}
	// Unsupported dimensionality (> 5): fall back to the first element.
	return 0
}
// Prjn2DCoords returns the corresponding full-dimensional coordinates
// that go into the given row, col coords for a 2D projection of the given tensor,
// collapsing higher dimensions down to 2D (and 1D up to 2D).
// It first maps (row, col) to a flat index via Prjn2DIdx, then splits the
// resulting full-dimensional index into its row-side and col-side components.
func Prjn2DCoords(shp *Shape, oddRow bool, row, col int) (rowCoords, colCoords []int) {
	idx := Prjn2DIdx(shp, oddRow, row, col)
	dims := shp.Index(idx)
	nd := shp.NumDims()
	switch nd {
	case 1:
		// 1D: the single coordinate belongs to whichever axis was used.
		if oddRow {
			return dims, []int{0}
		} else {
			return []int{0}, dims
		}
	case 2:
		if shp.IsRowMajor() {
			return dims[:1], dims[1:]
		} else {
			return dims[1:], dims[:1]
		}
	case 3:
		// 3D: two dims belong to the multiplexed axis, one to the other.
		if oddRow {
			if shp.IsRowMajor() {
				return dims[:2], dims[2:]
			} else {
				return dims[1:], dims[:1]
			}
		} else {
			if shp.IsRowMajor() {
				return dims[:1], dims[1:]
			} else {
				return dims[2:], dims[:2]
			}
		}
	case 4:
		// 4D: alternate dims go to rows (outer, inner) and cols (outer, inner).
		if shp.IsRowMajor() {
			return []int{dims[0], dims[2]}, []int{dims[1], dims[3]}
		} else {
			return []int{dims[1], dims[3]}, []int{dims[0], dims[2]}
		}
	case 5:
		if oddRow {
			if shp.IsRowMajor() {
				return []int{dims[0], dims[1], dims[3]}, []int{dims[2], dims[4]}
			} else {
				return []int{dims[1], dims[3], dims[4]}, []int{dims[0], dims[2]}
			}
		} else {
			// NOTE(review): both branches below are identical; the ColMajor case
			// likely needs its own coordinate split - verify.
			if shp.IsRowMajor() {
				return []int{dims[1], dims[3]}, []int{dims[0], dims[2], dims[4]}
			} else {
				return []int{dims[1], dims[3]}, []int{dims[0], dims[2], dims[4]}
			}
		}
	}
	// Unsupported dimensionality (> 5).
	return nil, nil
}
// Prjn2DVal returns the float64 value at the given row, col coords for a 2D
// projection of the given tensor, collapsing higher dimensions down to 2D
// (and 1D up to 2D). The oddRow flag controls where a leftover odd dimension
// is multiplexed, as in Prjn2DIdx; RowMajor and ColMajor layouts are handled.
func Prjn2DVal(tsr Tensor, oddRow bool, row, col int) float64 {
	flat := Prjn2DIdx(tsr.ShapeObj(), oddRow, row, col)
	return tsr.FloatVal1D(flat)
}
// Prjn2DSet sets a float64 value at given row, col coords for a 2D projection
// of the given tensor, collapsing higher dimensions down to 2D (and 1D up to 2D).
// For any odd number of dimensions, the remaining outer-most dimension
// can either be multipliexed across the row or column, given the oddRow arg.
// Even multiples of inner-most dimensions are assumed to be row, then column.
// RowMajor and ColMajor layouts are handled appropriately.
func Prjn2DSet(tsr Tensor, oddRow bool, row, col int, val float64) {
idx := Prjn2DIdx(tsr.ShapeObj(), oddRow, row, col)
tsr.SetFloat1D(idx, val)
} | etensor/prjn2d.go | 0.663778 | 0.844216 | prjn2d.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.