code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package redblack
import (
"fmt"
"go.uber.org/zap"
)
// color is the red/black node coloring used to balance the tree.
type color bool

const (
	red   color = false
	black color = true
)

// node is a single tree node. Absent children and the root's parent
// point at the tree's shared sentinel node rather than nil (CLRS-style).
type node struct {
	key   string
	value string
	left  *node // left child (sentinel when absent)
	right *node // right child (sentinel when absent)
	p     *node // parent (sentinel for the root)
	color color
}

// RedBlack implement Tree interface for RedBlackTree
type RedBlack struct {
	root     *node // equals sentinel when the tree is empty
	sentinel *node // shared sentinel standing in for nil leaves
}
// Put inserts the key-value pair into the tree as a red node and then
// restores the red-black invariants via insertFixup. Equal keys descend
// to the right, so inserting a duplicate key adds a second node rather
// than replacing the existing one.
func (r *RedBlack) Put(k, v string) error {
	// New nodes start red with sentinel children (CLRS RB-INSERT).
	// The redundant re-assignment of left/right/color that used to
	// follow the descent has been removed: these fields are already
	// initialized here and are not touched by the search loop.
	z := &node{
		key:   k,
		value: v,
		left:  r.sentinel,
		right: r.sentinel,
		p:     r.sentinel,
		color: red,
	}
	// Standard BST descent: y trails x and ends up as z's parent.
	y := r.sentinel
	x := r.root
	for x != r.sentinel {
		y = x
		if z.key < x.key {
			x = x.left
		} else {
			x = x.right
		}
	}
	z.p = y
	if y == r.sentinel {
		r.root = z // tree was empty
	} else if z.key < y.key {
		y.left = z
	} else {
		y.right = z
	}
	r.insertFixup(z)
	return nil
}
// insertFixup restores the red-black properties after inserting the red
// node z (CLRS RB-INSERT-FIXUP): it walks up the tree while z's parent
// is red, recoloring and rotating as needed, then blackens the root.
func (r *RedBlack) insertFixup(z *node) {
	for z.p.color == red {
		if z.p == z.p.p.left {
			// Parent is a left child; y is z's uncle.
			y := z.p.p.right
			if y.color == red {
				// Case 1: red uncle - recolor and continue two levels up.
				z.p.color = black
				y.color = black
				z.p.p.color = red
				z = z.p.p
			} else {
				if z == z.p.right {
					// Case 2: z is a right child - rotate into case 3.
					z = z.p
					r.leftRotate(z)
				}
				// Case 3: recolor, then rotate the grandparent.
				z.p.color = black
				z.p.p.color = red
				r.rightRotate(z.p.p)
			}
		} else {
			// Mirror image of the above: parent is a right child.
			y := z.p.p.left
			if y.color == red {
				z.p.color = black
				y.color = black
				z.p.p.color = red
				z = z.p.p
			} else {
				if z == z.p.left {
					z = z.p
					r.rightRotate(z)
				}
				z.p.color = black
				z.p.p.color = red
				r.leftRotate(z.p.p)
			}
		}
	}
	// The root is always black.
	r.root.color = black
}
// Get returns the value stored under key k, or an error if k is absent.
func (r *RedBlack) Get(k string) (string, error) {
	// Plain BST search; the sentinel terminates the walk.
	t := r.root
	for t != r.sentinel && k != t.key {
		if k < t.key {
			t = t.left
		} else {
			t = t.right
		}
	}
	if t != r.sentinel {
		return t.value, nil
	}
	// Error strings are lowercase per Go convention (staticcheck ST1005).
	return "", fmt.Errorf("key %s not found", k)
}
// Del key-value from tree if exist
//
// NOTE: deletion is not implemented yet; this is a no-op stub that
// always reports success.
func (r *RedBlack) Del(k string) error {
	return nil
}
// Walk all nodes in tree, logging each key via inorderTreeWalk.
func (r *RedBlack) Walk() error {
	// zap.L().Info("Walking")
	r.inorderTreeWalk(r.root)
	return nil
}
// inorderTreeWalk logs every key of the subtree rooted at x in
// ascending (in-order) order.
//
// Bug fix: the walk previously stopped only at nil, but this tree uses
// a shared sentinel node for absent children, so the recursion descended
// into the sentinel at every leaf and logged its empty key. Stop at the
// sentinel as well (nil is kept as a guard for a zero-value tree).
func (r *RedBlack) inorderTreeWalk(x *node) {
	if x == nil || x == r.sentinel {
		return
	}
	r.inorderTreeWalk(x.left)
	zap.L().Info(x.key)
	r.inorderTreeWalk(x.right)
}
// leftRotate rotates the subtree rooted at x to the left: x's right
// child y becomes the subtree root and x becomes y's left child.
// Assumes x.right is not the sentinel (CLRS LEFT-ROTATE).
func (r *RedBlack) leftRotate(x *node) {
	y := x.right
	// y's left subtree becomes x's right subtree.
	x.right = y.left
	if y.left != r.sentinel {
		y.left.p = x
	}
	// Link x's parent to y (or make y the root).
	y.p = x.p
	if x.p == r.sentinel {
		r.root = y
	} else if x == x.p.left {
		x.p.left = y
	} else {
		x.p.right = y
	}
	y.left = x
	x.p = y
}

// rightRotate is the mirror of leftRotate: y's left child x becomes the
// subtree root and y becomes x's right child. Assumes y.left is not the
// sentinel.
func (r *RedBlack) rightRotate(y *node) {
	x := y.left
	// x's right subtree becomes y's left subtree.
	y.left = x.right
	if x.right != r.sentinel {
		x.right.p = y
	}
	// Link y's parent to x (or make x the root).
	x.p = y.p
	if y.p == r.sentinel {
		r.root = x
	} else if y == y.p.left {
		y.p.left = x
	} else {
		y.p.right = x
	}
	x.right = y
	y.p = x
}
func makeDummy() *node {
return &node{
key: "",
value: "",
left: nil,
right: nil,
p: nil,
color: black,
}
}
// New create a new RedBlack tree
func New() *RedBlack {
sentinel := makeDummy()
return &RedBlack{
root: sentinel,
sentinel: sentinel,
}
} | pkg/tree/redblack/redblack.go | 0.6508 | 0.410402 | redblack.go | starcoder |
package iso20022
// SecuritiesTradeDetails37 carries the details of a securities trade
// (ISO 20022 message component).
type SecuritiesTradeDetails37 struct {
	// Market in which a trade transaction has been executed.
	PlaceOfTrade *MarketIdentification78 `xml:"PlcOfTrad,omitempty"`
	// Infrastructure which may be a component of a clearing house and which facilitates clearing and settlement for its members by standing between the buyer and the seller. It may net transactions and it substitutes itself as settlement counterparty for each position.
	PlaceOfClearing *AnyBICIdentifier `xml:"PlcOfClr,omitempty"`
	// Specifies the date/time on which the trade was executed.
	TradeDate *TradeDate1Choice `xml:"TradDt,omitempty"`
	// Date and time at which the securities are to be delivered or received.
	OpeningSettlementDate *DateAndDateTimeChoice `xml:"OpngSttlmDt"`
	// Specifies the price of the traded financial instrument: the deal
	// price of the individual trade transaction (which may differ from the
	// executed trade price due to commissions, rounding, etc.).
	DealPrice *Price2 `xml:"DealPric,omitempty"`
	// Number of days on which the interest rate accrues (daily accrual note).
	NumberOfDaysAccrued *Max3Number `xml:"NbOfDaysAcrd,omitempty"`
	// Specifies that a trade is to be reported to a third party.
	Reporting []*Reporting2Choice `xml:"Rptg,omitempty"`
	// Indicates the conditions under which the order/trade is to be/was executed.
	TradeTransactionCondition []*TradeTransactionCondition1Choice `xml:"TradTxCond,omitempty"`
	// Specifies the role of the investor in the transaction.
	InvestorCapacity *InvestorCapacity1Choice `xml:"InvstrCpcty,omitempty"`
	// Specifies the role of the trading party in the transaction.
	TradeOriginatorRole *TradeOriginator1Choice `xml:"TradOrgtrRole,omitempty"`
	// Account servicer is instructed to buy the indicated currency after the receipt of cash proceeds or to sell the indicated currency in order to obtain the necessary currency to fund the transaction.
	CurrencyToBuyOrSell *CurrencyToBuyOrSell1Choice `xml:"CcyToBuyOrSell,omitempty"`
	// Status of affirmation of a trade.
	AffirmationStatus *AffirmationStatus1Choice `xml:"AffirmSts,omitempty"`
	// Provides the matching status of the instruction.
	MatchingStatus *MatchingStatus1Choice `xml:"MtchgSts,omitempty"`
	// Additional settlement processing information which cannot be included within the structured fields of the message.
	SettlementInstructionProcessingAdditionalDetails *Max350Text `xml:"SttlmInstrPrcgAddtlDtls,omitempty"`
	// Additional details pertaining to foreign exchange instructions.
	FXAdditionalDetails *Max350Text `xml:"FxAddtlDtls,omitempty"`
}
func (s *SecuritiesTradeDetails37) AddPlaceOfTrade() *MarketIdentification78 {
s.PlaceOfTrade = new(MarketIdentification78)
return s.PlaceOfTrade
}
func (s *SecuritiesTradeDetails37) SetPlaceOfClearing(value string) {
s.PlaceOfClearing = (*AnyBICIdentifier)(&value)
}
func (s *SecuritiesTradeDetails37) AddTradeDate() *TradeDate1Choice {
s.TradeDate = new(TradeDate1Choice)
return s.TradeDate
}
func (s *SecuritiesTradeDetails37) AddOpeningSettlementDate() *DateAndDateTimeChoice {
s.OpeningSettlementDate = new(DateAndDateTimeChoice)
return s.OpeningSettlementDate
}
func (s *SecuritiesTradeDetails37) AddDealPrice() *Price2 {
s.DealPrice = new(Price2)
return s.DealPrice
}
func (s *SecuritiesTradeDetails37) SetNumberOfDaysAccrued(value string) {
s.NumberOfDaysAccrued = (*Max3Number)(&value)
}
func (s *SecuritiesTradeDetails37) AddReporting() *Reporting2Choice {
newValue := new (Reporting2Choice)
s.Reporting = append(s.Reporting, newValue)
return newValue
}
func (s *SecuritiesTradeDetails37) AddTradeTransactionCondition() *TradeTransactionCondition1Choice {
newValue := new (TradeTransactionCondition1Choice)
s.TradeTransactionCondition = append(s.TradeTransactionCondition, newValue)
return newValue
}
func (s *SecuritiesTradeDetails37) AddInvestorCapacity() *InvestorCapacity1Choice {
s.InvestorCapacity = new(InvestorCapacity1Choice)
return s.InvestorCapacity
}
func (s *SecuritiesTradeDetails37) AddTradeOriginatorRole() *TradeOriginator1Choice {
s.TradeOriginatorRole = new(TradeOriginator1Choice)
return s.TradeOriginatorRole
}
func (s *SecuritiesTradeDetails37) AddCurrencyToBuyOrSell() *CurrencyToBuyOrSell1Choice {
s.CurrencyToBuyOrSell = new(CurrencyToBuyOrSell1Choice)
return s.CurrencyToBuyOrSell
}
func (s *SecuritiesTradeDetails37) AddAffirmationStatus() *AffirmationStatus1Choice {
s.AffirmationStatus = new(AffirmationStatus1Choice)
return s.AffirmationStatus
}
func (s *SecuritiesTradeDetails37) AddMatchingStatus() *MatchingStatus1Choice {
s.MatchingStatus = new(MatchingStatus1Choice)
return s.MatchingStatus
}
func (s *SecuritiesTradeDetails37) SetSettlementInstructionProcessingAdditionalDetails(value string) {
s.SettlementInstructionProcessingAdditionalDetails = (*Max350Text)(&value)
}
func (s *SecuritiesTradeDetails37) SetFXAdditionalDetails(value string) {
s.FXAdditionalDetails = (*Max350Text)(&value)
} | SecuritiesTradeDetails37.go | 0.830834 | 0.438725 | SecuritiesTradeDetails37.go | starcoder |
// Package verification contains verifiers for clients of the map to confirm
// entries are committed to.
package verification
import (
"bytes"
"crypto"
"fmt"
"github.com/google/trillian/experimental/batchmap"
"github.com/google/trillian/merkle/coniks"
"github.com/google/trillian/merkle/smt"
"github.com/google/trillian/merkle/smt/node"
)
// TileFetch gets the tile at the specified path in the given map revision.
// There is currently an assumption that this is very fast and thus it looks
// up tiles one at a time. This can be replaced with a batch version if that
// assumption is invalidated (e.g. this method triggers network operations).
type TileFetch func(revision int, path []byte) (*batchmap.Tile, error)

// MapVerifier verifies inclusion of key/values in a map.
type MapVerifier struct {
	tileFetch    TileFetch
	prefixStrata int   // getTilesForKey loads prefixStrata+1 tiles, root to leaf
	treeID       int64 // tree ID mixed into the CONIKS leaf hashes
	hash         crypto.Hash // hash used to map a key to its leaf path
}

// NewMapVerifier returns a MapVerifier for the map at the given location and with the
// configuration provided.
func NewMapVerifier(tileFetch TileFetch, prefixStrata int, treeID int64, hash crypto.Hash) *MapVerifier {
	return &MapVerifier{
		tileFetch:    tileFetch,
		prefixStrata: prefixStrata,
		treeID:       treeID,
		hash:         hash,
	}
}
// CheckInclusion confirms that the key & value are committed to by the map in the given
// directory, and returns the computed and confirmed root hash that commits to this.
func (v *MapVerifier) CheckInclusion(rev int, key string, value []byte) ([]byte, error) {
	// Determine the key/value we expect to find.
	// Note that the map tiles do not contain raw values, but commitments to the values.
	// If the map needs to return the values to clients then it is recommended that the
	// map operator uses a Content Addressable Store to store these values.
	h := v.hash.New()
	h.Write([]byte(key))
	keyPath := h.Sum(nil)
	leafID := node.NewID(string(keyPath), uint(len(keyPath)*8))
	expectedValueHash := coniks.Default.HashLeaf(v.treeID, leafID, value)
	// Read the tiles required for this check from disk.
	tiles, err := v.getTilesForKey(rev, keyPath)
	if err != nil {
		return nil, fmt.Errorf("couldn't load tiles: %v", err)
	}
	// Perform the verification.
	// 1) Start at the leaf tile and check the key/value.
	// 2) Compute the merkle root of the leaf tile
	// 3) Check the computed root matches that reported in the tile
	// 4) Check this root value is the key/value of the tile above.
	// 5) Rinse and repeat until we reach the tree root.
	et := emptyTree{treeID: v.treeID, hasher: coniks.Default}
	needPath, needValue := keyPath, expectedValueHash
	// Iterate leaf tile (index prefixStrata) up to the root tile (index 0).
	for i := v.prefixStrata; i >= 0; i-- {
		tile := tiles[i]
		// Check the prefix of what we are looking for matches the tile's path.
		if got, want := tile.Path, needPath[:len(tile.Path)]; !bytes.Equal(got, want) {
			return nil, fmt.Errorf("wrong tile found at index %d: got %x, want %x", i, got, want)
		}
		// Leaf paths within a tile are within the scope of the tile, so we can
		// drop the prefix from the expected path now we have verified it.
		needLeafPath := needPath[len(tile.Path):]
		// Identify the leaf we need, and convert all leaves to the format needed for hashing.
		var leaf *batchmap.TileLeaf
		nodes := make([]smt.Node, len(tile.Leaves))
		for j, l := range tile.Leaves {
			if bytes.Equal(l.Path, needLeafPath) {
				leaf = l
			}
			nodes[j] = toNode(tile.Path, l)
		}
		// Confirm we found the leaf we needed, and that it had the value we expected.
		if leaf == nil {
			return nil, fmt.Errorf("couldn't find expected leaf %x in tile %x", needLeafPath, tile.Path)
		}
		if !bytes.Equal(leaf.Hash, needValue) {
			return nil, fmt.Errorf("wrong leaf value in tile %x, leaf %x: got %x, want %x", tile.Path, leaf.Path, leaf.Hash, needValue)
		}
		// Hash this tile given its leaf values, and confirm that the value we compute
		// matches the value reported in the tile.
		hs, err := smt.NewHStar3(nodes, et.hasher.HashChildren,
			uint(len(tile.Path)+len(leaf.Path))*8, uint(len(tile.Path))*8)
		if err != nil {
			return nil, fmt.Errorf("failed to create HStar3 for tile %x: %v", tile.Path, err)
		}
		res, err := hs.Update(et)
		if err != nil {
			return nil, fmt.Errorf("failed to hash tile %x: %v", tile.Path, err)
		} else if got, want := len(res), 1; got != want {
			return nil, fmt.Errorf("wrong number of roots for tile %x: got %v, want %v", tile.Path, got, want)
		}
		if got, want := res[0].Hash, tile.RootHash; !bytes.Equal(got, want) {
			return nil, fmt.Errorf("wrong root hash for tile %x: got %x, calculated %x", tile.Path, got, want)
		}
		// Make the next iteration of the loop check that the tile above this has the
		// root value of this tile stored as the value at the expected leaf index.
		needPath, needValue = tile.Path, res[0].Hash
	}
	// needValue is now the verified root hash of the whole map.
	return needValue, nil
}
// getTilesForKey loads the tiles on the path from the root to the given leaf.
// The returned slice has prefixStrata+1 entries; index 0 is the root tile.
func (v *MapVerifier) getTilesForKey(rev int, key []byte) ([]*batchmap.Tile, error) {
	tiles := make([]*batchmap.Tile, v.prefixStrata+1)
	for i := 0; i <= v.prefixStrata; i++ {
		// Tile i is addressed by the first i bytes of the key path.
		tilePath := key[0:i]
		tile, err := v.tileFetch(rev, tilePath)
		if err != nil {
			return nil, fmt.Errorf("failed to read tile %x @ revision %d: %v", tilePath, rev, err)
		}
		tiles[i] = tile
	}
	return tiles, nil
}

// toNode converts a TileLeaf into the equivalent Node for HStar3.
// The node ID is the tile prefix concatenated with the leaf's in-tile path.
func toNode(prefix []byte, l *batchmap.TileLeaf) smt.Node {
	path := make([]byte, 0, len(prefix)+len(l.Path))
	path = append(append(path, prefix...), l.Path...)
	return smt.Node{
		ID:   node.NewID(string(path), uint(len(path))*8),
		Hash: l.Hash,
	}
}
// emptyTree is a NodeAccessor for an empty tree with the given ID.
type emptyTree struct {
	treeID int64
	hasher *coniks.Hasher
}

// Get returns the canonical empty-subtree hash for the node ID.
func (e emptyTree) Get(id node.ID) ([]byte, error) {
	return e.hasher.HashEmpty(e.treeID, id), nil
}

// Set is a no-op: an empty tree accumulates no state.
func (e emptyTree) Set(id node.ID, hash []byte) {}
package day12
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
)
// Coord is an (x, y) grid position: index 0 grows eastward, index 1
// grows northward.
type Coord [2]int

// Direction enumerates compass headings plus the turn/forward actions
// found in the puzzle input.
type Direction int

const (
	East Direction = iota
	South
	West
	North
	Left
	Right
	Forward
)

// Entry is one parsed instruction: an action and its magnitude
// (a distance for moves, degrees for turns).
type Entry struct {
	distance  int
	direction Direction
}

// Day12 parses the puzzle input and prints the answers to both parts of
// Advent of Code day 12.
func Day12() {
	input, err := parseInput()
	if err != nil {
		panic(err)
	}
	fmt.Printf("Day 12 part 1 answer is %d\n", SailAndGetDistance(input))
	fmt.Printf("Day 12 part 2 answer is %d\n", SailAndGetDistanceWithWaypoint(input))
}
// SailAndGetDistance replays the movement instructions, steering the
// ship itself (part 1), and returns the Manhattan distance of the ship
// from the origin.
func SailAndGetDistance(input []*Entry) int {
	coord := Coord{0, 0}
	// Unit vectors indexed by Direction. (gofmt -s: redundant element
	// types in the composite literal removed.)
	directionToDelta := [][2]int{
		{1, 0},  // East
		{0, -1}, // South
		{-1, 0}, // West
		{0, 1},  // North
	}
	d := East
	delta := directionToDelta[d]
	for _, entry := range input {
		switch entry.direction {
		case Forward:
			// Advance along the current heading.
			coord[0] += delta[0] * entry.distance
			coord[1] += delta[1] * entry.distance
		case North, South, East, West:
			// Translate without changing the current heading.
			dd := directionToDelta[entry.direction]
			coord[0] += dd[0] * entry.distance
			coord[1] += dd[1] * entry.distance
		case Left, Right:
			// Turn in 90-degree steps, keeping the heading in [0, 3].
			revs := entry.distance / 90
			var newD int
			if entry.direction == Left {
				newD = (int(d) - revs) % 4
			} else {
				newD = (int(d) + revs) % 4
			}
			if newD < 0 {
				newD += 4
			}
			d = Direction(newD)
			delta = directionToDelta[newD]
		}
	}
	return calculateManhattanDistance(coord)
}
// SailAndGetDistanceWithWaypoint replays the movement instructions,
// steering a waypoint relative to the ship (part 2), and returns the
// Manhattan distance of the ship from the origin.
func SailAndGetDistanceWithWaypoint(input []*Entry) int {
	coord := Coord{0, 0}
	waypoint := Coord{10, 1} // starts 10 east, 1 north of the ship
	// Unit vectors indexed by Direction. (gofmt -s: redundant element
	// types in the composite literals removed.)
	directionToDelta := [][2]int{
		{1, 0},  // East
		{0, -1}, // South
		{-1, 0}, // West
		{0, 1},  // North
	}
	// Rotation matrix for counter-clockwise (left) direction
	rotationMatrix := [][2][2]int{
		{ // 90
			{0, -1},
			{1, 0},
		},
		{ // 180
			{-1, 0},
			{0, -1},
		},
		{ // 270
			{0, 1},
			{-1, 0},
		},
	}
	for _, entry := range input {
		switch entry.direction {
		case Forward:
			// Move the ship toward the waypoint `distance` times.
			coord[0] += waypoint[0] * entry.distance
			coord[1] += waypoint[1] * entry.distance
		case North, South, East, West:
			// Shift the waypoint, not the ship.
			dd := directionToDelta[entry.direction]
			waypoint[0] += dd[0] * entry.distance
			waypoint[1] += dd[1] * entry.distance
		case Left, Right:
			// Counter-clockwise rotation degrees.
			deg := entry.distance
			// If the direction is clockwise convert to counter clockwise
			if entry.direction == Right {
				deg = 360 - deg
			}
			m := rotationMatrix[deg/90-1]
			matmul(m, &waypoint)
		}
	}
	return calculateManhattanDistance(coord)
}
// matmul multiplies the 2x2 matrix with the vector, in place.
func matmul(matrix [2][2]int, vector *Coord) {
	// Copy first so both output components use the pre-rotation values.
	v := *vector
	(*vector)[0] = v[0]*matrix[0][0] + v[1]*matrix[0][1]
	(*vector)[1] = v[0]*matrix[1][0] + v[1]*matrix[1][1]
}

// calculateManhattanDistance returns |x| + |y| for the coordinate.
func calculateManhattanDistance(coord Coord) int {
	return abs(coord[0]) + abs(coord[1])
}
// parseInput opens ./input.txt (relative to the working directory) and
// parses it into instructions.
func parseInput() ([]*Entry, error) {
	f, err := os.Open("./input.txt")
	if err != nil {
		return nil, err
	}
	// Bug fix: the file handle was previously never closed (leak).
	defer f.Close()
	return readInput(f)
}
// readInput parses one instruction per line ("<letter><number>", e.g.
// "F10") into Entry values.
func readInput(r io.Reader) ([]*Entry, error) {
	scanner := bufio.NewScanner(r)
	scanner.Split(bufio.ScanLines)
	var result []*Entry
	// The first byte of each line selects the action.
	mapping := map[byte]Direction{
		'N': North,
		'S': South,
		'E': East,
		'W': West,
		'L': Left,
		'R': Right,
		'F': Forward,
	}
	for scanner.Scan() {
		t := scanner.Text()
		// The remainder of the line is the numeric magnitude.
		d, err := strconv.Atoi(t[1:])
		if err != nil {
			return result, err
		}
		result = append(result, &Entry{
			direction: mapping[t[0]],
			distance:  d,
		})
	}
	return result, scanner.Err()
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
package main
// Note: Adjacency list representation of graph was already implemented by me in previous graphs section. Hence I have
// modified few things which converts this adjacency list to adjacency matrix and then the bellman ford algorithm is
// implemented. However you can directly take user inputs and add it in adjacency matrix.
import (
"fmt"
"math"
)
// vertexNumber pairs a vertex index with its name.
// NOTE(review): this type appears unused within this file - verify
// against the rest of the package before removing.
type vertexNumber struct {
	value int
	name  string
}

// adjacencyMatrix is the weighted adjacency matrix built by
// constructMatrix; absent edges hold +Inf.
var adjacencyMatrix [][]float64

// distanceMatrix holds the current shortest-path estimate per vertex.
var distanceMatrix []float64

// vertices maps a vertex name to its row/column index in the matrices.
var vertices map[string]int
// BellmanFord calculates all points shortest path from adjacency matrix of a graph.
// NOTE(review): the start parameter is unused here; the caller
// (startBellmanFord) seeds distanceMatrix with 0 for the start vertex
// and +Inf elsewhere before calling.
func BellmanFord(start string) {
	flag := 0
	// start relaxing each vertex for |v|-1 times
	k := len(vertices) - 1
	for k != 0 {
		for i := range vertices {
			for j := range vertices {
				if distanceMatrix[vertices[i]]+adjacencyMatrix[vertices[i]][vertices[j]] < distanceMatrix[vertices[j]] {
					distanceMatrix[vertices[j]] = distanceMatrix[vertices[i]] + adjacencyMatrix[vertices[i]][vertices[j]]
				}
			}
		}
		k--
	}
	// display generated distance matrix
	fmt.Println("\nDistance Matrix: ")
	for i := range vertices {
		fmt.Println(i, "=>", distanceMatrix[vertices[i]])
	}
	fmt.Println("\nValidating the answer by relaxing each vertices one more time...")
	// validate the generated matrix by relaxing each vertex one more time;
	// any further improvement implies a negative-weight cycle.
	for i := range vertices {
		for j := range vertices {
			if distanceMatrix[vertices[i]]+adjacencyMatrix[vertices[i]][vertices[j]] < distanceMatrix[vertices[j]] {
				flag = 1
				break
			}
		}
		if flag == 1 {
			break
		}
	}
	if flag == 1 {
		fmt.Println("-- Distance matrix formed is WRONG. Found a loop with negative weight in graph. --")
	} else {
		fmt.Println("Distance matrix formed is CORRECT.")
	}
}
// init allocates the vertex-name index before main runs.
func init() {
	vertices = make(map[string]int)
}

// main runs an interactive menu for building a graph and executing the
// Bellman-Ford single-source shortest-path algorithm on it.
func main() {
	fmt.Println("\n-- Bellman Ford Algorithm --")
	i := 0
	for i == 0 {
		fmt.Println("\n1. ADD A VERTEX")
		fmt.Println("2. ADD AN EDGE")
		fmt.Println("3. SIMPLE DISPLAY")
		// Bug fix: this entry previously read "RUN FLOYD WARSHALL"
		// (copy-paste leftover) although option 4 runs Bellman-Ford.
		fmt.Println("4. RUN BELLMAN FORD")
		fmt.Println("5. EXIT")
		var choice int
		fmt.Print("Enter your choice: ")
		fmt.Scanf("%d\n", &choice)
		switch choice {
		case 1:
			addVertex()
		case 2:
			addEdge()
		case 3:
			simpleDisplay()
		case 4:
			startBellmanFord()
		case 5:
			i = 1
		default:
			fmt.Println("Command not recognized.")
		}
	}
}
// generates an adjacency matrix from adjacency list
// (reads the package-level `graph` built elsewhere; after copying the
// edge weights, every absent non-diagonal edge is set to +Inf).
func constructMatrix() {
	count := 0
	adjacencyMatrix = make([][]float64, len(vertices))
	for i := range graph {
		adjacencyMatrix[vertices[i]] = make([]float64, len(vertices))
		for j := range graph[i] {
			adjacencyMatrix[vertices[i]][vertices[graph[i][j].name]] = float64(graph[i][j].value)
		}
		count++
	}
	for i := range adjacencyMatrix {
		for j := range adjacencyMatrix[i] {
			if i != j && adjacencyMatrix[i][j] == 0 {
				adjacencyMatrix[i][j] = math.Inf(0)
			}
		}
	}
	fmt.Println("\n-- Initial Adjacency Matrix --")
	for i := range adjacencyMatrix {
		fmt.Println(adjacencyMatrix[i])
	}
}

// startBellmanFord prompts for a start vertex, seeds the distance matrix
// (0 for the start vertex, +Inf elsewhere), validates the graph, then
// runs BellmanFord.
func startBellmanFord() {
	// scan for starting vertex name
	var vtx string
	fmt.Print("Enter the name of starting vertex: ")
	fmt.Scanf("%s\n", &vtx)
	// initialize distance matrix with necessary values
	distanceMatrix = make([]float64, len(vertices))
	count := len(vertices) - 1
	for count != -1 {
		if vertices[vtx] == count {
			distanceMatrix[count] = 0
		} else {
			distanceMatrix[count] = math.Inf(0)
		}
		count--
	}
	// validate few things before starting the algorithm
	if len(graph) == 0 {
		fmt.Println("\n-- Graph is null/empty. --")
		return
	} else if graph[vtx] == nil {
		fmt.Println("\n-- Start vertex not found in graph. --")
		return
	}
	// generate adjacency matrix and then start bellman ford algorithm
	constructMatrix()
	BellmanFord(vtx)
}
// simpleDisplay prints every vertex followed by its adjacency list.
func simpleDisplay() {
	fmt.Println("")
	for i := range graph {
		fmt.Print(i, " => ")
		for j := range graph[i] {
			fmt.Print(graph[i][j])
		}
		fmt.Println("")
	}
}
// Graph with no negative weight cycle
// addVertexToGraph("a")
// addVertexToGraph("b")
// addVertexToGraph("c")
// addVertexToGraph("d")
// addVertexToGraph("e")
// addVertexToGraph("f")
// addVertexToGraph("g")
// addEdgeToGraph("a", "b", 6)
// addEdgeToGraph("a", "c", 5)
// addEdgeToGraph("a", "d", 5)
// addEdgeToGraph("b", "e", -1)
// addEdgeToGraph("c", "b", -2)
// addEdgeToGraph("c", "e", 1)
// addEdgeToGraph("d", "c", -2)
// addEdgeToGraph("d", "f", -1)
// addEdgeToGraph("e", "g", 3)
// addEdgeToGraph("f", "g", 3)
// Graph with negative weight cycle
// addVertexToGraph("a")
// addVertexToGraph("b")
// addVertexToGraph("c")
// addVertexToGraph("d")
// addEdgeToGraph("a", "b", 4)
// addEdgeToGraph("a", "d", 5)
// addEdgeToGraph("b", "d", 5)
// addEdgeToGraph("c", "b", -10)
// addEdgeToGraph("d", "c", 3) | algorithms/graphs/bellman_ford/bellman_ford.go | 0.628179 | 0.532547 | bellman_ford.go | starcoder |
package node
import (
"sort"
"github.com/insolar/insolar/insolar"
)
// Accessor provides read-only, indexed access to the nodes of a single
// network Snapshot.
type Accessor struct {
	snapshot *Snapshot

	refIndex  map[insolar.Reference]insolar.NetworkNode   // by node reference
	sidIndex  map[insolar.ShortNodeID]insolar.NetworkNode // by short node ID
	addrIndex map[string]insolar.NetworkNode              // by network address
	// should be removed in future
	active []insolar.NetworkNode
}
// GetActiveNodeByShortID returns the active node with the given short
// ID, or nil if none is known.
func (a *Accessor) GetActiveNodeByShortID(shortID insolar.ShortNodeID) insolar.NetworkNode {
	return a.sidIndex[shortID]
}

// GetActiveNodeByAddr returns the active node with the given network
// address, or nil if none is known.
func (a *Accessor) GetActiveNodeByAddr(address string) insolar.NetworkNode {
	return a.addrIndex[address]
}

// GetActiveNodes returns a copy of all active nodes in the snapshot.
func (a *Accessor) GetActiveNodes() []insolar.NetworkNode {
	result := make([]insolar.NetworkNode, len(a.active))
	copy(result, a.active)
	return result
}

// GetActiveNode returns the active node with the given reference, or nil.
func (a *Accessor) GetActiveNode(ref insolar.Reference) insolar.NetworkNode {
	return a.refIndex[ref]
}

// GetWorkingNode returns the active node with the given reference only
// if it has non-zero power; otherwise nil.
func (a *Accessor) GetWorkingNode(ref insolar.Reference) insolar.NetworkNode {
	node := a.GetActiveNode(ref)
	if node == nil || node.GetPower() == 0 {
		return nil
	}
	return node
}

// GetWorkingNodes returns a copy of the snapshot's working list, sorted
// by node reference.
func (a *Accessor) GetWorkingNodes() []insolar.NetworkNode {
	workingList := a.snapshot.nodeList[ListWorking]
	result := make([]insolar.NetworkNode, len(workingList))
	copy(result, workingList)
	sort.Slice(result, func(i, j int) bool {
		return result[i].ID().Compare(result[j].ID()) < 0
	})
	return result
}
// GetSnapshotActiveNodes returns all nodes from the snapshot's joiner,
// idle, working and leaving lists, concatenated in that order.
func GetSnapshotActiveNodes(snapshot *Snapshot) []insolar.NetworkNode {
	joining := snapshot.nodeList[ListJoiner]
	idle := snapshot.nodeList[ListIdle]
	working := snapshot.nodeList[ListWorking]
	leaving := snapshot.nodeList[ListLeaving]

	total := len(joining) + len(idle) + len(working) + len(leaving)
	result := make([]insolar.NetworkNode, 0, total)
	result = append(result, joining...)
	result = append(result, idle...)
	result = append(result, working...)
	result = append(result, leaving...)
	return result
}
func (a *Accessor) addToIndex(node insolar.NetworkNode) {
a.refIndex[node.ID()] = node
a.sidIndex[node.ShortID()] = node
a.addrIndex[node.Address()] = node
if node.GetPower() == 0 {
return
}
}
func NewAccessor(snapshot *Snapshot) *Accessor {
result := &Accessor{
snapshot: snapshot,
refIndex: make(map[insolar.Reference]insolar.NetworkNode),
sidIndex: make(map[insolar.ShortNodeID]insolar.NetworkNode),
addrIndex: make(map[string]insolar.NetworkNode),
}
result.active = GetSnapshotActiveNodes(snapshot)
for _, node := range result.active {
result.addToIndex(node)
}
return result
} | network/node/accessor.go | 0.583559 | 0.427935 | accessor.go | starcoder |
package ccd
import (
"strconv"
"strings"
"time"
"github.com/mattn/go-pkg-xmlx"
)
const (
	// Found both these formats in the wild.
	// TimeDecidingIndex is the byte offset at which the two layouts
	// diverge: a '.' there selects the fractional-seconds layout.
	TimeDecidingIndex = 14
	TimeFormat        = "20060102150405-0700"
	TimeFormat2       = "20060102150405.000-0700"
)

// TimeType identifies the HL7 flavor of a time expression.
type TimeType string

const (
	// represents a single point in time
	TIME_SINGLE TimeType = "TS"
	// interval of time
	TIME_INTERVAL = "IVL_TS"
	// periodic interval of time
	TIME_PERIODIC = "PIVL_TS"
	// event based time interval
	TIME_EVENT = "EIVL_TS"
	// represents a probabilistic time interval and is used to represent dosing frequencies like q4-6h
	TIME_PROBABILISTIC = "PIVL_PPD_TS"
	// represents a parenthetical set of time expressions
	TIME_PARENTHETICAL = "SXPR_TS"
)

// Time is a decoded HL7 time: a point value and/or an interval, with an
// optional repetition period.
type Time struct {
	Type   TimeType
	Low    time.Time
	High   time.Time
	Value  time.Time
	Period time.Duration // s, min, h, d, wk and mo
}

// IsZero reports whether the Time carries no information at all.
func (t *Time) IsZero() bool {
	return t.Value.IsZero() && t.Low.IsZero() && t.High.IsZero() && t.Period == 0
}
// decodeTime extracts a Time from an HL7 time element node. A nil node
// yields the zero Time. Low/high bounds flagged with nullFlavor are
// skipped; a missing value falls back to <center>, then to the low bound.
func decodeTime(node *xmlx.Node) (t Time) {
	if node == nil {
		return t
	}
	t.Type = TimeType(strings.ToUpper(node.As("*", "type")))
	lowNode := Nget(node, "low")
	if lowNode != nil && !lowNode.HasAttr("*", "nullFlavor") {
		t.Low, _ = ParseHL7Time(lowNode.As("*", "value"))
	}
	highNode := Nget(node, "high")
	if highNode != nil && !highNode.HasAttr("*", "nullFlavor") {
		t.High, _ = ParseHL7Time(highNode.As("*", "value"))
	}
	val := node.As("*", "value")
	if len(val) > 0 {
		t.Value, _ = ParseHL7Time(val)
	} else {
		centerNode := Nget(node, "center")
		if centerNode != nil {
			t.Value, _ = ParseHL7Time(centerNode.As("*", "value"))
		}
	}
	// An interval with only a low bound uses it as the point value too.
	if t.Value.IsZero() && !t.Low.IsZero() && t.High.IsZero() {
		t.Value = t.Low
	}
	period := Nget(node, "period")
	if period != nil {
		value := time.Duration(toInt64(period.As("*", "value")))
		unit := period.As("*", "unit")
		// Map the HL7 unit code to a Go duration multiplier.
		switch strings.ToLower(unit) {
		case "s":
			t.Period = time.Second * value
		case "min":
			t.Period = time.Minute * value
		case "h":
			t.Period = time.Hour * value
		case "d":
			t.Period = time.Hour * 24 * value
		case "wk":
			t.Period = time.Hour * 24 * 7 * value
		case "mo":
			// A month is approximated as 30 days.
			t.Period = time.Hour * 24 * 30 * value
		}
	}
	return t
}
// ParseHL7Time parses a possibly partial HL7 timestamp.
//
// Dates and times in a CCD can be truncated to any prefix of the full
// layout (e.g. "2006", "200601", "20060102"), so the layout string is
// trimmed to the input's length before parsing. An empty input yields
// the zero time with no error.
func ParseHL7Time(value string) (time.Time, error) {
	if len(value) == 0 {
		return time.Time{}, nil
	}
	// A '.' at the deciding position selects the fractional-seconds
	// layout; otherwise the plain layout is used.
	layout := TimeFormat
	if len(value) > TimeDecidingIndex && value[TimeDecidingIndex] == '.' {
		layout = TimeFormat2
	}
	return time.Parse(layout[:len(value)], value)
}
// Node get.
// helper function to continually transverse down the
// xml nodes in args, and return the last one.
// Returns nil as soon as any step along the path is missing.
func Nget(node *xmlx.Node, args ...string) *xmlx.Node {
	for _, a := range args {
		if node == nil {
			return nil
		}
		node = node.SelectNode("*", a)
	}
	return node
}

// Node Safe get.
// just like Nget, but returns a node no matter what:
// a fresh empty node is substituted when the path is missing.
func Nsget(node *xmlx.Node, args ...string) *xmlx.Node {
	n := Nget(node, args...)
	if n == nil {
		return xmlx.NewNode(0)
	}
	return n
}
// insertSortParser inserts p into parsers, keeping the slice ordered by
// ascending Priority; p lands before existing parsers of equal priority.
// Returns the (possibly reallocated) slice.
func insertSortParser(p Parser, parsers Parsers) Parsers {
	// Scan from the back for the last parser with a lower priority;
	// the insertion point is just after it.
	i := len(parsers) - 1
	for ; i >= 0; i-- {
		if p.Priority > parsers[i].Priority {
			i += 1
			break
		}
	}
	if i < 0 {
		i = 0
	}
	parsers = append(parsers, p) // this just expands storage.
	// Shift the tail right by one and drop p into the gap.
	copy(parsers[i+1:], parsers[i:])
	parsers[i] = p
	return parsers
}
// toInt64 best-effort converts an arbitrary scalar value to int64.
//
// Signed and unsigned integers convert directly, floats truncate toward
// zero, bools map to 1/0, and strings parse as base-10 integers. Any
// other type - or an unparsable string - yields 0.
func toInt64(val interface{}) int64 {
	switch t := val.(type) {
	case int:
		return int64(t)
	case int8:
		return int64(t)
	case int16:
		return int64(t)
	case int32:
		return int64(t)
	case int64:
		// Already the target type; the redundant int64(t) conversion
		// was removed.
		return t
	case uint:
		return int64(t)
	case uint8:
		return int64(t)
	case uint16:
		return int64(t)
	case uint32:
		return int64(t)
	case uint64:
		return int64(t)
	case bool:
		// Idiomatic form of the old "if t == true" comparison.
		if t {
			return 1
		}
		return 0
	case float32:
		return int64(t)
	case float64:
		return int64(t)
	case string:
		// Parse errors deliberately fall through to 0.
		i, _ := strconv.ParseInt(t, 10, 64)
		return i
	}
	return 0
}
Package events implements the audit log interface events.IAuditLog
using filesystem backend.
Audit logs
----------
Audit logs are events associated with user logins, server access
and session log events like session.start.
Example audit log event:
{"addr.local":"192.168.127.12:3022",
"addr.remote":"192.168.127.12:58866",
"event":"session.start",
"login":"root",
"user":"<EMAIL>"
}
Session Logs
------------
Session logs are a series of events and recorded SSH interactive session playback.
Example session log event:
{
"time":"2018-01-04T02:12:40.245Z",
"event":"print",
"bytes":936,
"ms":40962,
"offset":16842,
"ei":31,
"ci":29
}
Print event fields
------------------
Print event specifies session output - PTY io recorded by Teleport node or Proxy
based on the configuration.
* "offset" is an offset in bytes from a start of a session
* "ms" is a delay in milliseconds from the last event occurred
* "ci" is a chunk index ordering only print events
* "ei" is an event index ordering events from the first one
As in example of print event above, "ei" - is a session event index - 31,
while "ci" is a chunk index - meaning that this event is 29th in a row of print events.
Client streaming session logs
------------------------------
Session related logs are delivered in order defined by clients.
Every event is ordered and has a session-local index, every next event has index incremented.
Client delivers session events in batches, where every event in the batch
is guaranteed to be in continuous order (e.g. no cases with events
delivered in a single batch to have missing event or chunk index).
Disk File format
----------------
On disk file format is designed to be compatible with NFS filesystems and provides
guarantee that only one auth server writes to the file at a time.
Main Audit Log Format
=====================
The main log files are saved as:
/var/lib/teleport/log/<auth-server-id>/<date>.log
The log file is rotated every 24 hours. The old files must be cleaned
up or archived by an external tool.
Log file format:
utc_date,action,json_fields
Common JSON fields
- user : teleport user
- login : server OS login, the user logged in as
- addr.local : server address:port
- addr.remote: connected client's address:port
- sid : session ID (GUID format)
Examples:
2016-04-25 22:37:29 +0000 UTC,session.start,{"addr.local":"127.0.0.1:3022","addr.remote":"127.0.0.1:35732","login":"root","sid":"4a9d97de-0b36-11e6-a0b3-d8cb8ae5080e","user":"vincent"}
2016-04-25 22:54:31 +0000 UTC,exec,{"addr.local":"127.0.0.1:3022","addr.remote":"127.0.0.1:35949","command":"-bash -c ls /","login":"root","user":"vincent"}
Session log file format
=======================
Each session has its own session log stored as several files:
Index file contains a list of event files and chunks files associated with a session:
/var/lib/teleport/log/sessions/<auth-server-id>/<session-id>.index
The format of the index file contains of two or more lines with pointers to other files:
{"file_name":"<session-id>-<first-event-in-file-index>.events","type":"events","index":<first-event-in-file-index>}
{"file_name":"<session-id>-<first-chunk-in-file-offset>.chunks","type":"chunks","offset":<first-chunk-in-file-offset>}
Files:
/var/lib/teleport/log/<auth-server-id>/<session-id>-<first-event-in-file-index>.events
/var/lib/teleport/log/<auth-server-id>/<session-id>-<first-chunk-in-file-offset>.chunks
Where:
- .events (same events as in the main log, but related to the session)
- .chunks (recorded session bytes: PTY IO)
Examples
~~~~~~~~
**Single auth server**
In the simplest case, single auth server a1 log for a single session id s1
will consist of three files:
/var/lib/teleport/a1/s1.index
With contents:
{"file_name":"s1-0.events","type":"events","index":0}
{"file_name":"s1-0.chunks","type":"chunks","offset":0}
This means that all session events are located in s1-0.events file starting from
the first event with index 0 and all chunks are located in file s1-0.chunks file
with the byte offset from the start - 0.
File with session events /var/lib/teleport/a1/s1-0.events will contain:
{"ei":0,"event":"session.start", ...}
{"ei":1,"event":"resize",...}
{"ei":2,"ci":0, "event":"print","bytes":40,"offset":0}
{"ei":3,"event":"session.end", ...}
File with recorded session /var/lib/teleport/a1/s1-0.chunks will contain 40 bytes
emitted by print event with chunk index 0
**Multiple Auth Servers**
In a high-availability scenario, multiple auth servers are
deployed behind a load balancer.
Any auth server can go down during session and clients will retry the delivery
to the other auth server.
Both auth servers have mounted /var/lib/teleport/log as a shared NFS folder.
To make sure that only one auth server writes to a file at a time,
each auth server writes to it's own file in a sub folder named
with host UUID of the server.
Client sends the chunks of events related to the session s1 in order,
but load balancer sends first batch of event to the first server a1,
and the second batch of event to the second server a2.
Server a1 will produce the following file:
/var/lib/teleport/a1/s1.index
With contents:
{"file_name":"s1-0.events","type":"events","index":0}
{"file_name":"s1-0.chunks","type":"chunks","offset":0}
Events file /var/lib/teleport/a1/s1-0.events will contain:
{"ei":0,"event":"session.start", ...}
{"ei":1,"event":"resize",...}
{"ei":2,"ci":0, "event":"print","bytes":40,"offset":0}
Chunks file /var/lib/teleport/a1/s1-0.chunks will contain the 40 bytes
emitted by the print event with chunk index 0.
Server a2 will produce the following file:
/var/lib/teleport/a2/s1.index
With contents:
{"file_name":"s1-3.events","type":"events","index":3}
{"file_name":"s1-40.chunks","type":"chunks","offset":40}
Events file /var/lib/teleport/a2/s1-3.events will contain:
{"ei":3,"ci":1, "event":"print","bytes":15,"ms":713,"offset":40}
{"ei":4,"event":"session.end", ...}
Chunks file /var/lib/teleport/a2/s1-40.chunks will contain the 15 bytes
emitted by the print event with chunk index 1, which comes after a delay of 713 milliseconds.
Offset 40 indicates that the first chunk stored in the file s1-40.chunks
comes at an offset of 40 bytes from the start of the session.
Log Search and Playback
-----------------------
Log search and playback is aware of multiple auth servers, merges
indexes, event streams stored on multiple auth servers.
*/
package events | lib/events/doc.go | 0.6137 | 0.407569 | doc.go | starcoder |
package rs
import (
"fmt"
)
// poly is a polynomial whose coefficients are elements of GF(size), as
// defined by field. Most functions in this file require the slice to be
// normalized (no leading zero terms) — see makePoly.
type poly struct {
	field        *Field
	coefficients []byte // Highest-power term first (reverse of ascending degree order).
}
var zero = []byte{0}
var one = []byte{1}
// |coefficients| representing elements of GF(size), arranged from most
// significant (highest-power term) coefficient to least significant.
// makePoly wraps coefficients (most-significant term first, elements of
// GF(size)) in a poly, stripping leading zero terms so the leading
// coefficient is non-zero for everything except the constant polynomial "0".
// Returns nil for an empty coefficient slice. The returned poly may alias
// the input slice.
func makePoly(field *Field, coefficients []byte) *poly {
	if len(coefficients) == 0 {
		return nil
	}
	obj := &poly{field: field}
	if len(coefficients) > 1 && coefficients[0] == 0 {
		// Leading term must be non-zero for anything except the constant polynomial "0".
		firstNonZero := 1
		// The bounds check must come before the element access: the original
		// order indexed one past the end when every coefficient was zero.
		for firstNonZero < len(coefficients) && coefficients[firstNonZero] == 0 {
			firstNonZero++
		}
		if firstNonZero == len(coefficients) {
			obj.coefficients = zero
		} else {
			// Slice off the leading zeros.
			obj.coefficients = coefficients[firstNonZero:]
		}
	} else {
		obj.coefficients = coefficients
	}
	return obj
}
// getZero returns the constant polynomial "0" over field.
func getZero(field *Field) *poly {
	return &poly{field, zero}
}
// getOne returns the constant polynomial "1" over field.
func getOne(field *Field) *poly {
	return &poly{field, one}
}
// degree returns the degree of the polynomial: one less than the number of
// stored coefficients (the constant polynomial has degree 0).
func (p *poly) degree() int {
	return len(p.coefficients) - 1
}
// Returns the monomial representing coefficient * x^degree.
// Returns nil for a negative degree and the zero polynomial for a zero
// coefficient.
func buildMonomial(field *Field, degree int, coefficient byte) *poly {
	if degree < 0 {
		return nil
	}
	if coefficient == 0 {
		return getZero(field)
	}
	// Only the highest-power slot is set; the remaining degree entries stay
	// zero, which is exactly coefficient * x^degree in this representation.
	coefficients := make([]byte, degree+1)
	coefficients[0] = coefficient
	return &poly{field, coefficients}
}
// Returns true iff this polynomial is the monomial "0".
// Valid only for normalized polynomials (leading term non-zero otherwise),
// as produced by makePoly.
func (p *poly) isZero() bool {
	return p.coefficients[0] == 0
}
// Returns coefficient of x^degree term in this polynomial.
// Coefficients are stored highest-power first, hence the reversed index.
func (p *poly) getCoefficient(degree int) byte {
	return p.coefficients[len(p.coefficients)-1-degree]
}
// Returns evaluation of this polynomial at a given point.
func (p *poly) evaluateAt(a byte) byte {
	if a == 0 {
		// Just return the x^0 coefficient
		return p.getCoefficient(0)
	}
	if a == 1 {
		// Just the sum of the coefficients.
		result := byte(0)
		for _, v := range p.coefficients {
			result = p.field.f.Add(result, v)
		}
		return result
	}
	// General case: Horner's method over the field, folding in coefficients
	// from the highest-power term down (result = a*result + c[i]).
	result := p.coefficients[0]
	for i := 1; i < len(p.coefficients); i++ {
		result = p.field.f.Add(p.field.f.Mul(a, result), p.coefficients[i])
	}
	return result
}
// add returns the field sum of p and other. Neither input is modified; the
// shorter coefficient slice is implicitly left-padded with zeros. Note that
// when one operand is zero the other is returned as-is (aliased, not copied).
func (p *poly) add(other *poly) *poly {
	if p.isZero() {
		return other
	}
	if other.isZero() {
		return p
	}
	smaller := p.coefficients
	larger := other.coefficients
	if len(smaller) > len(larger) {
		smaller, larger = larger, smaller
	}
	sumDiff := make([]byte, len(larger))
	lengthDiff := len(larger) - len(smaller)
	// Copy high-order terms only found in higher-degree polynomial's coefficients
	copy(sumDiff, larger[:lengthDiff])
	for i := lengthDiff; i < len(larger); i++ {
		sumDiff[i] = p.field.f.Add(smaller[i-lengthDiff], larger[i])
	}
	// makePoly re-normalizes in case leading terms cancelled.
	return makePoly(p.field, sumDiff)
}
// mulPoly returns the field product of p and other (schoolbook convolution
// of the coefficient slices).
func (p *poly) mulPoly(other *poly) *poly {
	if p.isZero() || other.isZero() {
		return getZero(p.field)
	}
	aCoefficients := p.coefficients
	bCoefficients := other.coefficients
	// deg(product) = deg(a) + deg(b), hence lenA + lenB - 1 coefficients.
	product := make([]byte, len(aCoefficients)+len(bCoefficients)-1)
	for i := 0; i < len(aCoefficients); i++ {
		aCoeff := aCoefficients[i]
		for j := 0; j < len(bCoefficients); j++ {
			product[i+j] = p.field.f.Add(product[i+j], p.field.f.Mul(aCoeff, bCoefficients[j]))
		}
	}
	return makePoly(p.field, product)
}
// mulScalar returns p with every coefficient multiplied by scalar.
// Returns p itself (aliased) when scalar is 1.
func (p *poly) mulScalar(scalar byte) *poly {
	if scalar == 0 {
		return getZero(p.field)
	}
	if scalar == 1 {
		return p
	}
	product := make([]byte, len(p.coefficients))
	for i := 0; i < len(p.coefficients); i++ {
		product[i] = p.field.f.Mul(p.coefficients[i], scalar)
	}
	return makePoly(p.field, product)
}
// mulByMonomial returns p * (coefficient * x^degree): scale every
// coefficient, then shift up by degree (the trailing degree slots stay zero).
// Returns nil for a negative degree.
func (p *poly) mulByMonomial(degree int, coefficient byte) *poly {
	if degree < 0 {
		return nil
	}
	if coefficient == 0 {
		return getZero(p.field)
	}
	size := len(p.coefficients)
	product := make([]byte, size+degree)
	for i := 0; i < size; i++ {
		product[i] = p.field.f.Mul(p.coefficients[i], coefficient)
	}
	return makePoly(p.field, product)
}
// divide returns the quotient and remainder of p / divisor using polynomial
// long division over the field. Returns (nil, nil) on division by zero.
//
// NOTE(review): the loop cancels the remainder's leading term with
// remainder.add(term), i.e. it relies on addition being its own inverse.
// That holds in a characteristic-2 field (GF(2^8)) — confirm Field is one.
func (p *poly) divide(divisor *poly) (q *poly, r *poly) {
	if divisor.isZero() {
		// "Divide by 0".
		return nil, nil
	}
	quotient := getZero(p.field)
	remainder := p
	denominatorLeadingTerm := divisor.getCoefficient(divisor.degree())
	inverseDenominatorLeadingTerm := p.field.f.Inv(denominatorLeadingTerm)
	for remainder.degree() >= divisor.degree() && !remainder.isZero() {
		// Scale the divisor so its leading term matches the remainder's,
		// then subtract; each pass strictly reduces remainder.degree().
		degreeDifference := remainder.degree() - divisor.degree()
		scale := p.field.f.Mul(remainder.getCoefficient(remainder.degree()), inverseDenominatorLeadingTerm)
		term := divisor.mulByMonomial(degreeDifference, scale)
		iterationQuotient := buildMonomial(p.field, degreeDifference, scale)
		quotient = quotient.add(iterationQuotient)
		remainder = remainder.add(term)
	}
	return quotient, remainder
}
// String implements fmt.Stringer, showing the raw coefficient slice.
func (p *poly) String() string {
	return fmt.Sprintf("poly{%v}", p.coefficients)
} | poly.go | 0.822724 | 0.658857 | poly.go | starcoder |
package twidgets
import (
"fmt"
"github.com/gdamore/tcell"
"gitlab.com/tslocum/cview"
)
// Arrow glyphs appended to the active sort column's header text.
const (
	arrowUp   = "▲"
	arrowDown = "▼"
)
// Sort is a direction that can be sorted with.
type Sort int
const (
	// Sort ascending
	SortAsc Sort = iota
	// Sort descending
	SortDesc
)
// Table extends cview.Table with some helpers for managing rows.
// In addition it provides sorting capabilities.
type Table struct {
	*cview.Table
	columns          []string // header texts, including "#" when showIndex is set
	columnWidths     []int    // optional per-column max widths
	columnExpansions []int    // optional per-column expansion factors
	showIndex        bool     // prepend a 1-based row-index column
	sortCol          int      // column currently sorted by
	sortType         Sort     // current sort direction
	sortFunc         func(col string, sort Sort)
	addCellFunc      func(cell *cview.TableCell, header bool, col int)
}
// NewTable creates new table instance with a fixed header row, row-only
// selection, and ascending sort on column 0 by default.
func NewTable() *Table {
	t := &Table{
		Table: cview.NewTable(),
	}
	// NOTE(review): SetFixed is called twice with different column counts;
	// the second call below overrides this one — confirm which was intended.
	t.Table.SetFixed(1, 100)
	t.Table.SetSelectable(true, false)
	t.sortCol = 0
	t.sortType = SortAsc
	t.SetCellSimple(0, 0, "#")
	t.SetFixed(1, 10)
	return t
}
// SetSortFunc sets sorting function that gets called whenever user calls sorting some column.
func (t *Table) SetSortFunc(sortFunc func(column string, sort Sort)) *Table {
	t.sortFunc = sortFunc
	return t
}
// SetAddCellFunc add function callback that gets called every time a new cell is added with flag of whether
// the cell is in header row. Use this to modify e.g. style of the cell when it gets added to table.
func (t *Table) SetAddCellFunc(cellFunc func(cell *cview.TableCell, header bool, col int)) *Table {
	t.addCellFunc = cellFunc
	return t
}
// SetShowIndex configure whether first column in table is item index. If set, first item is in index 1.
// Changing this does not update existing data. Thus data needs to be cleared and rows added again for changes
// to take effect.
func (t *Table) SetShowIndex(index bool) {
	t.showIndex = index
}
// SetColumnWidths sets each columns maximum width. If index is included as first row,
// it must be included in here.
func (t *Table) SetColumnWidths(widths []int) {
	t.columnWidths = widths
}
// SetColumnExpansions sets how each column will expand / shrink when changing terminal size.
// If index is included as first row, it must be included in here.
func (t *Table) SetColumnExpansions(expansions []int) {
	t.columnExpansions = expansions
}
// Clear clears the content of the table. If headers==true, remove headers as well.
func (t *Table) Clear(headers bool) *Table {
	if headers {
		t.Table.Clear()
	} else {
		// Save the header row's cells, wipe everything, then restore them.
		count := t.Table.GetColumnCount()
		cells := make([]*cview.TableCell, count)
		for i := 0; i < count; i++ {
			cells[i] = t.Table.GetCell(0, i)
		}
		t.Table.Clear()
		for i := 0; i < count; i++ {
			t.Table.SetCell(0, i, cells[i])
		}
	}
	// Scroll back so the first data row is visible under the header.
	t.SetOffset(1, 0)
	return t
}
// AddRow adds a single row to the table at data index `index` (rendered at
// table row index+1 to leave room for the header). When showIndex is set a
// 1-based index cell is prepended. Configured column widths/expansions are
// applied to as many cells as they cover.
func (t *Table) AddRow(index int, content ...string) *Table {
	count := len(content)
	cells := make([]*cview.TableCell, count, count+1)
	for i := 0; i < len(content); i++ {
		cells[i] = cview.NewTableCell(content[i])
	}
	if t.showIndex {
		cells = append([]*cview.TableCell{cview.NewTableCell(fmt.Sprint(index + 1))}, cells...)
	}
	for i := 0; i < len(cells); i++ {
		// i must be strictly below the configured slice lengths; the previous
		// "len(...) >= i" comparison read one past the end when a row had
		// exactly one more cell than configured. len(nil)==0, so no nil check
		// is needed.
		if i < len(t.columnWidths) {
			cells[i].SetMaxWidth(t.columnWidths[i])
		}
		if i < len(t.columnExpansions) {
			cells[i].SetExpansion(t.columnExpansions[i])
		}
		if t.addCellFunc != nil {
			// NOTE(review): the third argument is declared as a column in
			// SetAddCellFunc but receives the 1-based row here — confirm
			// intent with callers before changing.
			t.addCellFunc(cells[i], false, index+1)
		}
		t.Table.SetCell(index+1, i, cells[i])
	}
	return t
}
// SetSort sets default sort column and type. When an index column is shown,
// sorting by it is not allowed, so column 0 is redirected to column 1.
func (t *Table) SetSort(column int, sort Sort) *Table {
	if t.showIndex && column == 0 {
		t.sortCol = 1
	} else {
		t.sortCol = column
	}
	t.sortType = sort
	t.updateSort()
	return t
}
// SetColumns set column header names. This will clear the table.
func (t *Table) SetColumns(columns []string) *Table {
	t.Clear(true)
	if t.showIndex {
		// Prepend the index header and default-sort on the first real column.
		columns = append([]string{"#"}, columns...)
		if len(columns) >= 2 {
			t.sortCol = 1
			t.sortType = SortAsc
		}
	} else {
		if len(columns) >= 1 {
			t.sortCol = 0
			t.sortType = SortAsc
		}
	}
	for i := 0; i < len(columns); i++ {
		cell := cview.NewTableCell(columns[i])
		if t.addCellFunc != nil {
			t.addCellFunc(cell, true, 0)
		}
		t.Table.SetCell(0, i, cell)
	}
	t.columns = columns
	return t
}
// InputHandler handles header row inputs. It lets the user move the
// selection into the header row (to pick a sort column) only when a sort
// function is configured, and otherwise keeps the selection on data rows.
func (t *Table) InputHandler() func(event *tcell.EventKey, setFocus func(p cview.Primitive)) {
	return func(event *tcell.EventKey, setFocus func(p cview.Primitive)) {
		enableHeader := false
		key := event.Key()
		if t.sortFunc != nil {
			row, _ := t.Table.GetSelection()
			if row == 1 && key == tcell.KeyUp {
				// Moving up from the first data row enters the header and
				// enables per-cell (column) selection.
				enableHeader = true
				t.Table.SetSelectable(true, true)
				t.Table.Select(0, t.sortCol)
			} else if row == 0 && key == tcell.KeyDown {
				// Leaving the header restores row-only selection.
				t.Table.SetSelectable(true, false)
			}
			if key == tcell.KeyEnter && row == 0 && t.sortFunc != nil {
				t.updateSort()
			}
		}
		// User might move to first/last row, catch when user moves to 0 row and select
		// 1st row instead. This is only if user moved with other key than key up
		row, _ := t.Table.GetSelection()
		atHeader := row == 0
		t.Table.InputHandler()(event, setFocus)
		row, _ = t.Table.GetSelection()
		if row == 0 && !atHeader && !enableHeader {
			t.Table.Select(1, 0)
			t.Table.SetSelectable(true, false)
		} else if enableHeader {
			t.Table.Select(0, t.sortCol)
			t.Table.SetSelectable(true, true)
		}
	}
}
// updateSort toggles or switches the sort column based on the current header
// selection, updates the header arrow glyphs, and invokes sortFunc if set.
//
// NOTE(review): toggling to descending renders arrowUp (▲) and ascending
// renders arrowDown (▼) — verify this orientation is intentional.
func (t *Table) updateSort() {
	_, col := t.GetSelection()
	if col == 0 && t.showIndex {
		//Refuse to sort by index
		return
	}
	cell := t.GetCell(0, t.sortCol)
	if t.sortCol == col {
		// Same column selected again: flip direction.
		if t.sortType == SortAsc {
			t.sortType = SortDesc
			cell.SetText(fmt.Sprintf("%s %s", t.columns[col], arrowUp))
		} else {
			t.sortType = SortAsc
			cell.SetText(fmt.Sprintf("%s %s", t.columns[col], arrowDown))
		}
	} else {
		// New column: clear the old column's arrow and start ascending.
		cell.SetText(t.columns[t.sortCol])
		newCell := t.GetCell(0, col)
		t.sortCol = col
		t.sortType = SortAsc
		newCell.SetText(fmt.Sprintf("%s %s", t.columns[col], arrowDown))
	}
	if t.sortFunc != nil {
		name := t.columns[t.sortCol]
		t.sortFunc(name, t.sortType)
	}
} | table.go | 0.624064 | 0.406921 | table.go | starcoder |
package disasm
// OpDoc documents a single FV-1 assembly opcode: a one-line summary, a longer
// description, and the formula it computes.
type OpDoc struct {
	Short    string // one-line category/summary
	Long     string // full prose description
	Formulae string // effect on ACC/registers, in datasheet notation
}
var OpDocs = map[string]OpDoc{
"SOF": {Short: "Scale and offset",
Long: "SOF will multiply the current value in ACC with C and will then " +
"add the constant D to the result.",
Formulae: "C * ACC + D",
},
"AND": {Short: "Bit operation",
Long: "AND will perform a bit wise \"and\" of the current ACC and the 24-bit " +
"MASK specified within the instruction word. ",
Formulae: "ACC & MASK",
},
"OR": {Short: "Bit operation",
Long: "OR will perform a bit wise \"or\" of the current ACC and the 24-bit " +
"MASK specified within the instruction word",
Formulae: "ACC | MASK",
},
"XOR": {Short: "Bit operation",
Long: "XOR will perform a bit wise \"xor\" of the current ACC and the 24-bit " +
"MASK specified within the instruction word.",
Formulae: "ACC ^ MASK",
},
"LOG": {Short: "Mathematical operation",
Long: "LOG will multiply the Base2 LOG of the current absolute value in " +
"ACC with C and add the constant D to the result.",
Formulae: "C * LOG(|ACC|) + D",
},
"EXP": {Short: "Mathematical operation",
Long: "EXP will multiply 2^ACC with C and add the constant D to the result",
Formulae: "C * EXP(ACC) + D",
},
"SKP": {Short: "Conditional skip",
Long: "The SKP instruction allows conditional program execution",
Formulae: "CMASK N",
},
"RDAX": {Short: "Read from register",
Long: "RDAX will fetch the value contained in [ADDR] from the register file, " +
"multiply it with C and add the result to the previous content of ACC",
Formulae: "C * REG[ADDR] + ACC",
},
"WRAX": {Short: "Write to register",
Long: "WRAX will save the current value in ACC to [ADDR] and then multiply ACC by C",
Formulae: "ACC->REG[ADDR], C * ACC",
},
"MAXX": {Short: "Get max value of Reg*C or ACC",
Long: "MAXX will compare the absolute value of ACC versus C times the absolute value " +
"of the register pointed to by ADDR. If the absolute value of ACC is larger ACC " +
"will be loaded with |ACC|, otherwise the accumulator becomes overwritten by " +
"|REG[ADDR] * C|.",
Formulae: "MAX(|REG[ADDR] * C|, |ACC|)",
},
"MULX": {Short: "Multiply ACC with register",
Long: "MULX will multiply ACC by the value of the register pointed to by ADDR.",
Formulae: "ACC * REG[ADDR]",
},
"RDFX": {Short: "Multi-op instruction",
Long: "RDFX will subtract the value of the register pointed to by ADDR from ACC, " +
"multiply the result by C and then add the value of the register pointed to by ADDR.",
Formulae: "(ACC-REG[ADDR])*C + REG[ADDR]",
},
"WRLX": {Short: "Multi-op instruction",
Long: "First the current ACC value is stored into the register pointed to by ADDR, then " +
"ACC is subtracted from the previous content of ACC (PACC). The difference is then " +
"multiplied by C and finally PACC is added to the result.",
Formulae: "ACC->REG[ADDR], (PACC-ACC)*C + PACC",
},
"WRHX": {Short: "Multi-op instruction",
Long: "The current ACC value is stored in the register pointed to by ADDR, " +
"then ACC is multiplied by C. Finally the previous content of ACC (PACC) is added to " +
"the product",
Formulae: "ACC->REG[ADDR], (ACC*C) + PACC",
},
"RDA": {Short: "Read from RAM",
Long: "RDA will fetch the sample [ADDR] from the delay ram, multiply it by C and add " +
"the result to the previous content of ACC.",
Formulae: "SRAM[ADDR] * C + ACC",
},
"RMPA": {Short: "Indirect RAM read",
Long: "RMPA provides indirect delay line addressing in that the delay line address of " +
"the sample to be multiplied by C is not explicitly given in the instruction itself " +
"but contained within the pointer register ADDR_PTR (absolute address 24 within the " +
"internal register file.) ",
Formulae: "SRAM[PNTR[N]] * C + ACC",
},
"WRA": {Short: "Write to RAM",
Long: "WRA will store ACC to the delay ram location addressed by ADDR and then " +
"multiply ACC by C.",
Formulae: "ACC->SRAM[ADDR], ACC * C",
},
"WRAP": {Short: "Write to RAM and update ACC",
Long: "WRAP will store ACC to the delay ram location addressed by ADDR then multiply ACC " +
"by C and finally add the content of the LR register to the product",
Formulae: "ACC->SRAM[ADDR], (ACC*C) + LR",
},
"WLDS": {Short: "Config sine LFO",
Long: "WLDS will load frequency and amplitude control values into the selected " +
"SIN LFO (0 or 1).",
Formulae: "see datasheet",
},
"WLDR": {Short: "Config ramp LFO",
Long: "WLDR will load frequency and amplitude control values into the selected " +
"RAMP LFO. (0 or 1)",
Formulae: "see datasheet",
},
"JAM": {Short: "Reset ramp LFO",
Long: "JAM will reset the selected RAMP LFO to its starting point",
Formulae: "0 -> RAMP LFO N",
},
"CHO RDA": {Short: "Chorus read from MEM",
Long: "Like the RDA instruction, CHO RDA will read a sample from the delay ram, " +
"multiply it by a coefficient and add the product to the previous content of ACC.",
Formulae: "See datasheet",
},
"CHO SOF": {Short: "Chorus scale and offset",
Long: "Like the SOF instruction, CHO SOF will multiply ACC by a coefficient and add " +
"the constant D to the result. ", /*However, in contrast to SOF the coefficient is not " +
"explicitly embedded within the instruction. Instead, based on the selected LFO and " +
"the 6 bit vector C, the coefficient is picked from a list of possible coefficients " +
"available within the LFO block of the FV-1"*/
Formulae: "See datasheet",
},
"CHO RDAL": {Short: "Write to LFO to ACC",
Long: "CHO RDAL will read the current value of the selected LFO into ACC.",
Formulae: "LFO*1 -> ACC",
},
"CLR": {Short: "Clear ACC",
Long: "Clear the ACC register",
Formulae: "0->ACC",
},
"NOT": {Short: "Bit operation",
Long: "NOT will negate all bit positions within accumulator thus performing a 1’s complement.",
Formulae: "/ACC -> ACC",
},
"ABSA": {Short: "Absolute value of ACC",
Long: "Loads the accumulator with the absolute value of the accumulator",
Formulae: "|ACC| -> ACC",
},
"LDAX": {Short: "Load register into ACC",
Long: "Loads the accumulator with the contents of the addressed register.",
Formulae: "REG[ADDR] -> ACC",
},
"NOP": {Short: "No-Operation",
Long: "Does nothing. Same as 'SKP 0, 0'",
Formulae: "No operation",
},
} | disasm/docs.go | 0.510008 | 0.609698 | docs.go | starcoder |
package lfuda
import (
"sync"
"github.com/bparli/lfuda-go/simplelfuda"
)
// Cache is a thread-safe fixed size lfuda cache.
type Cache struct {
lfuda simplelfuda.LFUDACache
lock sync.RWMutex
}
// New creates an lfuda of the given size.
func New(size float64) *Cache {
	return newWithEvict(size, "LFUDA", nil)
}
// NewGDSF creates a cache of the given size with the GDSF cache policy.
func NewGDSF(size float64) *Cache {
	return newWithEvict(size, "GDSF", nil)
}
// NewLFU creates a cache of the given size with the plain LFU policy.
func NewLFU(size float64) *Cache {
	return newWithEvict(size, "LFU", nil)
}
// NewWithEvict constructs a fixed size LFUDA cache with the given eviction
// callback.
func NewWithEvict(size float64, onEvicted func(key interface{}, value interface{})) *Cache {
	return newWithEvict(size, "LFUDA", onEvicted)
}
// NewGDSFWithEvict constructs a fixed GDSF size cache with the given eviction
// callback.
func NewGDSFWithEvict(size float64, onEvicted func(key interface{}, value interface{})) *Cache {
	return newWithEvict(size, "GDSF", onEvicted)
}
// NewLFUWithEvict constructs a fixed size LFU cache with the given eviction
// callback.
func NewLFUWithEvict(size float64, onEvicted func(key interface{}, value interface{})) *Cache {
	return newWithEvict(size, "LFU", onEvicted)
}
// newWithEvict builds a Cache backed by the policy named by policy
// ("GDSF", "LFU", or anything else for the default LFUDA), forwarding the
// eviction callback to the underlying implementation.
func newWithEvict(size float64, policy string, onEvicted func(key interface{}, value interface{})) *Cache {
	cb := simplelfuda.EvictCallback(onEvicted)
	var backend simplelfuda.LFUDACache
	switch policy {
	case "GDSF":
		backend = simplelfuda.NewGDSF(size, cb)
	case "LFU":
		backend = simplelfuda.NewLFU(size, cb)
	default:
		backend = simplelfuda.NewLFUDA(size, cb)
	}
	return &Cache{lfuda: backend}
}
// Purge is used to completely clear the cache.
func (c *Cache) Purge() {
	c.lock.Lock()
	c.lfuda.Purge()
	c.lock.Unlock()
}
// Set adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Set(key, value interface{}) (ok bool) {
	c.lock.Lock()
	ok = c.lfuda.Set(key, value)
	c.lock.Unlock()
	return ok
}
// Get looks up a key's value from the cache.
// Takes the write lock (not RLock) because the underlying Get presumably
// updates frequency/age bookkeeping — confirm against simplelfuda.
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	value, ok = c.lfuda.Get(key)
	c.lock.Unlock()
	return value, ok
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
	c.lock.RLock()
	containKey := c.lfuda.Contains(key)
	c.lock.RUnlock()
	return containKey
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	value, ok = c.lfuda.Peek(key)
	c.lock.RUnlock()
	return value, ok
}
// ContainsOrSet checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether the key/value was set or not.
// The check-then-set is atomic: one write lock covers both steps.
func (c *Cache) ContainsOrSet(key, value interface{}) (ok, set bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.lfuda.Contains(key) {
		return true, false
	}
	set = c.lfuda.Set(key, value)
	return false, set
}
// PeekOrSet checks if a key is in the cache without updating the
// hits or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether the key/value was set or not.
func (c *Cache) PeekOrSet(key, value interface{}) (previous interface{}, ok, set bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	previous, ok = c.lfuda.Peek(key)
	if ok {
		return previous, true, false
	}
	set = c.lfuda.Set(key, value)
	return nil, false, set
}
// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) (present bool) {
	c.lock.Lock()
	present = c.lfuda.Remove(key)
	c.lock.Unlock()
	return
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
	c.lock.RLock()
	keys := c.lfuda.Keys()
	c.lock.RUnlock()
	return keys
}
// Len returns the number of items in the cache.
func (c *Cache) Len() (length int) {
	c.lock.RLock()
	length = c.lfuda.Len()
	c.lock.RUnlock()
	return length
}
// Size returns the current size of the cache in bytes.
func (c *Cache) Size() (size float64) {
	c.lock.RLock()
	size = c.lfuda.Size()
	c.lock.RUnlock()
	return size
}
// Age returns the cache's current age.
func (c *Cache) Age() (age float64) {
	c.lock.RLock()
	age = c.lfuda.Age()
	c.lock.RUnlock()
	return age
} | lfuda.go | 0.76934 | 0.500793 | lfuda.go | starcoder |
package align
import (
"unicode"
"git.sr.ht/~flobar/lev"
)
// Pos represents the start and end position of an alignment.
type Pos struct {
	B, E int    // Start end end positions of the alignment slice.
	str  []rune // Reference string of the alignment.
}
// mkpos creates a new Pos instance. The end is first extended forward to the
// end of the current word (next whitespace), then leading and trailing
// whitespace is stripped from the range; an all-space range collapses to the
// empty position (E == B).
func mkpos(b, e int, str []rune) Pos {
	for e < len(str) && !unicode.IsSpace(str[e]) {
		e++
	}
	b, e = strip(b, e, str)
	if e < b {
		e = b
	}
	return Pos{B: b, E: e, str: str}
}
// Slice returns the slice of base for the position.
func (p Pos) Slice() []rune {
	return p.str[p.B:p.E]
}
// String returns the aligned substring as a string.
func (p Pos) String() string {
	return string(p.Slice())
}
// Do aligns the words in master pairwise with the words in other.
// It first tokenizes master at whitespace, recording the space positions,
// then aligns each other string against those positions. The result has one
// entry per master word; each entry holds the master Pos followed by one Pos
// per other string.
func Do(master []rune, other ...[]rune) [][]Pos {
	var spaces []int
	var words [][]Pos
	b := -1
	for i, j := strip(0, len(master), master); i < j; i++ {
		if unicode.IsSpace(master[i]) {
			spaces = append(spaces, i)
			words = append(words, []Pos{mkpos(b+1, i, master)})
			// Skip subsequent whitespace.
			for i+1 < len(master) && unicode.IsSpace(master[i+1]) {
				i++
			}
			b = i
		}
	}
	// Final word after the last space.
	words = append(words, []Pos{mkpos(b+1, len(master), master)})
	for i := 0; i < len(other); i++ {
		b, e := strip(0, len(other[i]), other[i])
		alignments := alignAt(spaces, other[i][b:e])
		for j := range words {
			words[j] = append(words[j], alignments[j])
		}
	}
	return words
}
// alignAt cuts str into len(spaces)+1 positions, one per master word, by
// finding for each master space position the nearest space in str
// (alignmentPos). Always returns exactly len(spaces)+1 entries.
func alignAt(spaces []int, str []rune) []Pos {
	// If str is empty, each alignment is the empty string. We
	// still need to return a slice with the right length.
	if len(str) == 0 {
		return make([]Pos, len(spaces)+1)
	}
	ret := make([]Pos, 0, len(spaces)+1)
	b := -1
	for _, s := range spaces {
		// log.Printf("space = %d", s)
		e := alignmentPos(str, s)
		// log.Printf("e = %d", e)
		// Var b points to the last found space.
		// Skip to the next non space token after b.
		b = skipSpace(str, b+1)
		// log.Printf("e <= b, %d <= %d", e, b)
		if e <= b { // (e <= b) -> (b>=0) -> len(ret) > 0
			// Cut collapsed onto the previous one: reuse its start.
			b = ret[len(ret)-1].B
		}
		ret = append(ret, mkpos(b, e, str))
		b = e
	}
	if len(str) <= b { // see above
		ret = append(ret, mkpos(ret[len(ret)-1].B, len(str), str))
	} else {
		ret = append(ret, mkpos(b+1, len(str), str))
	}
	return ret
}
// alignmentPos returns the index of the space in str closest to pos,
// scanning outward in both directions; returns len(str) when pos is past the
// end or no space is found within reach.
func alignmentPos(str []rune, pos int) int {
	// log.Printf("alignmentPos(%s, %d)", string(str), pos)
	if pos >= len(str) {
		return len(str)
	}
	if str[pos] == ' ' {
		return pos
	}
	for i := 1; ; i++ {
		// Both directions exhausted: no space anywhere.
		if pos+i >= len(str) && i >= pos {
			return len(str)
		}
		if pos+i < len(str) && str[pos+i] == ' ' {
			return pos + i
		}
		if i <= pos && str[pos-i] == ' ' {
			return pos - i
		}
	}
}
// skipSpace returns the index of the first non-whitespace rune in str at or
// after pos, or len(str) when only whitespace remains.
func skipSpace(str []rune, pos int) int {
	i := pos
	for i < len(str) && unicode.IsSpace(str[i]) {
		i++
	}
	return i
}
// strip narrows the half-open range [b, e) over str by advancing the start
// past leading whitespace and pulling the end back past trailing whitespace.
func strip(b, e int, str []rune) (int, int) {
	start, end := b, e
	for start < len(str) && unicode.IsSpace(str[start]) {
		start++
	}
	for end > start && unicode.IsSpace(str[end-1]) {
		end--
	}
	return start, end
}
// Lev aligns primary's whitespace-separated tokens with each string in rest
// using Levenshtein alignment (via the provided lev.Mat scratch matrix)
// rather than nearest-space heuristics. Returns one entry per primary token,
// each holding the primary Pos followed by one Pos per rest string.
func Lev(m *lev.Mat, primary []rune, rest ...[]rune) [][]Pos {
	primary = stripR(primary)
	var tokens [][]Pos
	b := -1
	for i, j := strip(0, len(primary), primary); i < j; i++ {
		if unicode.IsSpace(primary[i]) {
			tokens = append(tokens, []Pos{mkpos(b+1, i, primary)})
			// Skip subsequent whitespace.
			for i+1 < len(primary) && unicode.IsSpace(primary[i+1]) {
				i++
			}
			b = i
		}
	}
	tokens = append(tokens, []Pos{mkpos(b+1, len(primary), primary)})
	for _, r := range rest {
		r = stripR(r)
		as := alignPair(m, primary, r)
		// Pad with empty positions if the pairwise alignment produced fewer
		// cuts than primary has tokens.
		if len(as) < len(tokens) {
			as = append(as, make([]Pos, len(tokens)-len(as))...)
		}
		for i := range tokens {
			tokens[i] = append(tokens[i], as[i])
		}
	}
	return tokens
}
// alignPair computes the Levenshtein trace between p and s and cuts s at the
// positions that align with p's spaces. A sentinel space is appended to both
// inputs so the final token is terminated like the others.
func alignPair(m *lev.Mat, p, s []rune) []Pos {
	if len(p) == 0 {
		return []Pos{mkpos(0, len(s), s)}
	}
	p = append(p, ' ')
	s = append(s, ' ')
	m.DistanceR(p, s)
	trace := m.TraceR(p, s)
	var pos []Pos
	var pi, si /*pb,*/, sb int
	pa, sa := lev.AlignTraceR(p, s, trace)
	for i := 0; i < len(trace); {
		if unicode.IsSpace(pa[i]) && unicode.IsSpace(sa[i]) {
			// Spaces aligned on both sides: clean token boundary.
			pos = append(pos, mkpos(sb, si, s))
			skip(trace, pa, sa, &i, &pi, &si)
			sb = si
			continue
		}
		if unicode.IsSpace(pa[i]) {
			// Space only on the primary side; advance sb only when the
			// skipped region also crossed a space in s.
			pos = append(pos, mkpos(sb, si, s))
			if skip(trace, pa, sa, &i, &pi, &si) {
				sb = si
			}
			continue
		}
		next(trace, &i, &pi, &si)
	}
	return pos
}
// skip advances the cursors past a run of spaces in the primary alignment
// string pa, reporting whether the secondary side sa also had a space in
// that run.
func skip(trace []byte, pa, sa []rune, i, pi, si *int) bool {
	var ret bool
	for *i < len(trace) && unicode.IsSpace(pa[*i]) {
		if unicode.IsSpace(sa[*si]) {
			ret = true
		}
		next(trace, i, pi, si)
	}
	return ret
}
// next advances the trace cursor and the per-string cursors according to the
// trace operation: '#'/'|' consume from both strings, '+' only from s,
// '-' only from p.
func next(trace []byte, i, pi, si *int) {
	switch trace[*i] {
	case '#', '|':
		*pi++
		*si++
	case '+':
		*si++
	case '-':
		*pi++
	}
	*i++
}
func stripR(str []rune) []rune {
b, e := strip(0, len(str), str)
return str[b:e]
} | pkg/apoco/align/align.go | 0.507812 | 0.50708 | align.go | starcoder |
package suite
import (
"testing"
"reflect"
"fmt"
"github.com/kylelemons/godebug/pretty"
)
// AnyType is the type of the Any sentinel below.
type AnyType int
// RunTest executes a single Test; the int is the test's index in its Table.
type RunTest func(*testing.T, *Test, int)
const (
	// Any, when placed in a Test's Response, matches any actual value.
	Any AnyType = 0
)
// Test describes one table-driven case: its name, the function under test,
// the arguments to pass, and the expected results.
// NOTE(review): Caller is not read by Assert in this file — verify it is
// used elsewhere before relying on it.
type Test struct {
	Name     string
	Caller   interface{}
	Request  []interface{}
	Response []interface{}
}
// Table is a named group of Tests plus the runner applied to each of them.
type Table struct {
	Name  string
	Tests []*Test
	Run   RunTest
}
// Suite runs every test in the table as one named subtest group.
func Suite(t *testing.T, table *Table) {
	t.Run(table.Name, func(t *testing.T) {
		for i, test := range table.Tests {
			table.Run(t, test, i)
		}
	})
}
// Assert wraps function f in a RunTest that calls f (via reflection) with
// the test's Request values and compares the returned values against the
// test's Response. A panic inside f is recovered and reported as a failure
// rather than aborting the suite.
func Assert(f interface{}) RunTest {
	method := reflect.ValueOf(f)
	return func(t *testing.T, test *Test, count int) {
		defer func() {
			if err := recover(); err != nil {
				t.Error(formatFailure(test.Name, count, err))
			}
		}()
		response := method.Call(valmap(test.Request))
		assert(t, test.Name, count, response, test.Response)
	}
}
// Params wraps a variadic argument list into a fresh []interface{} slice,
// for use as a Test's Request or Response. A copy is returned so the caller
// cannot alias the variadic backing array.
func Params(data ...interface{}) []interface{} {
	result := make([]interface{}, len(data))
	copy(result, data)
	return result
}
// assert compares the reflected return values against the expected ones,
// reporting a formatted diff on mismatch. A size mismatch is reported
// separately from a value mismatch.
func assert(t *testing.T, name string, count int, actual []reflect.Value, expected []interface{}) {
	if len(actual) != len(expected) {
		left, right := showValues(actual, expected)
		t.Error(formatResult(left, right, name, count,
			fmt.Sprintf("Output size mismatch: Got %d, expected %d",
				len(actual), len(expected))))
	} else {
		left, right, match := compare(actual, expected)
		if !match {
			t.Error(formatResult(left, right, name, count, ""))
		}
	}
}
// valmap converts a slice of values into their reflect.Value
// representations, in order, for use with reflect.Value.Call.
// The result slice is pre-sized to avoid repeated growth.
func valmap(data []interface{}) []reflect.Value {
	result := make([]reflect.Value, 0, len(data))
	for _, d := range data {
		result = append(result, reflect.ValueOf(d))
	}
	return result
}
// compare checks each actual value against the expected one (Any matches
// anything) and renders both sides, wrapping mismatching entries in "__"
// markers. Returns the rendered sides and whether everything matched.
func compare(actual []reflect.Value, expected []interface{}) (string, string, bool) {
	left := ""
	right := ""
	match := true
	for i, val := range actual {
		rawVal := val.Interface()
		indicator := ""
		if expected[i] != Any && !reflect.DeepEqual(rawVal, expected[i]) {
			indicator = "__"
			match = false
		}
		left = format(left, rawVal, indicator)
		right = format(right, expected[i], indicator)
	}
	return left, right, match
}
// showValues renders both value lists without any mismatch markers; used
// when the lengths differ and pairwise comparison is impossible.
func showValues(actual []reflect.Value, expected []interface{}) (string, string) {
	left := ""
	right := ""
	for _, act := range actual {
		left = format(left, act.Interface(), "")
	}
	for _, exp := range expected {
		right = format(right, exp, "")
	}
	return left, right
}
// format appends one pretty-printed value (with its Go type) to the running
// output string, wrapped in the given mismatch indicator.
func format(previous string, val interface{}, indicator string) string {
	cmpt := &pretty.Config{Compact: true}
	return fmt.Sprintf("%s\n\t\t%s%s :: %T%s,", previous, indicator, cmpt.Sprint(val), val, indicator)
}
// formatResult renders a full failure message: header plus both value lists.
func formatResult(left string, right string, name string, count int, err string) string {
	failure := formatFailure(name, count, err)
	return fmt.Sprintf("%s%s\n!=%s", failure, left, right)
}
// formatFailure renders the "[index] name :: detail" failure header; err may
// be any recovered value and is omitted when it is the empty string.
func formatFailure(name string, count int, err interface{}) string {
	if err != "" {
		err = fmt.Sprintf(" :: %s", err)
	}
	return fmt.Sprintf("[%d] %s%s", count, name, err)
} | suite/suite.go | 0.630116 | 0.433262 | suite.go | starcoder |
package deep
import "math"
// Mode denotes inference mode
type Mode int
const (
// ModeDefault is unspecified mode
ModeDefault Mode = 0
// ModeMultiClass is for one-hot encoded classification, applies softmax output layer
ModeMultiClass Mode = 1
// ModeRegression is regression, applies linear output layer
ModeRegression Mode = 2
// ModeBinary is binary classification, applies sigmoid output layer
ModeBinary Mode = 3
// ModeMultiLabel is for multilabel classification, applies sigmoid output layer
ModeMultiLabel Mode = 4
)
// OutputActivation returns activation corresponding to prediction mode.
func OutputActivation(c Mode) ActivationType {
	switch c {
	case ModeMultiClass:
		return ActivationSoftmax
	case ModeRegression:
		return ActivationLinear
	case ModeBinary, ModeMultiLabel:
		return ActivationSigmoid
	}
	return ActivationNone
}
// GetActivation returns the concrete activation given an ActivationType.
// ActivationSoftmax maps to Linear here because softmax is applied per
// layer (see the ActivationType comments), not per neuron; unknown types
// fall back to Linear.
func GetActivation(act ActivationType) Differentiable {
	switch act {
	case ActivationSigmoid:
		return Sigmoid{}
	case ActivationTanh:
		return Tanh{}
	case ActivationReLU:
		return ReLU{}
	case ActivationLinear:
		return Linear{}
	case ActivationSoftmax:
		return Linear{}
	}
	return Linear{}
}
// ActivationType is represents a neuron activation function
type ActivationType int
const (
// ActivationNone is no activation
ActivationNone ActivationType = 0
// ActivationSigmoid is a sigmoid activation
ActivationSigmoid ActivationType = 1
// ActivationTanh is hyperbolic activation
ActivationTanh ActivationType = 2
// ActivationReLU is rectified linear unit activation
ActivationReLU ActivationType = 3
// ActivationLinear is linear activation
ActivationLinear ActivationType = 4
// ActivationSoftmax is a softmax activation (per layer)
ActivationSoftmax ActivationType = 5
)
// Differentiable is an activation function and its first order derivative,
// where the latter is expressed as a function of the former for efficiency
type Differentiable interface {
F(float64) float64
Df(float64) float64
}
// Sigmoid is a logistic activator in the special case of a = 1.
type Sigmoid struct{}

// F is Sigmoid(x).
func (s Sigmoid) F(x float64) float64 { return Logistic(x, 1) }

// Df is Sigmoid'(y), where y = Sigmoid(x).
func (s Sigmoid) Df(y float64) float64 { return y * (1 - y) }

// Logistic is the generalized logistic function with steepness a.
func Logistic(x, a float64) float64 {
	return 1 / (1 + math.Exp(-a*x))
}
// Tanh is a hyperbolic activator
type Tanh struct{}
// F is Tanh(x)
func (a Tanh) F(x float64) float64 { return (1 - math.Exp(-2*x)) / (1 + math.Exp(-2*x)) }
// Df is Tanh'(y), where y = Tanh(x)
func (a Tanh) Df(y float64) float64 { return 1 - math.Pow(y, 2) }
// ReLU is a rectified linear unit activator.
type ReLU struct{}

// F is ReLU(x) = max(x, 0).
func (r ReLU) F(x float64) float64 { return math.Max(x, 0) }

// Df is ReLU'(y), where y = ReLU(x): 1 for positive y, 0 otherwise.
func (r ReLU) Df(y float64) float64 {
	if y > 0 {
		return 1
	}
	return 0
}
// Linear is a linear activator.
type Linear struct{}

// F is the identity function.
func (a Linear) F(x float64) float64 { return x }

// Df is constant (the derivative of the identity is 1 everywhere).
func (a Linear) Df(x float64) float64 { return 1 } | plugins/data/learn/ml-libs-godeep/activation.go | 0.882047 | 0.652823 | activation.go | starcoder |
package golfcart
import (
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
"github.com/alecthomas/participle/v2/lexer/stateful"
)
// ExpressionList is the grammar root: a program is a sequence of expressions.
// The backtick tags are participle grammar annotations; changing them changes
// the language, so they must be edited with care.
type ExpressionList struct {
	Pos lexer.Position
	Expressions []*Expression `@@*`
}
// Expression wraps the top of the precedence chain. NativeFunctionValue has
// no capture tag, so the parser never fills it; it is populated outside of
// parsing (built-in functions).
type Expression struct {
	Pos lexer.Position
	Assignment *Assignment `@@`
	NativeFunctionValue *NativeFunctionValue
}
// Assignment parses: LogicAnd ( "=" LogicAnd )?
type Assignment struct {
	Pos lexer.Position
	LogicAnd *LogicAnd `@@`
	Op string `( @"="`
	Next *LogicAnd ` @@ )?`
}
// LogicAnd parses: LogicOr ( "and" LogicAnd )? — a right-recursive chain.
type LogicAnd struct {
	Pos lexer.Position
	LogicOr *LogicOr `@@`
	Op string `( @( "and" )`
	Next *LogicAnd ` @@ )?`
}
// LogicOr parses: Equality ( "or" LogicOr )?
type LogicOr struct {
	Pos lexer.Position
	Equality *Equality `@@`
	Op string `( @( "or" )`
	Next *LogicOr ` @@ )?`
}
// Equality parses: Comparison ( ("!=" | "==") Equality )?
type Equality struct {
	Pos lexer.Position
	Comparison *Comparison `@@`
	Op string `( @( "!" "=" | "=" "=" )`
	Next *Equality ` @@ )?`
}
// Comparison parses: Addition ( (">=" | ">" | "<=" | "<") Comparison )?
type Comparison struct {
	Pos lexer.Position
	Addition *Addition `@@`
	Op string `( @( ">" "=" | ">" | "<" "=" | "<" )`
	Next *Comparison ` @@ )?`
}
// Addition parses: Multiplication ( ("-" | "+") Addition )?
type Addition struct {
	Pos lexer.Position
	Multiplication *Multiplication `@@`
	Op string `( @( "-" | "+" )`
	Next *Addition ` @@ )?`
}
// Multiplication parses: Unary ( ("/" | "*" | "%") Multiplication )?
type Multiplication struct {
	Pos lexer.Position
	Unary *Unary `@@`
	Op string `( @( "/" | "*" | "%")`
	Next *Multiplication ` @@ )?`
}
// Unary parses a prefix "!" or "-" applied to another Unary, or falls
// through to a Primary.
type Unary struct {
	Pos lexer.Position
	Op string `( @( "!" | "-" )`
	Unary *Unary ` @@ )`
	Primary *Primary `| @@`
}
// Primary is the atom level. Alternatives are tried in order; the first
// one that matches wins, so ordering here is significant.
type Primary struct {
	Pos lexer.Position
	If *If `@@`
	DataLiteral *DataLiteral `| @@`
	SubExpression *Expression `| "(" @@ ")"`
	Call *Call `| @@`
	// TODO: `for {}` is a parser error
	ForKeyValue *ForKeyValue `| @@`
	ForValue *ForValue `| @@`
	For *For `| @@`
	ForWhile *ForWhile `| @@`
	Return *Return `| @@`
	Break *Break `| @@`
	Continue *Continue `| @@`
	Number *float64 `| @Float | @Int`
	Str *string `| @String`
	True *bool `| @"true"`
	False *bool `| @"false"`
	Nil *bool `| @"nil"`
	Ident *string `| @Ident`
}
// DataLiteral is a function, list, or dict literal (tried in that order).
type DataLiteral struct {
	FunctionLiteral *FunctionLiteral `@@`
	ListLiteral *ListLiteral `| @@`
	DictLiteral *DictLiteral `| @@`
}
// If parses `if cond { ... }` with an optional chain of `else if` arms and
// an optional trailing `else { ... }` block.
type If struct {
	Pos lexer.Position
	Condition *Expression `"if" @@`
	IfBody []*Expression `"{" @@* "}"`
	ElseIf *ElseIf `@@*`
	ElseBody []*Expression `( "else" "{" @@* "}" )?`
}
// ElseIf is one `else if cond { ... }` link; Next chains further arms.
type ElseIf struct {
	Condition *Expression `"else" "if" @@`
	IfBody []*Expression `"{" @@* "}"`
	Next *ElseIf `@@*`
}
// FunctionLiteral parses `(a, b) => ...` or the single-parameter shorthand
// `a => ...`; the body is either a braced block or a single expression.
type FunctionLiteral struct {
	Pos lexer.Position
	Parameters []string `( "(" ( @Ident ( "," @Ident )* )? ")" | @Ident )`
	Body []*Expression `"=" ">" ( "{" @@* "}" | @@ )`
}
// ListLiteral parses `[ e, e, ... ]`.
type ListLiteral struct {
	Pos lexer.Position
	Expressions *[]Expression `"[" ( @@ ( "," @@ )* )? "]"`
}
// DictLiteral parses `{ entry, entry, }` with an optional trailing comma.
type DictLiteral struct {
	Pos lexer.Position
	DictEntry *[]DictEntry `"{" ( @@ ("," @@)* ","? )? "}"`
}
// DictEntry is a key (bare identifier or expression) followed by ":" and a
// value expression.
type DictEntry struct {
	Pos lexer.Position
	Ident *string `( @Ident`
	Key *Expression `| @@ ) ":" `
	Value *Expression `@@`
}
// Call parses an identifier or a parenthesized expression followed by a
// chain of calls, field accesses, and index accesses.
type Call struct {
	Pos lexer.Position
	Ident *string `( @Ident`
	SubExpression *Expression `| "(" @@ ")" )`
	CallChain *CallChain `@@`
}
// CallChain is one chain link: an argument list, a ".field" access, or a
// "[expr]" computed access, optionally followed by another link.
type CallChain struct {
	Parameters *[]Expression `( "(" ( @@ ( "," @@ )* )? ")" `
	Access *string ` | "." @Ident`
	ComputedAccess *Expression ` | "[" @@ "]" )`
	Next *CallChain `@@?`
}
// Break is the `break` keyword.
type Break struct {
	Pos lexer.Position
	Break *string `"break"`
}
// Continue is the `continue` keyword.
type Continue struct {
	Pos lexer.Position
	Continue *string `"continue"`
}
// Return parses `return expr` (the expression is required by this grammar).
type Return struct {
	Pos lexer.Position
	Return *string `( "return" `
	Expression *Expression `@@ )`
}
// For is the three-part loop: `for init; cond; post { ... }`.
type For struct {
	Pos lexer.Position
	Init []*Assignment `"for" ( @@ ";"`
	Condition *Expression `@@ ";"`
	Post *Expression `@@`
	Body []*Expression `"{" @@* "}" )`
}
// ForValue parses `for v in collection { ... }`.
type ForValue struct {
	Pos lexer.Position
	Value *string `( "for" @Ident "in"`
	Collection *string `( @Ident`
	CollectionExpression *Expression `| @@) `
	Body []*Expression `"{" @@* "}" )`
}
// ForKeyValue parses `for k, v in collection { ... }`.
type ForKeyValue struct {
	Pos lexer.Position
	Key *string `( "for" @Ident ","`
	Value *string `@Ident "in"`
	Collection *string `( @Ident`
	CollectionExpression *Expression `| @@) `
	Body []*Expression `"{" @@* "}" )`
}
// ForWhile is the condition-only loop `for cond? { ... }`. Post has an
// empty tag and is never captured by the parser.
type ForWhile struct {
	Pos lexer.Position
	Init []*Assignment `"for" (`
	Condition *Expression `@@?`
	Post *Expression ``
	Body []*Expression `"{" @@* "}" )`
}
var (
	// _lexer tokenizes source text. Rule order matters: earlier patterns
	// win on a tie, and comment/whitespace tokens are elided below.
	// NOTE(review): the Float pattern also matches bare integers, so the
	// Int rule is likely shadowed — confirm whether that is intended.
	_lexer = lexer.Must(stateful.New(stateful.Rules{
		"Root": {
			{"comment", `//.*|/\*.*?\*/`, nil},
			{"whitespace", `[\n\r\t ]+`, nil},
			{"Float", `[+-]?([0-9]*[.])?[0-9]+`, nil},
			{"Int", `[\d]+`, nil},
			{"String", `"([^"]*)"`, nil},
			{"Ident", `[\w]+`, nil},
			{"Punct", `[-[!*%()+_={}\|:;"<,>./]|]`, nil},
		},
	}))
	// parser is built once from the grammar structs above; whitespace and
	// comments are elided so the struct tags never have to mention them.
	parser = participle.MustBuild(&ExpressionList{}, participle.Lexer(_lexer),
		participle.Elide("whitespace", "comment"), participle.UseLookahead(2))
)
// GetGrammer returns the grammar accepted by the parser in string form.
// NOTE(review): the exported name is misspelled ("Grammer") but is kept
// as-is because renaming an exported function would break callers.
func GetGrammer() string {
	return parser.String()
}
// GenerateAST parses source into an ExpressionList AST root, returning
// the participle parse error on failure.
func GenerateAST(source string) (*ExpressionList, error) {
	expressionList := &ExpressionList{}
	err := parser.ParseString("", source, expressionList)
	if err != nil {
		return nil, err
	}
	return expressionList, nil
} | pkg/golfcart/parse.go | 0.562417 | 0.450964 | parse.go | starcoder
package convexHull
import "sort"
// Point holds the X and Y coordinates of a point in the Euclidean plane.
type Point struct {
	X, Y int
}

// Points is a slice of Point values; it implements sort.Interface so hulls
// can be built over lexicographically ordered input.
type Points []Point

// Swap exchanges the points at positions i and j.
func (ps Points) Swap(i, j int) {
	ps[i], ps[j] = ps[j], ps[i]
}

// Len reports the number of points.
func (ps Points) Len() int {
	return len(ps)
}

// Less orders points by X first and breaks ties by Y.
func (ps Points) Less(i, j int) bool {
	a, b := ps[i], ps[j]
	if a.X != b.X {
		return a.X < b.X
	}
	return a.Y < b.Y
}

// crossProduct returns the z-component (magnitude and sign) of the cross
// product of vectors OA and OB: positive for a counterclockwise turn,
// negative for clockwise, zero when collinear.
func crossProduct(o, a, b Point) int {
	ax, ay := a.X-o.X, a.Y-o.Y
	bx, by := b.X-o.X, b.Y-o.Y
	return ax*by - ay*bx
}
// findConvexHull returns a slice of Point with a convex hull
// it is counterclockwise and starts and ends at the same point
// i.e. the same point is repeated at the beginning and at the end.
// This is Andrew's monotone chain: sort by x (then y), build the lower
// hull left-to-right, then the upper hull right-to-left.
// NOTE: sort.Sort mutates the caller's points slice in place.
func findConvexHull(points Points) Points {
	n := len(points) // number of points to find convex hull
	var result Points // final result
	count := 0 // size of our convex hull (number of points added)
	// lets sort our points by x and if equal by y
	sort.Sort(points)
	if n == 0 {
		return result
	}
	// add the first element:
	result = append(result, points[0])
	count++
	// find the lower hull
	for i := 1; i < n; i++ {
		// remove points which are not part of the lower hull
		// (pop while the last two kept points and points[i] make a
		// clockwise turn; strict < keeps collinear points)
		for count > 1 && crossProduct(result[count-2], result[count-1], points[i]) < 0 {
			count--
			result = result[:count]
		}
		// add a new better point than the removed ones
		result = append(result, points[i])
		count++
	}
	count0 := count // our base counter for the upper hull
	// find the upper hull
	for i := n - 2; i >= 0; i-- {
		// remove points which are not part of the upper hull
		// (never pop past the lower hull: count-count0 guards the base)
		for count-count0 > 0 && crossProduct(result[count-2], result[count-1], points[i]) < 0 {
			count--
			result = result[:count]
		}
		// add a new better point than the removed ones
		result = append(result, points[i])
		count++
	}
	return result
} | convexHull/convexHull.go | 0.833019 | 0.551211 | convexHull.go | starcoder
package compact_time
import (
"fmt"
"strings"
gotime "time"
)
// TimeType discriminates what kind of value a Time holds: a bare date, a
// bare time of day, or a full timestamp.
type TimeType uint8
const (
	TimeTypeDate = TimeType(iota)
	TimeTypeTime
	TimeTypeTimestamp
)
// TimezoneType discriminates how a Timezone is represented. The zero value
// (TimezoneTypeUnset) doubles as the "zero time value" marker — see
// Time.IsZeroValue.
type TimezoneType uint8
const (
	TimezoneTypeUnset = TimezoneType(iota)
	TimezoneTypeUTC
	TimezoneTypeLocal
	TimezoneTypeAreaLocation
	TimezoneTypeLatitudeLongitude
	TimezoneTypeUTCOffset
)
// Timezone describes a time zone in one of several representations; only
// the fields matching Type are meaningful. LatitudeHundredths and
// LongitudeHundredths are in hundredths of a degree (9000 == 90.00°, see
// the latitudeMin/Max bounds and String).
type Timezone struct {
	ShortAreaLocation string
	LongAreaLocation string
	LatitudeHundredths int16
	LongitudeHundredths int16
	MinutesOffsetFromUTC int16
	Type TimezoneType
}
// Canonical values handed out by TZAtUTC and TZLocal (and by the Init
// functions when an alias collapses to UTC/Local).
var (
	timezoneUTC = Timezone{
		Type: TimezoneTypeUTC,
		ShortAreaLocation: "Z",
		LongAreaLocation: "Etc/UTC",
	}
	timezoneLocal = Timezone{
		Type: TimezoneTypeLocal,
		ShortAreaLocation: "L",
		LongAreaLocation: "Local",
	}
)
// TZAtUTC returns the canonical UTC time zone.
func TZAtUTC() Timezone {
	return timezoneUTC
}
// TZLocal returns the canonical "local" time zone marker.
func TZLocal() Timezone {
	return timezoneLocal
}
// TZAtAreaLocation returns a Timezone built from an IANA-style
// area/location string (e.g. "America/New_York"); UTC and Local aliases
// are normalized — see InitWithAreaLocation.
func TZAtAreaLocation(areaLocation string) Timezone {
	var this Timezone
	this.InitWithAreaLocation(areaLocation)
	return this
}
// TZAtLatLong returns a latitude/longitude Timezone. Both arguments are
// in hundredths of a degree.
func TZAtLatLong(latitudeHundredths, longitudeHundredths int) Timezone {
	var this Timezone
	this.InitWithLatLong(latitudeHundredths, longitudeHundredths)
	return this
}
// TZWithMiutesOffsetFromUTC returns a Timezone at a fixed minute offset
// from UTC. NOTE(review): "Miutes" is a typo in the exported name; kept
// as-is because renaming would break callers.
func TZWithMiutesOffsetFromUTC(minutesOffsetFromUTC int) Timezone {
	var this Timezone
	this.InitWithMinutesOffsetFromUTC(minutesOffsetFromUTC)
	return this
}
// InitWithAreaLocation initializes from an area/location string. Known
// UTC/Local aliases collapse to the canonical values; "preserve" aliases
// normalize to UTC but keep the original long name; anything else is
// split into short/long forms via splitAreaLocation.
func (this *Timezone) InitWithAreaLocation(areaLocation string) {
	switch areaLocationToTimezoneType[areaLocation] {
	case internalTZUTC:
		*this = timezoneUTC
	case internalTZLocal:
		*this = timezoneLocal
	case internalTZUTCPreserve:
		this.Type = TimezoneTypeUTC
		this.ShortAreaLocation = "Z"
		this.LongAreaLocation = areaLocation
	default:
		this.Type = TimezoneTypeAreaLocation
		this.ShortAreaLocation, this.LongAreaLocation = splitAreaLocation(areaLocation)
	}
}
// InitWithLatLong initializes as a latitude/longitude zone; arguments are
// truncated to int16 (validation happens separately in Validate).
func (this *Timezone) InitWithLatLong(latitudeHundredths, longitudeHundredths int) {
	this.LatitudeHundredths = int16(latitudeHundredths)
	this.LongitudeHundredths = int16(longitudeHundredths)
	this.Type = TimezoneTypeLatitudeLongitude
}
// InitWithMinutesOffsetFromUTC initializes at a fixed UTC offset; an
// offset of 0 collapses to the canonical UTC value.
func (this *Timezone) InitWithMinutesOffsetFromUTC(minutesOffsetFromUTC int) {
	minutes := int16(minutesOffsetFromUTC)
	if minutes == 0 {
		*this = timezoneUTC
	} else {
		this.MinutesOffsetFromUTC = minutes
		this.Type = TimezoneTypeUTCOffset
	}
}
// Validate checks the representation-specific invariants and returns a
// descriptive error on the first violation. UTC/Local/Unset need no checks.
func (this *Timezone) Validate() error {
	switch this.Type {
	case TimezoneTypeAreaLocation:
		length := len(this.LongAreaLocation)
		if length == 0 {
			return fmt.Errorf("Time zone is specified as area/location, but the AreaLocation field is empty")
		}
		if length > 127 {
			return fmt.Errorf("Area/location time zones cannot be over 127 bytes long")
		}
	case TimezoneTypeLatitudeLongitude:
		if this.LongitudeHundredths < longitudeMin || this.LongitudeHundredths > longitudeMax {
			return fmt.Errorf("%v: Invalid longitude (must be %v to %v)", this.LongitudeHundredths, longitudeMin, longitudeMax)
		}
		if this.LatitudeHundredths < latitudeMin || this.LatitudeHundredths > latitudeMax {
			return fmt.Errorf("%v: Invalid latitude (must be %v to %v)", this.LatitudeHundredths, latitudeMin, latitudeMax)
		}
	case TimezoneTypeUTCOffset:
		if this.MinutesOffsetFromUTC < minutesFromUTCMin || this.MinutesOffsetFromUTC > minutesFromUTCMax {
			return fmt.Errorf("%v: Invalid UTC offset", this.MinutesOffsetFromUTC)
		}
	}
	return nil
}
// IsEquivalentTo compares only the fields relevant to each representation;
// two Timezones of different Type are never equivalent.
func (this *Timezone) IsEquivalentTo(that *Timezone) bool {
	if this.Type != that.Type {
		return false
	}
	switch this.Type {
	case TimezoneTypeAreaLocation:
		return this.ShortAreaLocation == that.ShortAreaLocation && this.LongAreaLocation == that.LongAreaLocation
	case TimezoneTypeLatitudeLongitude:
		return this.LatitudeHundredths == that.LatitudeHundredths && this.LongitudeHundredths == that.LongitudeHundredths
	case TimezoneTypeUTCOffset:
		return this.MinutesOffsetFromUTC == that.MinutesOffsetFromUTC
	}
	// UTC, Local, and Unset carry no extra data: same Type means equivalent.
	return true
}
// String renders the zone suffix used by Time's textual form: empty for
// UTC, "/<area/location>" for named and local zones, "/<lat>/<long>" in
// degrees for coordinates, and "+hhmm"/"-hhmm" for fixed offsets.
func (this *Timezone) String() string {
	switch this.Type {
	case TimezoneTypeUTC:
		return ""
	case TimezoneTypeAreaLocation, TimezoneTypeLocal:
		return fmt.Sprintf("/%s", this.LongAreaLocation)
	case TimezoneTypeLatitudeLongitude:
		// Stored in hundredths of a degree; divide back out for display.
		return fmt.Sprintf("/%.2f/%.2f", float64(this.LatitudeHundredths)/100, float64(this.LongitudeHundredths)/100)
	case TimezoneTypeUTCOffset:
		sign := '+'
		minute := int(this.MinutesOffsetFromUTC)
		if minute < 0 {
			sign = '-'
			minute = -minute
		}
		hour := minute / 60
		minute %= 60
		return fmt.Sprintf("%c%02d%02d", sign, hour, minute)
	default:
		return fmt.Sprintf("Error: %v: Unknown time zone type", this.Type)
	}
}
// Time is a date, time-of-day, or timestamp (selected by Type) with an
// attached Timezone. Fields are compact fixed-width integers; only the
// fields relevant to Type are meaningful.
type Time struct {
	Timezone Timezone
	Year int
	Nanosecond uint32
	Second uint8
	Minute uint8
	Hour uint8
	Day uint8
	Month uint8
	Type TimeType
}
// Create a "zero" date, which will encode to all zeroes.
func ZeroDate() Time {
	return Time{Type: TimeTypeDate}
}
// Create a "zero" time, which will encode to all zeroes.
func ZeroTime() Time {
	return Time{Type: TimeTypeTime}
}
// Create a "zero" timestamp, which will encode to all zeroes.
func ZeroTimestamp() Time {
	return Time{Type: TimeTypeTimestamp}
}
// NewDate builds a date-only Time; see InitDate.
func NewDate(year, month, day int) Time {
	var this Time
	this.InitDate(year, month, day)
	return this
}
// InitDate initializes this Time as a date. The zone is set to Local
// (dates carry no explicit zone of their own).
func (this *Time) InitDate(year, month, day int) {
	this.Type = TimeTypeDate
	this.Year = year
	this.Month = uint8(month)
	this.Day = uint8(day)
	this.Timezone.Type = TimezoneTypeLocal
}
// NewTime builds a time-of-day Time; see InitTime.
func NewTime(hour, minute, second, nanosecond int, timezone Timezone) Time {
	var this Time
	this.InitTime(hour, minute, second, nanosecond, timezone)
	return this
}
// InitTime initializes this Time as a time of day in the given zone.
// Arguments are truncated to their compact widths; use Validate to check
// ranges.
func (this *Time) InitTime(hour, minute, second, nanosecond int, timezone Timezone) {
	this.Type = TimeTypeTime
	this.Hour = uint8(hour)
	this.Minute = uint8(minute)
	this.Second = uint8(second)
	this.Nanosecond = uint32(nanosecond)
	this.Timezone = timezone
}
// NewTimestamp builds a full timestamp; see InitTimestamp.
func NewTimestamp(year, month, day, hour, minute, second, nanosecond int, timezone Timezone) Time {
	var this Time
	this.InitTimestamp(year, month, day, hour, minute, second, nanosecond, timezone)
	return this
}
// InitTimestamp initializes this Time as a full date+time in the given
// zone. Arguments are truncated to their compact widths; use Validate to
// check ranges.
func (this *Time) InitTimestamp(year, month, day, hour, minute, second, nanosecond int, tz Timezone) {
	this.Year = year
	this.Month = uint8(month)
	this.Day = uint8(day)
	this.Hour = uint8(hour)
	this.Minute = uint8(minute)
	this.Second = uint8(second)
	this.Nanosecond = uint32(nanosecond)
	this.Timezone = tz
	this.Type = TimeTypeTimestamp
}
// IsZeroValue reports whether this Time was never initialized: every
// constructor sets a zone type, so Unset marks the zero value.
func (this *Time) IsZeroValue() bool {
	return this.Timezone.Type == TimezoneTypeUnset
}
// Check if two times are equivalent. This handles cases where the time zones
// are technically equivalent (Z == UTC == Etc/UTC == Etc/GMT, etc): both
// being UTC-typed, only the field values are compared; otherwise equality
// is the strict struct comparison.
func (this *Time) IsEquivalentTo(that Time) bool {
	if this.Timezone.Type == TimezoneTypeUTC && that.Timezone.Type == TimezoneTypeUTC {
		return this.Year == that.Year &&
			this.Month == that.Month &&
			this.Day == that.Day &&
			this.Hour == that.Hour &&
			this.Minute == that.Minute &&
			this.Second == that.Second &&
			this.Nanosecond == that.Nanosecond
	}
	return *this == that
}
// Convert a golang time value to compact time. The result is always a
// timestamp with an area/location zone.
// NOTE(review): time.Local's String() already reports "Local", so the
// explicit comparison below looks redundant — confirm before removing.
func AsCompactTime(src gotime.Time) Time {
	locationStr := src.Location().String()
	if src.Location() == gotime.Local {
		locationStr = "Local"
	}
	return NewTimestamp(src.Year(), int(src.Month()), src.Day(), src.Hour(),
		src.Minute(), src.Second(), src.Nanosecond(), TZAtAreaLocation(locationStr))
}
// Convert compact time into golang time.
// Note: Go time doesn't support latitude/longitude time zones. Attempting to
// convert this type of time zone will result in an error.
// Note: Converting to go time will validate area/location time zone (if any)
// because gotime.LoadLocation must resolve the name.
func (this *Time) AsGoTime() (result gotime.Time, err error) {
	location := gotime.UTC
	switch this.Timezone.Type {
	case TimezoneTypeUTC:
		location = gotime.UTC
	case TimezoneTypeLocal:
		location = gotime.Local
	case TimezoneTypeLatitudeLongitude:
		err = fmt.Errorf("Latitude/Longitude time zones are not supported by time.Time")
		return
	case TimezoneTypeAreaLocation:
		location, err = gotime.LoadLocation(this.Timezone.LongAreaLocation)
		if err != nil {
			return
		}
	case TimezoneTypeUTCOffset:
		// FixedZone takes seconds; the stored offset is in minutes.
		location = gotime.FixedZone("", int(this.Timezone.MinutesOffsetFromUTC)*60)
	default:
		err = fmt.Errorf("%v: Unknown time zone type", this.Timezone.Type)
		return
	}
	result = gotime.Date(this.Year,
		gotime.Month(this.Month),
		int(this.Day),
		int(this.Hour),
		int(this.Minute),
		int(this.Second),
		int(this.Nanosecond),
		location)
	return
}
// String renders the value via pString; defined on the value receiver so
// both Time and *Time satisfy fmt.Stringer.
func (this Time) String() string {
	// Workaround for go's broken Stringer type handling
	return this.pString()
}
// pString dispatches on Type; an uninitialized value prints a placeholder
// rather than garbage.
func (this *Time) pString() string {
	if this.IsZeroValue() {
		return "<zero time value>"
	}
	switch this.Type {
	case TimeTypeDate:
		return this.formatDate()
	case TimeTypeTime:
		return this.formatTime()
	case TimeTypeTimestamp:
		return this.formatTimestamp()
	default:
		return fmt.Sprintf("Error: %v: Unknown time type", this.Type)
	}
}
// Validate checks the fields relevant to this Time's Type and returns a
// descriptive error on the first violation.
// NOTE: dayMax always allows Feb 29 (leap years are not checked), and the
// time zone is only validated when a time component is present — a pure
// date returns nil without checking its Timezone.
func (this *Time) Validate() error {
	if this.Type == TimeTypeDate || this.Type == TimeTypeTimestamp {
		if this.Year == 0 {
			return fmt.Errorf("Year cannot be 0")
		}
		if this.Month < monthMin || this.Month > monthMax {
			return fmt.Errorf("%v: Invalid month (must be %v to %v)", this.Month, monthMin, monthMax)
		}
		if this.Day < dayMin || this.Day > dayMax[this.Month] {
			return fmt.Errorf("%v: Invalid day (must be %v to %v)", this.Day, dayMin, dayMax[this.Month])
		}
	}
	if this.Type == TimeTypeTime || this.Type == TimeTypeTimestamp {
		if this.Hour < hourMin || this.Hour > hourMax {
			return fmt.Errorf("%v: Invalid hour (must be %v to %v)", this.Hour, hourMin, hourMax)
		}
		if this.Minute < minuteMin || this.Minute > minuteMax {
			return fmt.Errorf("%v: Invalid minute (must be %v to %v)", this.Minute, minuteMin, minuteMax)
		}
		if this.Second < secondMin || this.Second > secondMax {
			return fmt.Errorf("%v: Invalid second (must be %v to %v)", this.Second, secondMin, secondMax)
		}
		if this.Nanosecond < nanosecondMin || this.Nanosecond > nanosecondMax {
			return fmt.Errorf("%v: Invalid nanosecond (must be %v to %v)", this.Nanosecond, nanosecondMin, nanosecondMax)
		}
		return this.Timezone.Validate()
	}
	return nil
}
// =============================================================================
// splitAreaLocation derives both the abbreviated ("M/New_York") and full
// ("America/New_York") forms of an area/location string, whichever form
// was passed in. A single-letter area is treated as already abbreviated;
// unknown areas pass through unchanged in both outputs.
func splitAreaLocation(areaLocation string) (shortAreaLocation, longAreaLocation string) {
	longAreaLocation = areaLocation
	tzPair := strings.SplitN(areaLocation, "/", 2)
	if len(tzPair) > 1 {
		area := tzPair[0]
		location := tzPair[1]
		if len(area) == 1 {
			shortAreaLocation = areaLocation
			if longArea := shortAreaToArea[area]; longArea != "" {
				longAreaLocation = longArea + "/" + location
			} else {
				longAreaLocation = areaLocation
			}
		} else {
			if shortArea := areaToShortArea[area]; shortArea != "" {
				shortAreaLocation = shortArea + "/" + location
			} else {
				shortAreaLocation = areaLocation
			}
		}
	} else {
		// No "/" at all: use the string as-is for both forms.
		shortAreaLocation = areaLocation
	}
	return
}
// formatDate renders the date portion as "Y-MM-DD" (year unpadded, may be
// negative).
func (this *Time) formatDate() string {
	return fmt.Sprintf("%d-%02d-%02d", this.Year, this.Month, this.Day)
}

// formatTime renders "HH:MM:SS", an optional ".<fraction>" with trailing
// zeros removed, and the time zone suffix from Timezone.String.
func (this *Time) formatTime() string {
	var builder strings.Builder
	fmt.Fprintf(&builder, "%02d:%02d:%02d", this.Hour, this.Minute, this.Second)
	if this.Nanosecond != 0 {
		// Print all nine fractional digits, then trim trailing zeros;
		// Nanosecond != 0 guarantees at least one digit survives.
		// (strings.TrimRight replaces the previous manual byte-pop loop.)
		frac := strings.TrimRight(fmt.Sprintf("%09d", this.Nanosecond), "0")
		builder.WriteByte('.')
		builder.WriteString(frac)
	}
	builder.WriteString(this.Timezone.String())
	return builder.String()
}

// formatTimestamp joins the date and time portions with '/'.
func (this *Time) formatTimestamp() string {
	return this.formatDate() + "/" + this.formatTime()
}
// shortAreaToArea expands the single-letter area codes used by the compact
// textual form into full area names (plus the special Local/Zero markers).
var shortAreaToArea = map[string]string{
	"F": "Africa",
	"M": "America",
	"N": "Antarctica",
	"R": "Arctic",
	"S": "Asia",
	"T": "Atlantic",
	"U": "Australia",
	"C": "Etc",
	"E": "Europe",
	"I": "Indian",
	"P": "Pacific",
	"L": "Local",
	"Z": "Zero",
}
// areaToShortArea is the inverse of shortAreaToArea.
var areaToShortArea = map[string]string{
	"Africa": "F",
	"America": "M",
	"Antarctica": "N",
	"Arctic": "R",
	"Asia": "S",
	"Atlantic": "T",
	"Australia": "U",
	"Etc": "C",
	"Europe": "E",
	"Indian": "I",
	"Pacific": "P",
	"Local": "L",
	"Zero": "Z",
}
// Field bounds used by the Validate methods. secondMax of 60 admits
// leap-second values; lat/long bounds are in hundredths of a degree.
const (
	monthMin = 1
	monthMax = 12
	dayMin = 1
	hourMin = 0
	hourMax = 23
	minuteMin = 0
	minuteMax = 59
	secondMin = 0
	secondMax = 60
	nanosecondMin = 0
	nanosecondMax = 999999999
	latitudeMin = -9000
	latitudeMax = 9000
	longitudeMin = -18000
	longitudeMax = 18000
	minutesFromUTCMin = -1439
	minutesFromUTCMax = 1439
)
// dayMax is the maximum day per month, indexed 1-12 (index 0 unused).
// February always allows 29; leap years are not checked here.
var dayMax = [...]uint8{0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
// internalTZType classifies area/location strings for InitWithAreaLocation.
type internalTZType int
const (
	internalTZAreaLocation = iota
	internalTZUTC
	internalTZUTCPreserve
	internalTZLocal
)
// areaLocationToTimezoneType maps known UTC/Local aliases; "preserve"
// entries normalize to UTC while keeping the caller's long name. Strings
// not present map to the zero value, internalTZAreaLocation.
var areaLocationToTimezoneType = map[string]internalTZType{
	"": internalTZUTC,
	"Etc/UTC": internalTZUTC,
	"Z": internalTZUTC,
	"Zero": internalTZUTC,
	"Etc/GMT": internalTZUTCPreserve,
	"Etc/GMT+0": internalTZUTCPreserve,
	"Etc/GMT-0": internalTZUTCPreserve,
	"Etc/GMT0": internalTZUTCPreserve,
	"Etc/Greenwich": internalTZUTCPreserve,
	"Etc/UCT": internalTZUTCPreserve,
	"Etc/Universal": internalTZUTCPreserve,
	"Etc/Zulu": internalTZUTCPreserve,
	"Factory": internalTZUTCPreserve,
	"GMT": internalTZUTCPreserve,
	"GMT+0": internalTZUTCPreserve,
	"GMT-0": internalTZUTCPreserve,
	"GMT0": internalTZUTCPreserve,
	"Greenwich": internalTZUTCPreserve,
	"UCT": internalTZUTCPreserve,
	"Universal": internalTZUTCPreserve,
	"UTC": internalTZUTCPreserve,
	"Zulu": internalTZUTCPreserve,
	"L": internalTZLocal,
	"Local": internalTZLocal,
} | time.go | 0.797793 | 0.610889 | time.go | starcoder
package entities
import (
"fmt"
"reflect"
)
// Chunk represents square area. Many Entities are deployed over Chunk.
// It holds per-area summary nodes (Delegate*) for the entities inside it
// and rail-edge summaries keyed by the ID of the Chunk at the other end.
type Chunk struct {
	Base
	ChunkPoint
	Residence *DelegateResidence
	Company *DelegateCompany
	RailNode *DelegateRailNode
	Parent *Cluster
	InRailEdges map[uint]*DelegateRailEdge
	OutRailEdges map[uint]*DelegateRailEdge
}
// NewChunk create Chunk on specified Cluster, wiring it to the parent and
// registering it with the model.
func (m *Model) NewChunk(p *Cluster, o *Player) *Chunk {
	ch := &Chunk{
		Base: m.NewBase(CHUNK, o),
		ChunkPoint: p.ChunkPoint,
	}
	ch.Init(m)
	ch.Resolve(p)
	m.Add(ch)
	return ch
}
// B returns base information of this elements.
func (ch *Chunk) B() *Base {
	return &ch.Base
}
// Init creates map.
// It (re)initializes the Base header and the in/out rail-edge maps.
func (ch *Chunk) Init(m *Model) {
	ch.Base.Init(CHUNK, m)
	ch.InRailEdges = make(map[uint]*DelegateRailEdge)
	ch.OutRailEdges = make(map[uint]*DelegateRailEdge)
}
// Add deploy Entity over Chunk.
// Localable (point-like) entities fold into this Chunk's summary node;
// Connectable (edge-like) entities become or join a delegate edge between
// two Chunks. Entities matching neither interface are ignored.
func (ch *Chunk) Add(raw Entity) {
	switch obj := raw.(type) {
	case Localable:
		ch.addLocalable(obj)
	case Connectable:
		ch.addConnectable(obj)
	}
}
// addLocalable registers obj in the Chunk field named after its entity
// type (e.g. "RailNode"), creating the delegate node lazily on first use.
func (ch *Chunk) addLocalable(obj Localable) {
	fieldName := obj.B().T.String()
	oid := obj.B().OwnerID
	// Types without a matching summary field on Chunk are not tracked.
	nodeField := reflect.ValueOf(ch).Elem().FieldByName(fieldName)
	if !nodeField.IsValid() {
		return
	}
	if nodeField.IsNil() {
		// First entity of this type here: build the delegate node. When
		// the parent cluster already aggregates this owner's data, chain
		// the parent delegate IDs (presumably to relate zoom levels —
		// confirm against the Cluster/DelegateNode code).
		var pids []uint
		var pid uint
		if parent := ch.Parent.Parent; parent != nil && parent.Data[oid] != nil {
			parentTarget := reflect.ValueOf(parent.Data[oid]).Elem().FieldByName(fieldName)
			pid = uint(parentTarget.Elem().FieldByName("ID").Uint())
			pids = append(parentTarget.Elem().FieldByName("ParentIDs").Interface().([]uint), pid)
		}
		node := reflect.New(delegateTypes[obj.B().T])
		node.Elem().FieldByName("DelegateNode").Set(reflect.ValueOf(ch.NewDelegateNode(obj, pids)))
		nodeField.Set(node)
	}
	nodeField.MethodByName("Add").Call([]reflect.Value{reflect.ValueOf(obj)})
}
// addConnectable registers obj in a delegate edge from this Chunk to the
// Chunk containing obj.To(), creating the edge (and its reverse link for
// rail edges) lazily. Edge maps are named "Out<Type>s"/"In<Type>s".
func (ch *Chunk) addConnectable(obj Connectable) {
	fromID := reflect.ValueOf(ch.ID)
	toCh := ch.M.RootCluster.FindChunk(obj.To(), ch.Parent.Scale)
	if toCh == nil {
		// ex. no Platform, Gate in Chunk referred by Step
		return
	}
	toID := reflect.ValueOf(toCh.ID)
	outMapName := fmt.Sprintf("Out%ss", obj.B().T.String())
	outMap := reflect.ValueOf(ch).Elem().FieldByName(outMapName)
	if !outMap.IsValid() {
		// ex. no OutSteps in DelegateNode
		return
	}
	if !outMap.MapIndex(toID).IsValid() {
		// First edge toward toCh: build the delegate edge between the two
		// Chunks' summary nodes and register it in both directions.
		nodeFieldName := connectTypes[obj.B().T].String()
		from := reflect.ValueOf(ch).Elem().FieldByName(nodeFieldName)
		to := reflect.ValueOf(toCh).Elem().FieldByName(nodeFieldName)
		edge := reflect.New(delegateTypes[obj.B().T])
		edge.Elem().FieldByName("DelegateEdge").Set(reflect.ValueOf(ch.NewDelegateEdge(
			obj, from.Interface().(delegateLocalable), to.Interface().(delegateLocalable))))
		outMap.SetMapIndex(toID, edge)
		inMapName := fmt.Sprintf("In%ss", obj.B().T.String())
		inMap := reflect.ValueOf(toCh).Elem().FieldByName(inMapName)
		inMap.SetMapIndex(fromID, edge)
		if _, ok := obj.(*RailEdge); ok {
			ch.setReverse(edge.Interface().(*DelegateRailEdge), toCh)
		}
	}
	edge := outMap.MapIndex(toID)
	edge.MethodByName("Add").Call([]reflect.Value{reflect.ValueOf(obj)})
}
// setReverse links a delegate rail edge with its opposite-direction twin
// (when the twin already exists) so each exposes the other's ID.
func (ch *Chunk) setReverse(dre *DelegateRailEdge, toCh *Chunk) {
	if reverse, ok := toCh.OutRailEdges[ch.ID]; ok {
		dre.Reverse = reverse
		dre.ReverseID = reverse.ID
		reverse.Reverse = dre
		reverse.ReverseID = dre.ID
	}
}
// Remove undeploy Entity over Chunk; mirrors Add.
func (ch *Chunk) Remove(raw Entity) {
	switch obj := raw.(type) {
	case Localable:
		ch.removeLocalable(obj)
	case Connectable:
		ch.removeConnectable(obj)
	}
}
// removeLocalable detaches obj from the matching summary node and clears
// the field entirely once the node's List becomes empty.
func (ch *Chunk) removeLocalable(obj Localable) {
	fieldName := obj.B().T.String()
	nodeField := reflect.ValueOf(ch).Elem().FieldByName(fieldName)
	if !nodeField.IsValid() {
		return
	}
	nodeField.MethodByName("Remove").Call([]reflect.Value{reflect.ValueOf(obj)})
	if nodeField.Elem().FieldByName("List").Len() == 0 {
		nodeField.Set(reflect.Zero(nodeField.Type()))
	}
}
// removeConnectable detaches obj from the delegate edge toward obj.To()'s
// Chunk; when the edge's List empties, both directions' map entries are
// dropped (SetMapIndex with the zero Value deletes the key).
func (ch *Chunk) removeConnectable(obj Connectable) {
	fromID := reflect.ValueOf(ch.ID)
	toCh := ch.M.RootCluster.FindChunk(obj.To(), ch.Parent.Scale)
	if toCh == nil {
		// ex. no Platform, Gate in Chunk referred by Step
		return
	}
	toID := reflect.ValueOf(toCh.ID)
	outMapName := fmt.Sprintf("Out%ss", obj.B().T.String())
	outMap := reflect.ValueOf(ch).Elem().FieldByName(outMapName)
	if !outMap.IsValid() {
		// ex. no OutSteps in DelegateNode
		return
	}
	delegate := outMap.MapIndex(toID)
	delegate.MethodByName("Remove").Call([]reflect.Value{reflect.ValueOf(obj)})
	if delegate.Elem().FieldByName("List").Len() == 0 {
		outMap.SetMapIndex(toID, reflect.ValueOf(nil))
		inMapName := fmt.Sprintf("In%ss", obj.B().T.String())
		inMap := reflect.ValueOf(toCh).Elem().FieldByName(inMapName)
		inMap.SetMapIndex(fromID, reflect.ValueOf(nil))
	}
}
// Has returns whether specified Entity is deployed over Chunk or not:
// Localable entities are looked up in the summary node's List by ID;
// Connectable entities by the presence of a delegate edge toward To().
func (ch *Chunk) Has(raw Entity) bool {
	id := reflect.ValueOf(raw.B().ID)
	switch obj := raw.(type) {
	case Localable:
		fieldName := obj.B().T.String()
		nodeField := reflect.ValueOf(ch).Elem().FieldByName(fieldName)
		if !nodeField.IsValid() || !nodeField.Elem().IsValid() {
			return false
		}
		return nodeField.Elem().FieldByName("List").MapIndex(id).IsValid()
	case Connectable:
		toCh := ch.M.RootCluster.FindChunk(obj.To(), ch.Parent.Scale)
		if toCh == nil {
			// ex. no Platform, Gate in Chunk referred by Step
			return false
		}
		toID := reflect.ValueOf(toCh.ID)
		outMapName := fmt.Sprintf("Out%ss", obj.B().T.String())
		outMap := reflect.ValueOf(ch).Elem().FieldByName(outMapName)
		if !outMap.IsValid() {
			// ex. no OutSteps in DelegateNode
			return false
		}
		return outMap.MapIndex(toID).IsValid()
	}
	return false
}
// IsEmpty returns whether any Entity is deployed over Chunk or not.
// NOTE(review): only RailNode is consulted — Residence/Company summaries
// do not keep a Chunk "non-empty"; confirm this is intended.
func (ch *Chunk) IsEmpty() bool {
	return ch.RailNode == nil
}
// CheckDelete check remaining reference; a Chunk never blocks deletion.
func (ch *Chunk) CheckDelete() error {
	return nil
}
// BeforeDelete remove reference of related entity (detach from parent).
func (ch *Chunk) BeforeDelete() {
	ch.Parent.UnResolve(ch)
}
// Delete removes this entity with related ones.
func (ch *Chunk) Delete() {
	ch.M.Delete(ch)
}
// Resolve set reference; only a parent Cluster is accepted, anything else
// is a programming error and panics.
func (ch *Chunk) Resolve(args ...Entity) {
	for _, raw := range args {
		switch obj := raw.(type) {
		case *Cluster:
			ch.Parent = obj
		default:
			panic(fmt.Errorf("invalid type: %T %+v", obj, obj))
		}
	}
}
// Export set delegate Entity to DelegateMap: the summary nodes plus every
// rail edge and its endpoint node on the far side.
func (ch *Chunk) Export(dm *DelegateMap) {
	dm.Add(ch.Residence)
	dm.Add(ch.Company)
	dm.Add(ch.RailNode)
	for _, re := range ch.InRailEdges {
		dm.Add(re)
		dm.Add(re.From)
	}
	for _, re := range ch.OutRailEdges {
		dm.Add(re)
		dm.Add(re.To)
	}
}
// String represents status as a terse one-line debug form (scale, id,
// owner, summary pointers, edge counts, position).
func (ch *Chunk) String() string {
	return fmt.Sprintf("%s(%d:%d):u=%d,r=%v,c=%v,rn=%v,i=%d,o=%d:%v", ch.T.Short(),
		ch.Parent.Scale, ch.ID, ch.OwnerID,
		ch.Residence, ch.Company, ch.RailNode,
		len(ch.InRailEdges), len(ch.OutRailEdges), ch.ChunkPoint)
} | entities/chunk.go | 0.624064 | 0.400984 | chunk.go | starcoder
package bytequantity
import (
"fmt"
"math"
"regexp"
"strconv"
"strings"
)
const (
	// Examples: 1mb, 1 gb, 1.0tb, 1mib, 2g, 2.001 t
	byteQuantityRegex = `^([0-9]+\.?[0-9]{0,3})[ ]?(mi?b?|gi?b?|ti?b?)?$`
	mib               = "MiB"
	gib               = "GiB"
	tib               = "TiB"
	gbConvert         = 1 << 10
	tbConvert         = gbConvert << 10
	maxGiB            = math.MaxUint64 / gbConvert
	maxTiB            = math.MaxUint64 / tbConvert
)

// bqRegexp is compiled once at package init; the previous code recompiled
// the pattern on every ParseToByteQuantity call.
var bqRegexp = regexp.MustCompile(byteQuantityRegex)

// ByteQuantity is a data type representing a byte quantity.
// Quantity is held in mebibytes (MiB); see FromMiB and MiB.
type ByteQuantity struct {
	Quantity uint64
}

// ParseToByteQuantity parses a string representation of a byte quantity to a
// ByteQuantity type. A unit can be appended such as 16 GiB. If no unit is
// appended, GiB is assumed. MiB values must be whole numbers; GiB and TiB
// accept up to three decimal places (truncated to whole MiB).
func ParseToByteQuantity(byteQuantityStr string) (ByteQuantity, error) {
	matches := bqRegexp.FindStringSubmatch(strings.ToLower(byteQuantityStr))
	if len(matches) < 2 {
		return ByteQuantity{}, fmt.Errorf("%s is not a valid byte quantity", byteQuantityStr)
	}
	quantityStr := matches[1]
	unit := gib
	if len(matches) > 2 && matches[2] != "" {
		unit = matches[2]
	}
	quantity := uint64(0)
	// The first byte of the unit identifies the magnitude (the default
	// "GiB" is upper-case, hence the ToLower).
	switch strings.ToLower(string(unit[0])) {
	// mib
	case "m":
		// MiB is the storage unit, so only whole numbers are accepted;
		// a fractional part must be all zeros.
		inputDecSplit := strings.Split(quantityStr, ".")
		if len(inputDecSplit) == 2 {
			d, err := strconv.Atoi(inputDecSplit[1])
			if err != nil {
				return ByteQuantity{}, err
			}
			if d != 0 {
				return ByteQuantity{}, fmt.Errorf("cannot accept floating point MB value, only integers are accepted")
			}
		}
		// need error here so that this quantity doesn't bind in the local scope
		var err error
		quantity, err = strconv.ParseUint(inputDecSplit[0], 10, 64)
		if err != nil {
			return ByteQuantity{}, err
		}
	// gib
	case "g":
		// bitSize must be 32 or 64; the previous value of 10 was invalid
		// and only worked because strconv falls back to 64.
		quantityDec, err := strconv.ParseFloat(quantityStr, 64)
		if err != nil {
			return ByteQuantity{}, err
		}
		if quantityDec > maxGiB {
			return ByteQuantity{}, fmt.Errorf("error GiB value is too large")
		}
		quantity = uint64(quantityDec * gbConvert)
	// tib
	case "t":
		quantityDec, err := strconv.ParseFloat(quantityStr, 64)
		if err != nil {
			return ByteQuantity{}, err
		}
		if quantityDec > maxTiB {
			return ByteQuantity{}, fmt.Errorf("error TiB value is too large")
		}
		quantity = uint64(quantityDec * tbConvert)
	default:
		return ByteQuantity{}, fmt.Errorf("error unit %s is not supported", unit)
	}
	return ByteQuantity{
		Quantity: quantity,
	}, nil
}

// FromTiB returns a byte quantity of the passed in tebibytes quantity
func FromTiB(tib uint64) ByteQuantity {
	return ByteQuantity{
		Quantity: tib * tbConvert,
	}
}

// FromGiB returns a byte quantity of the passed in gibibytes quantity
func FromGiB(gib uint64) ByteQuantity {
	return ByteQuantity{
		Quantity: gib * gbConvert,
	}
}

// FromMiB returns a byte quantity of the passed in mebibytes quantity
func FromMiB(mib uint64) ByteQuantity {
	return ByteQuantity{
		Quantity: mib,
	}
}

// StringMiB returns a byte quantity in a mebibytes string representation
func (bq ByteQuantity) StringMiB() string {
	return fmt.Sprintf("%.0f %s", bq.MiB(), mib)
}

// StringGiB returns a byte quantity in a gibibytes string representation
func (bq ByteQuantity) StringGiB() string {
	return fmt.Sprintf("%.3f %s", bq.GiB(), gib)
}

// StringTiB returns a byte quantity in a tebibytes string representation
func (bq ByteQuantity) StringTiB() string {
	return fmt.Sprintf("%.3f %s", bq.TiB(), tib)
}

// MiB returns a byte quantity in mebibytes
func (bq ByteQuantity) MiB() float64 {
	return float64(bq.Quantity)
}

// GiB returns a byte quantity in gibibytes
func (bq ByteQuantity) GiB() float64 {
	return float64(bq.Quantity) * 1 / gbConvert
}
// TiB returns a byte quantity in tebibytes (Quantity is stored in MiB,
// so divide by the MiB-per-TiB factor).
func (bq ByteQuantity) TiB() float64 {
	return float64(bq.Quantity) * 1 / tbConvert
} | pkg/bytequantity/bytequantity.go | 0.839471 | 0.42173 | bytequantity.go | starcoder
package continuous
import (
"fmt"
"math"
)
// Distance returns the Euclidean distance between vectors a and b,
// restricted to the coordinates listed in indices.
func Distance(a, b []float64, indices []int) float64 {
	sum := 0.0
	for _, idx := range indices {
		diff := a[idx] - b[idx]
		sum += diff * diff
	}
	return math.Sqrt(sum)
}
// Harmonic calculates the harmonic according to
// <NAME>, <NAME>, and <NAME>.
// Estimating mutual information. Phys. Rev. E, 69:066138, Jun 2004.
//
// For n == 0 it returns 0. Otherwise it starts from the negated
// Euler-Mascheroni constant (-0.5772156649) and subtracts 1/i for
// i = 2..n, i.e. it returns -(gamma + H_n - 1).
// NOTE(review): confirm this sign/offset convention against the cited
// paper before changing anything here.
func Harmonic(n int) (r float64) {
	if n == 0 {
		return
	}
	r = -0.5772156649
	if n > 0 {
		for i := 2.0; i <= float64(n); i++ {
			r -= 1.0 / i
		}
	}
	return
}
// Normalise can be used to normalise the data before passing it
// to FrenzelPompe or KraskovStoegbauerGrassberger1/2.
// It scans the data for per-column minima and maxima and delegates the
// actual scaling to NormaliseByDomain. It returns the normalised data
// together with the per-column minima and maxima that were used.
// Empty input yields three nil results (previously this panicked on
// data[0]).
func Normalise(data [][]float64, verbose bool) ([][]float64, []float64, []float64) {
	if len(data) == 0 {
		return nil, nil, nil
	}
	cols := len(data[0])
	min := make([]float64, cols)
	max := make([]float64, cols)
	// Seed each column's bounds from the first row, then tighten over all
	// rows (the two separate init loops of the original are merged here).
	for column := 0; column < cols; column++ {
		min[column] = data[0][column]
		max[column] = data[0][column]
		for row := range data {
			if min[column] > data[row][column] {
				min[column] = data[row][column]
			}
			if max[column] < data[row][column] {
				max[column] = data[row][column]
			}
		}
	}
	return NormaliseByDomain(data, min, max, verbose), min, max
}
// NormaliseByDomain can be used to normalise the data before passing it
// to FrenzelPompe or KraskovStoegbauerGrassberger1/2.
// It takes the data and the minimum and maximum values per column, clamps
// each value into [min, max], and rescales it to [0, 1]. Columns whose
// range is smaller than 1e-6 are left at 0 to avoid dividing by ~zero.
func NormaliseByDomain(data [][]float64, min, max []float64, verbose bool) [][]float64 {
	if verbose == true {
		// NOTE(review): these strings are built but never printed or
		// logged — this looks like a lost log statement; confirm intent.
		minStr := ""
		maxStr := ""
		for i := range min {
			minStr = fmt.Sprintf("%s %f", minStr, min[i])
			maxStr = fmt.Sprintf("%s %f", maxStr, max[i])
		}
	}
	r := make([][]float64, len(data), len(data))
	for row := range data {
		r[row] = make([]float64, len(data[0]), len(data[0]))
		for column := range data[0] {
			if math.Abs(min[column]-max[column]) > 0.000001 {
				// Clamp into the domain before rescaling.
				value := data[row][column]
				if value > max[column] {
					value = max[column]
				}
				if value < min[column] {
					value = min[column]
				}
				r[row][column] = (value - min[column]) / (max[column] - min[column])
			}
		}
	}
	return r
} | continuous/Functions.go | 0.7413 | 0.474327 | Functions.go | starcoder
package haversine
import (
"math"
)
// Mean Earth radii used to convert the unit-sphere arc returned by
// IntAngle into a physical distance.
const (
	EarthRadiusMi = 3958 // radius of the earth in miles.
	EarthRadiusKm = 6371 // radius of the earth in kilometers.
	EarthRadiusNM = 3440 // radius of the earth in nautical miles.
)
// Coord represents a lat/long geographic coordinate, usually in degrees +EN/-WS.
type Coord struct {
	Lat float64 // latitude: positive north, negative south
	Lon float64 // longitude: positive east, negative west
}

// Degrees converts a radians-based Coord to a degrees-based Coord.
func (c *Coord) Degrees() Coord {
	return Coord{c.Lat * 180 / math.Pi, c.Lon * 180 / math.Pi}
}

// Radians converts a degrees-based Coord to a radians-based Coord.
func (c *Coord) Radians() Coord {
	return Coord{c.Lat * math.Pi / 180, c.Lon * math.Pi / 180}
}
// NmToMi converts a distance in nautical miles to statute miles.
func NmToMi(nm float64) float64 {
	return nm * EarthRadiusMi / EarthRadiusNM
}

// NmToKm converts a distance in nautical miles to kilometers.
func NmToKm(nm float64) float64 {
	return nm * EarthRadiusKm / EarthRadiusNM
}

// KmToNm converts a distance in kilometers to nautical miles.
func KmToNm(km float64) float64 {
	return km * EarthRadiusNM / EarthRadiusKm
}
// IntAngle computes the internal (central) angle between two coordinates
// on the sphere using the haversine formula, returning radians.
// (The original comment misspelled the name as "IntAgle".)
func IntAngle(p, q Coord) float64 {
	pr := p.Radians()
	qr := q.Radians()
	dLat := qr.Lat - pr.Lat
	dLon := qr.Lon - pr.Lon
	a := math.Pow(math.Sin(dLat/2), 2) +
		math.Cos(pr.Lat)*math.Cos(qr.Lat)*math.Pow(math.Sin(dLon/2), 2)
	return 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
}
// Distance returns the great-circle arc length between p and q on a
// sphere of the given radius (result is in the same unit as radius).
func Distance(p, q Coord, radius float64) float64 {
	return IntAngle(p, q) * radius
}
// DistanceMi calculates the shortest path between two coordinates on the surface
// of the Earth and returns the result in statute miles.
func DistanceMi(p, q Coord) (mi float64) {
	return Distance(p, q, EarthRadiusMi)
}

// DistanceKm calculates the shortest path between two coordinates on the surface
// of the Earth and returns the result in kilometers.
func DistanceKm(p, q Coord) (km float64) {
	return Distance(p, q, EarthRadiusKm)
}

// DistanceNM calculates the shortest path between two coordinates on the surface
// of the Earth and returns the result in nautical miles.
func DistanceNM(p, q Coord) (nm float64) {
	return Distance(p, q, EarthRadiusNM)
}
package bitfield
import (
"encoding/binary"
"math/bits"
)
// Compile-time check that Bitvector256 satisfies the Bitfield interface.
var _ = Bitfield(Bitvector256{})

// Bitvector256 is a bitfield with a fixed defined size of 256. There is no length bit
// present in the underlying byte array.
type Bitvector256 []byte

const bitvector256ByteSize = 32 // 256 bits / 8
const bitvector256BitSize = bitvector256ByteSize * 8
// NewBitvector256 creates a new, zeroed bitvector of 256 bits (32 bytes).
func NewBitvector256() Bitvector256 {
	return make(Bitvector256, bitvector256ByteSize)
}
// BitAt returns the bit value at the given index. Out-of-range indices,
// and bitvectors whose length is not exactly 32 bytes, report false.
func (b Bitvector256) BitAt(idx uint64) bool {
	if len(b) != bitvector256ByteSize || idx >= b.Len() {
		return false
	}
	mask := byte(1 << (idx % 8))
	return b[idx/8]&mask != 0
}
// SetBitAt sets the bit at the given index to val. Out-of-range indices,
// and bitvectors whose length is not exactly 32 bytes, are ignored.
func (b Bitvector256) SetBitAt(idx uint64, val bool) {
	if len(b) != bitvector256ByteSize || idx >= b.Len() {
		return
	}
	mask := byte(1 << (idx % 8))
	if val {
		b[idx/8] |= mask
		return
	}
	b[idx/8] &^= mask
}
// Len returns the number of bits in the bitvector, which is always 256
// regardless of the underlying slice length.
func (b Bitvector256) Len() uint64 {
	return bitvector256BitSize
}
// Count returns the number of bits set to 1, considering at most the
// first 32 bytes of the underlying slice.
func (b Bitvector256) Count() uint64 {
	limit := len(b)
	if limit > bitvector256ByteSize {
		limit = bitvector256ByteSize
	}
	total := 0
	for _, bt := range b[:limit] {
		total += bits.OnesCount8(bt)
	}
	return uint64(total)
}
// Bytes returns a copy of the underlying byte data, truncated to at most
// 32 bytes.
func (b Bitvector256) Bytes() []byte {
	if len(b) == 0 {
		return []byte{}
	}
	ln := len(b)
	if ln > bitvector256ByteSize {
		ln = bitvector256ByteSize
	}
	out := make([]byte, ln)
	copy(out, b)
	return out
}
// Shift moves all bits in the bitvector by i positions: a left shift for
// i >= 0, a right shift for i < 0. Bits shifted out are discarded and the
// vacated positions are zero-filled. Bitvectors whose length is not
// exactly 32 bytes are left untouched (consistent with BitAt/SetBitAt).
//
// The previous implementation called binary.BigEndian.Uint64(b) on the
// whole vector, so it only ever shifted the first 8 of the 32 bytes, and
// uint8(i) truncated shift amounts of 256 to 0.
func (b Bitvector256) Shift(i int) {
	if len(b) != bitvector256ByteSize {
		return
	}
	// Shifting by >= 256 bits always yields all zeros; clamp so the word
	// arithmetic below stays in range.
	if i > bitvector256BitSize {
		i = bitvector256BitSize
	} else if i < -bitvector256BitSize {
		i = -bitvector256BitSize
	}
	left := i >= 0
	if !left {
		i = -i
	}
	// Load the vector as four big-endian 64-bit words, word 0 most significant.
	const nWords = bitvector256ByteSize / 8
	var words [nWords]uint64
	for w := 0; w < nWords; w++ {
		words[w] = binary.BigEndian.Uint64(b[w*8 : (w+1)*8])
	}
	wordShift := i / 64
	bitShift := uint(i % 64)
	var out [nWords]uint64
	for w := 0; w < nWords; w++ {
		if left {
			// A left shift pulls bits from less significant (higher-index) words.
			src := w + wordShift
			var cur, next uint64
			if src < nWords {
				cur = words[src]
				if src+1 < nWords {
					next = words[src+1]
				}
			}
			if bitShift == 0 {
				out[w] = cur
			} else {
				out[w] = cur<<bitShift | next>>(64-bitShift)
			}
		} else {
			// A right shift pulls bits from more significant (lower-index) words.
			src := w - wordShift
			var cur, prev uint64
			if src >= 0 {
				cur = words[src]
				if src-1 >= 0 {
					prev = words[src-1]
				}
			}
			if bitShift == 0 {
				out[w] = cur
			} else {
				out[w] = cur>>bitShift | prev<<(64-bitShift)
			}
		}
	}
	for w := 0; w < nWords; w++ {
		binary.BigEndian.PutUint64(b[w*8:(w+1)*8], out[w])
	}
}
// BitIndices returns the list of indices that are set to 1.
func (b Bitvector256) BitIndices() []int {
indices := make([]int, 0, bitvector256BitSize)
for i, bt := range b {
if i >= bitvector256ByteSize {
break
}
for j := 0; j < 8; j++ {
bit := byte(1 << uint(j))
if bt&bit == bit {
indices = append(indices, i*8+j)
}
}
}
return indices
} | bitvector256.go | 0.789193 | 0.468183 | bitvector256.go | starcoder |
package scaling
import (
"github.com/wieku/danser-go/framework/math/vector"
)
// Scaling selects a strategy for mapping a source size onto a target size.
type Scaling int

const (
	// The source is not scaled.
	None = Scaling(iota)
	// Scales the source to fit the target while keeping the same aspect ratio. This may cause the source to be smaller than the
	// target in one direction.
	Fit
	// Scales the source to fill the target while keeping the same aspect ratio. This may cause the source to be larger than the
	// target in one direction.
	Fill
	// Scales the source to fill the target in the x direction while keeping the same aspect ratio. This may cause the source to be
	// smaller or larger than the target in the y direction.
	FillX
	// Scales the source to fill the target in the y direction while keeping the same aspect ratio. This may cause the source to be
	// smaller or larger than the target in the x direction.
	FillY
	// Scales the source to fill the target. This may cause the source to not keep the same aspect ratio.
	Stretch
	// Scales the source to fill the target in the x direction, without changing the y direction. This may cause the source to not
	// keep the same aspect ratio.
	StretchX
	// Scales the source to fill the target in the y direction, without changing the x direction. This may cause the source to not
	// keep the same aspect ratio.
	StretchY
)
// Returns the size of the source scaled to the target. Note the same Vector2 instance is always returned and should never be
// cached.
func (s Scaling) Apply(sourceX, sourceY, targetX, targetY float32) vector.Vector2f {
var res vector.Vector2f
switch s {
case Fit:
targetRatio := targetY / targetX
sourceRatio := sourceY / sourceX
scale := targetY / sourceY
if targetRatio > sourceRatio {
scale = targetX / sourceX
}
res.X = sourceX * scale
res.Y = sourceY * scale
case Fill:
targetRatio := targetY / targetX
sourceRatio := sourceY / sourceX
scale := targetX / sourceX
if targetRatio > sourceRatio {
scale = targetY / sourceY
}
res.X = sourceX * scale
res.Y = sourceY * scale
case FillX:
scale := targetX / sourceX
res.X = sourceX * scale
res.Y = sourceY * scale
case FillY:
scale := targetY / sourceY
res.X = sourceX * scale
res.Y = sourceY * scale
case Stretch:
res.X = targetX
res.Y = targetY
case StretchX:
res.X = targetX
res.Y = sourceY
case StretchY:
res.X = sourceX
res.Y = targetY
default:
res.X = sourceX
res.Y = sourceY
}
return res
} | framework/math/scaling/scaling.go | 0.786705 | 0.534066 | scaling.go | starcoder |
package parseg
import (
"io"
"github.com/ajiyoshi-vg/parseg/stream"
)
// Parser consumes input from a stream and produces a value of type T.
// Parse returns the parsed value (nil when the parser does not match),
// the number of bytes consumed, and any non-EOF error encountered.
type Parser[T any] interface {
	Parse(stream.Stream) (*T, int, error)
	// TryParser wraps the parser so a failed match rewinds the stream.
	TryParser() Parser[T]
	// IntoFunc exposes the parser as a plain ParserFunc.
	IntoFunc() ParserFunc[T]
}

// Compile-time checks that ParserFunc (and its pointer) satisfy Parser.
var (
	_ Parser[int] = (ParserFunc[int])(nil)
	_ Parser[int] = (*ParserFunc[int])(nil)
)

// ParserFunc adapts a bare parsing function to the Parser interface.
type ParserFunc[T any] func(stream.Stream) (*T, int, error)

// Parse invokes the underlying function.
func (f ParserFunc[T]) Parse(r stream.Stream) (*T, int, error) {
	return f(r)
}

// TryParser returns a rewinding variant of f (see the TryParser combinator).
func (f ParserFunc[T]) TryParser() Parser[T] {
	return TryParser[T](f)
}

// IntoFunc returns f itself.
func (f ParserFunc[T]) IntoFunc() ParserFunc[T] {
	return f
}

// ID returns f as a Parser; useful to anchor type inference at call sites.
func (f ParserFunc[T]) ID() Parser[T] {
	return f
}

// IntoFunc converts any Parser into an equivalent ParserFunc.
func IntoFunc[T any](p Parser[T]) ParserFunc[T] {
	return func(r stream.Stream) (*T, int, error) {
		return p.Parse(r)
	}
}

// Lazy dereferences t; presumably this allows referring to a parser
// variable before it has been assigned — TODO confirm intended use.
func Lazy[T any](t *T) T {
	return *t
}
// Map transforms the result of p with f, leaving consumed-byte counts,
// non-matches and errors untouched.
func Map[T, S any](p Parser[T], f func(T) S) Parser[S] {
	return ParserFunc[S](func(r stream.Stream) (*S, int, error) {
		parsed, n, err := p.Parse(r)
		switch {
		case isError(err):
			return nil, n, err
		case parsed == nil:
			return nil, n, nil
		default:
			mapped := f(*parsed)
			return &mapped, n, nil
		}
	})
}
// Apply transforms the result of p with f; f may fail, in which case the
// combined parser fails with f's error.
func Apply[T, S any](p Parser[T], f func(T) (S, error)) Parser[S] {
	return ParserFunc[S](func(r stream.Stream) (*S, int, error) {
		parsed, n, err := p.Parse(r)
		if isError(err) {
			return nil, n, err
		}
		if parsed == nil {
			return nil, n, nil
		}
		mapped, err := f(*parsed)
		if isError(err) {
			return nil, n, err
		}
		return &mapped, n, nil
	})
}
// SequenceOf runs the given parsers one after another and collects their
// results. If any parser fails to match, the whole sequence fails and the
// stream is rewound to where it started (TryParser semantics).
func SequenceOf[T any](ps []Parser[T]) Parser[[]T] {
	run := func(r stream.Stream) (*[]T, int, error) {
		var results []T
		total := 0
		for _, p := range ps {
			x, n, err := p.Parse(r)
			total += n
			if isError(err) {
				return nil, total, err
			}
			if x == nil {
				return nil, total, nil
			}
			results = append(results, *x)
		}
		return &results, total, nil
	}
	return ParserFunc[[]T](run).TryParser()
}
// Sequence is a variadic convenience wrapper around SequenceOf.
func Sequence[T any](ps ...Parser[T]) Parser[[]T] {
	return SequenceOf(ps)
}
// TryParser wraps p so that when p does not match (nil result, no error),
// the stream is rewound by the number of bytes p consumed and 0 is
// reported as consumed. Errors from p, and from the rewind itself, are
// propagated.
func TryParser[T any](p Parser[T]) Parser[T] {
	return ParserFunc[T](func(r stream.Stream) (*T, int, error) {
		parsed, n, err := p.Parse(r)
		if isError(err) {
			return nil, n, err
		}
		if parsed == nil {
			// Undo the partial consumption so alternatives can re-parse.
			_, err := r.Seek(int64(-n), io.SeekCurrent)
			return nil, 0, err
		}
		return parsed, n, nil
	})
}
// OneOf tries each parser in order, returning the first successful match.
// When no parser matches, a non-match is reported; note that OneOf relies
// on the individual parsers rewinding their own partial consumption
// (e.g. via TryParser).
func OneOf[T any](ps ...Parser[T]) Parser[T] {
	return ParserFunc[T](func(r stream.Stream) (*T, int, error) {
		for _, p := range ps {
			parsed, n, err := p.Parse(r)
			if isError(err) {
				return nil, n, err
			}
			if parsed != nil {
				return parsed, n, nil
			}
		}
		return nil, 0, nil
	})
}
// Cons parses car followed by cdr and prepends car's result to cdr's
// slice. If car does not match, Cons does not match. If cdr fails or does
// not match after car succeeded, the single-element slice from car is
// returned together with cdr's error, if any — callers should be aware
// the result can be non-nil even when err is set.
func Cons[T any](car Parser[T], cdr Parser[[]T]) Parser[[]T] {
	return ParserFunc[[]T](func(r stream.Stream) (*[]T, int, error) {
		x, n, err := car.Parse(r)
		if isError(err) || x == nil {
			return nil, n, err
		}
		ret := []T{*x}
		y, m, err := cdr.Parse(r)
		n += m
		if isError(err) || y == nil {
			return Ptr(ret), n, err
		}
		return Ptr(append(ret, *y...)), n, nil
	})
}
// Many applies p zero or more times, collecting the results until p no
// longer matches. It never reports a non-match itself: an empty (possibly
// nil) slice is returned when p fails to match immediately.
func Many[T any](p Parser[T]) Parser[[]T] {
	return ParserFunc[[]T](func(r stream.Stream) (*[]T, int, error) {
		var collected []T
		total := 0
		for {
			x, n, err := p.Parse(r)
			total += n
			if isError(err) {
				return nil, total, err
			}
			if x == nil {
				break
			}
			collected = append(collected, *x)
		}
		return &collected, total, nil
	})
}
// Many1 applies p one or more times: a mandatory first match (car)
// followed by zero or more further matches.
func Many1[T any](p Parser[T]) Parser[[]T] {
	return Cons(p, Many(p))
}
// Next runs a then b and returns b's result, discarding a's. If either
// side fails to match, the stream is rewound to the start (TryParser).
func Next[T, S any](a Parser[T], b Parser[S]) Parser[S] {
	run := func(r stream.Stream) (*S, int, error) {
		first, n, err := a.Parse(r)
		if isError(err) {
			return nil, n, err
		}
		if first == nil {
			return nil, n, nil
		}
		second, m, err := b.Parse(r)
		n += m
		if isError(err) {
			return nil, n, err
		}
		return second, n, nil
	}
	return ParserFunc[S](run).TryParser()
}
// Prev runs a then b and returns a's result; b's result only decides
// whether the combination matched. On failure the stream is rewound to
// the start (TryParser).
func Prev[T, S any](a Parser[T], b Parser[S]) Parser[T] {
	run := func(r stream.Stream) (*T, int, error) {
		first, n, err := a.Parse(r)
		if isError(err) {
			return nil, n, err
		}
		if first == nil {
			return nil, n, nil
		}
		second, m, err := b.Parse(r)
		n += m
		if isError(err) {
			return nil, n, err
		}
		if second == nil {
			return nil, n, nil
		}
		return first, n, nil
	}
	return ParserFunc[T](run).TryParser()
}
// Center parses a, b, c in order and keeps only b's result (e.g. the
// contents between two delimiters).
func Center[A, B, C any](a Parser[A], b Parser[B], c Parser[C]) Parser[B] {
	return Prev(Next(a, b), c).TryParser()
}

// Ptr returns a pointer to a copy of x.
func Ptr[T any](x T) *T {
	return &x
}

// isError reports whether err is a real failure; io.EOF is treated as a
// normal end-of-input condition rather than an error.
func isError(err error) bool {
	return err != nil && err != io.EOF
}
package metrics
import (
"bytes"
"fmt"
"sort"
"text/tabwriter"
)
// A ConfusionMatrix stores true positives (TP), true negatives (TN), false
// positives (FP) and false negatives (FN). It maps each true class to a map
// of predicted class to the (possibly weighted) number of observations:
// cm[trueClass][predictedClass].
type ConfusionMatrix map[float64]map[float64]float64

// NClasses returns the number of classes in a ConfusionMatrix.
func (cm ConfusionMatrix) NClasses() int {
	return len(cm)
}
// Classes returns the distinct true classes of the ConfusionMatrix,
// sorted in ascending order.
func (cm ConfusionMatrix) Classes() []float64 {
	classes := make([]float64, 0, len(cm))
	for class := range cm {
		classes = append(classes, class)
	}
	sort.Float64s(classes)
	return classes
}
// TruePositives returns the (possibly weighted) number of times class was
// correctly predicted, i.e. the diagonal entry for class.
func (cm ConfusionMatrix) TruePositives(class float64) float64 {
	if _, ok := cm[class]; !ok {
		return 0
	}
	return cm[class][class]
}
// FalsePositives returns the (possibly weighted) number of times class
// was predicted although the true class was different.
func (cm ConfusionMatrix) FalsePositives(class float64) float64 {
	if _, ok := cm[class]; !ok {
		return 0
	}
	total := 0.0
	for trueClass, row := range cm {
		if trueClass == class {
			continue
		}
		total += row[class]
	}
	return total
}
// FalseNegatives returns the (possibly weighted) number of times the true
// class was class but a different class was predicted.
func (cm ConfusionMatrix) FalseNegatives(class float64) float64 {
	row, ok := cm[class]
	if !ok {
		return 0
	}
	total := 0.0
	for predClass, count := range row {
		if predClass != class {
			total += count
		}
	}
	return total
}
// TrueNegatives returns the (possibly weighted) number of observations in
// which neither the true class nor the predicted class was class.
func (cm ConfusionMatrix) TrueNegatives(class float64) float64 {
	if _, ok := cm[class]; !ok {
		return 0
	}
	total := 0.0
	for trueClass, row := range cm {
		if trueClass == class {
			continue
		}
		for predClass, count := range row {
			if predClass != class {
				total += count
			}
		}
	}
	return total
}
// String returns a human-readable table with one column per predicted
// class and one row per true class, suitable for terminal display.
func (cm ConfusionMatrix) String() string {
	var (
		buffer bytes.Buffer
		w      = tabwriter.NewWriter(&buffer, 0, 8, 0, '\t', 0)
	)
	classes := cm.Classes()
	// Header: one column for each predicted class.
	// (Fprintf replaces the redundant Fprint(w, Sprintf(...)) double formatting.)
	for _, class := range classes {
		fmt.Fprintf(w, "\tPredicted %0.f", class)
	}
	fmt.Fprint(w, "\t\n")
	// Body: one row for each true class.
	for i, tc := range classes {
		fmt.Fprintf(w, "True %0.f", tc)
		for _, pc := range classes {
			fmt.Fprintf(w, "\t%0.f", cm[tc][pc])
		}
		// Only add a carriage return if the current class is not the last one.
		if i != len(classes)-1 {
			fmt.Fprint(w, "\t\n")
		} else {
			fmt.Fprint(w, "\t")
		}
	}
	w.Flush()
	return buffer.String()
}
// MakeConfusionMatrix returns a ConfusionMatrix from a slice of true classes
// and another slice of predicted classes.
func MakeConfusionMatrix(yTrue, yPred, weights []float64) (ConfusionMatrix, error) {
if len(yTrue) != len(yPred) {
return nil, &errMismatchedLengths{len(yTrue), len(yPred)}
}
if weights != nil && len(yTrue) != len(weights) {
return nil, &errMismatchedLengths{len(yTrue), len(weights)}
}
var cm = make(ConfusionMatrix)
if weights != nil {
for i, yt := range yTrue {
if _, ok := cm[yt]; ok {
cm[yt][yPred[i]] += weights[i]
} else {
cm[yt] = make(map[float64]float64)
cm[yt][yPred[i]] = weights[i]
}
}
return cm, nil
}
for i, yt := range yTrue {
if _, ok := cm[yt]; ok {
cm[yt][yPred[i]]++
} else {
cm[yt] = make(map[float64]float64)
cm[yt][yPred[i]] = 1
}
}
return cm, nil
} | metrics/confusion_matrix.go | 0.776538 | 0.428831 | confusion_matrix.go | starcoder |
package unit
import (
"fmt"
"strings"
)
// DataSizeUnit defines available units used for specifying expected storage size, expected upload size, and expected download size.
var DataSizeUnit = []string{"kb", "mb", "gb", "tb", "kib", "mib", "gib", "tib"}

// DataSizeMultiplier maps each unit suffix to its size in bytes:
// decimal SI units (kb..tb) and binary IEC units (kib..tib).
var DataSizeMultiplier = map[string]uint64{
	"kb": 1e3,
	"mb": 1e6,
	"gb": 1e9,
	"tb": 1e12,
	"kib": 1 << 10,
	"mib": 1 << 20,
	"gib": 1 << 30,
	"tib": 1 << 40,
}
// ParseStorage converts a human-readable size string (e.g. "10 GiB",
// "2tb", "512b") into a byte count. Spaces are stripped and units are
// matched case-insensitively; an unrecognised unit yields an error.
func ParseStorage(str string) (uint64, error) {
	// Normalise: strip spaces and lower-case the unit suffix.
	normalised := strings.ToLower(strings.Replace(str, " ", "", -1))
	// Match one of the known multi-character unit suffixes.
	for unit, multiplier := range DataSizeMultiplier {
		if strings.HasSuffix(normalised, unit) {
			return ParseUint64(normalised, multiplier, unit)
		}
	}
	// Plain bytes, e.g. "128b".
	if strings.HasSuffix(normalised, "b") {
		return ParseUint64(normalised, 1, "b")
	}
	return 0, fmt.Errorf("data provided does not have valid unit: %s. valid units are: %v",
		normalised, DataSizeUnit)
}
// FormatStorage is used to format the data for console display purpose
func FormatStorage(dataSize uint64, storage bool) (formatted string) {
additionalInfo := ""
if !storage {
additionalInfo = "/block"
}
switch {
case dataSize%DataSizeMultiplier["tib"] == 0:
formatted = fmt.Sprintf("%v TiB%s", dataSize/DataSizeMultiplier["tib"], additionalInfo)
return
case dataSize%DataSizeMultiplier["gib"] == 0:
formatted = fmt.Sprintf("%v GiB%s", dataSize/DataSizeMultiplier["gib"], additionalInfo)
return
case dataSize%DataSizeMultiplier["mib"] == 0:
formatted = fmt.Sprintf("%v MiB%s", dataSize/DataSizeMultiplier["mib"], additionalInfo)
return
case dataSize%DataSizeMultiplier["kib"] == 0:
formatted = fmt.Sprintf("%v KiB%s", dataSize/DataSizeMultiplier["kib"], additionalInfo)
return
case dataSize%DataSizeMultiplier["tb"] == 0:
formatted = fmt.Sprintf("%v TB%s", dataSize/DataSizeMultiplier["tb"], additionalInfo)
return
case dataSize%DataSizeMultiplier["gb"] == 0:
formatted = fmt.Sprintf("%v GB%s", dataSize/DataSizeMultiplier["gb"], additionalInfo)
return
case dataSize%DataSizeMultiplier["mb"] == 0:
formatted = fmt.Sprintf("%v MB%s", dataSize/DataSizeMultiplier["mb"], additionalInfo)
return
case dataSize%DataSizeMultiplier["kb"] == 0:
formatted = fmt.Sprintf("%v KB%s", dataSize/DataSizeMultiplier["kb"], additionalInfo)
return
default:
formatted = fmt.Sprintf("%v B%s", dataSize, additionalInfo)
return
}
} | common/unit/storage.go | 0.580233 | 0.478955 | storage.go | starcoder |
package assert
import (
"errors"
"testing"
)
// Equal calls t.Fatalf if result != expected.
func Equal[T comparable](t testing.TB, result, expected T) {
	t.Helper()
	if result == expected {
		return
	}
	t.Fatalf("%v != %v", result, expected)
}
// EqualSlices calls t.Fatalf if result and expected do not contain the
// same elements in the same order.
func EqualSlices[T comparable, TS ~[]T](t testing.TB, result, expected TS) {
	t.Helper()
	if resLen, expLen := len(result), len(expected); resLen != expLen {
		t.Fatalf("%v != %v: slices are not the same length: %d != %d", result, expected, resLen, expLen)
	}
	// Compare every element; the previous loop stopped at
	// len(expected)-1 and never checked the final element.
	for i := range expected {
		if res, exp := result[i], expected[i]; res != exp {
			t.Fatalf("%v != %v: unequal elements at index %d: %v != %v", result, expected, i, res, exp)
		}
	}
}
// EqualMaps calls t.Fatalf if result and expected do not contain exactly
// the same key/value pairs.
func EqualMaps[K, V comparable](t testing.TB, result, expected map[K]V) {
	t.Helper()
	if resLen, expLen := len(result), len(expected); resLen != expLen {
		t.Fatalf("%v != %v: maps are not the same length: %d != %d", result, expected, resLen, expLen)
	}
	for key, want := range expected {
		got, ok := result[key]
		switch {
		case !ok:
			t.Fatalf("%v != %v: result did not contain key %v", result, expected, key)
		case got != want:
			t.Fatalf("%v != %v: unequal elements at key %v: %v != %v", result, expected, key, got, want)
		}
	}
}
// A Comparable can be compared to other instances of the same type.
type Comparable[T any] interface {
	// Equal should return true if t is equal to the receiver.
	Equal(t T) bool
}

// EqualC calls t.Fatalf if result does not compare equal to expected,
// as judged by result's own Equal method.
func EqualC[T any](t testing.TB, result Comparable[T], expected T) {
	t.Helper()
	if !result.Equal(expected) {
		t.Fatalf("%v != %v", result, expected)
	}
}
// Contains calls t.Fatalf if any value v is not present in s.
func Contains[T comparable](t testing.TB, s []T, v ...T) {
	t.Helper()
	inSlice := func(want T) bool {
		for _, got := range s {
			if got == want {
				return true
			}
		}
		return false
	}
	for _, want := range v {
		if !inSlice(want) {
			t.Fatalf("%v not present in %v", want, s)
		}
	}
}
// ContainsKeys calls t.Fatalf if any of the specified keys are not present in m.
func ContainsKeys[T comparable, A any](t testing.TB, m map[T]A, keys ...T) {
	t.Helper()
	for _, key := range keys {
		_, ok := m[key]
		if ok {
			continue
		}
		t.Fatalf("key %v not present in %v", key, m)
	}
}
// ContainsVals calls t.Fatalf if any of the specified vals are not present in m.
func ContainsVals[A, T comparable](t testing.TB, m map[A]T, vals ...T) {
	t.Helper()
	hasVal := func(want T) bool {
		for _, got := range m {
			if got == want {
				return true
			}
		}
		return false
	}
	for _, want := range vals {
		if !hasVal(want) {
			t.Fatalf("val %v not present in %v", want, m)
		}
	}
}
// Error calls t.Fatalf if err is nil.
func Error(t testing.TB, err error) {
	t.Helper()
	if err == nil {
		t.Fatalf("error is nil")
	}
}

// NilError calls t.Fatalf if err is not nil.
func NilError(t testing.TB, err error) {
	t.Helper()
	if err != nil {
		t.Fatalf("error is not nil: %v", err)
	}
}

// ErrorIs calls t.Fatalf if errors.Is(err, target) fails.
func ErrorIs(t testing.TB, err, target error) {
	t.Helper()
	if !errors.Is(err, target) {
		t.Fatalf("error.Is check failed for %v (target: %v)", err, target)
	}
}

// ErrorAs calls t.Fatalf if errors.As(err, target) fails.
func ErrorAs(t testing.TB, err error, target any) {
	t.Helper()
	if !errors.As(err, target) {
		t.Fatalf("error.As check failed for %v (target: %v)", err, target)
	}
}
package walker
import (
"fmt"
"strings"
"github.com/beefsack/go-astar"
)
// Graph extends a dijkstra's graph based on the schema.
// Each schema vertex is wrapped in a pather so the astar package can
// navigate neighbours and edge weights.
type Graph struct {
	schema *Schema
	pathers map[Vertex]*pather
}

// NewGraph prepares a graph from a schema: one pather per vertex, with
// neighbour lists and distances derived from the schema's arcs (arcs are
// treated as bidirectional).
func NewGraph(s *Schema) *Graph {
	var g Graph
	g.pathers = make(map[Vertex]*pather)
	g.schema = s
	// First pass: create a pather for every vertex.
	for _, v := range s.Verticies {
		g.pathers[v] = &pather{Vertex: v, Distances: make(map[astar.Pather]uint)}
	}
	// Second pass: wire neighbours and edge weights from the arcs.
	for _, p := range g.pathers {
		p.Neighbors = make([]astar.Pather, 0, len(s.Arcs))
		for _, a := range s.Arcs {
			var v Vertex
			switch p.Vertex {
			case a.Source:
				v = a.Destination
			case a.Destination:
				v = a.Source
			default:
				continue
			}
			p.Neighbors = append(p.Neighbors, g.pathers[v])
			p.Distances[g.pathers[v]] = a.Weight
		}
	}
	return &g
}
// shortest returns the shortest path between from and to. A path from a
// vertex to itself (or to the empty string) is just the vertex itself.
// NOTE(review): astar.Path is invoked as (to, from); the resulting slice
// presumably runs from `from` towards `to` — confirm against the astar
// package documentation.
func (g *Graph) shortest(from, to string) ([]string, error) {
	if from == to || to == "" {
		return []string{from}, nil
	}
	f, t := g.pathers[from], g.pathers[to]
	if f == nil || t == nil {
		// Unknown vertices: no path can exist.
		return nil, &PathError{Src: from, Dst: to}
	}
	pathers, _, ok := astar.Path(t, f)
	if !ok {
		return nil, &PathError{Src: from, Dst: to}
	}
	path := make([]string, 0, len(pathers))
	for _, pa := range pathers {
		p := pa.(*pather)
		path = append(path, p.Vertex)
	}
	return path, nil
}
// drawPath turns a vertex path into SQL FROM/JOIN fragments: the first
// vertex becomes the base table and every following vertex becomes a
// "JOIN <table> ON <link>" clause, applying the schema's table aliases
// ("alias AS name") where defined.
func (g *Graph) drawPath(path []string) []string {
	table := path[0]
	if alias, ok := g.schema.aliases[table]; ok {
		table = alias + " AS " + table
	}
	out := make([]string, 0, len(path))
	out = append(out, table)
	// Walk consecutive vertex pairs and emit the arc that connects them.
	for i := 0; i < len(path)-1; i++ {
		v1, v2 := path[i], path[i+1]
		for _, a := range g.schema.Arcs {
			var table string
			if a.Source == v1 && a.Destination == v2 {
				table = a.Destination
			}
			if a.Destination == v1 && a.Source == v2 {
				table = a.Source
			}
			if table != "" {
				if alias, ok := g.schema.aliases[v2]; ok {
					table = alias + " AS " + table
				}
				out = append(out, fmt.Sprintf("JOIN %s ON %s", table, a.Link))
			}
		}
	}
	return out
}
// From builds the FROM ... JOIN clause of the query to link the src with
// all dst tables. Clauses are de-duplicated so shared path prefixes are
// joined only once; with no destinations the result is just src's FROM
// clause.
func (g *Graph) From(src string, dst ...string) (string, error) {
	out := make([]string, 0, 2*len(dst)+1)
	indexes := make(map[string]struct{})
	if len(dst) == 0 {
		dst = append(dst, src)
	}
	for _, to := range dst {
		path, err := g.shortest(src, to)
		if err != nil {
			return "", err
		}
		path = g.drawPath(path)
		// Keep only the clauses that have not been emitted yet.
		for _, p := range path {
			if _, ok := indexes[p]; !ok {
				indexes[p] = struct{}{}
				out = append(out, p)
			}
		}
	}
	return strings.Join(out, " "), nil
}
package extraction
import (
"image"
"log"
"math"
"github.com/alevinval/fingerprints/src/matrix"
"github.com/alevinval/fingerprints/src/types"
)
// Frame detects the boundaries of the fingerprint and establishes
// a reference point and the angle of such reference point.
func Frame(binarizedSegmented *matrix.M) types.Frame {
	h := findHorizontalAxis(binarizedSegmented, false)
	v := findVerticalAxis(binarizedSegmented, false)
	// Diagonal: the bounding box spanned by the two axes.
	d := image.Rect(h.Min.X, v.Min.Y, h.Max.X, v.Max.Y)
	// Angle derived from the midpoints of the two axes.
	hx, hy := halfPoint(h)
	vx, vy := halfPoint(v)
	// NOTE(review): math.Sin is applied to a ratio here; computing an angle
	// from a ratio would normally use math.Asin or math.Atan2 — confirm intent.
	angle := math.Sin(float64(hx-vx) / float64(hy-vy))
	log.Printf("frame angle: %f", angle*180/math.Pi)
	return types.Frame{Horizontal: h, Vertical: v, Diagonal: d, Angle: angle}
}
// axis selects which image dimension findAxis scans first.
type axis byte

const (
	xAxis axis = iota
	yAxis
)

// findVerticalAxis locates the longest vertical run of bright pixels
// (values > 125) and returns it as a zero-width rectangle.
func findVerticalAxis(binarizedSegmented *matrix.M, isReversed bool) image.Rectangle {
	frame := findAxis(binarizedSegmented, xAxis, yAxis, isReversed)
	log.Printf("vertical frame: %s", frame)
	return frame
}
// findHorizontalAxis locates the longest horizontal run of bright pixels
// (values > 125) and returns it as a zero-height rectangle.
func findHorizontalAxis(binarizedSegmented *matrix.M, isReversed bool) image.Rectangle {
	// Forward the caller's isReversed flag; it was previously ignored and
	// hard-coded to false, unlike the symmetric findVerticalAxis.
	frame := findAxis(binarizedSegmented, yAxis, xAxis, isReversed)
	log.Printf("horizontal frame: %s", frame)
	return frame
}
// findAxis scans the matrix along firstAxis and, for each scan line, walks
// secondAxis looking for the longest contiguous run of bright pixels
// (value > 125). The best run found is returned as a degenerate rectangle:
// zero-width when firstAxis is xAxis, zero-height otherwise.
// NOTE(review): c and _b0 are not reset after a run ends within the same
// scan line, and a run touching the border is never recorded — confirm
// this is intended.
func findAxis(in *matrix.M, firstAxis, secondAxis axis, isReversed bool) image.Rectangle {
	bounds := in.Bounds()
	longestY := 0 // length of the longest run seen so far
	a0, b0, b1 := 0, 0, 0 // scan-line coordinate and run start/end of the best run
	providePoints(bounds, firstAxis, isReversed, func(a int) {
		c := 0 // length of the current run
		_b0 := 0 // start coordinate of the current run
		providePoints(bounds, secondAxis, false, func(b int) {
			// (a, b) maps to (x, y) or (y, x) depending on the primary axis.
			var v float64
			if firstAxis == xAxis {
				v = in.At(a, b)
			} else {
				v = in.At(b, a)
			}
			if c == 0 && v < 125 {
				// do nothing
			} else if v > 125 && _b0 == 0 {
				_b0 = b
				c++
			} else if v > 125 {
				c++
			} else {
				// Run ended; keep it if it is the longest so far.
				if c > longestY {
					longestY = c
					a0 = a
					b0 = _b0
					b1 = b
				}
			}
		})
	})
	var frame image.Rectangle
	if firstAxis == xAxis {
		frame = image.Rect(a0, b0, a0, b1)
	} else {
		frame = image.Rect(b0, a0, b1, a0)
	}
	return frame
}
// providePoints invokes f with every coordinate of the chosen axis of
// bounds, ascending by default or descending when isReversed is true.
func providePoints(bounds image.Rectangle, ax axis, isReversed bool, f func(n int)) {
	lo, hi := bounds.Min.X, bounds.Max.X
	if ax != xAxis {
		lo, hi = bounds.Min.Y, bounds.Max.Y
	}
	if isReversed {
		for n := hi - 1; n >= lo; n-- {
			f(n)
		}
		return
	}
	for n := lo; n < hi; n++ {
		f(n)
	}
}
// mergeFrame averages the horizontal and vertical axes of two frames.
// NOTE(review): Diagonal and Angle are left at their zero values —
// confirm callers do not rely on them after a merge.
func mergeFrame(a, b types.Frame) types.Frame {
	return types.Frame{
		Horizontal: halfPointAB(a.Horizontal, b.Horizontal),
		Vertical: halfPointAB(a.Vertical, b.Vertical),
	}
}
func halfPointAB(a, b image.Rectangle) image.Rectangle {
return image.Rect(
int((a.Min.X+b.Min.X)/2),
int((a.Min.Y+b.Min.Y)/2),
int((a.Max.X+b.Max.X)/2),
int((a.Max.Y+b.Max.Y)/2),
)
}
func halfPoint(r image.Rectangle) (int, int) {
return (r.Max.X + r.Min.X) / 2, (r.Max.Y + r.Min.Y) / 2
} | src/extraction/frame.go | 0.760295 | 0.471953 | frame.go | starcoder |
package main
import (
"fmt"
"math"
)
// Web-mercator constants: degree spans, the WGS84 spheroid radius in
// meters, and the latitude limit beyond which the projection is undefined.
const threeSixty float64 = 360.0
const oneEighty float64 = 180.0
const radius float64 = 6378137.0
const webMercatorLatLimit float64 = 85.05112877980659

// ErrTile describes a tile request that failed; Res carries the reason.
type ErrTile struct {
	X int `json:"x"`
	Y int `json:"y"`
	Z int `json:"z"`
	Res string `json:"res"`
}

// Tile is a stored tile: XYZ coordinates plus the raw tile content C.
// (Original comment: "custom tile storage".)
type Tile struct {
	X int
	Y int
	Z int
	C []byte
}

// TileXyz identifies a tile by its column X, row Y and zoom level Z.
type TileXyz struct {
	X int
	Y int
	Z int
}

// flipY converts the tile's row between XYZ and TMS numbering
// (the row count at zoom Z is 1<<Z).
func (tile Tile) flipY() int {
	return (1 << tile.Z) - tile.Y - 1
}

// LngLatBbox is a geographic bounding box in decimal degrees.
// Note the field order is West, East, North, South — positional struct
// literals must respect this order.
type LngLatBbox struct {
	West float64 `json:"west"`
	East float64 `json:"east"`
	North float64 `json:"north"`
	South float64 `json:"south"`
}

// TileOption holds per-zoom-level download settings: source URL, zoom,
// tile count and the bounding box to cover.
// (Original comment: "level & tile count".)
type TileOption struct {
	URL string
	Zoom int
	Count int
	Bound LngLatBbox
}

// Constants representing TileFormat types
const (
	GZIP string = "gzip" // encoding = gzip
	ZLIB = "zlib" // encoding = deflate
	PNG = "png"
	JPG = "jpg"
	PBF = "pbf"
	WEBP = "webp"
)

// GenerateTilesOptions parameterises GenerateTiles: the area, the zoom
// level, and the channel tiles are delivered on.
type GenerateTilesOptions struct {
	Bounds *LngLatBbox
	Zoom int
	Consumer chan TileXyz
}

// LngLat holds a standard geographic coordinate pair in decimal degrees.
type LngLat struct {
	Lng, Lat float64
}
// Intersects returns true if this bounding box intersects with the other
// bounding box (strict inequalities: touching edges do not count).
func (b *LngLatBbox) Intersects(o *LngLatBbox) bool {
	latOverlaps := (o.North > b.South) && (o.South < b.North)
	lngOverlaps := (o.East > b.West) && (o.West < b.East)
	return latOverlaps && lngOverlaps
}
// XY holds a Spherical Mercator point in meters.
type XY struct {
	X, Y float64
}
// deg2rad converts degrees to radians.
func deg2rad(deg float64) float64 {
	return deg * (math.Pi / oneEighty)
}

// rad2deg converts radians to degrees.
func rad2deg(rad float64) float64 {
	return rad * (oneEighty / math.Pi)
}

// min returns the smaller of two ints.
func min(a int, b int) int {
	if b < a {
		return b
	}
	return a
}
// GetTile returns the tile containing the given longitude/latitude at the
// given zoom level, using the standard XYZ web-mercator scheme.
func GetTile(lng float64, lat float64, zoom int) *TileXyz {
	n := math.Pow(2.0, float64(zoom))
	phi := deg2rad(lat)
	col := int(math.Floor((lng + oneEighty) / threeSixty * n))
	row := int(math.Floor((1.0 - math.Log(math.Tan(phi)+(1.0/math.Cos(phi)))/math.Pi) / 2.0 * n))
	return &TileXyz{X: col, Y: row, Z: zoom}
}
// GetTileCount returns the number of tiles needed to cover bounds at the
// given zoom level. Boxes crossing the antimeridian (west > east) are
// split in two; latitudes are clamped to the web-mercator limit.
func GetTileCount(bounds *LngLatBbox, zoom int) int {
	var boxes []*LngLatBbox
	if bounds.West > bounds.East {
		// Keyed literals: LngLatBbox is declared West/East/North/South, so
		// the previous positional values were assigned to the wrong fields.
		boxes = []*LngLatBbox{
			{West: -180.0, South: bounds.South, East: bounds.East, North: bounds.North},
			{West: bounds.West, South: bounds.South, East: 180.0, North: bounds.North},
		}
	} else {
		boxes = []*LngLatBbox{bounds}
	}
	var count int
	for _, box := range boxes {
		// Clamp the individual boxes to web mercator limits.
		clampedBox := &LngLatBbox{
			West:  math.Max(-180.0, box.West),
			South: math.Max(-webMercatorLatLimit, box.South),
			East:  math.Min(180.0, box.East),
			North: math.Min(webMercatorLatLimit, box.North),
		}
		ll := GetTile(clampedBox.West, clampedBox.South, zoom) // min X, max Y
		ur := GetTile(clampedBox.East, clampedBox.North, zoom) // max X, min Y
		minX := ll.X
		if minX < 0 {
			minX = 0
		}
		minY := ur.Y
		if minY < 0 {
			minY = 0
		}
		maxX := min(ur.X+1, 1<<zoom)
		maxY := min(ll.Y+1, 1<<zoom)
		// The previous version multiplied (maxY-minX) by (maxX-minY),
		// mixing the X and Y extents; the correct count is width * height.
		count += (maxX - minX) * (maxY - minY)
	}
	return count
}
// GenerateTiles streams every tile covering opts.Bounds at opts.Zoom onto
// opts.Consumer, closing the channel when enumeration finishes or when
// stop is signalled. Boxes crossing the antimeridian are split in two;
// latitudes are clamped to the web-mercator limit.
func GenerateTiles(opts *GenerateTilesOptions, stop chan int) {
	bounds := opts.Bounds
	zoom := opts.Zoom
	consumer := opts.Consumer
	complete := false
	defer func() {
		if complete {
			close(consumer)
		}
	}()
	var boxes []*LngLatBbox
	if bounds.West > bounds.East {
		// Keyed literals: LngLatBbox is declared West/East/North/South, so
		// the previous positional values were assigned to the wrong fields.
		boxes = []*LngLatBbox{
			{West: -180.0, South: bounds.South, East: bounds.East, North: bounds.North},
			{West: bounds.West, South: bounds.South, East: 180.0, North: bounds.North},
		}
	} else {
		boxes = []*LngLatBbox{bounds}
	}
	for _, box := range boxes {
		// Clamp the individual boxes to web mercator limits.
		clampedBox := &LngLatBbox{
			West:  math.Max(-180.0, box.West),
			South: math.Max(-webMercatorLatLimit, box.South),
			East:  math.Min(180.0, box.East),
			North: math.Min(webMercatorLatLimit, box.North),
		}
		ll := GetTile(clampedBox.West, clampedBox.South, zoom) // min X, max Y
		ur := GetTile(clampedBox.East, clampedBox.North, zoom) // max X, min Y
		minX := ll.X
		if minX < 0 {
			minX = 0
		}
		minY := ur.Y
		if minY < 0 {
			minY = 0
		}
		maxX := min(ur.X+1, 1<<zoom)
		maxY := min(ll.Y+1, 1<<zoom)
		// The previous version iterated x up to the Y bound and y up to the
		// X bound, which only worked for symmetric (e.g. whole-world) boxes.
		for x := minX; x < maxX; x++ {
			for y := minY; y < maxY; y++ {
				select {
				case <-stop:
					complete = true
					return
				case consumer <- TileXyz{Z: zoom, X: x, Y: y}:
				}
			}
		}
	}
	complete = true
}
// Equals reports whether this TileXyz addresses the same tile as t2.
// NOTE(review): the parameter is a *Tile rather than a *TileXyz; only the
// coordinate fields are compared — confirm the asymmetric types are intended.
func (tile *TileXyz) Equals(t2 *Tile) bool {
	return tile.X == t2.X && tile.Y == t2.Y && tile.Z == t2.Z
}
// Ul returns the upper-left corner of the tile in decimal degrees.
func (tile *TileXyz) Ul() *LngLat {
	n := math.Pow(2.0, float64(tile.Z))
	lonDeg := float64(tile.X)/n*threeSixty - oneEighty
	latRad := math.Atan(math.Sinh(math.Pi * (1 - (2 * float64(tile.Y) / n))))
	latDeg := rad2deg(latRad)
	return &LngLat{lonDeg, latDeg}
}
// Bounds returns the geographic bounding box covered by the tile: the
// tile's own upper-left corner (NW) and the upper-left corner of the tile
// one step down-right (SE).
func (tile *TileXyz) Bounds() *LngLatBbox {
	ul := tile.Ul()
	shifted := TileXyz{tile.X + 1, tile.Y + 1, tile.Z}
	lr := shifted.Ul()
	// Keyed fields: LngLatBbox is declared West/East/North/South, so the
	// previous positional literal put a latitude into East and a longitude
	// into North.
	return &LngLatBbox{West: ul.Lng, South: lr.Lat, East: lr.Lng, North: ul.Lat}
}
func (tile *TileXyz) Parent() *TileXyz {
if tile.Z == 0 && tile.X == 0 && tile.Y == 0 {
return tile
}
if math.Mod(float64(tile.X), 2) == 0 && math.Mod(float64(tile.Y), 2) == 0 {
return &TileXyz{tile.X / 2, tile.Y / 2, tile.Z - 1}
}
if math.Mod(float64(tile.X), 2) == 0 {
return &TileXyz{tile.X / 2, (tile.Y - 1) / 2, tile.Z - 1}
}
if math.Mod(float64(tile.X), 2) != 0 && math.Mod(float64(tile.Y), 2) != 0 {
return &TileXyz{(tile.X - 1) / 2, (tile.Y - 1) / 2, tile.Z - 1}
}
if math.Mod(float64(tile.X), 2) != 0 && math.Mod(float64(tile.Y), 2) == 0 {
return &TileXyz{(tile.X - 1) / 2, tile.Y / 2, tile.Z - 1}
}
return nil
}
// Children returns the four tiles at zoom Z+1 that subdivide this tile,
// in the order UL, UR, LR, LL.
func (tile *TileXyz) Children() []*TileXyz {
	x2, y2, z1 := tile.X*2, tile.Y*2, tile.Z+1
	return []*TileXyz{
		{x2, y2, z1},
		{x2 + 1, y2, z1},
		{x2 + 1, y2 + 1, z1},
		{x2, y2 + 1, z1},
	}
}
// ToString returns a string representation of the tile as "{z/x/y}".
func (tile *TileXyz) ToString() string {
	z, x, y := tile.Z, tile.X, tile.Y
	return fmt.Sprintf("{%d/%d/%d}", z, x, y)
}
//ToXY transforms WGS84 DD to Spherical Mercator meters
func ToXY(ll *LngLat) *XY {
x := radius * deg2rad(ll.Lng)
intrx := (math.Pi * 0.25) + (0.5 * deg2rad(ll.Lat))
y := radius * math.Log(math.Tan(intrx))
return &XY{x, y}
} | tilepack.go | 0.772015 | 0.403861 | tilepack.go | starcoder |
package main
import (
"math"
. "github.com/jakecoffman/cp"
"github.com/jakecoffman/cp/examples"
)
const (
FLUID_DENSITY = 0.00014
FLUID_DRAG = 2.0
)
// kScalarBody computes the scalar effective inverse mass of body at point
// along direction n: 1/m plus the rotational contribution rcn^2/I.
func kScalarBody(body *Body, point, n Vector) float64 {
	rcn := point.Sub(body.Position()).Cross(n)
	inverseMass := 1.0 / body.Mass()
	return inverseMass + rcn*rcn/body.Moment()
}
// waterPreSolve applies buoyancy and fluid drag to a polygon body that
// overlaps the water sensor. It is registered as the water/poly collision
// pre-solve handler; returning true lets the (sensor) collision proceed.
func waterPreSolve(arb *Arbiter, space *Space, ptr interface{}) bool {
	water, polyShape := arb.Shapes()
	poly := polyShape.Class.(*PolyShape)
	body := poly.Body()
	// Get the top of the water sensor bounding box to use as the water level.
	level := water.BB().T
	// Clip the polygon against the water level
	count := poly.Count()
	var clippedCount int
	clipped := make([]Vector, count + 1)
	j := count - 1
	for i := 0; i < count; i++ {
		// Walk edge (a, b), where a is the previous vertex (wrapping).
		a := body.LocalToWorld(poly.Vert(j))
		b := body.LocalToWorld(poly.Vert(i))
		// Keep vertices that are underwater.
		if a.Y < level {
			clipped[clippedCount] = a
			clippedCount++
		}
		aLevel := a.Y - level
		bLevel := b.Y - level
		// Opposite signs mean the edge crosses the water line: add the
		// interpolated intersection point.
		if aLevel*bLevel < 0 {
			t := math.Abs(aLevel) / (math.Abs(aLevel) + math.Abs(bLevel))
			clipped[clippedCount] = a.Lerp(b, t)
			clippedCount++
		}
		j = i
	}
	// Calculate buoyancy from the clipped polygon area
	clippedArea := AreaForPoly(clippedCount, clipped, 0)
	displacedMass := clippedArea * FLUID_DENSITY
	centroid := CentroidForPoly(clippedCount, clipped)
	// Debug draw of the submerged polygon and its centroid.
	examples.DrawPolygon(clippedCount, clipped, 0, FColor{0, 0, 1, 1}, FColor{0, 0, 1, 0.1})
	examples.DrawDot(5, centroid, FColor{0, 0, 1, 1})
	dt := space.TimeStep()
	g := space.Gravity()
	// Apply the buoyancy force as an impulse.
	body.ApplyImpulseAtWorldPoint(g.Mult(-displacedMass*dt), centroid)
	// Apply linear damping for the fluid drag.
	vCentroid := body.VelocityAtWorldPoint(centroid)
	k := kScalarBody(body, centroid, vCentroid.Normalize())
	damping := clippedArea * FLUID_DRAG * FLUID_DENSITY
	vCoef := math.Exp(-damping * dt * k) // linear drag
	body.ApplyImpulseAtWorldPoint(vCentroid.Mult(vCoef).Sub(vCentroid).Mult(1.0/k), centroid)
	// Apply angular damping for the fluid drag.
	cog := body.LocalToWorld(body.CenterOfGravity())
	wDamping := MomentForPoly(FLUID_DENSITY*FLUID_DRAG*clippedArea, clippedCount, clipped, cog.Neg(), 0)
	body.SetAngularVelocity(body.AngularVelocity() * math.Exp(-wDamping*dt/body.Moment()))
	return true
}
// main builds the buoyancy demo: a walled space containing a water-filled
// bucket (a sensor box plus three segment walls) and two floating boxes,
// then runs the example loop with waterPreSolve handling water contacts.
func main() {
	space := NewSpace()
	space.Iterations = 30
	space.SetGravity(Vector{0, -500})
	space.SleepTimeThreshold = 0.5
	space.SetCollisionSlop(0.5)
	// Screen border segments, given as endpoint pairs.
	walls := []Vector{
		{-320, -240}, {-320, 240},
		{320, -240}, {320, 240},
		{-320, -240}, {320, -240},
		{-320, 240}, {320, 240},
	}
	for i := 0; i < len(walls)-1; i += 2 {
		shape := space.AddShape(NewSegment(space.StaticBody, walls[i], walls[i+1], 0))
		shape.SetElasticity(1)
		shape.SetFriction(1)
		shape.SetFilter(examples.NotGrabbableFilter)
	}
	// add the edges of the bucket
	{
		bb := BB{-300, -200, 100, 0}
		radius := 5.0
		// Left, right and bottom walls of the bucket.
		shape := space.AddShape(NewSegment(space.StaticBody, Vector{bb.L, bb.B}, Vector{bb.L, bb.T}, radius))
		shape.SetElasticity(1)
		shape.SetFriction(1)
		shape.SetFilter(examples.NotGrabbableFilter)
		shape = space.AddShape(NewSegment(space.StaticBody, Vector{bb.R, bb.B}, Vector{bb.R, bb.T}, radius))
		shape.SetElasticity(1)
		shape.SetFriction(1)
		shape.SetFilter(examples.NotGrabbableFilter)
		shape = space.AddShape(NewSegment(space.StaticBody, Vector{bb.L, bb.B}, Vector{bb.R, bb.B}, radius))
		shape.SetElasticity(1)
		shape.SetFriction(1)
		shape.SetFilter(examples.NotGrabbableFilter)
		// Add the sensor for the water.
		shape = space.AddShape(NewBox2(space.StaticBody, bb, 0))
		shape.SetSensor(true)
		shape.SetCollisionType(1)
	}
	// A wide, light box dropped into the water.
	{
		width := 200.0
		height := 50.0
		mass := 0.3 * FLUID_DENSITY * width * height
		moment := MomentForBox(mass, width, height)
		body := space.AddBody(NewBody(mass, moment))
		body.SetPosition(Vector{-50, -100})
		body.SetVelocity(0, -100)
		body.SetAngularVelocity(1)
		shape := space.AddShape(NewBox(body, width, height, 0))
		shape.SetFriction(0.8)
	}
	// A smaller, taller box for contrast.
	{
		width := 40.0
		height := width * 2
		mass := 0.3 * FLUID_DENSITY * width * height
		moment := MomentForBox(mass, width, height)
		body := space.AddBody(NewBody(mass, moment))
		body.SetPosition(Vector{-200, -50})
		body.SetVelocity(0, -100)
		body.SetAngularVelocity(1)
		shape := space.AddShape(NewBox(body, width, height, 0))
		shape.SetFriction(0.8)
	}
	// Collision type 1 (the water sensor) vs 0 (default shapes).
	handler := space.NewCollisionHandler(1, 0)
	handler.PreSolveFunc = waterPreSolve
	examples.Main(space, 1.0/180.0, update, examples.DefaultDraw)
}
func update(space *Space, dt float64) {
space.Step(dt)
} | examples/buoyancy/buoyancy.go | 0.758242 | 0.530784 | buoyancy.go | starcoder |
package cryptypes
import (
	"database/sql/driver"
	"fmt"
)
// EncryptedByte supports encrypting Byte data.
type EncryptedByte struct {
	Field
	Raw byte // plaintext value; only the encrypted form reaches the DB
}

// Scan converts the value from the DB into a usable EncryptedByte value.
// It returns an error instead of panicking when the driver supplies a
// value that is not a []byte.
func (s *EncryptedByte) Scan(value interface{}) error {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into EncryptedByte", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized EncryptedByte value into a value that can safely be stored in the DB.
func (s EncryptedByte) Value() (driver.Value, error) {
	return encrypt(s.Raw)
}
// NullEncryptedByte supports encrypting nullable Byte data.
type NullEncryptedByte struct {
	Field
	Raw   byte
	Empty bool // true when the DB column was NULL
}

// Scan converts the value from the DB into a usable NullEncryptedByte value.
// A nil DB value maps to the zero value with Empty set; a non-[]byte value
// yields an error instead of a panic.
func (s *NullEncryptedByte) Scan(value interface{}) error {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullEncryptedByte", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized NullEncryptedByte value into a value that can safely be stored in the DB.
func (s NullEncryptedByte) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encrypt(s.Raw)
}
// SignedByte supports signing Byte data.
type SignedByte struct {
	Field
	Raw   byte
	Valid bool // true when the stored signature verified on Scan
}

// Scan converts the value from the DB into a usable SignedByte value.
// It returns an error instead of panicking when the driver supplies a
// value that is not a []byte.
func (s *SignedByte) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedByte", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized SignedByte value into a value that can safely be stored in the DB.
func (s SignedByte) Value() (driver.Value, error) {
	return sign(s.Raw)
}
// NullSignedByte supports signing nullable Byte data.
type NullSignedByte struct {
	Field
	Raw   byte
	Empty bool // true when the DB column was NULL
	Valid bool // true when the stored signature verified on Scan
}

// Scan converts the value from the DB into a usable NullSignedByte value.
// A nil DB value maps to the zero value with Empty and Valid set; a
// non-[]byte value yields an error instead of a panic.
func (s *NullSignedByte) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		s.Valid = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullSignedByte", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized NullSignedByte value into a value that can safely be stored in the DB.
func (s NullSignedByte) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return sign(s.Raw)
}
// SignedEncryptedByte supports signing and encrypting Byte data.
type SignedEncryptedByte struct {
	Field
	Raw   byte
	Valid bool // true when the stored signature verified on Scan
}

// Scan converts the value from the DB into a usable SignedEncryptedByte value.
// It returns an error instead of panicking when the driver supplies a
// value that is not a []byte.
func (s *SignedEncryptedByte) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedEncryptedByte", value)
	}
	s.Valid, err = decryptVerify(b, &s.Raw)
	return
}

// Value converts an initialized SignedEncryptedByte value into a value that can safely be stored in the DB.
func (s SignedEncryptedByte) Value() (driver.Value, error) {
	return encryptSign(s.Raw)
}
// NullSignedEncryptedByte supports signing and encrypting nullable Byte data
type NullSignedEncryptedByte struct {
Field
Raw byte
Empty bool
Valid bool
}
// Scan converts the value from the DB into a usable NullSignedEncryptedByte value
func (s *NullSignedEncryptedByte) Scan(value interface{}) (err error) {
if value == nil {
s.Raw = 0
s.Empty = true
s.Valid = true
return nil
}
s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
return
}
// Value converts an initialized NullSignedEncryptedByte value into a value that can safely be stored in the DB
func (s NullSignedEncryptedByte) Value() (driver.Value, error) {
if s.Empty {
return nil, nil
}
return encryptSign(s.Raw)
} | cryptypes/type_byte.go | 0.824956 | 0.625867 | type_byte.go | starcoder |
package advent
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
// parsePasswordLine parses a line with format "1-2 a: abcde".
// It returns the two integers, the single-letter target and the password,
// or an error when the line does not have the expected shape (the previous
// version panicked with an index-out-of-range on malformed input).
func parsePasswordLine(line string) (first int, second int, target string, password string, err error) {
	line = strings.Replace(line, ":", "", 1)
	splitLine := strings.Split(line, " ")
	if len(splitLine) != 3 {
		err = fmt.Errorf("malformed password line %q", line)
		return
	}
	bounds := strings.Split(splitLine[0], "-")
	if len(bounds) != 2 {
		err = fmt.Errorf("malformed range in password line %q", line)
		return
	}
	first, err = strconv.Atoi(bounds[0])
	if err != nil {
		return
	}
	second, err = strconv.Atoi(bounds[1])
	if err != nil {
		return
	}
	target = splitLine[1]
	password = splitLine[2]
	return
}
// checkCountAllowed reports whether the number of occurrences of target in
// password lies within [min, max] inclusive.
func checkCountAllowed(password string, target string, min int, max int) (allowed bool) {
	n := strings.Count(password, target)
	allowed = n >= min && n <= max
	return
}
// checkPositionAllowed reports whether target appears at exactly one of the
// two 1-based indices (an XOR of the two position checks).
func checkPositionAllowed(password string, target string, first int, second int) (allowed bool) {
	atFirst := string(password[first-1]) == target
	atSecond := string(password[second-1]) == target
	allowed = atFirst != atSecond
	return
}
// findValidPasswords reads password-policy lines from inputFile and prints
// how many pass the count rule (part 1) and the position rule (part 2).
// Malformed lines are reported and skipped; previously they fell through
// with zero values and made checkPositionAllowed panic on password[-1].
func findValidPasswords(inputFile string) {
	file, err := os.Open(inputFile)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	nAllowed1 := 0
	nAllowed2 := 0
	for scanner.Scan() {
		first, second, target, password, err := parsePasswordLine(scanner.Text())
		if err != nil {
			fmt.Println(err)
			continue // skip malformed lines instead of checking zero values
		}
		if checkCountAllowed(password, target, first, second) {
			nAllowed1++
		}
		if checkPositionAllowed(password, target, first, second) {
			nAllowed2++
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Part 1:", nAllowed1)
	fmt.Println("Part 2:", nAllowed2)
}
/*
--- Day 2: Password Philosophy ---
Your flight departs in a few days from the coastal airport; the easiest way down to the coast from here is via toboggan.
The shopkeeper at the North Pole Toboggan Rental Shop is having a bad day. "Something's wrong with our computers; we can't log in!" You ask if you can take a look.
Their password database seems to be a little corrupted: some of the passwords wouldn't have been allowed by the Official Toboggan Corporate Policy that was in effect when they were chosen.
To try to debug the problem, they have created a list (your puzzle input) of passwords (according to the corrupted database) and the corporate policy when that password was set.
For example, suppose you have the following list:
1-3 a: abcde
1-3 b: cdefg
2-9 c: ccccccccc
Each line gives the password policy and then the password. The password policy indicates the lowest and highest number of times a given letter must appear for the password to be valid. For example, 1-3 a means that the password must contain a at least 1 time and at most 3 times.
In the above example, 2 passwords are valid. The middle password, cdefg, is not; it contains no instances of b, but needs at least 1. The first and third passwords are valid: they contain one a or nine c, both within the limits of their respective policies.
How many passwords are valid according to their policies?
--- Part Two ---
While it appears you validated the passwords correctly, they don't seem to be what the Official Toboggan Corporate Authentication System is expecting.
The shopkeeper suddenly realizes that he just accidentally explained the password policy rules from his old job at the sled rental place down the street! The Official Toboggan Corporate Policy actually works a little differently.
Each policy actually describes two positions in the password, where 1 means the first character, 2 means the second character, and so on. (Be careful; Toboggan Corporate Policies have no concept of "index zero"!) Exactly one of these positions must contain the given letter. Other occurrences of the letter are irrelevant for the purposes of policy enforcement.
Given the same example list from above:
1-3 a: abcde is valid: position 1 contains a and position 3 does not.
1-3 b: cdefg is invalid: neither position 1 nor position 3 contains b.
2-9 c: ccccccccc is invalid: both position 2 and position 9 contain c.
How many passwords are valid according to the new interpretation of the policies?
*/
func Day2() {
fmt.Println("Test")
findValidPasswords("inputs/day2_test.txt")
fmt.Println("Main")
findValidPasswords("inputs/day2.txt")
} | cmd/day2.go | 0.645567 | 0.434281 | day2.go | starcoder |
package num
import (
"encoding/binary"
"math"
"github.com/flier/gocombine/pkg/parser"
"github.com/flier/gocombine/pkg/parser/bytes"
"github.com/flier/gocombine/pkg/parser/combinator"
)
// Uint16 reads a uint16 out of the byte stream with the specified endianess.
// (TakeOf[uint16] presumably consumes exactly 2 bytes — confirm against its
// contract.) The parser's expected-token message is "uint16".
func Uint16(endian binary.ByteOrder) parser.Func[byte, uint16] {
	return combinator.Map(bytes.TakeOf[uint16](), endian.Uint16).Expected("uint16")
}
// Uint32 reads a uint32 out of the byte stream with the specified endianess.
// The parser's expected-token message is "uint32".
func Uint32(endian binary.ByteOrder) parser.Func[byte, uint32] {
	return combinator.Map(bytes.TakeOf[uint32](), endian.Uint32).Expected("uint32")
}
// Uint64 reads a uint64 out of the byte stream with the specified endianess.
// The parser's expected-token message is "uint64".
func Uint64(endian binary.ByteOrder) parser.Func[byte, uint64] {
	return combinator.Map(bytes.TakeOf[uint64](), endian.Uint64).Expected("uint64")
}
// Int16 reads an int16 out of the byte stream with the specified endianess,
// reinterpreting the unsigned decoding as a signed value.
func Int16(endian binary.ByteOrder) parser.Func[byte, int16] {
	fromBytes := func(b []byte) int16 {
		return int16(endian.Uint16(b))
	}
	return combinator.Map(bytes.TakeOf[uint16](), fromBytes).Expected("int16")
}
// Int32 reads an int32 out of the byte stream with the specified endianess,
// reinterpreting the unsigned decoding as a signed value.
func Int32(endian binary.ByteOrder) parser.Func[byte, int32] {
	fromBytes := func(b []byte) int32 {
		return int32(endian.Uint32(b))
	}
	return combinator.Map(bytes.TakeOf[uint32](), fromBytes).Expected("int32")
}
// Int64 reads an int64 out of the byte stream with the specified endianess,
// reinterpreting the unsigned decoding as a signed value.
func Int64(endian binary.ByteOrder) parser.Func[byte, int64] {
	fromBytes := func(b []byte) int64 {
		return int64(endian.Uint64(b))
	}
	return combinator.Map(bytes.TakeOf[uint64](), fromBytes).Expected("int64")
}
// Float32 reads a float32 out of the byte stream with the specified
// endianess, reinterpreting the raw 32 bits via math.Float32frombits.
func Float32(endian binary.ByteOrder) parser.Func[byte, float32] {
	fromBytes := func(b []byte) float32 {
		return math.Float32frombits(endian.Uint32(b))
	}
	return combinator.Map(bytes.TakeOf[uint32](), fromBytes).Expected("float32")
}
// Float64 reads a float64 out of the byte stream with the specified endianess.
func Float64(endian binary.ByteOrder) parser.Func[byte, float64] {
return combinator.Map(bytes.TakeOf[uint64](), func(b []byte) float64 {
return math.Float64frombits(endian.Uint64(b))
}).Expected("float64")
} | pkg/parser/bytes/num/num.go | 0.785309 | 0.505859 | num.go | starcoder |
package main
import (
"fmt"
"log"
"sort"
"strconv"
)
// strToFloat64 parses str as a float64, terminating the program via
// log.Fatal when parsing fails.
func strToFloat64(str string) float64 {
	v, err := strconv.ParseFloat(str, 64)
	if err != nil {
		log.Fatal(err)
	}
	return v
}
// main demonstrates multi-key sorting: domain ascending, then the numeric
// third column descending, then the second column ascending.
func main() {
	rows := [][]string{
		[]string{"cdomain.com", "3", "-5.02", "aaa", "aaa"},
		[]string{"cdomain.com", "2", "133.02", "aaa", "aaa"},
		[]string{"cdomain.com", "1", "1.02", "aaa", "aaa"},
		[]string{"bdomain.com", "2", "23.02", "aaa", "aaa"},
		[]string{"bdomain.com", "1", "12.02", "aaa", "aaa"},
		[]string{"bdomain.com", "3", "53.02", "aaa", "aaa"},
		[]string{"adomain.com", "5", "32.1232", "aaa", "aaa"},
		[]string{"adomain.com", "3", "2.02202", "aaa", "aaa"},
		[]string{"adomain.com", "1", "511.02", "aaa", "aaa"},
	}
	ascendingName0 := func(row1, row2 *[]string) bool {
		return (*row1)[0] < (*row2)[0]
	}
	descendingVal := func(row1, row2 *[]string) bool {
		return strToFloat64((*row1)[2]) > strToFloat64((*row2)[2])
	}
	ascendingName1 := func(row1, row2 *[]string) bool {
		return (*row1)[1] < (*row2)[1]
	}
	by(rows, ascendingName0, descendingVal, ascendingName1).Sort(rows)
	rs := fmt.Sprintf("%v", rows)
	if rs != "[[adomain.com 1 511.02 aaa aaa] [adomain.com 5 32.1232 aaa aaa] [adomain.com 3 2.02202 aaa aaa] [bdomain.com 3 53.02 aaa aaa] [bdomain.com 2 23.02 aaa aaa] [bdomain.com 1 12.02 aaa aaa] [cdomain.com 2 133.02 aaa aaa] [cdomain.com 1 1.02 aaa aaa] [cdomain.com 3 -5.02 aaa aaa]]" {
		// The previous code built this message with fmt.Errorf and discarded
		// the resulting error, so mismatches were never reported.
		fmt.Printf("unexpected sort order: %v\n", rows)
	}
}
// by returns a multiSorter over rows that sorts using the given less
// functions, applied in order (earlier comparators take precedence).
func by(rows [][]string, lesses ...lessFunc) *multiSorter {
	ms := &multiSorter{
		data: rows,
		less: lesses,
	}
	return ms
}
// lessFunc compares between two string slices.
// It reports whether the row pointed to by p1 orders before the row
// pointed to by p2.
type lessFunc func(p1, p2 *[]string) bool
// makeAscendingFunc returns a comparator that orders rows ascending by the
// string in column idx.
func makeAscendingFunc(idx int) func(row1, row2 *[]string) bool {
	return func(a, b *[]string) bool {
		return (*a)[idx] < (*b)[idx]
	}
}
// multiSorter implements the Sort interface
// , sorting the two dimensional string slices within.
type multiSorter struct {
	data [][]string // rows being sorted, in place
	less []lessFunc // comparators applied in priority order
}
// Sort sorts the rows according to lessFunc.
// NOTE(review): the rows parameter is ignored; what actually gets sorted is
// the receiver's data slice (set by by()) — confirm this is intended.
func (ms *multiSorter) Sort(rows [][]string) {
	sort.Sort(ms)
}
// Len is part of sort.Interface.
// It reports the number of rows being sorted.
func (ms *multiSorter) Len() int {
	return len(ms.data)
}
// Swap is part of sort.Interface.
// It exchanges rows i and j in place.
func (ms *multiSorter) Swap(i, j int) {
	ms.data[i], ms.data[j] = ms.data[j], ms.data[i]
}
// Less is part of sort.Interface.
func (ms *multiSorter) Less(i, j int) bool {
p, q := &ms.data[i], &ms.data[j]
var k int
for k = 0; k < len(ms.less)-1; k++ {
less := ms.less[k]
switch {
case less(p, q):
// p < q
return true
case less(q, p):
// p > q
return false
}
// p == q; try next comparison
}
return ms.less[k](p, q)
} | doc/go_sort_algorithm/code/07_sort_by.go | 0.592431 | 0.454048 | 07_sort_by.go | starcoder |
package avalanche
// Vote represents a single vote for a target.
type Vote struct {
	err uint32 // this is called "error" in abc for some reason; 0 presumably means a yes vote (see regsiterVote) — confirm
	hash Hash // the target being voted on
}
// NewVote creates a new Vote carrying err for the given target hash.
func NewVote(err uint32, hash Hash) Vote {
	return Vote{err: err, hash: hash}
}
// GetHash returns the target hash.
// (Go convention would drop the Get prefix; the name is kept for existing
// callers.)
func (v Vote) GetHash() Hash {
	return v.hash
}
// GetError returns the vote's error/status code.
// (Go convention would drop the Get prefix; the name is kept for existing
// callers.)
func (v Vote) GetError() uint32 {
	return v.err
}
// VoteRecord keeps track of a series of votes for a target.
type VoteRecord struct {
	votes uint8 // sliding 8-vote window of yes (1) / no (0) votes
	consider uint8 // matching window marking which votes are conclusive
	confidence uint16 // bit 0: accepted flag; upper bits: confidence counter
}
// NewVoteRecord instantiates a new base record for voting on a target.
// accepted selects whether the initial state (bit 0 of confidence) is
// acceptance or rejection.
func NewVoteRecord(accepted bool) *VoteRecord {
	vr := &VoteRecord{}
	vr.confidence = boolToUint16(accepted)
	return vr
}
// isAccepted reports whether the current voted state is acceptance,
// encoded in the low bit of confidence.
func (vr VoteRecord) isAccepted() bool {
	return vr.confidence&1 != 0
}
// getConfidence returns the confidence in the current state's finalization:
// the confidence word with its low acceptance bit shifted out.
func (vr VoteRecord) getConfidence() uint16 {
	return vr.confidence >> 1
}
// hasFinalized reports whether the record's confidence has reached the
// AvalancheFinalizationScore threshold, fixing the current state.
func (vr VoteRecord) hasFinalized() bool {
	return vr.getConfidence() >= AvalancheFinalizationScore
}
// regsiterVote adds a new vote for an item and update confidence accordingly.
// Returns true if the acceptance or finalization state changed.
//
// NOTE(review): the method name has a typo ("regsiter"); kept to avoid
// breaking callers.
func (vr *VoteRecord) regsiterVote(err uint32) bool {
	// Shift the new vote into the 8-vote sliding windows: votes records
	// yes (err == 0), consider records whether the vote counts at all
	// (int32(err) >= 0, i.e. err < 2^31; presumably high-bit codes mean
	// "no opinion" — confirm).
	vr.votes = (vr.votes << 1) | boolToUint8(err == 0)
	vr.consider = (vr.consider << 1) | boolToUint8(int32(err) >= 0)
	// yes: at least 7 of the last 8 considered votes were positive.
	yes := countBits8(vr.votes&vr.consider&0xff) > 6
	// The round is inconclusive
	// (-vr.votes-1 is the bitwise complement ^vr.votes, i.e. the "no" votes;
	// neither side reached 7 of 8, so nothing changes.)
	if !yes && countBits8((-vr.votes-1)&vr.consider&0xff) <= 6 {
		return false
	}
	// Vote is conclusive and agrees with our current state
	// (+= 2 bumps the shifted confidence counter by one while preserving
	// the acceptance bit).
	if vr.isAccepted() == yes {
		vr.confidence += 2
		return vr.getConfidence() == AvalancheFinalizationScore
	}
	// Vote is conclusive but does not agree with our current state
	vr.confidence = boolToUint16(yes)
	return true
}
// status derives the record's Status from its finalization and acceptance
// flags: accepted/rejected while voting, finalized/invalid once fixed.
func (vr *VoteRecord) status() (status Status) {
	if vr.hasFinalized() {
		if vr.isAccepted() {
			return StatusFinalized
		}
		return StatusInvalid
	}
	if vr.isAccepted() {
		return StatusAccepted
	}
	return StatusRejected
}
// countBits8 returns the number of set bits in i (population count).
func countBits8(i uint8) (count int) {
	for i != 0 {
		count += int(i & 1)
		i >>= 1
	}
	return count
}
// boolToUint8 converts b to 1 (true) or 0 (false).
func boolToUint8(b bool) uint8 {
	var v uint8
	if b {
		v = 1
	}
	return v
}
// boolToUint16 converts b to 1 (true) or 0 (false).
func boolToUint16(b bool) uint16 {
	if b {
		return 1
	}
	return 0
}
package mdr
import (
"fmt"
"math"
"math/rand"
)
var NormalZtable *Table
// init builds NormalZtable, the standard-normal CDF lookup used by the
// table-based fallback in GenRandomZNormal.
func init() {
	Verbose.Printf("mdr.randgen.go init() entry\n")
	defer Verbose.Printf("mdr.randgen.go init() exit\n")
	// table contains : z value, area (ie. probability) to left of z value
	// upper half of table only, negative values = (1.0 - upper value)
	// table values could be expanded if it makes sense to do so
	// the 60 value is a wag
	NormalZtable = new(Table)
	NormalZtable.Name = "normal_Z_table"
	NormalZtable.Data =
		[]DblPair{
			{0.0, 0.5000},
			{0.1, 0.5398},
			{0.2, 0.5793},
			{0.25, 0.5987},
			{0.5, 0.6915},
			{0.75, 0.7734},
			{1.0, 0.8413},
			{1.5, 0.9332},
			{2.0, 0.9722},
			{2.4, 0.9938},
			{3.0, 0.9987},
			{3.4, 0.9997},
			{3.49, 0.9998}, // last value in table IV of Walpole & Meyers "Prob & Stat for Engrs & Sci"
			{4.00, 0.99999},
			{5.00, 0.999999},
			{6.00, 0.9999999},
			{8.00, 0.999999999},
			{25.00, 1.00},
		}
}
// GenRandomZNormal returns a float64 with average of 0 and standard deviation of 1.0
// as implemented the range of values returned will be in [-60..60]
func GenRandomZNormal() float64 {
if true {
return rand.NormFloat64()
} else {
rnd := 0.5 + (rand.Float64() / 2.0) // should return [.5 .. 1.0]
rv, err := NormalZtable.ReverseEval(rnd)
if err != nil {
Crash(fmt.Sprintf("reverse eval of normalZtable failed with err %v", err))
}
if len(rv) > 1 {
Crash(fmt.Sprintf("got more than one return value\n"))
}
if FlipCoin() {
rv[0] = -rv[0]
}
return rv[0]
}
}
// GenRandIntBtw returns a uniformly random int in [lo, hi]; both endpoints
// may occur. The arguments may be given in either order.
func GenRandIntBtw(lo, hi int) int {
	if lo > hi {
		lo, hi = hi, lo
	}
	span := int32(hi - lo + 1)
	return lo + int(rand.Int31()%span)
}
// GenRandomNormal returns a normally distributed float64 with mean mu and
// standard deviation stdev. (The extra sign flip is redundant for a
// symmetric deviate but preserved from the original.)
func GenRandomNormal(mu, stdev float64) float64 {
	dev := stdev * GenRandomZNormal()
	if FlipCoin() {
		dev = -dev
	}
	return mu + dev
}
// GenRandomPoisson returns a Poisson-distributed int with rate lambda
// (Knuth's multiplicative algorithm); the result is always at least 1.
// Typically used to determine the number of time units before an event.
func GenRandomPoisson(lambda float64) int {
	threshold := 1.0 / math.Exp(lambda)
	k := 0
	p := 1.0
	for {
		k++
		p *= rand.Float64()
		if p <= threshold {
			return k
		}
	}
}
// GenFlipCoin simulates a fair coin: roughly 50% true, 50% false.
func GenFlipCoin() bool {
	heads := rand.Int31n(2)
	return heads == 0
}
// GenRandomUniform returns a float64 uniformly distributed in [0, r).
func GenRandomUniform(r float64) float64 {
	u := rand.Float64()
	return u * r
}
// GenRandomUniformLoHi returns a float64 uniformly distributed in
// [low, high). The arguments may be given in either order; high > low is
// NOT required.
func GenRandomUniformLoHi(low, high float64) float64 {
	if low > high {
		low, high = high, low
	}
	// Inlines GenRandomUniform(high-low): rand.Float64() scaled to the span.
	return low + rand.Float64()*(high-low)
}
// GenRandIntBetween returns a uniformly random int in [lo, hi]; both
// endpoints may occur in the output. Arguments may be in either order.
func GenRandIntBetween(lo, hi int) int {
	if lo > hi {
		lo, hi = hi, lo
	}
	offset := int(rand.Int31() % int32(hi-lo+1))
	return lo + offset
}
// GenRandF64Between returns a float64 uniformly distributed in [lo, hi).
// Arguments may be in either order.
func GenRandF64Between(lo, hi float64) float64 {
	if lo > hi {
		lo, hi = hi, lo
	}
	return lo + rand.Float64()*(hi-lo)
}
package photon
import (
"math"
"math/rand"
"github.com/alan-christopher/bb84/go/bb84/bitmap"
)
// NewSimulatedChannel creates a pair of (Sender, Receiver) structs simulating a
// Quantum channel. It is expected that each call to Send() will be mirrored by
// a call to Receive(). Expect errors if that is not the case, and for calls to
// Send() to hang if more than 1 of them are made before a Receive().
//
// pMain biases basis choice; muLo/muMed/muHi are pulse intensities chosen
// with probabilities pLo/pMed/pHi. The two halves are linked by three
// channels (bits, bases, drops), each buffered to hold one frame.
func NewSimulatedChannel(pMain, muLo, muMed, muHi, pLo, pMed, pHi float64,
	sendRand, receiveRand *rand.Rand) (*SimulatedSender, *SimulatedReceiver) {
	bits := make(chan bitmap.Dense, 1)
	bases := make(chan bitmap.Dense, 1)
	drops := make(chan bitmap.Dense, 1)
	ss := &SimulatedSender{
		bits: bits,
		bases: bases,
		drops: drops,
		muLo: muLo,
		muMed: muMed,
		muHi: muHi,
		pLo: pLo,
		pMed: pMed,
		pHi: pHi,
		pMain: pMain,
		rand: sendRand,
	}
	sr := &SimulatedReceiver{
		bits: bits,
		bases: bases,
		drops: drops,
		pMain: pMain,
		rand: receiveRand,
	}
	return ss, sr
}
// SimulatedSender is the transmitting half of a simulated quantum channel.
type SimulatedSender struct {
	bits chan<- bitmap.Dense // raw bit frame handed to the receiver
	bases chan<- bitmap.Dense // basis choices for the frame
	drops chan<- bitmap.Dense // photons lost in transmission
	pMain float64 // basis bias: bases bits are set with probability 1-pMain
	muLo, muMed, muHi float64 // pulse intensities for the three decoy states
	pLo, pMed, pHi float64 // selection probabilities for the three intensities
	rand *rand.Rand // seeded source for reproducible simulation
}
// SimulatedReceiver is the receiving half of a simulated quantum channel.
type SimulatedReceiver struct {
	Errors []byte // additional bit flips to force, OR'd into each frame (resized to frame length)
	Drops []byte // additional drops to force, OR'd into each frame
	pMain float64 // basis bias, mirroring the sender's
	bits <-chan bitmap.Dense
	bases <-chan bitmap.Dense
	drops <-chan bitmap.Dense
	rand *rand.Rand // seeded source for reproducible simulation
}
// Next produces one frame of `bytes` random bits plus, per bit: a basis
// choice, which of the three intensity levels was used (lo/med/hi masks),
// and whether the photon was dropped. Bits, bases and drops are forwarded
// to the paired receiver via the internal channels.
func (ss *SimulatedSender) Next(bytes int) (bits, bases, lo, med, hi []byte, err error) {
	bits = make([]byte, bytes)
	ss.rand.Read(bits)
	baBases := bitmap.Empty()
	baLo := bitmap.Empty()
	baMed := bitmap.Empty()
	baHi := bitmap.Empty()
	drops := bitmap.Empty()
	pZ := 1 - ss.pMain
	for i := 0; i < bytes*8; i++ {
		baBases.AppendBit(ss.rand.Float64() < pZ)
		// Pick one of the three intensity levels with probabilities
		// pLo, pMed, pHi (hi takes the remainder).
		r := ss.rand.Float64()
		isLo := r < ss.pLo
		isMed := !isLo && r < ss.pLo+ss.pMed
		isHi := !isLo && !isMed
		baLo.AppendBit(isLo)
		baMed.AppendBit(isMed)
		baHi.AppendBit(isHi)
		mu := ss.muLo
		if isMed {
			mu = ss.muMed
		} else if isHi {
			mu = ss.muHi
		}
		// exp(-mu) is presumably the Poissonian probability of the pulse
		// containing zero photons, i.e. a drop — confirm.
		drops.AppendBit(ss.rand.Float64() < math.Exp(-mu))
	}
	bases = baBases.Data()
	lo = baLo.Data()
	med = baMed.Data()
	hi = baHi.Data()
	ss.bits <- bitmap.NewDense(bits, -1)
	ss.bases <- baBases
	ss.drops <- drops
	return
}
// Next consumes one frame from the paired sender, picks its own random
// measurement bases, randomizes bits measured in a mismatched basis,
// injects the configured synthetic errors and drops, and returns the
// resulting bits, bases and drop mask.
func (sr *SimulatedReceiver) Next(bytes int) (bits, bases, dropped []byte, err error) {
	sendBits := <-sr.bits
	sendBases := <-sr.bases
	drops := <-sr.drops
	receiveBases := bitmap.Empty()
	pZ := 1 - sr.pMain
	for i := 0; i < sendBits.Size(); i++ {
		receiveBases.AppendBit(sr.rand.Float64() < pZ)
	}
	synthErrs, err := sr.resize(bitmap.NewDense(sr.Errors, -1), bytes*8)
	if err != nil {
		return nil, nil, nil, err
	}
	synthDrops, err := sr.resize(bitmap.NewDense(sr.Drops, -1), bytes*8)
	if err != nil {
		return nil, nil, nil, err
	}
	buf := make([]byte, sendBits.SizeBytes())
	// Draw from the receiver's seeded source, not the global rand, so
	// simulations stay reproducible. (Previously this one call used the
	// package-level rand.Read while every other draw used sr.rand.)
	sr.rand.Read(buf)
	flips := bitmap.NewDense(buf, -1)
	// Random flips apply only where the two basis choices disagree.
	flips = bitmap.And(flips, bitmap.XOr(sendBases, receiveBases))
	flips = bitmap.Or(flips, synthErrs)
	drops = bitmap.Or(drops, synthDrops)
	return bitmap.XOr(flips, sendBits).Data(), receiveBases.Data(), drops.Data(), nil
}
func (sr *SimulatedReceiver) resize(r bitmap.Dense, s int) (bitmap.Dense, error) {
if r.Size() < s {
r2 := bitmap.NewDense(nil, s-r.Size())
r.Append(r2)
return r, nil
}
if r.Size() > s {
return bitmap.Slice(r, 0, s)
}
return r, nil
} | go/bb84/photon/simulated.go | 0.658527 | 0.500793 | simulated.go | starcoder |
package labels
import (
"fmt"
"sync"
"github.com/janelia-flyem/dvid/dvid"
)
var (
mc mergeCache
labelsMerging dirtyCache
labelsSplitting dirtyCache
)
const (
// MaxAllowedLabel is the largest label that should be allowed by DVID if we want to take
// into account the maximum integer size within Javascript (due to its underlying use of
// a double float for numbers, leading to max int = 2^53 - 1).
// This would circumvent the need to use strings within JSON (e.g., the Google solution)
// to represent integer labels that could exceed the max javascript number. It would
// require adding a value check on each label voxel of a mutation request, which might
// be too much of a hit to handle an edge case.
MaxAllowedLabel = 9007199254740991
)
// LabelMap returns a label mapping for a version of a data instance.
// If no label mapping is available, a nil is returned.
// It is a thin wrapper over the package-level merge cache.
func LabelMap(iv dvid.InstanceVersion) *Mapping {
	return mc.LabelMap(iv)
}
// MergeStart handles label map caches during an active merge operation. Note that if there are
// multiple synced label instances, the InstanceVersion always be the labelblk instance.
// It refuses merges that overlap an in-progress split, records the merge in
// the mapping cache, and bumps dirty counts on all involved labels.
func MergeStart(iv dvid.InstanceVersion, op MergeOp) error {
	// Don't allow a merge to start in the middle of a concurrent split.
	if labelsSplitting.IsDirty(iv, op.Target) { // we might be able to relax this one.
		return fmt.Errorf("can't merge into label %d while it has an ongoing split", op.Target)
	}
	for merged := range op.Merged {
		if labelsSplitting.IsDirty(iv, merged) {
			return fmt.Errorf("can't merge label %d while it has an ongoing split", merged)
		}
	}
	// Add the merge to the mapping.
	if err := mc.Add(iv, op); err != nil {
		return err
	}
	// Adjust the dirty counts on the involved labels.
	labelsMerging.AddMerge(iv, op)
	return nil
}
// MergeStop marks the end of a merge operation, decrementing dirty counts
// and discarding the version's merge cache once no merges remain.
func MergeStop(iv dvid.InstanceVersion, op MergeOp) {
	// Adjust the dirty counts on the involved labels.
	labelsMerging.RemoveMerge(iv, op)
	// If the instance version's dirty cache is empty, we can delete the merge cache.
	if labelsMerging.Empty(iv) {
		dvid.Debugf("Merge cache now empty for %s\n", iv)
		mc.DeleteMap(iv)
	}
}
// SplitStart checks current label map to see if the split conflicts.
// A split may not involve a label that is part of an in-progress merge;
// on success both labels' split dirty counts are incremented.
func SplitStart(iv dvid.InstanceVersion, op DeltaSplitStart) error {
	if labelsMerging.IsDirty(iv, op.NewLabel) {
		return fmt.Errorf("can't split into label %d while it is undergoing a merge", op.NewLabel)
	}
	if labelsMerging.IsDirty(iv, op.OldLabel) {
		return fmt.Errorf("can't split label %d while it is undergoing a merge", op.OldLabel)
	}
	labelsSplitting.Incr(iv, op.NewLabel)
	labelsSplitting.Incr(iv, op.OldLabel)
	return nil
}
// SplitStop marks the end of a split operation by decrementing the split
// dirty counts incremented in SplitStart.
func SplitStop(iv dvid.InstanceVersion, op DeltaSplitEnd) {
	labelsSplitting.Decr(iv, op.NewLabel)
	labelsSplitting.Decr(iv, op.OldLabel)
}
// mergeCache holds per-instance-version label mappings for in-flight
// merges; access to m is guarded by the embedded RWMutex.
type mergeCache struct {
	sync.RWMutex
	m map[dvid.InstanceVersion]*Mapping // lazily allocated in Add
}
// Add adds a merge operation to the given InstanceVersion's cache.
// It lazily allocates both the cache map and the per-version Mapping, then
// records merged->target for every merged label. It fails if the target is
// itself already being merged into another label (see Mapping.set).
func (mc *mergeCache) Add(iv dvid.InstanceVersion, op MergeOp) error {
	mc.Lock()
	defer mc.Unlock()
	if mc.m == nil {
		mc.m = make(map[dvid.InstanceVersion]*Mapping)
	}
	mapping, found := mc.m[iv]
	if !found {
		mapping = &Mapping{
			f: make(map[uint64]uint64, len(op.Merged)),
			r: make(map[uint64]Set),
		}
		mc.m[iv] = mapping
	}
	for merged := range op.Merged {
		if err := mapping.set(merged, op.Target); err != nil {
			return err
		}
	}
	return nil
}
// LabelMap returns a label mapping for a version of a data instance.
// If no label mapping is available, a nil is returned.
// An existing but empty mapping is also reported as nil.
//
// NOTE(review): len(mapping.f) is read under mc's read lock but not under
// mapping's own mutex, while Mapping.set mutates f under the latter —
// looks like a potential data race; confirm.
func (mc *mergeCache) LabelMap(iv dvid.InstanceVersion) *Mapping {
	mc.RLock()
	defer mc.RUnlock()
	if mc.m == nil {
		return nil
	}
	mapping, found := mc.m[iv]
	if found {
		if len(mapping.f) == 0 {
			return nil
		}
		return mapping
	}
	return nil
}
// DeleteMap removes the mapping of the given InstanceVersion, if any.
func (mc *mergeCache) DeleteMap(iv dvid.InstanceVersion) {
	mc.Lock()
	defer mc.Unlock()
	if mc.m == nil {
		return
	}
	delete(mc.m, iv)
}
// Mapping is a thread-safe, mapping of labels to labels in both forward and backward direction.
// Mutation of a Mapping instance can only be done through labels.MergeCache.
type Mapping struct {
	sync.RWMutex
	f map[uint64]uint64 // forward: merged label -> target label
	r map[uint64]Set // reverse: target label -> set of labels merged into it
}
// ConstituentLabels returns a set of labels that will be mapped to the given label.
// The set will always include the given label.
func (m *Mapping) ConstituentLabels(final uint64) Set {
	m.RLock()
	defer m.RUnlock()
	if m.r == nil {
		return Set{final: struct{}{}}
	}

	// Depth-first walk of the reverse mapping: every label merged directly
	// or transitively into final is a constituent, including intermediates.
	constituents := Set{}
	stack := []uint64{final}
	for len(stack) > 0 {
		label := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		constituents[label] = struct{}{}
		if children, ok := m.r[label]; ok {
			for c := range children {
				stack = append(stack, c)
			}
		}
	}
	return constituents
}
// FinalLabel follows mappings from a start label until
// a final mapped label is reached. The boolean reports whether at least
// one mapping hop was taken.
func (m *Mapping) FinalLabel(start uint64) (uint64, bool) {
	m.RLock()
	defer m.RUnlock()
	if m.f == nil {
		return start, false
	}

	cur, hops := start, 0
	for {
		next, ok := m.f[cur]
		if !ok {
			return cur, hops > 0
		}
		cur = next
		hops++
	}
}
// Get returns the mapping or false if no mapping exists.
func (m *Mapping) Get(label uint64) (uint64, bool) {
	m.RLock()
	defer m.RUnlock()
	// A lookup on a nil map safely yields the zero value and false, so no
	// explicit nil check is required; the two-value form covers all cases.
	mapped, found := m.f[label]
	return mapped, found
}
// set records a merge of label a into label b in both the forward and
// reverse maps. It returns an error if b is itself currently being mapped
// to another label.
func (m *Mapping) set(a, b uint64) error {
	m.Lock()
	defer m.Unlock()

	if m.f == nil {
		m.f = make(map[uint64]uint64)
		m.r = make(map[uint64]Set)
	} else if c, found := m.f[b]; found {
		return fmt.Errorf("label %d is currently getting merged into label %d", b, c)
	}

	m.f[a] = b
	if s, ok := m.r[b]; ok {
		s[a] = struct{}{} // map values are references; no re-store needed
	} else {
		m.r[b] = Set{a: struct{}{}}
	}
	return nil
}
// delete removes the forward mapping for label and prunes the corresponding
// entry from the reverse set of its target.
func (m *Mapping) delete(label uint64) {
	m.Lock()
	defer m.Unlock()

	if m.f == nil {
		return
	}
	mapped, found := m.f[label]
	if !found {
		return
	}
	delete(m.f, label)
	if s, ok := m.r[mapped]; ok {
		delete(s, label)
	}
}
// Set is a set of labels.
type Set map[uint64]struct{}

// String renders the set as decimal labels, each followed by a single space.
// Map iteration order is random, so the ordering of labels is unspecified.
func (s Set) String() string {
	var buf []byte
	for k := range s {
		buf = append(buf, fmt.Sprintf("%d ", k)...)
	}
	return string(buf)
}
// Counts is a thread-safe type for counting label references.
type Counts struct {
	sync.RWMutex
	m map[uint64]int // reference count per label; lazily allocated
}
// Incr increments the count for a label.
func (c *Counts) Incr(label uint64) {
	c.Lock()
	defer c.Unlock()
	// Lazy initialization must happen under the lock; checking and creating
	// the map before locking races with concurrent callers.
	if c.m == nil {
		c.m = make(map[uint64]int)
	}
	c.m[label]++
}
// Decr decrements the count for a label, removing the entry entirely when
// the count reaches zero.
func (c *Counts) Decr(label uint64) {
	c.Lock()
	defer c.Unlock()
	// Lazy initialization must happen under the lock to avoid racing with
	// concurrent callers (the previous code checked before locking).
	if c.m == nil {
		c.m = make(map[uint64]int)
	}
	c.m[label]--
	if c.m[label] == 0 {
		delete(c.m, label)
	}
}
// Value returns the count for a label (zero if never incremented).
func (c *Counts) Value(label uint64) int {
	c.RLock()
	defer c.RUnlock()
	// Reading a nil map safely yields the zero value, so no nil check is
	// needed; the previous unguarded nil check raced with writers.
	return c.m[label]
}
// Empty returns true if there are no counts.
func (c *Counts) Empty() bool {
	// The previous implementation read c.m without any locking.
	c.RLock()
	defer c.RUnlock()
	return len(c.m) == 0
}
// dirtyCache is a thread-safe cache for tracking dirty labels across versions, which is necessary when we
// don't know exactly how a label is being transformed. For example, when merging
// we can easily track what a label will be, however during a split, we don't know whether
// a particular voxel with label X will become label Y unless we also store the split
// voxels. So DirtyCache is good for tracking "changing" status in splits while MergeCache
// can give us complete label transformation of non-dirty labels.
type dirtyCache struct {
	sync.RWMutex
	dirty map[dvid.InstanceVersion]*Counts // per-version reference counts of in-flux labels
}
// Incr marks one more dirty reference for label under the given version.
func (d *dirtyCache) Incr(iv dvid.InstanceVersion, label uint64) {
	d.Lock()
	defer d.Unlock()
	d.ensureInit()
	d.incr(iv, label)
}

// Decr releases one dirty reference for label under the given version.
func (d *dirtyCache) Decr(iv dvid.InstanceVersion, label uint64) {
	d.Lock()
	defer d.Unlock()
	d.ensureInit()
	d.decr(iv, label)
}

// ensureInit lazily allocates the version map; callers must hold the write lock.
func (d *dirtyCache) ensureInit() {
	if d.dirty == nil {
		d.dirty = make(map[dvid.InstanceVersion]*Counts)
	}
}
// IsDirty reports whether label has a nonzero dirty count for the given
// instance version.
func (d *dirtyCache) IsDirty(iv dvid.InstanceVersion, label uint64) bool {
	d.RLock()
	defer d.RUnlock()
	if d.dirty == nil {
		return false
	}
	cnts, found := d.dirty[iv]
	if !found || cnts == nil {
		return false
	}
	// Simplified from an if-zero-return-false/return-true cascade.
	return cnts.Value(label) != 0
}
// Empty reports whether the given instance version has no dirty labels.
func (d *dirtyCache) Empty(iv dvid.InstanceVersion) bool {
	d.RLock()
	defer d.RUnlock()

	// A lookup on a nil or empty map simply misses, which means "empty".
	cnts, found := d.dirty[iv]
	if !found || cnts == nil {
		return true
	}
	return cnts.Empty()
}
// AddMerge marks the merge target and every merged label dirty for iv.
func (d *dirtyCache) AddMerge(iv dvid.InstanceVersion, op MergeOp) {
	d.Lock()
	defer d.Unlock()

	if d.dirty == nil {
		d.dirty = make(map[dvid.InstanceVersion]*Counts)
	}
	d.incr(iv, op.Target)
	for merged := range op.Merged {
		d.incr(iv, merged)
	}
}
// RemoveMerge releases the dirty marks taken by AddMerge for the target and
// every merged label of the operation.
func (d *dirtyCache) RemoveMerge(iv dvid.InstanceVersion, op MergeOp) {
	d.Lock()
	defer d.Unlock()

	if d.dirty == nil {
		d.dirty = make(map[dvid.InstanceVersion]*Counts)
	}
	d.decr(iv, op.Target)
	for merged := range op.Merged {
		d.decr(iv, merged)
	}
}
// incr bumps the dirty count for label, allocating the per-version Counts
// on demand. Callers must hold d's write lock.
func (d *dirtyCache) incr(iv dvid.InstanceVersion, label uint64) {
	cnts := d.dirty[iv] // nil on miss, same as the explicit found check
	if cnts == nil {
		cnts = new(Counts)
		d.dirty[iv] = cnts
	}
	cnts.Incr(label)
}
func (d *dirtyCache) decr(iv dvid.InstanceVersion, label uint64) {
cnts, found := d.dirty[iv]
if !found || cnts == nil {
dvid.Errorf("decremented non-existant count for label %d, version %v\n", label, iv)
return
}
cnts.Decr(label)
} | datatype/common/labels/labels.go | 0.716814 | 0.557905 | labels.go | starcoder |
package suncalc
import (
m "math"
"time"
)
// rad is the degrees-to-radians conversion factor.
const rad = m.Pi / 180
// time conversions
const (
daySec = 60 * 60 * 24
j1970 = 2440588.0
j2000 = 2451545.0
)
func toJulian(t time.Time) float64 {
return float64(t.Unix()) / daySec - 0.5 + j1970
}
func fromJulian(j float64) time.Time {
return time.Unix(int64((j + 0.5 - j1970) * daySec), 0)
}
func toDays(t time.Time) float64 {
return toJulian(t) - j2000
}
// general utilities for celestial body position

// e is the obliquity of the ecliptic (Earth's axial tilt) in radians.
const e = rad * 23.4397

// rightAscension computes right ascension from ecliptic longitude l and
// latitude b (radians).
func rightAscension(l, b float64) float64 {
	y := m.Sin(l)*m.Cos(e) - m.Tan(b)*m.Sin(e)
	return m.Atan2(y, m.Cos(l))
}

// declination computes declination from ecliptic longitude l and latitude b
// (radians).
func declination(l, b float64) float64 {
	return m.Asin(m.Sin(b)*m.Cos(e) + m.Cos(b)*m.Sin(e)*m.Sin(l))
}
// azimuth computes the azimuth for hour angle H, observer latitude phi, and
// declination dec (all radians).
func azimuth(H, phi, dec float64) float64 {
	x := m.Cos(H)*m.Sin(phi) - m.Tan(dec)*m.Cos(phi)
	return m.Atan2(m.Sin(H), x)
}
// altitude computes the altitude above the horizon for hour angle H,
// observer latitude phi, and declination dec (all radians).
// The spherical-astronomy formula is asin(sin φ·sin δ + cos φ·cos δ·cos H);
// the previous code applied Sin instead of Asin to the inner expression,
// which yields wrong altitudes everywhere.
func altitude(H, phi, dec float64) float64 {
	return m.Asin(m.Sin(phi)*m.Sin(dec) + m.Cos(phi)*m.Cos(dec)*m.Cos(H))
}
// siderealTime returns the local sidereal time for d days since J2000 and
// west longitude lw (radians).
func siderealTime(d, lw float64) float64 {
	degrees := 280.16 + 360.9856235*d
	return rad*degrees - lw
}
// general sun calculations

// solarMeanAnomaly returns the Sun's mean anomaly for d days since J2000.
func solarMeanAnomaly(d float64) float64 {
	degrees := 357.5291 + 0.98560028*d
	return rad * degrees
}
// eclipticLongitude derives the Sun's ecliptic longitude from its mean
// anomaly ma (radians).
func eclipticLongitude(ma float64) float64 {
	// equation of center
	center := rad * (1.9148*m.Sin(ma) + 0.02*m.Sin(2*ma) + 0.0003*m.Sin(3*ma))
	// perihelion of the Earth
	perihelion := rad * 102.9372
	return ma + center + perihelion + m.Pi
}
// sunCoords returns the Sun's declination and right ascension for d days
// since J2000.
func sunCoords(d float64) (float64, float64) {
	lng := eclipticLongitude(solarMeanAnomaly(d))
	return declination(lng, 0), rightAscension(lng, 0)
}
// SunPosition returns the sun's azimuth and altitude (radians) for the
// given time and observer latitude/longitude in degrees.
func SunPosition(t time.Time, lat, lng float64) (float64, float64) {
	var (
		lw  = rad * -lng // west longitude, radians
		phi = rad * lat  // latitude, radians
		d   = toDays(t)
	)
	dec, ra := sunCoords(d)
	h := siderealTime(d, lw) - ra // hour angle
	return azimuth(h, phi, dec), altitude(h, phi, dec)
}
// calculations for sun times

// j0 is the mean solar-transit constant used by the sunrise equations.
const j0 = 0.0009

// julianCycle returns the Julian cycle number for d days since J2000 and
// west longitude lw (radians).
func julianCycle(d, lw float64) float64 {
	return m.Floor(d - j0 - lw/(2.0*m.Pi) + 0.5)
}
// approxTransit estimates a transit time from hour angle ht, west longitude
// lw (radians), and Julian cycle n.
func approxTransit(ht, lw, n float64) float64 {
	frac := (ht + lw) / (2.0 * m.Pi)
	return j0 + frac + n
}
// solarTransitJ refines the approximate transit ds into a Julian date using
// the solar mean anomaly ma and ecliptic longitude l.
func solarTransitJ(ds, ma, l float64) float64 {
	correction := 0.0053*m.Sin(ma) - 0.0069*m.Sin(2*l)
	return j2000 + ds + correction
}
// hourAngle returns the hour angle at which the Sun reaches altitude h for
// observer latitude phi and declination d (all radians).
func hourAngle(h, phi, d float64) float64 {
	cosH := (m.Sin(h) - m.Sin(phi)*m.Sin(d)) / (m.Cos(phi) * m.Cos(d))
	return m.Acos(cosH)
}
// getSetJ returns the Julian date at which the Sun sets through altitude h.
// lw and phi are west longitude and latitude (radians), dec the declination,
// n the Julian cycle, ma the solar mean anomaly, and l the ecliptic
// longitude. The mean-anomaly parameter was previously named m, which
// shadowed the math package alias m within this function.
func getSetJ(h, lw, phi, dec, n, ma, l float64) float64 {
	w := hourAngle(h, phi, dec)
	a := approxTransit(w, lw, n)
	return solarTransitJ(a, ma, l)
}
// sun times configuration

// SunAngle associates a solar altitude (degrees) with the names of the
// rise-side and set-side events occurring at that altitude.
type SunAngle struct {
	angle    float64
	riseName string
	setName  string
}

// sunAngles lists the standard sunrise/twilight/golden-hour altitudes.
// Redundant element type names in the composite literal elided per gofmt -s.
var sunAngles = [...]SunAngle{
	{-0.833, "sunrise", "sunset"},
	{-0.3, "sunriseEnd", "sunsetStart"},
	{-6.0, "dawn", "dusk"},
	{-12.0, "nauticalDawn", "nauticalDusk"},
	{-18.0, "nightEnd", "night"},
	{6.0, "goldenHourEnd", "goldenHour"},
}
// SunTimes calculates the named sun event times for a given date and
// observer latitude/longitude in degrees.
func SunTimes(t time.Time, lat, lng float64) map[string]time.Time {
	lw := rad * -lng
	phi := rad * lat
	d := toDays(t)

	n := julianCycle(d, lw)
	ds := approxTransit(0, lw, n)
	ma := solarMeanAnomaly(ds)
	l := eclipticLongitude(ma)
	dec := declination(l, 0)
	jNoon := solarTransitJ(ds, ma, l)

	times := map[string]time.Time{
		"solarNoon": fromJulian(jNoon),
		"nadir":     fromJulian(jNoon - 0.5),
	}
	// Each configured altitude yields a set time directly; the matching rise
	// time mirrors it around solar noon.
	for _, sa := range sunAngles {
		jSet := getSetJ(sa.angle*rad, lw, phi, dec, n, ma, l)
		jRise := jNoon - (jSet - jNoon)
		times[sa.riseName] = fromJulian(jRise)
		times[sa.setName] = fromJulian(jSet)
	}
	return times
}
// moon calculations, based on http://aa.quae.nl/en/reken/hemelpositie.html formulas

// moonCoords returns the Moon's geocentric declination, right ascension,
// and distance (km) for d days since J2000.
func moonCoords(d float64) (float64, float64, float64) {
	var (
		el = rad * (218.316 + 13.176396*d) // ecliptic longitude
		ma = rad * (134.963 + 13.064993*d) // mean anomaly
		f  = rad * (93.272 + 13.229350*d)  // mean distance argument
	)
	lng := rad*6.289*m.Sin(ma) + el  // longitude
	lat := rad * 5.128 * m.Sin(f)    // latitude
	dist := 385001 - 20905*m.Cos(ma) // distance to the moon in km
	return declination(lng, lat), rightAscension(lng, lat), dist
}
// MoonPosition returns the Moon's azimuth, refraction-corrected altitude
// (radians), and distance (km) for the given time and location in degrees.
func MoonPosition(t time.Time, lat, lng float64) (float64, float64, float64) {
	lw := rad * -lng
	phi := rad * lat
	d := toDays(t)

	dec, ra, dist := moonCoords(d)
	ha := siderealTime(d, lw) - ra
	h := altitude(ha, phi, dec)
	// altitude correction for refraction
	h += rad * 0.017 / m.Tan(h+rad*10.26/(h+rad*5.10))
	return azimuth(ha, phi, dec), h, dist
}
// example:
// azimuth, altitude := SunPosition(time.Now(), 50.5, 30.5)
// times := SunTimes(time.Now(), 50.5, 30.5) | vendor/src/github.com/whosonfirst/suncalc-go/suncalc.go | 0.878171 | 0.519278 | suncalc.go | starcoder |
package finder
import (
"context"
"errors"
"math"
"strings"
"sync"
)
// Finder is the type to find the nearest reference
type Finder struct {
	referenceMap referenceMapType // fast exact-match lookup of references
	reference []string // full reference list, in the order given to New/Refresh
	referenceBucket referenceBucketType // references grouped by first byte; used when bucketChars > 0
	Alg Algorithm // similarity scoring algorithm; required
	LengthTolerance float64 // A number between 0.0-1.0 (percentage) to allow for length miss-match, anything outside this is considered not similar. Set to 0 to disable.
	lock sync.RWMutex // guards reference, referenceMap and referenceBucket
	bucketChars uint // @todo figure out what (type of) bucket approach to take. Prefix or perhaps using an ngram/trie approach
}
// Errors
var (
	// ErrNoAlgorithmDefined is returned by New when no Algorithm option was supplied.
	ErrNoAlgorithmDefined = errors.New("no algorithm defined")
)

// referenceMapType is the set of known references, for exact-match tests.
type referenceMapType map[string]struct{}

// referenceBucketType groups references by the rune of their first byte.
type referenceBucketType map[rune][]string
// These constants hold the value of the lowest and highest possible scores. Compatible with JSON serialization.
// It's not ideal to mix presentation with business logic but in this instance it was convenient and similarly
// effective as math.Inf(-1)
const (
	// WorstScoreValue is the lowest possible similarity score.
	WorstScoreValue = -1 * math.MaxFloat32
	// BestScoreValue is the highest possible similarity score (used for exact matches).
	BestScoreValue = math.MaxFloat32
)
// New creates a new instance of Finder. The order of the list is significant.
// An error is returned when no Algorithm has been configured via options.
func New(list []string, options ...Option) (*Finder, error) {
	f := &Finder{}
	for _, opt := range options {
		opt(f)
	}
	f.Refresh(list)

	if f.Alg == nil {
		return f, ErrNoAlgorithmDefined
	}
	return f, nil
}
// Refresh replaces the internal reference list.
func (t *Finder) Refresh(list []string) {
	var (
		rm = make(referenceMapType, len(list))
		rb = make(referenceBucketType, 26)
	)
	for _, ref := range list {
		if ref == "" {
			continue
		}
		rm[ref] = struct{}{}

		// Bucket references by their first byte.
		// @todo make the bucket prefix length configurable
		if t.bucketChars > 0 {
			first := rune(ref[0])
			rb[first] = append(rb[first], ref)
		}
	}

	t.lock.Lock()
	t.reference = append(t.reference[0:0], list...)
	t.referenceMap = rm
	t.referenceBucket = rb
	t.lock.Unlock()
}
// Exact returns true if the input is an exact match.
func (t *Finder) Exact(input string) bool {
	t.lock.RLock()
	defer t.lock.RUnlock()
	_, ok := t.referenceMap[input]
	return ok
}
// Find returns the best alternative, a score, and whether it was an exact match.
// Since algorithms can define their own upper-bound, there is no "best" value.
func (t *Finder) Find(input string) (string, float64, bool) {
	return t.FindCtx(context.Background(), input)
}
// FindCtx is the same as Find, with context support.
func (t *Finder) FindCtx(ctx context.Context, input string) (string, float64, bool) {
	list, score, exact := t.FindTopRankingCtx(ctx, input)
	return list[0], score, exact
}
// FindTopRankingCtx returns a list (of at least one element) of references
// sharing the same "best" score.
func (t *Finder) FindTopRankingCtx(ctx context.Context, input string) ([]string, float64, bool) {
	list, score, exact, _ := t.findTopRankingCtx(ctx, input, 0)
	return list, score, exact
}
// FindTopRankingPrefixCtx requires the references to have an exact prefix
// match on N characters of the input.
// prefixLength cannot exceed the length of input.
func (t *Finder) FindTopRankingPrefixCtx(ctx context.Context, input string, prefixLength uint) ([]string, bool, error) {
	list, _, exact, err := t.findTopRankingCtx(ctx, input, prefixLength)
	return list, exact, err
}
// getRefList returns the bucketed reference list for input's first byte when
// one exists, falling back to the full list. getRefList does not deal with
// locks; callers must hold the lock.
func (t *Finder) getRefList(input string) []string {
	if len(input) == 0 {
		return t.reference
	}
	if bucket, ok := t.referenceBucket[rune(input[0])]; ok {
		return bucket
	}
	return t.reference
}
// GetMatchingPrefix returns up to max references that start with the prefix
// argument (max of 0 means unlimited). It returns early with a context
// error when ctx is canceled.
func (t *Finder) GetMatchingPrefix(ctx context.Context, prefix string, max uint) ([]string, error) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	result := make([]string, 0, max)
	for _, ref := range t.getRefList(prefix) {
		if err := ctx.Err(); err != nil {
			return result, err
		}
		if !strings.HasPrefix(ref, prefix) {
			continue
		}
		result = append(result, ref)
		if max > 0 && uint(len(result)) == max {
			break
		}
	}
	return result, nil
}
// findTopRankingCtx is the core lookup. It scores every candidate reference
// against input and returns all references tied for the highest score, the
// score itself, whether input was an exact (or empty) match, and any error.
// When no candidate beats WorstScoreValue, the returned list contains input
// itself; a candidate scoring exactly WorstScoreValue is appended alongside it.
func (t *Finder) findTopRankingCtx(ctx context.Context, input string, prefixLength uint) ([]string, float64, bool, error) {
	var hs = WorstScoreValue
	// The prefix filter only makes sense when input is at least that long.
	if prefixLength > 0 && uint(len(input)) < prefixLength {
		return []string{input}, WorstScoreValue, false, errors.New("prefix length exceeds input length")
	}
	t.lock.RLock()
	defer t.lock.RUnlock()
	// Exact matches (an empty input is also treated as exact).
	if _, exists := t.referenceMap[input]; exists || len(input) == 0 {
		return []string{input}, BestScoreValue, true, nil
	}
	var (
		list      = t.getRefList(input)
		sameScore = []string{input} // fallback result if nothing scores higher
	)
	for _, ref := range list {
		// Abort promptly on context cancellation.
		select {
		case <-ctx.Done():
			return []string{input}, WorstScoreValue, false, ctx.Err()
		default:
		}
		if !meetsPrefixLengthMatch(prefixLength, input, ref) {
			continue
		}
		// Test if the input length differs too much from the reference, making it an unlikely typo.
		if !meetsLengthTolerance(t.LengthTolerance, input, ref) {
			continue
		}
		score := t.Alg(input, ref)
		if score > hs {
			// New best: restart the tie list with this reference.
			hs = score
			sameScore = []string{ref}
		} else if score == hs {
			sameScore = append(sameScore, ref)
		}
	}
	return sameScore, hs, false, nil
}
// meetsPrefixLengthMatch reports whether input and reference share an
// identical prefix of the given length. A zero length always matches.
// The previous implementation compared only length-1 characters
// (input[0:length-1]), one character short of the documented contract; it
// also assumed input was long enough, which is now checked explicitly.
func meetsPrefixLengthMatch(length uint, input, reference string) bool {
	if length == 0 {
		return true
	}
	if uint(len(input)) < length || uint(len(reference)) < length {
		return false
	}
	return input[:length] == reference[:length]
}
// meetsLengthTolerance checks whether the reference length is within the
// tolerance t, expressed as a 0..1 fraction of the input length.
// Non-positive t disables the check; t > 1 always fails.
func meetsLengthTolerance(t float64, input, reference string) bool {
	switch {
	case t <= 0:
		return true
	case t > 1:
		return false
	}

	inLen, refLen := len(input), len(reference)
	// The allowed slack is N% of the input length, at least 1 due to Ceil.
	slack := int(math.Ceil(float64(inLen) * t))
	return refLen-slack <= inLen && inLen <= refLen+slack
}
package msgraph
// ProvisioningStepType enumerates the step categories of a provisioning run.
type ProvisioningStepType string

const (
	// ProvisioningStepTypeVImport is the "Import" step type.
	ProvisioningStepTypeVImport ProvisioningStepType = "Import"
	// ProvisioningStepTypeVScoping is the "Scoping" step type.
	ProvisioningStepTypeVScoping ProvisioningStepType = "Scoping"
	// ProvisioningStepTypeVMatching is the "Matching" step type.
	ProvisioningStepTypeVMatching ProvisioningStepType = "Matching"
	// ProvisioningStepTypeVProcessing is the "Processing" step type.
	ProvisioningStepTypeVProcessing ProvisioningStepType = "Processing"
	// ProvisioningStepTypeVReferenceResolution is the "ReferenceResolution" step type.
	ProvisioningStepTypeVReferenceResolution ProvisioningStepType = "ReferenceResolution"
	// ProvisioningStepTypeVExport is the "Export" step type.
	ProvisioningStepTypeVExport ProvisioningStepType = "Export"
	// ProvisioningStepTypeVUnknownFutureValue is the "UnknownFutureValue" sentinel.
	ProvisioningStepTypeVUnknownFutureValue ProvisioningStepType = "UnknownFutureValue"
)

// ProvisioningStepTypePImport returns a pointer to ProvisioningStepTypeVImport.
func ProvisioningStepTypePImport() *ProvisioningStepType {
	p := ProvisioningStepTypeVImport
	return &p
}

// ProvisioningStepTypePScoping returns a pointer to ProvisioningStepTypeVScoping.
func ProvisioningStepTypePScoping() *ProvisioningStepType {
	p := ProvisioningStepTypeVScoping
	return &p
}

// ProvisioningStepTypePMatching returns a pointer to ProvisioningStepTypeVMatching.
func ProvisioningStepTypePMatching() *ProvisioningStepType {
	p := ProvisioningStepTypeVMatching
	return &p
}

// ProvisioningStepTypePProcessing returns a pointer to ProvisioningStepTypeVProcessing.
func ProvisioningStepTypePProcessing() *ProvisioningStepType {
	p := ProvisioningStepTypeVProcessing
	return &p
}

// ProvisioningStepTypePReferenceResolution returns a pointer to ProvisioningStepTypeVReferenceResolution.
func ProvisioningStepTypePReferenceResolution() *ProvisioningStepType {
	p := ProvisioningStepTypeVReferenceResolution
	return &p
}

// ProvisioningStepTypePExport returns a pointer to ProvisioningStepTypeVExport.
func ProvisioningStepTypePExport() *ProvisioningStepType {
	p := ProvisioningStepTypeVExport
	return &p
}

// ProvisioningStepTypePUnknownFutureValue returns a pointer to ProvisioningStepTypeVUnknownFutureValue.
func ProvisioningStepTypePUnknownFutureValue() *ProvisioningStepType {
	p := ProvisioningStepTypeVUnknownFutureValue
	return &p
}
package core
import (
"strings"
"github.com/raviqqe/hamt"
)
// DictionaryType represents a dictionary in the language.
// It wraps an immutable HAMT map whose keys are hamt.Entry values.
type DictionaryType struct {
	hamt.Map
}

// eval evaluates a value into a WHNF. A dictionary is already in weak head
// normal form, so it evaluates to itself.
func (d *DictionaryType) eval() Value {
	return d
}
var (
	// NOTE(review): the identifier is misspelled ("emtpy"); renaming would
	// touch other references in this package, so it is left as-is.
	emtpyDictionary = DictionaryType{hamt.NewMap()}
	// EmptyDictionary is a thunk of an empty dictionary.
	EmptyDictionary = &emtpyDictionary
)
// KeyValue is a pair of a key and value inserted into dictionaries.
type KeyValue struct {
	Key, Value Value
}
// NewDictionary creates a dictionary from keys of values and their
// corresponding values of thunks, by folding successive Assign applications
// over the empty dictionary.
func NewDictionary(kvs []KeyValue) Value {
	dict := Value(EmptyDictionary)
	for _, kv := range kvs {
		dict = PApp(Assign, dict, kv.Key, kv.Value)
	}
	return dict
}
// assign returns a copy of the dictionary with key k bound to v, or an
// error value when k is not hashable.
func (d *DictionaryType) assign(k Value, v Value) Value {
	entry, errValue := evalEntry(k)
	if errValue != nil {
		return errValue
	}
	return &DictionaryType{d.Map.Insert(entry, v)}
}
// index looks up key k, returning the bound value or an error value when
// the key is unhashable or absent.
func (d *DictionaryType) index(k Value) Value {
	v, errValue := d.find(k)
	if errValue != nil {
		return errValue
	}
	return v
}
// find looks up key k and returns (value, nil) on success, or (nil, error
// value) when the key is unhashable or not present.
func (d *DictionaryType) find(k Value) (Value, Value) {
	entry, errValue := evalEntry(k)
	if errValue != nil {
		return nil, errValue
	}
	found := d.Map.Find(entry)
	if found == nil {
		return nil, keyNotFoundError(k)
	}
	return found.(Value), nil
}
// toList converts the dictionary into a list of [key, value] pairs,
// recursing lazily over the remainder via ToList.
func (d *DictionaryType) toList() Value {
	k, v, rest := d.FirstRest()
	if k == nil {
		return EmptyList
	}
	pair := NewList(k.(Value), v.(Value))
	return cons(pair, PApp(ToList, &DictionaryType{rest}))
}
// merge folds the given dictionary values into d, returning the combined
// dictionary or an error value when any argument is not a dictionary.
func (d *DictionaryType) merge(vs ...Value) Value {
	merged := d
	for _, v := range vs {
		other, errValue := EvalDictionary(v)
		if errValue != nil {
			return errValue
		}
		merged = &DictionaryType{merged.Merge(other.Map)}
	}
	return merged
}
// delete returns a copy of the dictionary without key k, or an error value
// when k is not hashable.
func (d *DictionaryType) delete(k Value) Value {
	entry, errValue := evalEntry(k)
	if errValue != nil {
		return errValue
	}
	return &DictionaryType{d.Map.Delete(entry)}
}
// compare orders dictionaries by comparing their list representations.
func (d *DictionaryType) compare(c comparable) int {
	other := c.(*DictionaryType)
	return compare(d.toList(), other.toList())
}
// string renders the dictionary as "{k1 v1 k2 v2 ...}", strictly dumping
// each key and each evaluated value; dump failures propagate as error values.
func (d *DictionaryType) string() Value {
	parts := []string{}
	rest := d
	for rest.Size() != 0 {
		k, v, remainder := rest.FirstRest()
		rest = &DictionaryType{remainder}

		dumpedKey, errValue := StrictDump(k.(Value))
		if errValue != nil {
			return errValue
		}
		dumpedValue, errValue := StrictDump(EvalPure(v.(Value)))
		if errValue != nil {
			return errValue
		}
		parts = append(parts, string(dumpedKey), string(dumpedValue))
	}
	return NewString("{" + strings.Join(parts, " ") + "}")
}
// size reports the number of entries as a number value.
func (d *DictionaryType) size() Value {
	return NewNumber(float64(d.Size()))
}
// include reports whether key k is present as a boolean value, or returns
// an error value when k is not hashable.
func (d *DictionaryType) include(k Value) Value {
	entry, errValue := evalEntry(k)
	if errValue != nil {
		return errValue
	}
	return NewBoolean(d.Include(entry))
}
func evalEntry(v Value) (hamt.Entry, Value) {
e, ok := v.(hamt.Entry)
if !ok {
return nil, TypeError(v, "hashable")
}
return e, nil
} | src/lib/core/dictionary.go | 0.713831 | 0.513546 | dictionary.go | starcoder |
package main
import (
"container/heap"
"encoding/base64"
"fmt"
"image"
"image/color"
"image/png"
"io"
"golang.org/x/image/draw"
)
// maze holds a grayscale weight field (the scaled source image) and the
// carved maze cells derived from it.
type maze struct {
	image.Gray        // per-cell weights guiding generation
	maze []byte // 1 = wall, 0 = passage; row-major, same dimensions as Gray
}
// newMaze scales src into a width×height grayscale image that serves as the
// weight field for maze generation.
func newMaze(src image.Image, width, height int) *maze {
	bounds := image.Rect(0, 0, width, height)
	result := &maze{
		Gray: image.Gray{
			Pix:    make([]uint8, bounds.Dx()*bounds.Dy()),
			Stride: bounds.Dx(),
			Rect:   bounds,
		},
	}
	draw.CatmullRom.Scale(&result.Gray, bounds, src, src.Bounds(), draw.Src, nil)
	return result
}
// index maps an (x, y) coordinate to a flat row-major offset.
func (m *maze) index(x, y int) int {
	return x + y*m.Stride
}

// position is the inverse of index.
func (m *maze) position(p int) (int, int) {
	return p % m.Stride, p / m.Stride
}

// weight scores an edge carving from cell p into wall cell n; the current
// cell's brightness dominates 4:1.
func (m *maze) weight(n, p int) int {
	return 4*int(m.Pix[p]) + int(m.Pix[n])
}
type edge struct {
n, p, a, v int
}
type edgeHeap []edge
func (e edgeHeap) Len() int {
return len(e)
}
func (e edgeHeap) Less(i, j int) bool {
vi, vj := e[i].v, e[j].v
if vi > vj {
return true
}
if vj == vi {
return e[i].a > e[j].a
}
return false
}
func (e edgeHeap) Swap(i, j int) {
e[i], e[j] = e[j], e[i]
}
func (e *edgeHeap) Push(x interface{}) {
*e = append(*e, x.(edge))
}
func (e *edgeHeap) Pop() interface{} {
old := *e
n := len(old)
item := old[n-1]
*e = old[:n-1]
return item
}
// generate carves the maze using a weighted Prim-style growth: starting from
// an odd-coordinate cell near the center, it repeatedly pops the
// highest-priority frontier edge (see edgeHeap.Less) and carves both the
// target cell and the wall between it and the current region.
func (m *maze) generate() {
	width, height := m.Rect.Dx(), m.Rect.Dy()
	size := width * height
	m.maze = make([]byte, size, size)
	// Fill with walls.
	for i := range m.maze {
		m.maze[i] = 1
	}
	// Start near the center, forced onto odd coordinates.
	startx := width / 2
	starty := height / 2
	if startx%2 == 0 {
		startx--
	}
	if starty%2 == 0 {
		starty--
	}
	p := m.index(startx, starty)
	// Clear start position.
	m.maze[p] = 0
	h := &edgeHeap{}
	heap.Init(h)
	age := 0
loop:
	// Push the four two-step neighbors of the current cell that are still
	// walls; each edge records the wall cell between them (n±width / n±1).
	x, y := m.position(p)
	if yp := y - 2; yp > 0 {
		if next := m.index(x, yp); m.maze[next] == 1 {
			heap.Push(h, edge{next, next + width, age, m.weight(next, next+width)})
		}
	}
	if yn := y + 2; yn < height {
		if next := m.index(x, yn); m.maze[next] == 1 {
			heap.Push(h, edge{next, next - width, age, m.weight(next, next-width)})
		}
	}
	if xp := x - 2; xp > 0 {
		if next := m.index(xp, y); m.maze[next] == 1 {
			heap.Push(h, edge{next, next + 1, age, m.weight(next, next+1)})
		}
	}
	if xn := x + 2; xn < width {
		if next := m.index(xn, y); m.maze[next] == 1 {
			heap.Push(h, edge{next, next - 1, age, m.weight(next, next-1)})
		}
	}
	// Pop edges until one leads to an uncarved cell, carve it, and continue
	// from there (the goto restarts neighbor expansion at the new cell).
	for h.Len() > 0 {
		item := heap.Pop(h).(edge)
		if m.maze[item.n] == 0 {
			continue
		}
		m.maze[item.n], m.maze[item.p] = 0, 0
		p = item.n
		age = item.a + 1
		goto loop
	}
}
// palette maps maze cell values to colors: index 0 (passage) is white,
// index 1 (wall) is black.
var palette = []color.Color{
	color.NRGBA{0xff, 0xff, 0xff, 0xff},
	color.NRGBA{0, 0, 0, 0xff}}
// writeBase64 encodes the generated maze as a base64 PNG written to w.
// It is a no-op when no maze has been generated yet.
func (m *maze) writeBase64(w io.Writer) error {
	if m.maze == nil {
		return nil
	}
	p := image.NewPaletted(m.Rect, palette)
	width, height := m.Rect.Dx(), m.Rect.Dy()
	pos := 0
	for y := 0; y < height; y++ {
		for x, v := range m.maze[pos : pos+width] {
			p.SetColorIndex(x, y, uint8(v))
		}
		pos += width
	}
	b64enc := base64.NewEncoder(base64.StdEncoding, w)
	// Propagate encode failures instead of silently dropping them (the
	// previous code ignored png.Encode's error). The encoder must still be
	// closed to flush any buffered base64 bytes.
	if err := png.Encode(b64enc, p); err != nil {
		b64enc.Close()
		return err
	}
	return b64enc.Close()
}
func (m *maze) writeBase64Image(w io.Writer) error {
fmt.Fprintf(w,
`<div><img alt="maze" width="%d" height="%d" src="data:image/png;base64,`,
m.Rect.Dx(), m.Rect.Dy())
m.writeBase64(w)
_, err := fmt.Fprintln(w, `"></div>`)
return err
} | maze.go | 0.564339 | 0.4474 | maze.go | starcoder |
package genetic_algorithm
import (
"fmt"
"math"
"math/rand"
)
// meanFloat64 returns the arithmetic mean of values, or 0 for an empty slice.
// See https://hg.python.org/cpython/file/4480506137ed/Lib/statistics.py#l453
func meanFloat64(values []float64) float64 {
	if len(values) == 0 {
		return 0
	}
	total := 0.0
	for _, v := range values {
		total += v
	}
	return total / float64(len(values))
}
// meanFloat64Iter averages count values produced by the value callback,
// returning 0 when count is 0.
func meanFloat64Iter(count int, value func(int) float64) float64 {
	if count == 0 {
		return 0
	}
	total := 0.0
	for i := 0; i < count; i++ {
		total += value(i)
	}
	return total / float64(count)
}
// meanFloat64Arr computes the element-wise mean of several series; shorter
// series are padded with their last value up to the longest length.
// Expects at least one value in each array.
func meanFloat64Arr(values [][]float64) []float64 {
	if len(values) == 0 {
		return []float64{}
	}
	longest := 0
	for _, series := range values {
		if len(series) > longest {
			longest = len(series)
		}
	}
	result := make([]float64, longest)
	for i := range result {
		for _, series := range values {
			if i < len(series) {
				result[i] += series[i]
			} else {
				result[i] += series[len(series)-1]
			}
		}
		result[i] /= float64(len(values))
	}
	return result
}
// meanFloat64ArrIter collects count series from the value callback and
// returns their element-wise mean.
// Expects at least one value in each array.
func meanFloat64ArrIter(count int, value func(int) []float64) []float64 {
	series := make([][]float64, count)
	for i := range series {
		series[i] = value(i)
	}
	return meanFloat64Arr(series)
}
// meanInt64 returns the truncated integer mean of values, or 0 when empty.
func meanInt64(values []int64) int64 {
	if len(values) == 0 {
		return 0
	}
	var total int64
	for _, v := range values {
		total += v
	}
	return total / int64(len(values))
}
// meanInt64Iter averages count values produced by the value callback using
// truncated integer division, returning 0 when count is 0.
func meanInt64Iter(count int, value func(int) int64) int64 {
	if count == 0 {
		return 0
	}
	var total int64
	for i := 0; i < count; i++ {
		total += value(i)
	}
	return total / int64(count)
}
// ssFloat64 returns the sum of squared deviations from the mean, with a
// correction term compensating for floating-point rounding in the mean
// (the deviation sum would be exactly zero in exact arithmetic).
func ssFloat64(values []float64) float64 {
	mean := meanFloat64(values)
	var squares float64
	var deviations float64
	for _, v := range values {
		deviations += v - mean
		squares += math.Pow(v-mean, 2)
	}
	// Rounding error compensation.
	squares -= math.Pow(deviations, 2) / float64(len(values))
	return squares
}
// varianceFloat64 returns the sample variance (n-1 denominator).
func varianceFloat64(values []float64) float64 {
	n := float64(len(values))
	return ssFloat64(values) / (n - 1)
}
// pvarianceFloat64 returns the population variance (n denominator).
func pvarianceFloat64(values []float64) float64 {
	n := float64(len(values))
	return ssFloat64(values) / n
}
// chooseTwoPointCrossSection picks two crossover points with
// 0 <= crossPoint1 < crossPoint2 <= genesLen. When copies of the parents
// are not allowed and crossPoint1 is 0, crossPoint2 is kept strictly below
// genesLen so the section never spans the whole chromosome.
func chooseTwoPointCrossSection(genesLen int, canProduceCopiesOfParents bool) (crossPoint1, crossPoint2 int) {
	crossPoint1 = rand.Intn(genesLen)
	if !canProduceCopiesOfParents && crossPoint1 == 0 {
		crossPoint2 = 1 + rand.Intn(genesLen-1)
	} else {
		crossPoint2 = crossPoint1 + 1 + rand.Intn(genesLen-crossPoint1)
	}
	return
}
// chooseDifferentRandomNumbers returns count distinct random ints drawn
// uniformly from [0, upperBound). It panics when that is impossible.
func chooseDifferentRandomNumbers(count, upperBound int) []int {
	if upperBound < count {
		panic(fmt.Sprintf("Can't select %d different numbers on inerval [0:%d)", count, upperBound))
	}
	seen := make(map[int]bool, count)
	chosen := make([]int, count)
	for i := range chosen {
		// Rejection-sample until an unused number appears.
		for {
			n := rand.Intn(upperBound)
			if seen[n] {
				continue
			}
			seen[n] = true
			chosen[i] = n
			break
		}
	}
	return chosen
}
// round rounds val to the nearest integer via roundEx with a 0.5 threshold.
func round(val float64) int {
	return int(roundEx(val, .5, 0))
}

// roundEx rounds val to the given number of decimal places. A fractional
// part compared sign-adjusted against roundOn decides between Ceil and
// Floor; for positive values an exact half rounds up, while for negative
// values an exact half rounds toward zero (via Ceil).
func roundEx(val float64, roundOn float64, places int) float64 {
	pow := math.Pow(10, float64(places))
	digit := pow * val
	_, frac := math.Modf(digit)

	var rounded float64
	if math.Copysign(frac, val) >= math.Copysign(roundOn, val) {
		rounded = math.Ceil(digit)
	} else {
		rounded = math.Floor(digit)
	}
	return rounded / pow
}
package is
import (
"reflect"
"strings"
"testing"
)
// suiteTest pairs a discovered TestXxx method with its name.
type suiteTest struct {
	fn   func(Is) // the test method, bound to the suite instance
	name string   // the method name, used as the subtest name
}
// Suite runs the given test suite sequentially.
// If the suite contains a method named `Setup` it is called before any tests are run.
// Tests must start with `Test` and take `is.Is` as the first arg.
// Finally after all the tests are run the `Teardown` method is called.
func Suite(t *testing.T, suite interface{}) {
	t.Helper()
	runSuite(t, suite, false)
}

// SuiteP is like `Suite` but runs all test functions in parallel.
func SuiteP(t *testing.T, suite interface{}) {
	t.Helper()
	runSuite(t, suite, true)
}
// runSuite reflects over the suite value, collecting exported Test* methods
// of signature func(Is) plus optional Setup/Teardown methods of signature
// func(), then runs Setup, each test as a subtest (optionally parallel),
// and registers Teardown as a cleanup.
func runSuite(t *testing.T, s interface{}, parallel bool) {
	t.Helper()
	suite := reflect.ValueOf(s)
	// Method discovery requires a pointer or interface value.
	if k := suite.Kind(); k != reflect.Ptr && k != reflect.Interface {
		t.Fatal("is.Suite: suite must be a ptr or interface")
	}
	suiteType := suite.Type()
	var tests []*suiteTest
	var setup = func() {}    // no-op defaults so the calls below are safe
	var teardown = func() {}
	var ok bool
	for i := 0; i < suite.NumMethod(); i++ {
		methodType := suiteType.Method(i)
		method := suite.Method(i)
		if !isExported(methodType) {
			continue
		}
		if name := methodType.Name; strings.HasPrefix(name, "Test") {
			// Only methods matching func(Is) are runnable tests; others are
			// logged and skipped.
			if fn, ok := method.Interface().(func(Is)); ok {
				tests = append(tests, &suiteTest{name: name, fn: fn})
				continue
			}
			t.Logf("is.Suite: Skipping test function '%s' with incorrect method signature. Should be func(Is) ", name)
			continue
		} else if name == "Setup" {
			if setup, ok = method.Interface().(func()); !ok {
				t.Fatal("is.Suite: Setup function should be have no args and no return values")
			}
		} else if name == "Teardown" {
			if teardown, ok = method.Interface().(func()); !ok {
				t.Fatal("is.Suite: Teardown function should be have no args and no return values")
			}
		}
	}
	if len(tests) == 0 {
		t.Fatalf("is.Suite: skipped suite '%s' with no tests", suiteType.Name())
	}
	setup()
	for i := range tests {
		test := tests[i]
		t.Run(test.name, func(t *testing.T) {
			t.Helper()
			if parallel {
				t.Parallel()
			}
			test.fn(New(t))
		})
	}
	// Teardown runs after all subtests complete, including parallel ones.
	t.Cleanup(teardown)
}
package gridmap
import (
"geometry"
"sort"
)
const (
	MAP_WIDTH = 7200 // map width (x axis)
	MAP_HEIGHT = 7200 // map height (y axis)
	MIST_BLOCK_SIZE = 90 // size of a fog-of-war block
	MIST_CELL_SIZE = 15 // size of a fog-of-war cell
)
// GetSquareBottom returns the bottom-left corner of a square of the given
// width centered at center, with each axis clamped to the map bounds.
func GetSquareBottom(center geometry.Coordinate, width int32) geometry.Coordinate {
	clamp := func(v, max int32) int32 {
		if v < 0 {
			return 0
		}
		if v >= max {
			return max - 1
		}
		return v
	}
	x := clamp(center.X-width/2, MAP_WIDTH)
	z := clamp(center.Z-width/2, MAP_HEIGHT)
	return geometry.NewCoordinate(x, z)
}
// MistCellOfPoint returns the origin of the mist cell containing pt by
// snapping both axes down to the cell grid.
func MistCellOfPoint(pt geometry.Coordinate) geometry.Coordinate {
	return geometry.Coordinate{
		X: pt.X - pt.X%MIST_CELL_SIZE,
		Z: pt.Z - pt.Z%MIST_CELL_SIZE,
	}
}
// GetMistCellsBySquare returns the origins of all mist cells overlapped by
// the square with bottom-left corner pos and the given width.
// The cell count is now derived per axis: the previous version computed the
// count from the X coordinate only and reused it for Z, missing rows of
// cells whenever pos.X and pos.Z had different offsets within their cells
// (and undercounting for widths that are not a multiple of MIST_CELL_SIZE),
// which made this fast path disagree with GetMistCellsBySquareSlow.
func GetMistCellsBySquare(pos geometry.Coordinate, width int32) []geometry.Coordinate {
	bottomLeft := MistCellOfPoint(pos)

	// axisCount returns how many cells the half-open interval [v, v+width)
	// spans along one axis: ceil((v+width)/cell) - floor(v/cell).
	axisCount := func(v int32) int32 {
		count := (v+width)/MIST_CELL_SIZE - v/MIST_CELL_SIZE
		if (v+width)%MIST_CELL_SIZE > 0 {
			count++
		}
		return count
	}
	countX := axisCount(pos.X)
	countZ := axisCount(pos.Z)

	cells := make([]geometry.Coordinate, 0, countX*countZ)
	for i := int32(0); i < countX; i++ {
		for j := int32(0); j < countZ; j++ {
			cells = append(cells, geometry.Coordinate{
				X: bottomLeft.X + i*MIST_CELL_SIZE,
				Z: bottomLeft.Z + j*MIST_CELL_SIZE,
			})
		}
	}
	return cells
}
// GetMistCellsBySquareSlow computes the covered cell set by brute force,
// visiting every point of the square with bottom-left corner pos.
func GetMistCellsBySquareSlow(pos geometry.Coordinate, width int32) map[geometry.Coordinate]bool {
	cells := make(map[geometry.Coordinate]bool, 9)
	for x := pos.X; x < pos.X+width; x++ {
		for z := pos.Z; z < pos.Z+width; z++ {
			cells[MistCellOfPoint(geometry.Coordinate{X: x, Z: z})] = true
		}
	}
	return cells
}
// GetMistCellsBy returns the mist cells covered by a square of the given
// width centered at center (clamped to the map bounds).
func GetMistCellsBy(center geometry.Coordinate, width int32) map[geometry.Coordinate]bool {
	bottom := GetSquareBottom(center, width)
	return GetMistCellsBySquareSlow(bottom, width)
}
// GetSortedMistCellsSlow returns the covered cells ordered by X, then Z,
// using the brute-force cell computation.
func GetSortedMistCellsSlow(center geometry.Coordinate, width int32) []geometry.Coordinate {
	cellSet := GetMistCellsBy(center, width)
	sorted := make([]geometry.Coordinate, 0, len(cellSet))
	for pt := range cellSet {
		sorted = append(sorted, pt)
	}
	sort.Slice(sorted, func(i, j int) bool {
		if sorted[i].X != sorted[j].X {
			return sorted[i].X < sorted[j].X
		}
		return sorted[i].Z < sorted[j].Z
	})
	return sorted
}
// GetSortedMistCells returns the mist cells covered by a width-sized square
// centered on `center`, sorted by X then Z. Fast counterpart of
// GetSortedMistCellsSlow, built on GetMistCellsBySquare.
func GetSortedMistCells(center geometry.Coordinate, width int32) []geometry.Coordinate {
	var bottom = GetSquareBottom(center, width)
	var cells = GetMistCellsBySquare(bottom, width)
	// Deterministic ordering: primary key X, secondary key Z.
	sort.Slice(cells, func(i, j int) bool {
		if cells[i].X == cells[j].X {
			return cells[i].Z < cells[j].Z
		}
		return cells[i].X < cells[j].X
	})
	return cells
} | go/gridmap.go | 0.500488 | 0.446193 | gridmap.go | starcoder |
package routing
import (
"net/http"
)
// Node kinds: a static node matches a literal path segment prefix; a dynamic
// node matches a parameterized segment (optionally regexp-constrained).
const (
	nodeTypeStatic = iota
	nodeTypeDynamic
)

// tree is a routing trie; root is the head of a sibling-linked node list.
type tree struct {
	root *node
}
// insert builds a branch from the parsed path chunks, attaches the handler to
// its leaf, merges the branch into the existing tree and returns the leaf.
func (t *tree) insert(chunks []chunk, handler http.HandlerFunc) *node {
	branchRoot, branchLeaf := createTreeFromChunks(chunks)
	branchLeaf.handler = handler
	t.root = combine(t.root, branchRoot)
	return branchLeaf
}
// combine merges two node lists (trees) into one, returning the merged head.
// Static nodes with a common prefix are split/merged radix-tree style; dynamic
// nodes are kept behind static siblings and merged by prefix + regexp.
func combine(tree1 *node, tree2 *node) *node {
	// Trivial cases: merging with an empty tree.
	if tree1 == nil {
		return tree2
	}
	if tree2 == nil {
		return tree1
	}
	if tree1.t == nodeTypeDynamic {
		if tree2.t == nodeTypeDynamic && tree2.prefix == tree1.prefix {
			// Same parameter name but different regexp: keep both as siblings.
			if !tree1.regexpEquals(tree2) {
				tree1.sibling = combine(tree1.sibling, tree2)
				tree1.sibling.parent = tree1
				return tree1
			}
			// Same parameter and regexp: merge the "stops" (continuation maps,
			// keyed by the byte that terminates the dynamic segment).
			for k := range tree2.stops {
				tree2.stops[k].parent = tree1
			}
			for k, next1 := range tree1.stops {
				if next2, ok := tree2.stops[k]; !ok {
					tree2.stops[k] = next1
				} else {
					tree2.stops[k] = combine(next1, next2)
				}
			}
			tree1.stops = tree2.stops
			// tree2's handler wins if present.
			if tree2.handler != nil {
				tree1.handler = tree2.handler
			}
			return tree1
		}
		if tree2.t == nodeTypeDynamic && tree2.prefix != tree1.prefix {
			// Different parameters: keep as siblings.
			tree1.sibling = combine(tree1.sibling, tree2)
			tree1.sibling.parent = tree1.parent
			return tree1
		}
		if tree2.t == nodeTypeStatic {
			// Static nodes take precedence: place tree2 before the dynamic one.
			tree2.sibling = tree1
			tree2.parent = tree1.parent
			return tree2
		}
	}
	if tree2.t == nodeTypeDynamic {
		// tree1 is static here; dynamic tree2 goes behind it.
		tree1.sibling = combine(tree1.sibling, tree2)
		tree1.sibling.parent = tree1.parent
		return tree1
	}
	// Both static: merge by longest common prefix (radix-tree node splitting).
	pos := common(tree1.prefix, tree2.prefix)
	if pos == 0 {
		// Nothing in common: keep as siblings.
		tree1.sibling = combine(tree1.sibling, tree2)
		tree1.sibling.parent = tree1.parent
		return tree1
	}
	if pos == len(tree1.prefix) && pos != len(tree2.prefix) {
		// tree1's prefix is a proper prefix of tree2's: push the rest of
		// tree2 down as a child of tree1.
		tree2.prefix = tree2.prefix[pos:]
		tree2.parent = tree1
		tree1.child = combine(tree1.child, tree2)
		return tree1
	}
	if pos != len(tree1.prefix) && pos == len(tree2.prefix) {
		// Symmetric case: tree2's prefix is a proper prefix of tree1's.
		tree1.prefix = tree1.prefix[pos:]
		tree2.sibling = tree1.sibling
		tree1.sibling = nil
		tree1.parent = tree2
		tree2.child = combine(tree1, tree2.child)
		return tree2
	}
	if pos != len(tree1.prefix) && pos != len(tree2.prefix) {
		// Partial overlap: introduce a split node holding the shared prefix,
		// with the two remainders as its children.
		split := createNodeFromChunk(chunk{t: tChunkStatic, v: tree1.prefix[:pos]})
		split.parent = tree1.parent
		split.sibling = tree1.sibling
		tree1.prefix = tree1.prefix[pos:]
		tree1.parent = split
		tree1.sibling = nil
		tree2.prefix = tree2.prefix[pos:]
		tree2.parent = split
		split.child = combine(tree1, tree2)
		return split
	}
	// Identical prefixes: merge handlers and children in place.
	if tree2.handler != nil {
		tree1.handler = tree2.handler
	}
	if tree1.child == nil && tree2.child == nil {
		return tree1
	}
	tree1.child = combine(tree1.child, tree2.child)
	tree1.child.parent = tree1
	return tree1
}
// createTreeFromChunks builds a single linear branch from the parsed path
// chunks, returning its root and leaf. Each chunk becomes one node; a node
// following a dynamic node is registered in that node's stops map, keyed by
// its first byte, otherwise it becomes the child.
func createTreeFromChunks(chunks []chunk) (root, leaf *node) {
	if len(chunks) == 0 {
		return nil, nil
	}
	root = createNodeFromChunk(chunks[0])
	leaf = root
	for _, c := range chunks[1:] {
		next := createNodeFromChunk(c)
		if leaf.t == nodeTypeDynamic {
			leaf.stops[next.prefix[0]] = next
		} else {
			leaf.child = next
		}
		next.parent = leaf
		leaf = next
	}
	return root, leaf
}
// createNodeFromChunk converts one parsed path chunk into a trie node:
// a plain prefix node for static chunks, or a dynamic node carrying an empty
// stops map and the chunk's regexp constraint otherwise.
func createNodeFromChunk(c chunk) *node {
	if c.t == tChunkStatic {
		return &node{prefix: c.v, t: nodeTypeStatic}
	}
	return &node{
		prefix: c.v,
		t:      nodeTypeDynamic,
		stops:  make(map[byte]*node),
		regexp: c.exp,
	}
}
// find returns the node whose route matches the request's URL path and
// method constraints, or nil if no route matches.
func (t *tree) find(request *http.Request) *node {
	return find(t.root, request.URL.Path, request)
}
// find recursively matches path p against the node list headed by n,
// returning the first matching handler node or nil. Static nodes are matched
// by common prefix; dynamic nodes consume bytes until a registered "stop"
// byte (validated against the node's regexp, if any).
func find(n *node, p string, request *http.Request) *node {
	if nil == n || len(p) == 0 {
		return nil
	}
	if n.t == nodeTypeDynamic {
		traversed := false
		for i := 0; i < len(p); i++ {
			// A stop byte ends the dynamic segment; try descending there.
			if next, ok := n.stops[p[i]]; ok {
				validExpression := true
				if n.regexp != nil {
					// The consumed segment p[0:i] must satisfy the constraint.
					validExpression = n.regexp.MatchString(p[0:i])
				}
				if validExpression {
					traversed = true
					h := find(next, p[i:], request)
					if nil != h && h.match(request) {
						return h
					}
				}
			}
			// A non-catch-all dynamic segment may not span a '/' boundary.
			if p[i] == '/' && !n.isCatchAll() {
				return find(n.sibling, p, request)
			}
		}
		// The whole remaining path was consumed by this dynamic segment.
		if n.match(request) && !traversed {
			validExpression := true
			if n.regexp != nil {
				validExpression = n.regexp.MatchString(p)
			}
			if validExpression {
				return n
			}
		}
		return find(n.sibling, p, request)
	}
	// Static node: compare by longest common prefix.
	pos := common(p, n.prefix)
	if pos == 0 {
		return find(n.sibling, p, request)
	}
	if pos == len(p) && len(p) == len(n.prefix) {
		// Exact match of the remaining path.
		if n.match(request) {
			return n
		}
		return nil
	}
	h := find(n.child, p[pos:], request)
	if nil != h && h.match(request) {
		return h
	}
	// Fall back to the first dynamic sibling, if any.
	for next := n.sibling; nil != next; next = next.sibling {
		if next.t != nodeTypeDynamic {
			continue
		}
		return find(next, p, request)
	}
	return nil
}
// common returns the length of the longest common prefix of s1 and s2.
func common(s1, s2 string) int {
	n := 0
	for n < len(s1) && n < len(s2) && s1[n] == s2[n] {
		n++
	}
	return n
}
// calcWeight computes and stores the subtree weight of n (the number of
// handlers reachable through it, including its own) and returns it.
// It recurses through children of static nodes and through the stops map of
// dynamic nodes, counting each branch's siblings as well.
func calcWeight(n *node) int {
	if n == nil {
		return 0
	}
	w := 0
	if n.handler != nil {
		w++
	}
	if n.t == nodeTypeStatic {
		w += calcWeight(n.child) + calcSiblingsWeight(n.child)
	} else {
		for _, c := range n.stops {
			w += calcWeight(c) + calcSiblingsWeight(c)
		}
	}
	n.w = w
	return w
}
// calcSiblingsWeight sums the subtree weights of every sibling that follows n
// (n itself is excluded). For dynamic siblings the weight of their stop
// continuations is summed.
// NOTE(review): a dynamic sibling's own handler is not counted here — confirm
// this is intentional.
func calcSiblingsWeight(n *node) int {
	if n == nil {
		return 0
	}
	total := 0
	for s := n.sibling; s != nil; s = s.sibling {
		if s.t == nodeTypeStatic {
			total += calcWeight(s)
		} else {
			for _, c := range s.stops {
				total += calcWeight(c)
			}
		}
	}
	return total
}
// sortByWeight sorts the sibling list headed by head in descending weight
// order (insertion sort on a linked list), recursively sorting each node's
// children / stop continuations first. Returns the new head.
func sortByWeight(head *node) *node {
	var sorted *node
	current := head
	for current != nil {
		// Remember the next sibling before the links are rewritten.
		next := current.sibling
		if current.t == nodeTypeStatic {
			current.child = sortByWeight(current.child)
		} else {
			for k, s := range current.stops {
				current.stops[k] = sortByWeight(s)
			}
		}
		sorted = sortInsertByWeight(sorted, current)
		current = next
	}
	return sorted
}
// sortInsertByWeight inserts `in` into the weight-descending sibling list
// headed by `head`, keeping the order stable (ties go after existing equal
// weights), and returns the (possibly new) head.
func sortInsertByWeight(head *node, in *node) *node {
	var current *node
	if head == nil || head.w < in.w {
		// New head: heavier than the current head (or the list is empty).
		in.sibling = head
		head = in
	} else {
		// Walk to the last node whose weight is still >= in.w.
		current = head
		for current.sibling != nil && current.sibling.w >= in.w {
			current = current.sibling
		}
		in.sibling = current.sibling
		current.sibling = in
	}
	return head
} | tree.go | 0.604516 | 0.525612 | tree.go | starcoder |
package interpreter
import (
"github.com/fract-lang/fract/pkg/arithmetic"
"github.com/fract-lang/fract/pkg/fract"
"github.com/fract-lang/fract/pkg/grammar"
"github.com/fract-lang/fract/pkg/objects"
"github.com/fract-lang/fract/pkg/parser"
"github.com/fract-lang/fract/pkg/vector"
)
// compareValues reports whether two scalar data frames satisfy the given
// comparison operator. Strings are compared lexically; everything else is
// compared numerically via arithmetic.ToArithmetic.
// NOTE(review): when exactly one operand is a string the function returns
// false for every operator, including "not equals" — confirm that a
// string/number pair should not be considered "not equal".
func compareValues(operator string, data0, data1 objects.DataFrame) bool {
	// Mixed string/non-string operands never satisfy any operator.
	if data0.Type != data1.Type && (data0.Type == fract.VALString || data1.Type == fract.VALString) {
		return false
	}
	// Each case falls through to "return true" unless the comparison fails.
	switch operator {
	case grammar.Equals: // Equals.
		if (data0.Type == fract.VALString && data0.Data != data1.Data) ||
			(data0.Type != fract.VALString && arithmetic.ToArithmetic(data0.Data) != arithmetic.ToArithmetic(data1.Data)) {
			return false
		}
	case grammar.NotEquals: // Not equals.
		if (data0.Type == fract.VALString && data0.Data == data1.Data) ||
			(data0.Type != fract.VALString && arithmetic.ToArithmetic(data0.Data) == arithmetic.ToArithmetic(data1.Data)) {
			return false
		}
	case ">": // Greater.
		if (data0.Type == fract.VALString && data0.Data <= data1.Data) ||
			(data0.Type != fract.VALString && arithmetic.ToArithmetic(data0.Data) <= arithmetic.ToArithmetic(data1.Data)) {
			return false
		}
	case "<": // Less.
		if (data0.Type == fract.VALString && data0.Data >= data1.Data) ||
			(data0.Type != fract.VALString && arithmetic.ToArithmetic(data0.Data) >= arithmetic.ToArithmetic(data1.Data)) {
			return false
		}
	case grammar.GreaterEquals: // Greater or equals.
		if (data0.Type == fract.VALString && data0.Data < data1.Data) ||
			(data0.Type != fract.VALString && arithmetic.ToArithmetic(data0.Data) < arithmetic.ToArithmetic(data1.Data)) {
			return false
		}
	case grammar.LessEquals: // Less or equals.
		if (data0.Type == fract.VALString && data0.Data > data1.Data) ||
			(data0.Type != fract.VALString && arithmetic.ToArithmetic(data0.Data) > arithmetic.ToArithmetic(data1.Data)) {
			return false
		}
	}
	return true
}
// compare reports whether value0 and value1 satisfy the comparison operator.
//
// Cleanup: the original fell through a "string comparison" branch taken
// whenever at least one operand was not an array, which made the mixed
// array/non-array check inside the array branch and the trailing
// "single value comparison" return unreachable. The dead code is removed;
// observable behavior is identical.
func compare(value0, value1 objects.Value, operator string) bool {
	// Scalar path: taken whenever at least one operand is not an array.
	// (A mixed array/scalar pair is compared via its first element, as before.)
	if !value0.Array || !value1.Array {
		data0 := value0.Content[0]
		data1 := value1.Content[0]
		// A string may only be compared with another string.
		if (data0.Type == fract.VALString) != (data1.Type == fract.VALString) {
			return false
		}
		return compareValues(operator, data0, data1)
	}
	// Array path: both operands are arrays here. Arrays of different lengths
	// only satisfy "not equals".
	if len(value0.Content) != len(value1.Content) {
		return operator == grammar.NotEquals
	}
	// Element-wise comparison: every pair must satisfy the operator.
	for index, val0Content := range value0.Content {
		if !compareValues(operator, val0Content, value1.Content[index]) {
			return false
		}
	}
	return true
}
// processCondition evaluates a conditional token stream and returns the
// keyword "true" or "false". The condition is decomposed into OR groups;
// each OR group is further decomposed into AND operands. An OR group
// succeeds if all of its AND operands hold.
//
// Fix: inside an AND chain, a bare operand (one without a comparison
// operator) previously returned KwTrue for the WHOLE condition when it
// evaluated to true — OR semantics copied into the AND branch. A bare AND
// operand must instead fail the condition when false and merely continue
// when true, matching the chain's other operands.
func (i *Interpreter) processCondition(tokens []objects.Token) string {
	i.processRange(&tokens)
	TRUE := objects.Value{Content: []objects.DataFrame{{Data: grammar.KwTrue}}}
	// Decompose into OR groups; any satisfied group satisfies the condition.
	ors := parser.DecomposeConditionalProcess(tokens, grammar.LogicalOr)
	for _, or := range *ors {
		// Decompose each OR group into AND operands.
		ands := parser.DecomposeConditionalProcess(or, grammar.LogicalAnd)
		// Long AND chain: every operand must hold.
		if len(*ands) > 1 {
			for _, and := range *ands {
				operatorIndex, operator := parser.FindConditionOperator(and)
				// Bare operand (no comparison operator): it must be truthy
				// for the AND chain to survive.
				if operatorIndex == -1 {
					if !compare(i.processValue(and), TRUE, grammar.Equals) {
						return grammar.KwFalse
					}
					continue
				}
				// Operator cannot be first or last: both sides are required.
				if operatorIndex == 0 {
					fract.Error(and[0], "Comparison values are missing!")
				} else if operatorIndex == len(and)-1 {
					fract.Error(and[len(and)-1], "Comparison values are missing!")
				}
				if !compare(
					i.processValue(*vector.Sublist(and, 0, operatorIndex)),
					i.processValue(*vector.Sublist(and, operatorIndex+1, len(and)-operatorIndex-1)),
					operator) {
					return grammar.KwFalse
				}
			}
			// Every AND operand held.
			return grammar.KwTrue
		}
		// Single operand in this OR group.
		operatorIndex, operator := parser.FindConditionOperator(or)
		if operatorIndex == -1 {
			// Bare operand: a truthy value satisfies the whole condition.
			if compare(i.processValue(or), TRUE, grammar.Equals) {
				return grammar.KwTrue
			}
			continue
		}
		if operatorIndex == 0 {
			fract.Error(or[0], "Comparison values are missing!")
		} else if operatorIndex == len(or)-1 {
			fract.Error(or[len(or)-1], "Comparison values are missing!")
		}
		if compare(
			i.processValue(*vector.Sublist(or, 0, operatorIndex)),
			i.processValue(*vector.Sublist(or, operatorIndex+1, len(or)-operatorIndex-1)),
			operator) {
			return grammar.KwTrue
		}
	}
	return grammar.KwFalse
} | internal/interpreter/process_condition.go | 0.534127 | 0.466481 | process_condition.go | starcoder |
package nums
import (
"math"
)
// Bounds of the valid T parameter range.
const (
	minTValue = 0.0
	// NOTE(review): identifier typo — "halfTVaue" should read "halfTValue";
	// left unchanged because it is package-visible and may be used elsewhere.
	halfTVaue = 0.5
	maxTValue = 1.0
)

var (
	// MinT is the smallest T parameter.
	MinT = TParam{minTValue}
	// HalfT is the average between min and max T values.
	HalfT = TParam{halfTVaue}
	// MaxT is the biggest T parameter.
	MaxT = TParam{maxTValue}
)

/*
A TParam is a parameter which takes values in the range [0, 1].
*/
type TParam struct {
	value float64
}
/* <-- Construction --> */

/*
MakeTParam returns a new T parameter holding the given value, clamped to the
valid range: values below the minimum yield MinT, values above the maximum
yield MaxT.
*/
func MakeTParam(value float64) TParam {
	if value < minTValue {
		return MinT
	}
	if value > maxTValue {
		return MaxT
	}
	return TParam{value}
}
/*
AverageT returns the T parameter halfway between the two given parameters.
*/
func AverageT(a, b TParam) TParam {
	return MakeTParam((a.value + b.value) / 2)
}
/* <-- Methods --> */

/*
Equals reports whether t and other hold (approximately) the same value,
using the package's float comparison.
*/
func (t TParam) Equals(other TParam) bool {
	return FloatsEqual(t.value, other.value)
}
/*
DistanceTo returns the absolute difference between the values of t and other.
*/
func (t TParam) DistanceTo(other TParam) float64 {
	return math.Abs(other.value - t.value)
}
/*
IsGreaterThan reports whether t's value is strictly greater than other's.
*/
func (t TParam) IsGreaterThan(other TParam) bool {
	return other.value < t.value
}
/*
IsLessThan reports whether t's value is strictly smaller than other's.
*/
func (t TParam) IsLessThan(other TParam) bool {
	return other.value > t.value
}
/* <-- Properties --> */

/*
IsMin reports whether t holds the minimum allowed value (approximately).
*/
func (t TParam) IsMin() bool {
	return FloatsEqual(t.value, minTValue)
}
/*
IsMax reports whether t holds the maximum allowed value (approximately).
*/
func (t TParam) IsMax() bool {
	return FloatsEqual(t.value, maxTValue)
}
/*
IsExtreme reports whether t holds either the minimum or the maximum value.
*/
func (t TParam) IsExtreme() bool {
	return t.IsMin() || t.IsMax()
}
/*
Value returns the numeric value held by the parameter.
*/
func (t TParam) Value() float64 {
	return t.value
}
/* <-- Functions --> */

/*
SubTParamRangeTimes subdivides the range between startT and endT into `times`
equal steps, returning the times+1 boundary parameters in ascending order
(regardless of the order in which the endpoints were given).
*/
func SubTParamRangeTimes(startT, endT TParam, times int) []TParam {
	result := make([]TParam, times+1)
	step := startT.DistanceTo(endT) / float64(times)
	lo, hi := startT, endT
	if endT.IsLessThan(startT) {
		lo, hi = endT, startT
	}
	result[0], result[times] = lo, hi
	// Interior points accumulate from the previous entry, one step at a time.
	for i := 1; i < times; i++ {
		result[i] = TParam{result[i-1].value + step}
	}
	return result
}
/*
SubTParamCompleteRangeTimes subdivides the entire range [t_min, t_max] a
given number of times, returning times+1 parameters.
*/
func SubTParamCompleteRangeTimes(times int) []TParam {
	return SubTParamRangeTimes(MinT, MaxT, times)
} | nums/tparam.go | 0.746971 | 0.444625 | tparam.go | starcoder |
package dotmatrix
import (
"fmt"
"image"
"image/color"
"image/draw"
"io"
)
// Flusher flushes an image to the io.Writer, e.g. by using braille characters.
type Flusher interface {
	Flush(w io.Writer, img image.Image) error
}

// Filter may alter an image in any way, including resizing it.
// It is applied prior to drawing the image in the dotmatrix palette.
type Filter interface {
	Filter(image.Image) image.Image
}

// noop is the default Filter: it returns the image unchanged.
type noop struct{}

func (noop) Filter(img image.Image) image.Image {
	return img
}

// Config customizes how a Printer filters, dithers and flushes an image.
// Any nil field is replaced with the package default by mergeConfig.
type Config struct {
	Filter  Filter
	Flusher Flusher
	Drawer  draw.Drawer
	// Reset is invoked between animated frames of an image. It can be used to
	// apply custom cursor positioning.
	Reset func(w io.Writer, rows int)
}

// defaultConfig holds the package-wide defaults (identity filter, braille
// output, Floyd-Steinberg dithering).
var defaultConfig = Config{
	Filter:  noop{},
	Flusher: BrailleFlusher{},
	Drawer:  draw.FloydSteinberg,
}
// mergeConfig returns a fully-populated copy of c, substituting the package
// defaults for every nil field. A nil c yields the defaults themselves.
//
// Fixes: (1) the previous version wrote the defaults back through the
// caller's pointer, so Print's use of &defaultConfig permanently mutated the
// shared package-level default (a data race across Printers); it now works
// on a local copy. (2) The nil-config fast path returned defaultConfig with
// a nil Reset while every non-nil config received a default Reset; the nil
// path now gets the same default.
func mergeConfig(c *Config) Config {
	merged := defaultConfig
	if c != nil {
		merged = *c
	}
	if merged.Filter == nil {
		merged.Filter = defaultConfig.Filter
	}
	if merged.Drawer == nil {
		merged.Drawer = defaultConfig.Drawer
	}
	if merged.Flusher == nil {
		merged.Flusher = defaultConfig.Flusher
	}
	if merged.Reset == nil {
		// Default reset: ANSI escapes moving the cursor 999 columns left and
		// `rows` lines up, so the next frame overwrites the previous one.
		merged.Reset = func(w io.Writer, rows int) {
			fmt.Fprintf(w, "\033[999D\033[%dA", rows)
		}
	}
	return merged
}
// defaultPalette is the 3-color palette images are re-drawn into: black and
// white pixels plus transparent for pixels outside the image.
var defaultPalette = []color.Color{color.Black, color.White, color.Transparent}

// Printer renders images to w according to its merged Config.
type Printer struct {
	w io.Writer
	c Config
}
func Print(w io.Writer, img image.Image) error {
return NewPrinter(w, &defaultConfig).Print(img)
}
// NewPrinter returns a Printer writing to w. Nil Config fields (or a nil
// Config) are filled with the package defaults, e.g. draw.FloydSteinberg
// for the dithering drawer.
func NewPrinter(w io.Writer, c *Config) *Printer {
	return &Printer{w: w, c: mergeConfig(c)}
}
/*
Print prints the image as a series of braille and line feed characters and writes
to w. Braille symbols are useful for representing monochrome images
because any 2x4 pixel area can be represented by one of unicode's
256 braille symbols. See: https://en.wikipedia.org/wiki/Braille_Patterns
Each pixel of the image is converted to either black or white by redrawing the
image using the printer's drawer (Floyd Steinberg diffusion, by default) and a
3-color palette of black, white, and transparent. Finally, each 2x4 pixel block
is printed as a braille symbol.
As an example, this output was printed from a 134px by 108px image of Saturn:
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⡿⡻⡫⡫⡣⣣⢣⢇⢧⢫⢻⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⡟⡟⣝⣜⠼⠼⢚⢚⢚⠓⠷⣧⣇⠧⡳⡱⣻⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⡟⣏⡧⠧⠓⠍⡂⡂⠅⠌⠄⠄⠄⡁⠢⡈⣷⡹⡸⣪⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢿⠿⢿⢿⢿⢟⢏⡧⠗⡙⡐⡐⣌⢬⣒⣖⣼⣼⣸⢸⢐⢁⠂⡐⢰⡏⣎⢮⣾⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣽⣾⣶⣿⢿⢻⡱⢕⠋⢅⠢⠱⢼⣾⣾⣿⣿⣿⣿⣿⣿⣿⡇⡇⠢⢁⢂⡯⡪⣪⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢟⠏⢎⠪⠨⡐⠔⠁⠁⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⢱⠡⡁⣢⢏⢮⣾⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢟⢍⢆⢃⢑⠤⠑⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⣿⣿⣿⣿⡿⡱⢑⢐⢼⢱⣵⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢿⢫⡱⢊⢂⢢⠢⡃⠌⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⢟⢑⢌⢦⢫⣪⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⡻⡱⡑⢅⢢⣢⣳⢱⢑⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠹⡑⡑⡴⡹⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢟⢝⠜⠨⡐⣴⣵⣿⣗⡧⡣⠢⢈⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣜⢎⣷⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⡫⡱⠑⡁⣌⣮⣾⣿⣿⣿⣟⡮⡪⡪⡐⠠⠀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡟⢏⠜⠌⠄⣕⣼⣿⣿⣿⣿⣿⣿⣯⡯⣎⢖⠌⠌⠄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢨⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢟⢕⠕⢁⠡⣸⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⡽⡮⡪⡪⠨⡂⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢟⢕⠕⢁⢐⢔⣽⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢽⡱⡱⡑⡠⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢟⢕⠕⢁⢐⢰⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣟⣞⢜⠔⢄⠡⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⡿⡹⡰⠃⢈⠠⣢⣿⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡮⣇⢏⢂⠢⠀⠀⠀⠀⠀⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⢫⢒⡜⠐⠀⢢⣱⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣳⢕⢕⠌⠄⡀⠀⠀⢀⣤⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⡿⡑⣅⠗⠀⡀⣥⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠟⢙⠙⠿⣿⣿⣿⣿⣿⣿⣿⣿⣯⢮⡪⣂⣢⣬⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⡟⡜⢌⡞⡀⣡⣾⣿⣿⣿⣿⣿⣿⣿⡿⠛⠉⢀⡠⠔⢜⣱⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⡿⡸⡘⢜⣧⣾⣿⣿⣿⣿⣿⣿⠿⢛⡡⠤⡒⢪⣑⣬⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⡇⡇⡣⣷⣿⣿⣿⣿⣿⠿⡛⡣⡋⣕⣬⣶⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣮⣺⣿⣿⣟⣻⣩⣢⣵⣾⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿⠿
*/
func (p *Printer) Print(img image.Image) error {
	// Quantize to the black/white/transparent palette (after filtering),
	// then hand the paletted image to the configured flusher.
	img = redraw(img, p.c.Filter, p.c.Drawer)
	return flush(p.w, img, p.c.Flusher)
}
// redraw applies the filter and re-draws the result into a paletted image
// using the monochrome+transparent palette, preserving the original bounds'
// minimum point (scaled by the filter's resize factor).
func redraw(img image.Image, filter Filter, drawer draw.Drawer) *image.Paletted {
	origBounds := img.Bounds()
	img = filter.Filter(img)
	newBounds := img.Bounds()
	// How much the filter scaled the image on each axis.
	scaleX := float64(newBounds.Dx()) / float64(origBounds.Dx())
	scaleY := float64(newBounds.Dy()) / float64(origBounds.Dy())
	// The offset is important because not all images have bounds starting at (0, 0), and
	// the filter may accidentally zero the min bounding point.
	offset := image.Pt(int(float64(origBounds.Min.X)*scaleX), int(float64(origBounds.Min.Y)*scaleY))
	// Create a new paletted image using a monochrome+transparent color palette.
	paletted := image.NewPaletted(img.Bounds(), defaultPalette)
	paletted.Rect = paletted.Bounds().Add(offset)
	drawer.Draw(paletted, paletted.Bounds(), img, img.Bounds().Min)
	return paletted
}
// flush delegates the final rendering of img to the configured Flusher.
func flush(w io.Writer, img image.Image, flusher Flusher) error {
	return flusher.Flush(w, img)
} | image.go | 0.664323 | 0.414958 | image.go | starcoder |
package vision
import (
"image"
"image/draw"
"math"
"github.com/joaowiciuk/vision/kernel"
"github.com/joaowiciuk/matrix"
)
// Canny implements the popular canny edge detector: grayscale conversion,
// Gaussian smoothing + Sobel gradients (preProc), non-maximum suppression
// against the upper threshold, then hysteresis down to the lower threshold.
// k is the kernel size and σ the Gaussian standard deviation. Returns nil
// for unsupported channel counts.
func Canny(img image.Image, upperThreshold, lowerThreshold uint8, k int, σ float64) (j *image.Gray) {
	lowT := float64(lowerThreshold)
	uppT := float64(upperThreshold)
	//Convert the input image to a single src matrix
	tensor := Im2Mat(img)
	var src *matrix.Matrix
	switch len(tensor) {
	case 1:
		// NOTE(review): re-runs the conversion; `tensor[0]` looks equivalent
		// and cheaper — confirm Im2Mat is pure before changing.
		src = Im2Mat(img)[0]
	case 3, 4:
		// RGB(A): collapse to luminance with the ITU-R BT.601 weights.
		src = matrix.New(img.Bounds().Dy(), img.Bounds().Dx())
		src.Law(func(r, c int) float64 {
			return 0.299*(*tensor[0])[r][c] + 0.587*(*tensor[1])[r][c] + 0.114*(*tensor[2])[r][c]
		})
	default:
		return nil
	}
	m, n := src.Size()
	//Output matrix
	out := matrix.New(m, n)
	//Preprocessing to obtain magnitude and angle from source matrix
	ang := matrix.New(m, n)
	mag := matrix.New(m, n)
	preProc(m, n, mag, ang, src, k, σ)
	/* mag0, ang0 := grad(src, k)
	fmt.Println(mag.Dist(mag0, matrix.Norm1))
	fmt.Println(ang.Dist(ang0, matrix.Norm1)) */
	//Non-maximum suppression
	nonMaxSup(m, n, out, mag, ang, uppT)
	//Hysterysis threshold
	hystThresh(m, n, out, mag, ang, lowT)
	// Convert the binary edge matrix back into a grayscale image.
	aux := *Mat2Im([]*matrix.Matrix{out})
	j = image.NewGray(aux.Bounds())
	draw.Draw(j, j.Bounds(), aux, j.Bounds().Min, draw.Src)
	return
}
// sobel convolves B with size-k horizontal and vertical Sobel kernels in a
// single pass, returning the raw X and Y gradient responses. Pixels outside
// the matrix are clamped to the nearest edge/corner (replicate padding).
func sobel(k int, B *matrix.Matrix) (magX, magY *matrix.Matrix) {
	dx := kernel.SobelX(k)
	dy := kernel.SobelY(k)
	ma, na := dx.Size()
	mb, nb := B.Size()
	cxa, cya := dx.Center()
	// Kernel extent around its center, per side.
	top, left, bottom, right := cya, cxa, ma-cya-1, na-cxa-1
	// x reads B with replicate (clamp-to-edge) boundary handling.
	x := func(c, r int) float64 {
		if c >= 0 && c <= nb-1 && r >= 0 && r <= mb-1 {
			return (*B)[r][c] //Inside
		} else if c < 0 && r >= 0 && r <= mb-1 {
			return (*B)[r][0] //Left
		} else if r < 0 && c >= 0 && c <= nb-1 {
			return (*B)[0][c] //Top
		} else if c > nb-1 && r >= 0 && r <= mb-1 {
			return (*B)[r][nb-1] //Right
		} else if r > mb-1 && c >= 0 && c <= nb-1 {
			return (*B)[mb-1][c] //Bottom
		} else if c < 0 && r > mb-1 {
			return (*B)[mb-1][0] //Bottom left corner
		} else if c < 0 && r < 0 {
			return (*B)[0][0] //Top left corner
		} else if c > nb-1 && r < 0 {
			return (*B)[0][nb-1] //Top right corner
		} else {
			return (*B)[mb-1][nb-1] //Bottom right corner
		}
	}
	// h returns the pair of kernel taps at the flipped offset (c, r).
	// The out-of-range guards are unreachable for the index ranges y uses
	// (the offsets always land inside the kernel); they return 1 defensively.
	h := func(c, r int) (h1 float64, h2 float64) {
		m, n := -r+cya, -c+cxa
		if n < 0 || m < 0 {
			return 1, 1
		}
		if n >= na || m >= ma {
			return 1, 1
		}
		return (*dx)[m][n], (*dy)[m][n]
	}
	// y computes both convolution sums at output position (c, r).
	y := func(c, r int) (y1 float64, y2 float64) {
		r0, r1 := r-top, r+bottom
		c0, c1 := c-left, c+right
		sum1 := 0.
		sum2 := 0.
		for j := r0; j <= r1; j++ {
			for i := c0; i <= c1; i++ {
				h1, h2 := h(c-i, r-j)
				sum1 += x(i, j) * h1
				sum2 += x(i, j) * h2
			}
		}
		return sum1, sum2
	}
	magX, magY = matrix.New(mb, nb), matrix.New(mb, nb)
	for r := 0; r < mb; r++ {
		for c := 0; c < nb; c++ {
			(*magX)[r][c], (*magY)[r][c] = y(c, r)
		}
	}
	return
}
// sobelFast sends a closure over c that, when invoked, runs sobel(k, B).
// NOTE(review): the sobel work executes in whichever goroutine CALLS the
// received closure, not in this one — as used by preProc this yields no
// actual parallelism; confirm the intent.
func sobelFast(k int, B *matrix.Matrix, c chan func() (*matrix.Matrix, *matrix.Matrix)) {
	c <- (func() (*matrix.Matrix, *matrix.Matrix) {
		dx, dy := sobel(k, B)
		return dx, dy
	})
}
// grad computes the gradient magnitude and direction of src with size-k
// Sobel kernels, folding angles into [0, 180) degrees. It duplicates the
// convolution machinery in sobel; it is currently referenced only from the
// commented-out validation code in Canny.
func grad(src *matrix.Matrix, k int) (mag, ang *matrix.Matrix) {
	dx := kernel.SobelX(k)
	dy := kernel.SobelY(k)
	ma, na := dx.Size()
	mb, nb := src.Size()
	cxa, cya := dx.Center()
	top, left, bottom, right := cya, cxa, ma-cya-1, na-cxa-1
	// x reads src with replicate (clamp-to-edge) boundary handling.
	x := func(c, r int) float64 {
		if c >= 0 && c <= nb-1 && r >= 0 && r <= mb-1 {
			return (*src)[r][c] //Inside
		} else if c < 0 && r >= 0 && r <= mb-1 {
			return (*src)[r][0] //Left
		} else if r < 0 && c >= 0 && c <= nb-1 {
			return (*src)[0][c] //Top
		} else if c > nb-1 && r >= 0 && r <= mb-1 {
			return (*src)[r][nb-1] //Right
		} else if r > mb-1 && c >= 0 && c <= nb-1 {
			return (*src)[mb-1][c] //Bottom
		} else if c < 0 && r > mb-1 {
			return (*src)[mb-1][0] //Bottom left corner
		} else if c < 0 && r < 0 {
			return (*src)[0][0] //Top left corner
		} else if c > nb-1 && r < 0 {
			return (*src)[0][nb-1] //Top right corner
		} else {
			return (*src)[mb-1][nb-1] //Bottom right corner
		}
	}
	// h returns both kernel taps at the flipped offset; the range guards are
	// unreachable for the offsets produced by y below.
	h := func(c, r int) (h1 float64, h2 float64) {
		m, n := -r+cya, -c+cxa
		if n < 0 || m < 0 {
			return 1, 1
		}
		if n >= na || m >= ma {
			return 1, 1
		}
		return (*dx)[m][n], (*dy)[m][n]
	}
	// y computes both convolution sums at output position (c, r).
	y := func(c, r int) (y1 float64, y2 float64) {
		r0, r1 := r-top, r+bottom
		c0, c1 := c-left, c+right
		sum1 := 0.
		sum2 := 0.
		for j := r0; j <= r1; j++ {
			for i := c0; i <= c1; i++ {
				h1, h2 := h(c-i, r-j)
				sum1 += x(i, j) * h1
				sum2 += x(i, j) * h2
			}
		}
		return sum1, sum2
	}
	mag, ang = matrix.New(mb, nb), matrix.New(mb, nb)
	for r := 0; r < mb; r++ {
		for c := 0; c < nb; c++ {
			h, v := y(c, r)
			(*mag)[r][c] = math.Hypot(h, v)
			// Angle in degrees, normalized into [0, 180).
			(*ang)[r][c] = math.Atan2(v, h) * 180 / math.Pi
			for (*ang)[r][c] < 0 {
				(*ang)[r][c] += 180
			}
		}
	}
	return
}
// nonMaxSup performs non-maximum suppression: a pixel is marked as an edge
// (255) in out only if its gradient magnitude reaches uppT and is a strict
// maximum along its gradient direction, which is quantized into four sectors
// (diagonal /, vertical, diagonal \, horizontal) by the angle in degrees.
func nonMaxSup(m, n int, out, mag, ang *matrix.Matrix, uppT float64) {
	for x := 0; x < n; x++ {
		for y := 0; y < m; y++ {
			ang0 := (*ang)[y][x]
			// Below the upper threshold: never a seed edge.
			if (*mag)[y][x] < uppT {
				continue
			}
			flag := true
			if ang0 > 112.5 && ang0 <= 157.5 {
				// Gradient along the '/' diagonal: compare NE and SW neighbors.
				if y > 0 && x < n-1 && (*mag)[y][x] <= (*mag)[y-1][x+1] {
					flag = false
				}
				if y < m-1 && x > 0 && (*mag)[y][x] <= (*mag)[y+1][x-1] {
					flag = false
				}
			} else if ang0 > 67.5 && ang0 <= 112.5 {
				// Vertical gradient: compare N and S neighbors.
				if y > 0 && (*mag)[y][x] <= (*mag)[y-1][x] {
					flag = false
				}
				if y < m-1 && (*mag)[y][x] <= (*mag)[y+1][x] {
					flag = false
				}
			} else if ang0 > 22.5 && ang0 <= 67.5 {
				// Gradient along the '\' diagonal: compare NW and SE neighbors.
				if y > 0 && x > 0 && (*mag)[y][x] <= (*mag)[y-1][x-1] {
					flag = false
				}
				if y < m-1 && x < n-1 && (*mag)[y][x] <= (*mag)[y+1][x+1] {
					flag = false
				}
			} else {
				// Horizontal gradient: compare W and E neighbors.
				if x > 0 && (*mag)[y][x] <= (*mag)[y][x-1] {
					flag = false
				}
				if x < n-1 && (*mag)[y][x] <= (*mag)[y][x+1] {
					flag = false
				}
			}
			if flag {
				(*out)[y][x] = 255.
			}
		}
	}
}
// hystThresh grows edges by hysteresis: every seed pixel (255 in out) is
// marked visited (64) and its direction-aligned neighbors are promoted to
// edges when their magnitude reaches lowT, they fall in the same angle
// sector and they are local maxima. Iterates until no pixel changes, then
// converts visited marks back to 255.
func hystThresh(m, n int, out, mag, ang *matrix.Matrix, lowT float64) {
	imageChanged := true
	i := 0
	for imageChanged {
		imageChanged = false
		i++
		for x := 0; x < n; x++ {
			for y := 0; y < m; y++ {
				// Stay two pixels inside the border: neighbor checks below
				// index up to +/-2.
				if x < 2 || x >= n-2 || y < 2 || y >= m-2 {
					continue
				}
				ang0 := (*ang)[y][x]
				if (*out)[y][x] == 255. {
					// 64 marks "already processed" so it is not revisited.
					(*out)[y][x] = 64.
					if ang0 > 112.5 && ang0 <= 157.5 {
						// '/' diagonal sector: try NW and SE neighbors.
						if y > 0 && x > 0 {
							if lowT <= (*mag)[y-1][x-1] &&
								(*out)[y-1][x-1] != 64. &&
								(*ang)[y-1][x-1] > 112.5 &&
								(*ang)[y-1][x-1] <= 157.5 &&
								(*mag)[y-1][x-1] > (*mag)[y-2][x] &&
								(*mag)[y-1][x-1] > (*mag)[y][x-2] {
								(*out)[y-1][x-1] = 255.
								imageChanged = true
							}
						}
						if y < m-1 && x < n-1 {
							if lowT <= (*mag)[y+1][x+1] &&
								(*out)[y+1][x+1] != 64. &&
								(*ang)[y+1][x+1] > 112.5 &&
								(*ang)[y+1][x+1] <= 157.5 &&
								(*mag)[y+1][x+1] > (*mag)[y+2][x] &&
								(*mag)[y+1][x+1] > (*mag)[y][x+2] {
								(*out)[y+1][x+1] = 255.
								imageChanged = true
							}
						}
					} else if ang0 > 67.5 && ang0 <= 112.5 {
						// Vertical sector: try W and E neighbors.
						if x > 0 {
							if lowT <= (*mag)[y][x-1] &&
								(*out)[y][x-1] != 64. &&
								(*ang)[y][x-1] > 67.5 &&
								(*ang)[y][x-1] <= 112.5 &&
								(*mag)[y][x-1] > (*mag)[y-1][x-1] &&
								(*mag)[y][x-1] > (*mag)[y+1][x-1] {
								(*out)[y][x-1] = 255.
								imageChanged = true
							}
						}
						if x < n-1 {
							if lowT <= (*mag)[y][x+1] &&
								(*out)[y][x+1] != 64. &&
								(*ang)[y][x+1] > 67.5 &&
								(*ang)[y][x+1] <= 112.5 &&
								(*mag)[y][x+1] > (*mag)[y-1][x+1] &&
								(*mag)[y][x+1] > (*mag)[y+1][x+1] {
								(*out)[y][x+1] = 255.
								imageChanged = true
							}
						}
					} else if ang0 > 22.5 && ang0 <= 67.5 {
						// '\' diagonal sector: try NE and SW neighbors.
						if y > 0 && x < n-1 {
							if lowT <= (*mag)[y-1][x+1] &&
								(*out)[y-1][x+1] != 64. &&
								(*ang)[y-1][x+1] > 22.5 &&
								(*ang)[y-1][x+1] <= 67.5 &&
								(*mag)[y-1][x+1] > (*mag)[y-2][x] &&
								(*mag)[y-1][x+1] > (*mag)[y][x+2] {
								(*out)[y-1][x+1] = 255.
								imageChanged = true
							}
						}
						if y < m-1 && x > 0 {
							if lowT <= (*mag)[y+1][x-1] &&
								(*out)[y+1][x-1] != 64. &&
								(*ang)[y+1][x-1] > 22.5 &&
								(*ang)[y+1][x-1] <= 67.5 &&
								(*mag)[y+1][x-1] > (*mag)[y][x-2] &&
								(*mag)[y+1][x-1] > (*mag)[y+2][x] {
								(*out)[y+1][x-1] = 255.
								imageChanged = true
							}
						}
					} else {
						// Horizontal sector: try N and S neighbors.
						// NOTE(review): "< 22.5 && >= 157.5" can never be true
						// simultaneously — the sector test presumably wants
						// OR; as written these two branches never fire.
						// NOTE(review): the "(*mag)[y-1][x+2]" comparison is
						// asymmetric with its x-1 partner — verify the index.
						if y > 0 {
							if lowT <= (*mag)[y-1][x] &&
								(*out)[y-1][x] != 64. &&
								(*ang)[y-1][x] < 22.5 &&
								(*ang)[y-1][x] >= 157.5 &&
								(*mag)[y-1][x] > (*mag)[y-1][x-1] &&
								(*mag)[y-1][x] > (*mag)[y-1][x+2] {
								(*out)[y-1][x] = 255.
								imageChanged = true
							}
						}
						if y < m-1 {
							if lowT <= (*mag)[y+1][x] &&
								(*out)[y+1][x] != 64. &&
								(*ang)[y+1][x] < 22.5 &&
								(*ang)[y+1][x] >= 157.5 &&
								(*mag)[y+1][x] > (*mag)[y+1][x-1] &&
								(*mag)[y+1][x] > (*mag)[y+1][x+1] {
								(*out)[y+1][x] = 255.
								imageChanged = true
							}
						}
					}
				}
			}
		}
	}
	//Reassign: visited marks become final edges.
	for x := 0; x < n; x++ {
		for y := 0; y < m; y++ {
			if (*out)[y][x] == 64. {
				(*out)[y][x] = 255.
			}
		}
	}
}
// preProc fills mag and ang with the gradient magnitude and direction of
// src after Gaussian smoothing (size k, std dev σ). Angles are normalized
// into [0, 180) degrees. The sobelFast indirection yields no real
// parallelism (see sobelFast).
func preProc(m, n int, mag, ang, src *matrix.Matrix, k int, σ float64) {
	// Smooth first; ConvFast returns a channel producing the result.
	srcFuture := kernel.Gaussian(k, σ).ConvFast(src)
	c := make(chan func() (*matrix.Matrix, *matrix.Matrix))
	go sobelFast(k, <-srcFuture, c)
	magX, magY := (<-c)()
	for r := 0; r < m; r++ {
		for c := 0; c < n; c++ {
			(*mag)[r][c] = math.Hypot((*magX)[r][c], (*magY)[r][c])
			(*ang)[r][c] = math.Atan2((*magY)[r][c], (*magX)[r][c]) * 180 / math.Pi
			for (*ang)[r][c] < 0 {
				(*ang)[r][c] += 180
			}
		}
	}
	return
} | canny.go | 0.651022 | 0.551332 | canny.go | starcoder |
package model
// RuleCategories maps category id to its definition; RuleCategoriesOrdered
// lists the categories in display order (populated by init).
var RuleCategories = make(map[string]*RuleCategory)
var RuleCategoriesOrdered []*RuleCategory
// init populates RuleCategories and RuleCategoriesOrdered.
//
// Fix: four multi-line description literals were missing a space at a
// concatenation boundary, producing the run-together words "forautomated",
// "checkingrequired", "definitionshave" and "causeissues" in user-visible
// text. Only those spaces were added.
func init() {
	RuleCategories[CategoryExamples] = &RuleCategory{
		Id:   CategoryExamples,
		Name: "Examples",
		Description: "Examples help consumers understand how API calls should look. They are really important for " +
			"automated tooling for mocking and testing. These rules check examples have been added to component schemas, " +
			"parameters and operations. These rules also check that examples match the schema and types provided.",
	}
	RuleCategories[CategoryOperations] = &RuleCategory{
		Id:   CategoryOperations,
		Name: "Operations",
		Description: "Operations are the core of the contract, they define paths and HTTP methods. These rules check" +
			" operations have been well constructed, looks for operationId, parameter, schema and return types in depth.",
	}
	RuleCategories[CategoryInfo] = &RuleCategory{
		Id:   CategoryInfo,
		Name: "Contract Information",
		Description: "The info object contains licencing, contact, authorship details and more. Checks to confirm " +
			"required details have been completed.",
	}
	RuleCategories[CategoryDescriptions] = &RuleCategory{
		Id:   CategoryDescriptions,
		Name: "Descriptions",
		Description: "Documentation is really important, in OpenAPI, just about everything can and should have a " +
			"description. This set of rules checks for absent descriptions, poor quality descriptions (copy/paste)," +
			" or short descriptions.",
	}
	RuleCategories[CategorySchemas] = &RuleCategory{
		Id:   CategorySchemas,
		Name: "Schemas",
		Description: "Schemas are how request bodies and response payloads are defined. They define the data going in " +
			"and the data flowing out of an operation. These rules check for structural validity, checking types, checking " +
			"required fields and validating correct use of structures.",
	}
	RuleCategories[CategorySecurity] = &RuleCategory{
		Id:   CategorySecurity,
		Name: "Security",
		Description: "Security plays a central role in RESTful APIs. These rules make sure that the correct definitions " +
			"have been used and put in the right places.",
	}
	RuleCategories[CategoryTags] = &RuleCategory{
		Id:   CategoryTags,
		Name: "Tags",
		Description: "Tags are used as meta-data for operations. They are mainly used by tooling as a taxonomy mechanism" +
			" to build navigation, search and more. Tags are important as they help consumers navigate the contract when " +
			"using documentation, testing, code generation or analysis tools.",
	}
	RuleCategories[CategoryValidation] = &RuleCategory{
		Id:   CategoryValidation,
		Name: "Validation",
		Description: "Validation rules make sure that certain characters or patterns have not been used that may cause " +
			"issues when rendering in different types of applications.",
	}
	RuleCategories[CategoryAll] = &RuleCategory{
		Id:          CategoryAll,
		Name:        "All Categories",
		Description: "All the categories, for those who like a party.",
	}
	// Display order; CategoryAll is deliberately excluded.
	RuleCategoriesOrdered = append(RuleCategoriesOrdered,
		RuleCategories[CategoryInfo],
		RuleCategories[CategoryOperations],
		RuleCategories[CategoryTags],
		RuleCategories[CategorySchemas],
		RuleCategories[CategoryValidation],
		RuleCategories[CategoryDescriptions],
		RuleCategories[CategorySecurity],
		RuleCategories[CategoryExamples],
	)
} | model/rule_categories.go | 0.531696 | 0.414721 | rule_categories.go | starcoder |
package mqv
import (
"math/big"
"math/bits"
)
// SubtleIntSize returns the number of machine words a SubtleInt needs to
// store at least numBits bits of information.
func SubtleIntSize(numBits int) int {
	const wordBytes = bits.UintSize / 8
	numBytes := (numBits + 7) >> 3 // round bits up to whole bytes
	return (numBytes + wordBytes - 1) / wordBytes
}
// SubtleInt represents a non-negative big integer with a fixed size,
// stored as machine words with the least significant word first. All
// operations on this integer are performed in constant time.
type SubtleInt []uint
// Add sets z to the sum x+y and returns the carry (0 or 1).
// All three operands must have the same length.
func (z SubtleInt) Add(x, y SubtleInt) uint {
	if len(x) != len(y) || len(x) != len(z) {
		panic("size mismatch")
	}
	var c uint
	// Word-wise addition with carry propagation, least significant first.
	for i := range x {
		z[i], c = addW(x[i], y[i], c)
	}
	return c
}
// Sub sets z to the difference x-y and returns the borrow (0 or 1).
// All three operands must have the same length.
func (z SubtleInt) Sub(x, y SubtleInt) uint {
	if len(x) != len(y) || len(x) != len(z) {
		panic("size mismatch")
	}
	var c uint
	// Word-wise subtraction with borrow propagation, least significant first.
	for i := range x {
		z[i], c = subW(x[i], y[i], c)
	}
	return c
}
// AddMod sets z to x+y mod n. Both parameters x and y must be less than n.
// Constant-time: both the raw sum and the reduced sum are always computed,
// and the result is chosen by Select.
func (z SubtleInt) AddMod(x, y, n SubtleInt) {
	tmp := make(SubtleInt, len(x))
	c1 := z.Add(x, y)    // carry of the raw sum
	c2 := tmp.Sub(z, n)  // borrow of (sum - n)
	// c1=1,c2=0 would mean the sum overflowed yet still exceeded n after
	// wrapping — impossible when x, y < n.
	if c1&^c2 == 1 {
		panic("can not happen")
	}
	// c1^c2 == 1 only when sum < n (no carry, borrow): keep the raw sum;
	// otherwise take the reduced value sum-n.
	z.Select(c1^c2, z, tmp)
}
// Select sets z to x if p = 1 and y if p = 0.
// p must be exactly 0 or 1; other values produce garbage (see selectW).
func (z SubtleInt) Select(p uint, x, y SubtleInt) {
	if len(x) != len(y) || len(x) != len(z) {
		panic("size mismatch")
	}
	for i := range x {
		z[i] = selectW(p, x[i], y[i])
	}
}
// Less returns 1 if z < y and 0 otherwise.
func (z SubtleInt) Less(y SubtleInt) uint {
if len(z) != len(y) {
panic("size mismatch")
}
undecided := uint(1)
isLess := uint(0)
for i := len(z) - 1; i >= 0; i-- {
less1 := lessW(z[i], y[i])
less2 := lessW(y[i], z[i])
notequal := less1 | less2
isLess = selectW(undecided¬equal, less1, isLess)
undecided &= ^notequal
}
return isLess
}
// SetZero sets every word of z to zero.
func (z SubtleInt) SetZero() {
	for i := 0; i < len(z); i++ {
		z[i] = 0
	}
}
// SetBytes interprets buf as a big-endian byte slice and sets z to this value.
//
// NOTE(review): buf appears to be assumed exactly len(z) words long (as
// produced by Bytes): a shorter buf lands in the high bits of the top word
// (effectively left-shifting the value) and a longer buf indexes out of
// range — confirm against callers.
func (z SubtleInt) SetBytes(buf []byte) {
	z.SetZero()
	// Fill from the most significant word downward, consuming the
	// big-endian buffer starting at its most significant byte.
	i, s := len(z)-1, uint(bits.UintSize)
	for _, x := range buf {
		s -= 8
		z[i] |= uint(x) << s
		if s == 0 {
			// Current word is full; move to the next lower word.
			s = bits.UintSize
			i--
		}
	}
}
// Bytes returns the value of z as a big-endian byte slice.
// The result always has len(z) words' worth of bytes, including leading
// zeros, so it round-trips through SetBytes.
func (z SubtleInt) Bytes() []byte {
	const sizeBytes = bits.UintSize / 8
	r := make([]byte, len(z)*sizeBytes)
	// Walk the words from least significant upward while filling the
	// output from its tail, which yields big-endian byte order.
	i := len(r) - 1
	for _, x := range z {
		for s := uint(0); s < bits.UintSize; s += 8 {
			r[i] = uint8(x >> s)
			i--
		}
	}
	return r
}
// Big converts the integer z to a math/big integer.
func (z SubtleInt) Big() *big.Int {
	v := new(big.Int)
	v.SetBytes(z.Bytes())
	return v
}
// String returns the decimal representation of z (via big.Int).
// Note: this path is NOT constant time; use it for debugging only.
func (z SubtleInt) String() string {
	return z.Big().String()
}
// selectW returns a if v is 1 and b if v is 0, without branching.
func selectW(v, a, b uint) uint {
	mask := v - 1 // all ones when v == 0, all zeros when v == 1
	return (a &^ mask) | (b & mask)
}
// lessEqW returns 1 if a <= b and 0 if a > b, in constant time.
func lessEqW(a, b uint) uint {
	// Split off the most significant bits so the remainder subtraction
	// below cannot itself wrap past the sign position.
	msbA := (a >> (bits.UintSize - 1)) & 1
	msbB := (b >> (bits.UintSize - 1)) & 1
	remA := a &^ (1 << (bits.UintSize - 1))
	remB := b &^ (1 << (bits.UintSize - 1))
	// remA - remB - 1 has its top bit set exactly when remA <= remB.
	less := ((remA - remB - 1) >> (bits.UintSize - 1)) & 1
	// If the MSBs differ, the operand with the set MSB is the larger one
	// (result is msbB); otherwise the remainder comparison decides.
	return selectW((msbA^msbB)&1, msbB, less)
}

// lessW returns 1 if a < b and 0 if a >= b, in constant time.
// a < b is the negation of b <= a.
func lessW(a, b uint) uint {
	return lessEqW(b, a) ^ 1
}
// addW computes the full-width sum z1<<_W + z0 = a+b+c, with c == 0 or 1.
// z1 is the carry out.
func addW(a, b, c uint) (z0, z1 uint) {
	bc := b + c
	z0 = a + bc
	// A carry occurred if a+bc wrapped (z0 < a) or if b+c itself wrapped
	// (bc < b, possible only when b is the max word and c is 1).
	z1 = lessW(z0, a) | lessW(bc, b)
	return
}

// subW computes the full-width difference z1<<_W + z0 = a-b-c, with c == 0
// or 1. z1 is the borrow out.
func subW(a, b, c uint) (z0, z1 uint) {
	bc := b + c
	z0 = a - bc
	// A borrow occurred if a-bc wrapped (a < z0) or if b+c itself wrapped
	// (bc < b, possible only when b is the max word and c is 1).
	z1 = lessW(a, z0) | lessW(bc, b)
	return
}
package hier
import "strconv"
// Graph is the basic directed graph used for hierarchical layout.
type Graph struct {
	// Nodes holds every node; a node's ID equals its index here
	// (see AddNode).
	Nodes Nodes
	// ByRank groups nodes by their layout rank.
	ByRank []Nodes
}

// ID is an unique identifier to a Node.
type ID int

// Node is the basic information about a node.
type Node struct {
	ID ID // index of this node in Graph.Nodes
	Virtual bool // virtual nodes get a "v" fallback name in String; presumably layout-inserted
	In Nodes // nodes with an edge pointing at this node
	Out Nodes // nodes this node points at
	Label string // display label; empty labels fall back to a generated name
	// Rank info
	Rank int
	// Ordering info
	Coef float32
	GridX float32
	// Visuals
	Center Vector // position of the node center
	Radius Vector // half-extents of the node box
}
// String returns the node's label, falling back to a generated name when
// the label is empty: "v<id>" for virtual nodes, "#<id>" otherwise.
func (node *Node) String() string {
	if node.Label != "" {
		return node.Label
	}
	id := strconv.Itoa(int(node.ID))
	if node.Virtual {
		return "v" + id
	}
	return "#" + id
}
// InDegree returns the count of inbound edges.
func (node *Node) InDegree() int { return len(node.In) }

// OutDegree returns the count of outbound edges.
func (node *Node) OutDegree() int { return len(node.Out) }

// Vector represents a 2D vector.
type Vector struct {
	X, Y float32
}

// NewGraph creates an empty graph.
func NewGraph() *Graph { return &Graph{} }
// ensureNode adds nodes until we have reached id, so that after the call
// id is a valid index into graph.Nodes.
func (graph *Graph) ensureNode(id int) {
	for id >= len(graph.Nodes) {
		graph.AddNode()
	}
}

// NodeCount returns the count of nodes.
func (graph *Graph) NodeCount() int { return len(graph.Nodes) }

// AddNode appends a new node and returns it. The node's ID is its index
// in graph.Nodes. (The old comment claimed an ID was returned; the method
// returns the *Node itself.)
func (graph *Graph) AddNode() *Node {
	node := &Node{ID: ID(len(graph.Nodes))}
	graph.Nodes = append(graph.Nodes, node)
	return node
}
// AddEdge adds a directed edge from src to dst by updating both adjacency
// lists. Duplicate edges and self-loops are not checked here; callers that
// care must deduplicate (see CountUndirectedLinks).
func (graph *Graph) AddEdge(src, dst *Node) {
	src.Out.Append(dst)
	dst.In.Append(src)
}
// Roots returns the nodes without any incoming edges.
func (graph *Graph) Roots() Nodes {
	roots := Nodes{}
	for _, n := range graph.Nodes {
		if len(n.In) == 0 {
			roots.Append(n)
		}
	}
	return roots
}
// CountRoots returns the number of nodes that have no incoming edges.
func (graph *Graph) CountRoots() int {
	count := 0
	for _, n := range graph.Nodes {
		if len(n.In) == 0 {
			count++
		}
	}
	return count
}
// CountEdges counts all edges, including duplicates, by summing the
// out-degree of every node.
func (graph *Graph) CountEdges() int {
	sum := 0
	for _, node := range graph.Nodes {
		sum += node.OutDegree()
	}
	return sum
}
// CountUndirectedLinks counts unique edges in the graph excluding loops.
// A->B and B->A (and any duplicates) count as a single link.
func (graph *Graph) CountUndirectedLinks() int {
	// Set of normalized (low ID, high ID) pairs seen so far.
	counted := map[[2]ID]struct{}{}
	for _, src := range graph.Nodes {
		for _, dst := range src.Out {
			if src == dst {
				// Skip self-loops.
				continue
			}
			a, b := src.ID, dst.ID
			if a > b {
				// Normalize the pair so direction does not matter.
				a, b = b, a
			}
			counted[[2]ID{a, b}] = struct{}{}
		}
	}
	return len(counted)
}
package main
import (
"fmt"
"math"
"testing"
)
// Var identifies a variable, e.g. x.
type Var string

// literal is a numeric constant, e.g. 3.141.
type literal float64

// unary represents a unary operator expression, e.g. -x.
type unary struct {
	op rune // '+' or '-'
	x  Expr
}

// binary represents a binary operator expression, e.g. x+y.
type binary struct {
	op   rune // one of '+', '-', '*', '/'
	x, y Expr
}

// call represents a function call expression, e.g. sin(x).
type call struct {
	fn   string // one of "pow", "sin", "sqrt"
	args []Expr
}

// Env maps variable names to their values.
type Env map[Var]float64

// Expr is an arithmetic expression that can be evaluated in an environment.
type Expr interface {
	// Eval returns the value of this Expr in the environment env.
	Eval(env Env) float64
}

// Eval looks the variable up in env; missing variables evaluate to 0.
func (v Var) Eval(env Env) float64 {
	return env[v]
}

// Eval returns the literal's constant value.
func (l literal) Eval(_ Env) float64 {
	return float64(l)
}

// Eval applies the unary operator to its operand.
func (u unary) Eval(env Env) float64 {
	switch u.op {
	case '+':
		return +u.x.Eval(env)
	case '-':
		return -u.x.Eval(env)
	}
	panic(fmt.Sprintf("unsupported unary operator: %q", u.op))
}

// Eval applies the binary operator to its two operands.
func (b binary) Eval(env Env) float64 {
	switch b.op {
	case '+':
		return b.x.Eval(env) + b.y.Eval(env)
	case '-':
		// Fixed: this case previously ADDED the operands, breaking every
		// expression containing a subtraction (e.g. "5 / 9 * (F - 32)").
		return b.x.Eval(env) - b.y.Eval(env)
	case '*':
		return b.x.Eval(env) * b.y.Eval(env)
	case '/':
		return b.x.Eval(env) / b.y.Eval(env)
	}
	// Also fixed the "unsuported" typo in this message.
	panic(fmt.Sprintf("unsupported binary operator: %q", b.op))
}

// Eval evaluates the arguments and applies the named math function.
func (c call) Eval(env Env) float64 {
	switch c.fn {
	case "pow":
		return math.Pow(c.args[0].Eval(env), c.args[1].Eval(env))
	case "sin":
		return math.Sin(c.args[0].Eval(env))
	case "sqrt":
		return math.Sqrt(c.args[0].Eval(env))
	}
	panic(fmt.Sprintf("unsupported function call: %s", c.fn))
}
// TestEval parses each expression with Parse (defined elsewhere in this
// package) and checks that Eval in the given environment matches want,
// after formatting with %.6g.
func TestEval(t *testing.T) {
	tests := []struct {
		expr string
		env Env
		want string
	} {
		{"sqrt(A / pi)", Env{"A": 87616, "pi": math.Pi}, "167"},
		{"pow(x, 3) + pow(y, 3)", Env{"x": 12, "y": 1}, "1729"},
		{"pow(x, 3) + pow(y, 3)", Env{"x": 9, "y": 10}, "1729"},
		{"5 / 9 * (F - 32)", Env{"F": -40}, "-40"},
		{"5 / 9 * (F - 32)", Env{"F": 32}, "0"},
		{"5 / 9 * (F - 32)", Env{"F": 212}, "100"},
	}
	// Print each distinct expression once as a header for the runs below.
	var prevExpr string
	for _, test := range tests {
		if test.expr != prevExpr {
			fmt.Printf("\n%s\n", test.expr)
			prevExpr = test.expr
		}
		expr, err := Parse(test.expr)
		if err != nil {
			t.Error(err)
			continue
		}
		got := fmt.Sprintf("%.6g", expr.Eval(test.env))
		fmt.Printf("\t%v => %s \n", test.env, got)
		if got != test.want {
			t.Errorf("%s.Eval() in %v = %q, want %q \n",
				test.expr, test.env, got, test.want)
		}
	}
}
package xmlParser
import (
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/clbanning/mxj"
)
/*
XMLNode is a wrapper around an mxj map.
It can traverse the xml to get the data.
NOTE:
All attributes will also become a node with key '-attributesName'.
And tags with attributes, their value will become a node with key '#text'.
Ex:
	<ProductName sku="ABC">
	   This will become node also.
	</ProductName>
Will become:
	map[string]interface{
	    "-sku": "ABC",
	    "#text": "This will become node also.",
	}
*/
type XMLNode struct {
	Value interface{} // parsed subtree: a map for elements, a string for leaf text
	Path string // dot-separated path from the document root; empty for the root node
}
// GenerateXMLNode generates an XMLNode object from the xml response body.
// A node is returned even when parsing fails, so callers must check the
// error before using it.
func GenerateXMLNode(xmlBuffer []byte) (*XMLNode, error) {
	m, err := mxj.NewMapXml(xmlBuffer)
	xNode := XMLNode{Value: m.Old()}
	return &xNode, err
}
// CurrentKey returns the key (tag) for the current node — the last segment
// of the dot-separated path. The root node has an empty key.
func (xn *XMLNode) CurrentKey() string {
	if i := strings.LastIndex(xn.Path, "."); i >= 0 {
		return xn.Path[i+1:]
	}
	return xn.Path
}
// FindByKey gets the element data by any key (tag) at any depth.
// The method returns a list of XMLNode; each node represents all the
// sub-elements of the matched key. The nodes can then be used to traverse
// deeper individually.
func (xn *XMLNode) FindByKey(key string) []XMLNode {
	valuesMap := []XMLNode{}
	xnode, err := xn.ToMap()
	if err != nil {
		// Leaf (non-map) nodes have no children to search.
		return valuesMap
	}
	// Collect every path that ends at the key, then resolve each path.
	paths := xnode.PathsForKey(key)
	for _, p := range paths {
		nodes := xn.FindByPath(p)
		valuesMap = append(valuesMap, nodes...)
	}
	return valuesMap
}

// FindByKeys gets the element data by any keys (tags) at any depth.
// Subsequent keys need to be descendants of the previous one.
// The method returns a list of XMLNode; each node represents all the
// sub-elements of the matched key.
// Ex:
// If current node has paths "A.B.C.D.E" and "A.B2.C2.D2.E",
// then node.FindByKeys("B", "E") will return nodes E under the first path.
func (xn *XMLNode) FindByKeys(keys ...string) []XMLNode {
	valuesMap := []XMLNode{}
	if len(keys) <= 0 {
		return valuesMap
	}
	// Build a pattern matching paths where the keys appear in order,
	// possibly with other path segments between them.
	keysRegexp := regexp.MustCompile(
		`(^|\.)` + strings.Join(keys, `(\.(\w+\.)*)`) + `($|\.)`,
	)
	// Candidates are all nodes matching the last key; keep those whose
	// full path matches the ordered-ancestors pattern.
	nodes := xn.FindByKey(keys[len(keys)-1])
	for _, node := range nodes {
		if keysRegexp.MatchString(node.Path) {
			valuesMap = append(valuesMap, node)
		}
	}
	return valuesMap
}

// FindByPath gets the element data by path string.
// Path is relative to the current XMLNode and must start with a direct
// sub node of the current node; segments are separated by '.',
// ex: "Tag1.Tag2.Tag3".
// If current node is "A" with sub path "B.C", then querying "B.C" returns
// node C, but querying "C" returns empty nodes.
// The method returns a list of XMLNode; each node represents all the
// sub-elements of the matched key.
func (xn *XMLNode) FindByPath(path string) []XMLNode {
	valuesMap := []XMLNode{}
	xnode, err := xn.ToMap()
	if err != nil {
		return valuesMap
	}
	values, err := xnode.ValuesForPath(path)
	if err != nil {
		return valuesMap
	}
	for _, m := range values {
		// NOTE(review): the stored Path is the relative query path, not
		// the absolute path from the document root — confirm this is
		// intended for non-root nodes.
		node := XMLNode{Value: m, Path: path}
		valuesMap = append(valuesMap, node)
	}
	return valuesMap
}

// FindByFullPath gets the element data by absolute path, separated by '.'.
// If the current node has path "A.B.C" and the query is "A.B.C.D.E", the
// method searches the current node with the relative path "D.E".
// The method returns a list of XMLNode; each node represents all the
// sub-elements of the matched key.
func (xn *XMLNode) FindByFullPath(path string) []XMLNode {
	// Strip the current node's own path prefix (first occurrence only).
	subPath := strings.Replace(path, xn.Path+".", "", 1)
	return xn.FindByPath(subPath)
}
// Elements returns the keys of the immediate sub-elements of the current
// node, or an empty slice for leaf nodes and on any mxj error.
func (xn *XMLNode) Elements() []string {
	xnode, err := xn.ToMap()
	if err != nil {
		return []string{}
	}
	elements, err := xnode.Elements("")
	if err != nil {
		return []string{}
	}
	return elements
}

// IsLeaf reports whether the current node is a leaf node, i.e. its value
// is not a map of sub-elements.
func (xn *XMLNode) IsLeaf() bool {
	return reflect.TypeOf(xn.Value).Kind() != reflect.Map
}

// LeafPaths returns the paths to the leaf nodes under the current node.
func (xn *XMLNode) LeafPaths() []string {
	xnode, err := xn.ToMap()
	if err != nil {
		return []string{}
	}
	return xnode.LeafPaths()
}

// LeafNodes returns all the leaf nodes of the current node, wrapped as
// XMLNode values.
func (xn *XMLNode) LeafNodes() []XMLNode {
	xnode, err := xn.ToMap()
	if err != nil {
		return []XMLNode{}
	}
	nodes := xnode.LeafNodes()
	lnodes := make([]XMLNode, len(nodes))
	for i, node := range nodes {
		lnodes[i] = XMLNode{
			Value: node.Value,
			Path: node.Path,
		}
	}
	return lnodes
}

// ValueType returns the reflect.Kind of the node value.
// NOTE(review): panics if Value is nil (reflect.TypeOf returns a nil Type)
// — confirm nodes are always constructed with a non-nil Value.
func (xn *XMLNode) ValueType() reflect.Kind {
	return reflect.TypeOf(xn.Value).Kind()
}
/*
ToMap converts the node value to an mxj map.
If the value is not a map, an error will be returned.
Tags that have no sub tags but do have attributes are also maps.
Attributes of the tag have key '-attributesName'.
The tag's own text value has key '#text'.
Ex:
	<MessageId MarketplaceID="ATVPDKDDIKX0D" SKU="24478624">
		173964729
	</MessageId>
After to map,
	map[string]string{
		"-MarketplaceID": "ATVPDKDDIKX0D",
		"-SKU": "24478624",
		"#text": "173964729",
	}
*/
func (xn *XMLNode) ToMap() (mxj.Map, error) {
	if xn.ValueType() == reflect.Map {
		return mxj.Map(xn.Value.(map[string]interface{})), nil
	}
	return nil, errors.New("value is not a map")
}
// ToString converts the node value to a string.
// If the value is not a string, an error will be returned.
func (xn *XMLNode) ToString() (string, error) {
	if xn.ValueType() == reflect.String {
		return xn.Value.(string), nil
	}
	return "", errors.New("value is not a valid string")
}

// ToInt converts the node value to an int, parsing string values with
// strconv.Atoi. If the value is neither a string nor an int, an error will
// be returned. Note: only the exact type int matches reflect.Int; int32 or
// int64 values are rejected.
func (xn *XMLNode) ToInt() (int, error) {
	switch xn.ValueType() {
	case reflect.String:
		value, err := xn.ToString()
		if err != nil {
			return 0, errors.New("can not convert value to int")
		}
		return strconv.Atoi(value)
	case reflect.Int:
		return xn.Value.(int), nil
	default:
		return 0, errors.New("can not convert value to int")
	}
}

// ToFloat64 converts the node value to a float64, parsing string values
// with strconv.ParseFloat. If the value is neither a string nor a float64,
// an error will be returned.
func (xn *XMLNode) ToFloat64() (float64, error) {
	switch xn.ValueType() {
	case reflect.String:
		value, err := xn.ToString()
		if err != nil {
			return 0.0, errors.New("can not convert value to float64")
		}
		return strconv.ParseFloat(value, 64)
	case reflect.Float64:
		return xn.Value.(float64), nil
	default:
		return 0.0, errors.New("can not convert value to float64")
	}
}

// ToBool converts the node value to a bool, parsing string values with
// strconv.ParseBool. If the value is neither a string nor a bool, an error
// will be returned.
func (xn *XMLNode) ToBool() (bool, error) {
	switch xn.ValueType() {
	case reflect.String:
		value, err := xn.ToString()
		if err != nil {
			return false, errors.New("can not convert value to bool")
		}
		return strconv.ParseBool(value)
	case reflect.Bool:
		return xn.Value.(bool), nil
	default:
		return false, errors.New("can not convert value to bool")
	}
}

// ToTime converts the node value to a timestamp, expecting an RFC 3339
// formatted string. If the value is not a valid timestamp, an error will
// be returned.
func (xn *XMLNode) ToTime() (time.Time, error) {
	value, err := xn.ToString()
	if err != nil {
		return time.Time{}, errors.New("can not convert value to time")
	}
	t, err := time.Parse(time.RFC3339, value)
	return t, err
}
/*
ToStruct unmarshals the node value into a struct.
If the value cannot be unmarshaled, an error is returned.
ToStruct uses json tags to unmarshal the map.
Ex:
	To unmarshal the tag:
	<MessageId MarketplaceID="ATVPDKDDIKX0D" SKU="24478624">
	    173964729
	</MessageId>
	Can use struct:
	msgID := struct {
		MarketplaceID string `json:"-MarketplaceID"`
		SKU           string `json:"-SKU"`
		ID            string `json:"#text"`
	}{}
*/
func (xn *XMLNode) ToStruct(structPtr interface{}) error {
	xmap, err := xn.ToMap()
	if err != nil {
		return errors.New("value can not be unmarshal to struct")
	}
	return xmap.Struct(structPtr)
}
// XML returns the raw xml data.
// If the current node has a key, it is used as the root node tag.
// If the current node has no key and only one child node, the child
// node's key becomes the root node tag.
// If the current node has no key and more than one child node, a default
// <doc> tag is used as the root tag (mxj's behavior when no tag is given).
func (xn *XMLNode) XML() ([]byte, error) {
	xmap, err := xn.ToMap()
	if err != nil {
		return []byte{}, err
	}
	rootTag := xn.CurrentKey()
	if rootTag == "" {
		// Root node: let mxj pick the tag.
		return xmap.Xml()
	}
	return xmap.Xml(rootTag)
}
// PrintXML prints the node as xml indented with two spaces.
// If the node value is not a map (or cannot be rendered), an empty line is
// printed instead.
func (xn *XMLNode) PrintXML() {
	xmap, err := xn.ToMap()
	if err != nil {
		fmt.Println("")
		// Fixed: previously execution fell through here and called
		// XmlIndent on the nil map returned by the failed ToMap.
		return
	}
	xml, err := xmap.XmlIndent("", "  ")
	if err != nil {
		fmt.Println("")
		return
	}
	fmt.Println(string(xml))
}
// stringInSlice reports whether s occurs in slice.
func stringInSlice(s string, slice []string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
package gen
import (
pschema "github.com/pulumi/pulumi/pkg/v3/codegen/schema"
)
// typeOverlays augment the types defined by the kubernetes schema.
var typeOverlays = map[string]pschema.ComplexTypeSpec{
"kubernetes:core/v1:ServiceSpec": {
ObjectTypeSpec: pschema.ObjectTypeSpec{
Properties: map[string]pschema.PropertySpec{
"type": {
TypeSpec: pschema.TypeSpec{
OneOf: []pschema.TypeSpec{
{Type: "string"},
{Ref: "#/types/kubernetes:core/v1:ServiceSpecType"},
},
},
},
},
},
},
"kubernetes:core/v1:ServiceSpecType": {
ObjectTypeSpec: pschema.ObjectTypeSpec{
Type: "string",
},
Enum: []pschema.EnumValueSpec{
{Value: "ExternalName"},
{Value: "ClusterIP"},
{Value: "NodePort"},
{Value: "LoadBalancer"},
},
},
"kubernetes:helm.sh/v3:Release": {
ObjectTypeSpec: pschema.ObjectTypeSpec{
Description: "A Release is an instance of a chart running in a Kubernetes cluster.\nA Chart is a Helm package. It contains all of the resource definitions necessary to run an application, tool, or service inside of a Kubernetes cluster.\nNote - Helm Release is currently in BETA and may change. Use in production environment is discouraged.",
Properties: map[string]pschema.PropertySpec{
"name": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Release name.",
},
"repositoryOpts": {
TypeSpec: pschema.TypeSpec{
Ref: "#/types/kubernetes:helm.sh/v3:RepositoryOpts",
},
Description: "Specification defining the Helm chart repository to use.",
},
"chart": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Chart name to be installed. A path may be used.",
},
"version": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Specify the exact chart version to install. If this is not specified, the latest version is installed.",
},
"devel": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.",
},
"valueYamlFiles": {
TypeSpec: pschema.TypeSpec{
Type: "array",
Items: &pschema.TypeSpec{
Ref: "pulumi.json#/Asset",
},
},
Description: "List of assets (raw yaml files). Content is read and merged with values. Not yet supported.",
},
"values": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Ref: "pulumi.json#/Any",
},
},
Description: "Custom values set for the release.",
},
"manifest": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Ref: "pulumi.json#/Any",
},
},
Description: "The rendered manifests as JSON. Not yet supported.",
},
"resourceNames": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Type: "array",
Items: &pschema.TypeSpec{
Type: "string",
},
},
},
Description: "Names of resources created by the release grouped by \"kind/version\".",
},
"namespace": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Namespace to install the release into.",
},
"verify": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Verify the package before installing it.",
},
"keyring": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Location of public keys used for verification. Used only if `verify` is true",
},
"timeout": {
TypeSpec: pschema.TypeSpec{
Type: "integer",
},
Description: "Time in seconds to wait for any individual kubernetes operation.",
},
"disableWebhooks": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Prevent hooks from running.",
},
"disableCRDHooks": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Prevent CRD hooks from, running, but run other hooks. See helm install --no-crd-hook",
},
"reuseValues": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored",
},
"resetValues": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "When upgrading, reset the values to the ones built into the chart.",
},
"forceUpdate": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Force resource update through delete/recreate if needed.",
},
"recreatePods": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Perform pods restart during upgrade/rollback.",
},
"cleanupOnFail": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Allow deletion of new resources created in this upgrade when upgrade fails.",
},
"maxHistory": {
TypeSpec: pschema.TypeSpec{
Type: "integer",
},
Description: "Limit the maximum number of revisions saved per release. Use 0 for no limit.",
},
"atomic": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.",
},
"skipCrds": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, no CRDs will be installed. By default, CRDs are installed if not already present.",
},
"renderSubchartNotes": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, render subchart notes along with the parent.",
},
"disableOpenapiValidation": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema",
},
"skipAwait": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.",
},
"waitForJobs": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.",
},
"dependencyUpdate": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Run helm dependency update before installing the chart.",
},
"replace": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Re-use the given name, even if that name is already used. This is unsafe in production",
},
"description": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Add a custom description",
},
"createNamespace": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Create the namespace if it does not exist.",
},
"postrender": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Postrender command to run.",
},
"lint": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Run helm lint when planning.",
},
"status": {
TypeSpec: pschema.TypeSpec{
Ref: "#/types/kubernetes:helm.sh/v3:ReleaseStatus",
},
Description: "Status of the deployed release.",
},
},
Type: "object",
Required: []string{
"chart",
"repositoryOpts",
"values",
"status",
},
Language: map[string]pschema.RawMessage{
"nodejs": rawMessage(map[string][]string{
"requiredOutputs": {
"name",
"repositoryOpts",
"chart",
"version",
"devel",
"values",
"set",
"manifest",
"namespace",
"verify",
"keyring",
"timeout",
"disableWebhooks",
"disableCRDHooks",
"reuseValues",
"resetValues",
"forceUpdate",
"recreatePods",
"cleanupOnFail",
"maxHistory",
"atomic",
"skipCrds",
"renderSubchartNotes",
"disableOpenapiValidation",
"skipAwait",
"waitForJobs",
"dependencyUpdate",
"replace",
"description",
"createNamespace",
"postrender",
"lint",
"status",
},
}),
},
},
},
"kubernetes:helm.sh/v3:RepositoryOpts": {
ObjectTypeSpec: pschema.ObjectTypeSpec{
Description: "Specification defining the Helm chart repository to use.",
Properties: map[string]pschema.PropertySpec{
"repo": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository.",
},
"keyFile": { // TODO: Content or file
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "The repository's cert key file",
},
"certFile": { // TODO: Content or file
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "The repository's cert file",
},
"caFile": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "The Repository's CA File",
},
"username": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Username for HTTP basic authentication",
},
"password": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Secret: true,
Description: "Password for HTTP basic authentication",
},
},
Language: map[string]pschema.RawMessage{
"nodejs": rawMessage(map[string][]string{
"requiredOutputs": {
"repo",
"keyFile",
"certFile",
"caFile",
"username",
"password",
}}),
},
Type: "object",
},
},
"kubernetes:helm.sh/v3:ReleaseStatus": {
ObjectTypeSpec: pschema.ObjectTypeSpec{
Required: []string{"status"},
Properties: map[string]pschema.PropertySpec{
"name": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Name is the name of the release.",
},
"revision": {
TypeSpec: pschema.TypeSpec{
Type: "integer",
},
Description: "Version is an int32 which represents the version of the release.",
},
"namespace": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Namespace is the kubernetes namespace of the release.",
},
"chart": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "The name of the chart.",
},
"version": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "A SemVer 2 conformant version string of the chart.",
},
"appVersion": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "The version number of the application being deployed.",
},
"status": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Status of the release.",
},
},
Language: map[string]pschema.RawMessage{
"nodejs": rawMessage(map[string][]string{
"requiredOutputs": {
"name",
"revision",
"namespace",
"chart",
"version",
"appVersion",
"values",
"status",
}}),
},
Type: "object",
},
},
}
// resourceOverlays augment the resources defined by the kubernetes schema.
var resourceOverlays = map[string]pschema.ResourceSpec{
"kubernetes:helm.sh/v3:Release": {
ObjectTypeSpec: pschema.ObjectTypeSpec{
Description: "A Release is an instance of a chart running in a Kubernetes cluster.\n\nA Chart is a Helm package. It contains all of the resource definitions necessary to run an application, tool, or service inside of a Kubernetes cluster.",
Properties: map[string]pschema.PropertySpec{
"name": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Release name.",
},
"repositoryOpts": {
TypeSpec: pschema.TypeSpec{
Ref: "#/types/kubernetes:helm.sh/v3:RepositoryOpts",
},
Description: "Specification defining the Helm chart repository to use.",
},
"chart": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Chart name to be installed. A path may be used.",
},
"version": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Specify the exact chart version to install. If this is not specified, the latest version is installed.",
},
"devel": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.",
},
"valueYamlFiles": {
TypeSpec: pschema.TypeSpec{
Type: "array",
Items: &pschema.TypeSpec{
Ref: "pulumi.json#/Asset",
},
},
Description: "List of assets (raw yaml files). Content is read and merged with values. Not yet supported.",
},
"values": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Ref: "pulumi.json#/Any",
},
},
Description: "Custom values set for the release.",
},
"manifest": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Ref: "pulumi.json#/Any",
},
},
Description: "The rendered manifests as JSON. Not yet supported.",
},
"resourceNames": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Type: "array",
Items: &pschema.TypeSpec{
Type: "string",
},
},
},
Description: "Names of resources created by the release grouped by \"kind/version\".",
},
"namespace": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Namespace to install the release into.",
},
"verify": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Verify the package before installing it.",
},
"keyring": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Location of public keys used for verification. Used only if `verify` is true",
},
"timeout": {
TypeSpec: pschema.TypeSpec{
Type: "integer",
},
Description: "Time in seconds to wait for any individual kubernetes operation.",
},
"disableWebhooks": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Prevent hooks from running.",
},
"disableCRDHooks": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Prevent CRD hooks from, running, but run other hooks. See helm install --no-crd-hook",
},
"reuseValues": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored",
},
"resetValues": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "When upgrading, reset the values to the ones built into the chart.",
},
"forceUpdate": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Force resource update through delete/recreate if needed.",
},
"recreatePods": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Perform pods restart during upgrade/rollback.",
},
"cleanupOnFail": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Allow deletion of new resources created in this upgrade when upgrade fails.",
},
"maxHistory": {
TypeSpec: pschema.TypeSpec{
Type: "integer",
},
Description: "Limit the maximum number of revisions saved per release. Use 0 for no limit.",
},
"atomic": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.",
},
"skipCrds": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, no CRDs will be installed. By default, CRDs are installed if not already present.",
},
"renderSubchartNotes": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, render subchart notes along with the parent.",
},
"disableOpenapiValidation": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema",
},
"skipAwait": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.",
},
"waitForJobs": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.",
},
"dependencyUpdate": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Run helm dependency update before installing the chart.",
},
"replace": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Re-use the given name, even if that name is already used. This is unsafe in production",
},
"description": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Add a custom description",
},
"createNamespace": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Create the namespace if it does not exist.",
},
"postrender": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Postrender command to run.",
},
"lint": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Run helm lint when planning.",
},
"status": {
TypeSpec: pschema.TypeSpec{
Ref: "#/types/kubernetes:helm.sh/v3:ReleaseStatus",
},
Description: "Status of the deployed release.",
},
},
Type: "object",
Required: []string{
"chart",
"repositoryOpts",
"values",
"status",
},
Language: map[string]pschema.RawMessage{
"nodejs": rawMessage(map[string][]string{
"requiredOutputs": {
"name",
"repositoryOpts",
"chart",
"version",
"devel",
"values",
"set",
"manifest",
"namespace",
"verify",
"keyring",
"timeout",
"disableWebhooks",
"disableCRDHooks",
"reuseValues",
"resetValues",
"forceUpdate",
"recreatePods",
"cleanupOnFail",
"maxHistory",
"atomic",
"skipCrds",
"renderSubchartNotes",
"disableOpenapiValidation",
"skipAwait",
"waitForJobs",
"dependencyUpdate",
"replace",
"description",
"createNamespace",
"postrender",
"lint",
"status",
},
}),
},
},
InputProperties: map[string]pschema.PropertySpec{
"name": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Release name.",
},
"repositoryOpts": {
TypeSpec: pschema.TypeSpec{
Ref: "#/types/kubernetes:helm.sh/v3:RepositoryOpts",
},
Description: "Specification defining the Helm chart repository to use.",
},
"chart": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Chart name to be installed. A path may be used.",
},
"version": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Specify the exact chart version to install. If this is not specified, the latest version is installed.",
},
"devel": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.",
},
"valueYamlFiles": {
TypeSpec: pschema.TypeSpec{
Type: "array",
Items: &pschema.TypeSpec{
Ref: "pulumi.json#/Asset",
},
},
Description: "List of assets (raw yaml files). Content is read and merged with values. Not yet supported.",
},
"values": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Ref: "pulumi.json#/Any",
},
},
Description: "Custom values set for the release.",
},
"manifest": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Ref: "pulumi.json#/Any",
},
},
Description: "The rendered manifests as JSON. Not yet supported.",
},
"resourceNames": {
TypeSpec: pschema.TypeSpec{
Type: "object",
AdditionalProperties: &pschema.TypeSpec{
Type: "array",
Items: &pschema.TypeSpec{
Type: "string",
},
},
},
Description: "Names of resources created by the release grouped by \"kind/version\".",
},
"namespace": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Namespace to install the release into.",
},
"verify": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Verify the package before installing it.",
},
"keyring": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Location of public keys used for verification. Used only if `verify` is true",
},
"timeout": {
TypeSpec: pschema.TypeSpec{
Type: "integer",
},
Description: "Time in seconds to wait for any individual kubernetes operation.",
},
"disableWebhooks": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Prevent hooks from running.",
},
"disableCRDHooks": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Prevent CRD hooks from, running, but run other hooks. See helm install --no-crd-hook",
},
"reuseValues": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored",
},
"resetValues": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "When upgrading, reset the values to the ones built into the chart.",
},
"forceUpdate": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Force resource update through delete/recreate if needed.",
},
"recreatePods": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Perform pods restart during upgrade/rollback.",
},
"cleanupOnFail": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Allow deletion of new resources created in this upgrade when upgrade fails.",
},
"maxHistory": {
TypeSpec: pschema.TypeSpec{
Type: "integer",
},
Description: "Limit the maximum number of revisions saved per release. Use 0 for no limit.",
},
"atomic": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.",
},
"skipCrds": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, no CRDs will be installed. By default, CRDs are installed if not already present.",
},
"renderSubchartNotes": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, render subchart notes along with the parent.",
},
"disableOpenapiValidation": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema",
},
"skipAwait": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.",
},
"waitForJobs": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.",
},
"dependencyUpdate": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Run helm dependency update before installing the chart.",
},
"replace": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Re-use the given name, even if that name is already used. This is unsafe in production",
},
"description": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Add a custom description",
},
"createNamespace": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Create the namespace if it does not exist.",
},
"postrender": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Description: "Postrender command to run.",
},
"lint": {
TypeSpec: pschema.TypeSpec{
Type: "boolean",
},
Description: "Run helm lint when planning.",
},
"compat": {
TypeSpec: pschema.TypeSpec{
Type: "string",
},
Const: "true",
},
},
RequiredInputs: []string{
"chart",
"repositoryOpts",
"values",
},
},
} | provider/pkg/gen/overlays.go | 0.503418 | 0.405066 | overlays.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTExportTessellatedEdgesEdge1364 models one tessellated edge: an optional
// type tag, an optional edge id, and the optional list of 3D points along the
// edge. All fields are pointers so "unset" is distinguishable from the zero value.
type BTExportTessellatedEdgesEdge1364 struct {
	// Serialized as "btType"; omitted from JSON when nil.
	BtType *string `json:"btType,omitempty"`
	// Serialized as "id"; omitted from JSON when nil.
	Id *string `json:"id,omitempty"`
	// Serialized as "vertices"; omitted from JSON when nil.
	Vertices *[]BTVector3d389 `json:"vertices,omitempty"`
}
// NewBTExportTessellatedEdgesEdge1364 instantiates a new
// BTExportTessellatedEdgesEdge1364 with every optional field left unset. The
// argument list will grow if required properties are added to the API.
func NewBTExportTessellatedEdgesEdge1364() *BTExportTessellatedEdgesEdge1364 {
	return &BTExportTessellatedEdgesEdge1364{}
}
// NewBTExportTessellatedEdgesEdge1364WithDefaults instantiates a new
// BTExportTessellatedEdgesEdge1364 with defaulted properties only; properties
// required by the API are not guaranteed to be set.
func NewBTExportTessellatedEdgesEdge1364WithDefaults() *BTExportTessellatedEdgesEdge1364 {
	return &BTExportTessellatedEdgesEdge1364{}
}
// GetBtType returns the BtType field value when set, or the empty string
// otherwise (including on a nil receiver).
func (o *BTExportTessellatedEdgesEdge1364) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	return ""
}
// GetBtTypeOk returns a pointer to the BtType field and true when it is set,
// or (nil, false) when the field or the receiver is nil.
func (o *BTExportTessellatedEdgesEdge1364) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}
// HasBtType reports whether the BtType field has been set.
func (o *BTExportTessellatedEdgesEdge1364) HasBtType() bool {
	return o != nil && o.BtType != nil
}
// SetBtType stores a pointer to a copy of v in the BtType field, marking it
// as set.
func (o *BTExportTessellatedEdgesEdge1364) SetBtType(v string) {
	o.BtType = &v
}
// GetId returns the Id field value when set, or the empty string otherwise
// (including on a nil receiver).
func (o *BTExportTessellatedEdgesEdge1364) GetId() string {
	if o != nil && o.Id != nil {
		return *o.Id
	}
	return ""
}
// GetIdOk returns a pointer to the Id field and true when it is set, or
// (nil, false) when the field or the receiver is nil.
func (o *BTExportTessellatedEdgesEdge1364) GetIdOk() (*string, bool) {
	if o != nil && o.Id != nil {
		return o.Id, true
	}
	return nil, false
}
// HasId reports whether the Id field has been set.
func (o *BTExportTessellatedEdgesEdge1364) HasId() bool {
	return o != nil && o.Id != nil
}
// SetId stores a pointer to a copy of v in the Id field, marking it as set.
func (o *BTExportTessellatedEdgesEdge1364) SetId(v string) {
	o.Id = &v
}
// GetVertices returns the Vertices field value when set, or a nil slice
// otherwise (including on a nil receiver).
func (o *BTExportTessellatedEdgesEdge1364) GetVertices() []BTVector3d389 {
	if o != nil && o.Vertices != nil {
		return *o.Vertices
	}
	return nil
}
// GetVerticesOk returns a pointer to the Vertices field and true when it is
// set, or (nil, false) when the field or the receiver is nil.
func (o *BTExportTessellatedEdgesEdge1364) GetVerticesOk() (*[]BTVector3d389, bool) {
	if o != nil && o.Vertices != nil {
		return o.Vertices, true
	}
	return nil, false
}
// HasVertices reports whether the Vertices field has been set.
func (o *BTExportTessellatedEdgesEdge1364) HasVertices() bool {
	return o != nil && o.Vertices != nil
}
// SetVertices stores a pointer to a copy of v in the Vertices field, marking
// it as set.
func (o *BTExportTessellatedEdgesEdge1364) SetVertices(v []BTVector3d389) {
	o.Vertices = &v
}
// MarshalJSON serializes only the fields that have been set; unset (nil)
// fields are omitted from the JSON object entirely.
func (o BTExportTessellatedEdgesEdge1364) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	if o.BtType != nil {
		out["btType"] = o.BtType
	}
	if o.Id != nil {
		out["id"] = o.Id
	}
	if o.Vertices != nil {
		out["vertices"] = o.Vertices
	}
	return json.Marshal(out)
}
// NullableBTExportTessellatedEdgesEdge1364 wraps a
// BTExportTessellatedEdgesEdge1364 together with an explicit "set" flag so
// that an explicit JSON null can be distinguished from an absent value.
type NullableBTExportTessellatedEdgesEdge1364 struct {
	value *BTExportTessellatedEdgesEdge1364
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableBTExportTessellatedEdgesEdge1364) Get() *BTExportTessellatedEdgesEdge1364 {
	return v.value
}
// Set stores val (which may be nil) and marks the wrapper as set.
func (v *NullableBTExportTessellatedEdgesEdge1364) Set(val *BTExportTessellatedEdgesEdge1364) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableBTExportTessellatedEdgesEdge1364) IsSet() bool {
	return v.isSet
}
// Unset clears both the wrapped value and the set flag.
func (v *NullableBTExportTessellatedEdgesEdge1364) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableBTExportTessellatedEdgesEdge1364 wraps val and marks it as set.
func NewNullableBTExportTessellatedEdgesEdge1364(val *BTExportTessellatedEdgesEdge1364) *NullableBTExportTessellatedEdgesEdge1364 {
	return &NullableBTExportTessellatedEdgesEdge1364{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value (JSON null when the value is nil).
func (v NullableBTExportTessellatedEdgesEdge1364) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks it as set, even
// when src is the JSON literal null.
func (v *NullableBTExportTessellatedEdgesEdge1364) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | onshape/model_bt_export_tessellated_edges_edge_1364.go | 0.66356 | 0.490236 | model_bt_export_tessellated_edges_edge_1364.go | starcoder
package auth0fga
import (
"encoding/json"
)
// WriteAssertionsRequestParams is the request body for writing assertions;
// the Assertions list is required by the API.
type WriteAssertionsRequestParams struct {
	// Required; serialized as "assertions".
	Assertions []Assertion `json:"assertions"`
}
// NewWriteAssertionsRequestParams constructs the request params with the
// required assertions list populated. The argument list will grow if the set
// of required properties changes.
func NewWriteAssertionsRequestParams(assertions []Assertion) *WriteAssertionsRequestParams {
	return &WriteAssertionsRequestParams{Assertions: assertions}
}
// NewWriteAssertionsRequestParamsWithDefaults constructs an empty params
// object; properties required by the API are NOT guaranteed to be set.
func NewWriteAssertionsRequestParamsWithDefaults() *WriteAssertionsRequestParams {
	return &WriteAssertionsRequestParams{}
}
// GetAssertions returns the Assertions field value, or a nil slice on a nil
// receiver.
func (o *WriteAssertionsRequestParams) GetAssertions() []Assertion {
	if o != nil {
		return o.Assertions
	}
	return nil
}
// GetAssertionsOk returns a pointer to the Assertions field and true, or
// (nil, false) on a nil receiver.
func (o *WriteAssertionsRequestParams) GetAssertionsOk() (*[]Assertion, bool) {
	if o != nil {
		return &o.Assertions, true
	}
	return nil, false
}
// SetAssertions sets the required Assertions field value.
func (o *WriteAssertionsRequestParams) SetAssertions(v []Assertion) {
	o.Assertions = v
}
// MarshalJSON serializes the request body. The "assertions" field is required
// by the API and therefore always emitted (the generated `if true` guard that
// previously wrapped the assignment was dead code and has been removed).
func (o WriteAssertionsRequestParams) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"assertions": o.Assertions,
	}
	return json.Marshal(toSerialize)
}
// NullableWriteAssertionsRequestParams wraps a WriteAssertionsRequestParams
// together with an explicit "set" flag so that an explicit JSON null can be
// distinguished from an absent value.
type NullableWriteAssertionsRequestParams struct {
	value *WriteAssertionsRequestParams
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableWriteAssertionsRequestParams) Get() *WriteAssertionsRequestParams {
	return v.value
}
// Set stores val (which may be nil) and marks the wrapper as set.
func (v *NullableWriteAssertionsRequestParams) Set(val *WriteAssertionsRequestParams) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableWriteAssertionsRequestParams) IsSet() bool {
	return v.isSet
}
// Unset clears both the wrapped value and the set flag.
func (v *NullableWriteAssertionsRequestParams) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableWriteAssertionsRequestParams wraps val and marks it as set.
func NewNullableWriteAssertionsRequestParams(val *WriteAssertionsRequestParams) *NullableWriteAssertionsRequestParams {
	return &NullableWriteAssertionsRequestParams{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value (JSON null when the value is nil).
func (v NullableWriteAssertionsRequestParams) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks it as set, even
// when src is the JSON literal null.
func (v *NullableWriteAssertionsRequestParams) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | model_write_assertions_request_params.go | 0.681303 | 0.525856 | model_write_assertions_request_params.go | starcoder
package mlpack
/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_nbc
#include <capi/nbc.h>
#include <stdlib.h>
*/
import "C"
import "gonum.org/v1/gonum/mat"
// NbcOptionalParam holds the optional arguments for Nbc().
type NbcOptionalParam struct {
    IncrementalVariance bool // compute per-class variance incrementally (slower, avoids precision loss)
    InputModel *nbcModel     // pre-trained model to load instead of training
    Labels *mat.Dense        // labels for the training set (otherwise last row of Training)
    Test *mat.Dense          // test set to classify
    Training *mat.Dense      // training set
    Verbose bool             // print informational messages and timers
}
// NbcOptions returns a NbcOptionalParam with every option at its default
// (unset) value; callers override individual fields before passing it to Nbc().
func NbcOptions() *NbcOptionalParam {
  return &NbcOptionalParam{
    IncrementalVariance: false,
    InputModel: nil,
    Labels: nil,
    Test: nil,
    Training: nil,
    Verbose: false,
  }
}
/*
This program trains the Naive Bayes classifier on the given labeled training
set, or loads a model from the given model file, and then may use that trained
model to classify the points in a given test set.
The training set is specified with the "Training" parameter. Labels may be
either the last row of the training set, or alternately the "Labels" parameter
may be specified to pass a separate matrix of labels.
If training is not desired, a pre-existing model may be loaded with the
"InputModel" parameter.
The "IncrementalVariance" parameter can be used to force the training to use
an incremental algorithm for calculating variance. This is slower, but can
help avoid loss of precision in some cases.
If classifying a test set is desired, the test set may be specified with the
"Test" parameter, and the classifications may be saved with the
"Predictions"predictions parameter. If saving the trained model is desired,
this may be done with the "OutputModel" output parameter.
Note: the "Output" and "OutputProbs" parameters are deprecated and will be
removed in mlpack 4.0.0. Use "Predictions" and "Probabilities" instead.
For example, to train a Naive Bayes classifier on the dataset data with labels
labels and save the model to nbc_model, the following command may be used:
// Initialize optional parameters for Nbc().
param := mlpack.NbcOptions()
param.Training = data
param.Labels = labels
_, nbc_model, _, _, _ := mlpack.Nbc(param)
Then, to use nbc_model to predict the classes of the dataset test_set and save
the predicted classes to predictions, the following command may be used:
// Initialize optional parameters for Nbc().
param := mlpack.NbcOptions()
param.InputModel = &nbc_model
param.Test = test_set
predictions, _, _, _, _ := mlpack.Nbc(param)
Input parameters:
- IncrementalVariance (bool): The variance of each class will be
calculated incrementally.
- InputModel (nbcModel): Input Naive Bayes model.
- Labels (mat.Dense): A file containing labels for the training set.
- Test (mat.Dense): A matrix containing the test set.
- Training (mat.Dense): A matrix containing the training set.
- Verbose (bool): Display informational messages and the full list of
parameters and timers at the end of execution.
Output parameters:
- output (mat.Dense): The matrix in which the predicted labels for the
test set will be written (deprecated).
- outputModel (nbcModel): File to save trained Naive Bayes model to.
- outputProbs (mat.Dense): The matrix in which the predicted
probability of labels for the test set will be written (deprecated).
- predictions (mat.Dense): The matrix in which the predicted labels for
the test set will be written.
- probabilities (mat.Dense): The matrix in which the predicted
probability of labels for the test set will be written.
*/
func Nbc(param *NbcOptionalParam) (*mat.Dense, nbcModel, *mat.Dense, *mat.Dense, *mat.Dense) {
  // Reset mlpack's global IO/timer state before binding parameters.
  resetTimers()
  enableTimers()
  disableBacktrace()
  disableVerbose()
  restoreSettings("Parametric Naive Bayes Classifier")

  // Detect if the parameter was passed; set if so.
  if param.IncrementalVariance != false {
    setParamBool("incremental_variance", param.IncrementalVariance)
    setPassed("incremental_variance")
  }

  // Detect if the parameter was passed; set if so.
  if param.InputModel != nil {
    setNBCModel("input_model", param.InputModel)
    setPassed("input_model")
  }

  // Detect if the parameter was passed; set if so.
  if param.Labels != nil {
    gonumToArmaUrow("labels", param.Labels)
    setPassed("labels")
  }

  // Detect if the parameter was passed; set if so.
  if param.Test != nil {
    gonumToArmaMat("test", param.Test)
    setPassed("test")
  }

  // Detect if the parameter was passed; set if so.
  if param.Training != nil {
    gonumToArmaMat("training", param.Training)
    setPassed("training")
  }

  // Detect if the parameter was passed; set if so.
  if param.Verbose != false {
    setParamBool("verbose", param.Verbose)
    setPassed("verbose")
    enableVerbose()
  }

  // Mark all output options as passed.
  setPassed("output")
  setPassed("output_model")
  setPassed("output_probs")
  setPassed("predictions")
  setPassed("probabilities")

  // Call the mlpack program.
  C.mlpackNbc()

  // Initialize result variable and get output.
  var outputPtr mlpackArma
  output := outputPtr.armaToGonumUrow("output")
  var outputModel nbcModel
  outputModel.getNBCModel("output_model")
  var outputProbsPtr mlpackArma
  outputProbs := outputProbsPtr.armaToGonumMat("output_probs")
  var predictionsPtr mlpackArma
  predictions := predictionsPtr.armaToGonumUrow("predictions")
  var probabilitiesPtr mlpackArma
  probabilities := probabilitiesPtr.armaToGonumMat("probabilities")

  // Clear settings.
  clearSettings()

  // Return output(s).
  return output, outputModel, outputProbs, predictions, probabilities
} | nbc.go | 0.710025 | 0.472562 | nbc.go | starcoder
package board
// CoordinateToBitBoard returns a BitBoard with only the bit for square (x, y)
// set; (0, 0) maps to the most significant bit and each row spans 8 bits.
func CoordinateToBitBoard(x int, y int) BitBoard {
	// Single combined shift: x columns right plus y full rows of 8 bits.
	return BitBoard(0x8000000000000000) >> (x + y*8)
}
// MakeLegalBoard returns a BitBoard with flags only on the squares where the
// current player can legally place a stone.
func (board Board) MakeLegalBoard() BitBoard {
	// Direction-specific masks over the opponent's stones: edge files/ranks
	// are dropped so a shifted bit can never wrap around the board.
	horizontalWatchBoard := board.Opponent & 0x7e7e7e7e7e7e7e7e // left/right: mask the A and H files
	verticalWatchBoard := board.Opponent & 0x00FFFFFFFFFFFF00   // up/down: mask the top and bottom ranks
	allSideWatchBoard := board.Opponent & 0x007e7e7e7e7e7e00    // diagonals: mask the whole border
	blankBoard := ^(board.Player | board.Opponent)
	var legalBoard BitBoard
	// Scan one direction (shift towards higher bits): starting from the
	// player's stones, step across runs of opponent stones, then flag the
	// blank square terminating each run. Five extra iterations cover the
	// longest possible run of six opponent stones on an 8x8 board.
	getNegativeStridedBoard := func(watchBoard BitBoard, shift int) BitBoard {
		nextOpponentBoard := watchBoard & (board.Player << shift)
		for i := 0; i < 5; i++ {
			// Bug fix: use the direction-specific mask here. The previous
			// code always masked with horizontalWatchBoard, which let
			// vertical and diagonal scans wrap around the board edges.
			nextOpponentBoard |= watchBoard & (nextOpponentBoard << shift)
		}
		return blankBoard & (nextOpponentBoard << shift)
	}
	// Same scan towards lower bits.
	getPositiveStridedBoard := func(watchBoard BitBoard, shift int) BitBoard {
		nextOpponentBoard := watchBoard & (board.Player >> shift)
		for i := 0; i < 5; i++ {
			// Bug fix: direction-specific mask (see above).
			nextOpponentBoard |= watchBoard & (nextOpponentBoard >> shift)
		}
		return blankBoard & (nextOpponentBoard >> shift)
	}
	// left
	legalBoard |= getNegativeStridedBoard(horizontalWatchBoard, 1)
	// left top
	legalBoard |= getNegativeStridedBoard(allSideWatchBoard, 9)
	// top
	legalBoard |= getNegativeStridedBoard(verticalWatchBoard, 8)
	// right top
	legalBoard |= getNegativeStridedBoard(allSideWatchBoard, 7)
	// right
	legalBoard |= getPositiveStridedBoard(horizontalWatchBoard, 1)
	// right bottom
	legalBoard |= getPositiveStridedBoard(allSideWatchBoard, 9)
	// bottom
	legalBoard |= getPositiveStridedBoard(verticalWatchBoard, 8)
	// left bottom
	legalBoard |= getPositiveStridedBoard(allSideWatchBoard, 7)
	return legalBoard
}
// CanPutPoint reports whether the current player may legally place a stone at
// (x, y).
func (board Board) CanPutPoint(x int, y int) bool {
	bb := CoordinateToBitBoard(x, y)
	// Legal exactly when the square's bit is flagged in the legal-move board.
	return (bb & board.MakeLegalBoard()) == bb
}
// transfer returns pos shifted one square in direction dir (0 = top, then
// clockwise through 7 = left top). Each AND mask clears bits that would wrap
// around the board edge after the shift.
func transfer(pos BitBoard, dir int) BitBoard {
	var ans BitBoard
	switch dir {
	case 0: // top
		ans = (pos << 8) & 0xffffffffffffff00
	case 1: // right top
		ans = (pos << 7) & 0x7f7f7f7f7f7f7f00
	case 2: // right
		ans = (pos >> 1) & 0x7f7f7f7f7f7f7f7f
	case 3: // right bottom
		ans = (pos >> 9) & 0x007f7f7f7f7f7f7f
	case 4: // bottom
		ans = (pos >> 8) & 0x00ffffffffffffff
	case 5: // left bottom
		ans = (pos >> 7) & 0x00fefefefefefefe
	case 6: // left
		ans = (pos << 1) & 0xfefefefefefefefe
	case 7: // left top
		ans = (pos << 9) & 0xfefefefefefefe00
	}
	// Any other dir yields the zero BitBoard.
	return ans
}
// Reverse places a stone at (x, y) for the current player and flips every
// opponent stone enclosed in each of the eight directions.
func (board *Board) Reverse(x int, y int) {
	var reversed BitBoard = 0
	var pos = CoordinateToBitBoard(x, y)
	// Scan each of the eight directions from the placed stone.
	for k := 0; k < 8; k++ {
		var rev BitBoard = 0
		var mask BitBoard = transfer(pos, k)
		// Walk over the contiguous run of opponent stones in direction k.
		for (mask != 0) && ((mask & board.Opponent) != 0) {
			rev |= mask
			mask = transfer(mask, k)
		}
		// The run is only captured if it is terminated by one of our stones.
		if (mask & board.Player) != 0 {
			reversed |= rev
		}
	}
	// Place the new stone and apply the flips to both bitboards.
	board.Player ^= pos | reversed
	board.Opponent ^= reversed
}
// TurnChange hands the move to the other side by swapping the player's and
// the opponent's bitboards.
func (board *Board) TurnChange() {
	board.Player, board.Opponent = board.Opponent, board.Player
} | board/util.go | 0.829181 | 0.481881 | util.go | starcoder
package basic
import "strings"
// FilterPtrTest returns the Go test-source template for Filter<FTYPE>Ptr; the
// <TYPE> and <FTYPE> placeholders are substituted when the code is generated.
func FilterPtrTest() string {
	return `
func TestFilter<FTYPE>Ptr(t *testing.T) {
	var v1 <TYPE> = 1
	var v2 <TYPE> = 2
	var v3 <TYPE> = 3
	var v4 <TYPE> = 4
	var v10 <TYPE> = 10
	var v20 <TYPE> = 20
	var v40 <TYPE> = 40
	// Test : even number in the list
	expectedFilteredList := []*<TYPE>{&v2, &v4}
	filteredList := Filter<FTYPE>Ptr(isEven<FTYPE>Ptr, []*<TYPE>{&v1, &v2, &v3, &v4})
	if *filteredList[0] != *expectedFilteredList[0] || *filteredList[1] != *expectedFilteredList[1] {
		t.Errorf("MapFilter failed. Expected filtered list=%v, actual list=%v", expectedFilteredList, filteredList)
	}
	// Test: filter all even numbers divisible by 10 in the list
	expectedFilteredList = []*<TYPE>{&v20, &v40}
	partialIsEven := func(num *<TYPE>) bool { return isEvenDivisibleBy<FTYPE>Ptr(num, &v10) }
	filteredList = Filter<FTYPE>Ptr(partialIsEven, []*<TYPE>{&v20, &v1, &v3, &v40})
	if filteredList[0] != expectedFilteredList[0] || filteredList[1] != expectedFilteredList[1] {
		t.Errorf("MapFilter failed. Expected filtered list=%v, actual list=%v", expectedFilteredList, filteredList)
	}
	if len(Filter<FTYPE>Ptr(nil, nil)) > 0 {
		t.Errorf("FilterInt failed.")
	}
}
func isEven<FTYPE>Ptr(num *<TYPE>) bool {
	return *num%2 == 0
}
func isEvenDivisibleBy<FTYPE>Ptr(num, divisibleBy *<TYPE>) bool {
	return *num%2 == 0 && *num % *divisibleBy == 0
}
`
}
// FilterPtrBoolTest returns the Go test-source template for the bool variant
// of Filter<FTYPE>Ptr; <TYPE>/<FTYPE> placeholders are substituted when the
// code is generated.
func FilterPtrBoolTest() string {
	return `
func TestFilter<FTYPE>Ptr(t *testing.T) {
	var vt <TYPE> = true
	expectedSumList := []*<TYPE>{&vt}
	newList := Filter<FTYPE>Ptr(true<FTYPE>Ptr, []*<TYPE>{&vt})
	if *newList[0] != *expectedSumList[0] {
		t.Errorf("Filter<FTYPE>Ptr failed")
	}
	if len(Filter<FTYPE>Ptr(nil, nil)) > 0 {
		t.Errorf("Map<FTYPE>Ptr failed.")
	}
}
func true<FTYPE>Ptr(num1 *<TYPE>) bool {
	return true
}
`
}
// FilterPtrErrTest returns the Go test-source template for the error-returning
// variant of Filter<FTYPE>Ptr; <TYPE>/<FTYPE> placeholders are substituted
// when the code is generated.
func FilterPtrErrTest() string {
	return `
func TestFilter<FTYPE>PtrErr(t *testing.T) {
	var v1 <TYPE> = 1
	var v2 <TYPE> = 2
	var v3 <TYPE> = 3
	var v4 <TYPE> = 4
	var v0 <TYPE> = 0
	// Test : even number in the list
	expectedFilteredList := []*<TYPE>{&v2, &v4}
	filteredList, _ := Filter<FTYPE>PtrErr(isEven<FTYPE>PtrErr, []*<TYPE>{&v1, &v2, &v3, &v4})
	if *filteredList[0] != *expectedFilteredList[0] || *filteredList[1] != *expectedFilteredList[1] {
		t.Errorf("MapFilterPtrErr failed. Expected filtered list=%v, actual list=%v", expectedFilteredList, filteredList)
	}
	r, _ := Filter<FTYPE>PtrErr(nil, nil)
	if len(r) > 0 {
		t.Errorf("Filter<FTYPE>PtrErr failed.")
	}
	_, err := Filter<FTYPE>PtrErr(isEven<FTYPE>PtrErr, []*<TYPE>{&v0})
	if err == nil {
		t.Errorf("Filter<FTYPE>PtrErr failed.")
	}
}
func isEven<FTYPE>PtrErr(num *<TYPE>) (bool, error) {
	if *num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return *num%2 == 0, nil
}
`
}
// FilterPtrErrBoolTest returns the Go test-source template for the bool,
// error-returning variant of Filter<FTYPE>Ptr; <TYPE>/<FTYPE> placeholders
// are substituted when the code is generated.
func FilterPtrErrBoolTest() string {
	return `
func TestFilter<FTYPE>PtrErr(t *testing.T) {
	var vt <TYPE> = true
	var vf <TYPE> = false
	expectedSumList := []*<TYPE>{&vt}
	newList, _ := Filter<FTYPE>PtrErr(true<FTYPE>PtrErr, []*<TYPE>{&vt})
	if *newList[0] != *expectedSumList[0] {
		t.Errorf("Filter<FTYPE>PtrErr failed")
	}
	r, _ := Filter<FTYPE>PtrErr(nil, nil)
	if len(r) > 0 {
		t.Errorf("Filter<FTYPE>PtrErr failed.")
	}
	_, err := Filter<FTYPE>PtrErr(true<FTYPE>PtrErr, []*<TYPE>{&vf})
	if err == nil {
		t.Errorf("Filter<FTYPE>PtrErr failed.")
	}
}
func true<FTYPE>PtrErr(num1 *<TYPE>) (bool, error) {
	if *num1 == false {
		return false, errors.New("False is not allowed")
	}
	return true, nil
}
`
}
// ReplaceActivityFilterPtrErrTest patches the generated FilterPtrErr test
// source: it fixes the imports and rewrites the type-specific isEven helpers
// so the string and float variants compile (string comparison, int-cast
// modulo for floats).
func ReplaceActivityFilterPtrErrTest(code string) string {
	s1 := `import (
	_ "errors"
	"reflect"
	"testing"
)
func TestFilterIntPtrErr(t *testing.T) {`
	s2 := `import (
	"errors"
	"testing"
)
func TestFilterIntPtrErr(t *testing.T) {`
	code = strings.Replace(code, s1, s2, -1)
	s1 = `func isEvenStrPtrErr(num *string) (bool, error) {
	if *num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return *num%2 == 0, nil
}`
	s2 = `func isEvenStrPtrErr(num *string) (bool, error) {
	if *num == "0" {
		return false, errors.New("Zero is not allowed")
	} else if *num == "2" || *num == "4" {
		return true, nil
	}
	return false, nil
}`
	code = strings.Replace(code, s1, s2, -1)
	s1 = `func isEvenFloat32PtrErr(num *float32) (bool, error) {
	if *num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return *num%2 == 0, nil
}`
	s2 = `func isEvenFloat32PtrErr(num *float32) (bool, error) {
	if *num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return int(*num)%2 == 0, nil
}`
	code = strings.Replace(code, s1, s2, -1)
	s1 = `func isEvenFloat64PtrErr(num *float64) (bool, error) {
	if *num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return *num%2 == 0, nil
}`
	s2 = `func isEvenFloat64PtrErr(num *float64) (bool, error) {
	if *num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return int(*num)%2 == 0, nil
}`
	code = strings.Replace(code, s1, s2, -1)
	return code
}
// FilterErrTest returns the Go test-source template for the error-returning
// variant of Filter<FTYPE>; <TYPE>/<FTYPE> placeholders are substituted when
// the code is generated.
func FilterErrTest() string {
	return `
func TestFilter<FTYPE>Err(t *testing.T) {
	var v1 <TYPE> = 1
	var v2 <TYPE> = 2
	var v3 <TYPE> = 3
	var v4 <TYPE> = 4
	var v0 <TYPE> = 0
	// Test : even number in the list
	expectedFilteredList := []<TYPE>{v2, v4}
	filteredList, _ := Filter<FTYPE>Err(isEven<FTYPE>Err, []<TYPE>{v1, v2, v3, v4})
	if filteredList[0] != expectedFilteredList[0] || filteredList[1] != expectedFilteredList[1] {
		t.Errorf("MapFilterErr failed. Expected filtered list=%v, actual list=%v", expectedFilteredList, filteredList)
	}
	r, _ := Filter<FTYPE>Err(nil, nil)
	if len(r) > 0 {
		t.Errorf("Filter<FTYPE>Err failed.")
	}
	_, err := Filter<FTYPE>Err(isEven<FTYPE>Err, []<TYPE>{v0})
	if err == nil {
		t.Errorf("Filter<FTYPE>PtrErr failed.")
	}
}
func isEven<FTYPE>Err(num <TYPE>) (bool, error) {
	if num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return num%2 == 0, nil
}
`
}
// FilterErrBoolTest returns the Go test-source template for the bool,
// error-returning variant of Filter<FTYPE>; <TYPE>/<FTYPE> placeholders are
// substituted when the code is generated.
func FilterErrBoolTest() string {
	return `
func TestFilter<FTYPE>Err(t *testing.T) {
	var vt <TYPE> = true
	var vf <TYPE> = false
	expectedSumList := []<TYPE>{vt}
	newList, _ := Filter<FTYPE>Err(true<FTYPE>Err, []<TYPE>{vt})
	if newList[0] != expectedSumList[0] {
		t.Errorf("Filter<FTYPE>Err failed")
	}
	r, _ := Filter<FTYPE>Err(nil, nil)
	if len(r) > 0 {
		t.Errorf("Filter<FTYPE>Err failed.")
	}
	_, err := Filter<FTYPE>Err(true<FTYPE>Err, []<TYPE>{vf})
	if err == nil {
		t.Errorf("Filter<FTYPE>Err failed.")
	}
}
func true<FTYPE>Err(num1 <TYPE>) (bool, error) {
	if num1 == false {
		return false, errors.New("False is not allowed")
	}
	return true, nil
}
`
}
// ReplaceActivityFilterErrTest patches the generated FilterErr test source:
// it fixes the imports and rewrites the type-specific isEven helpers so the
// string and float variants compile (string comparison, int-cast modulo for
// floats).
func ReplaceActivityFilterErrTest(code string) string {
	s1 := `import (
	_ "errors"
	"reflect"
	"testing"
)
func TestFilterIntErr(t *testing.T) {`
	s2 := `import (
	"errors"
	"testing"
)
func TestFilterIntErr(t *testing.T) {`
	code = strings.Replace(code, s1, s2, -1)
	s1 = `func isEvenStrErr(num string) (bool, error) {
	if num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return num%2 == 0, nil
}`
	s2 = `func isEvenStrErr(num string) (bool, error) {
	if num == "0" {
		return false, errors.New("Zero is not allowed")
	} else if num == "2" || num == "4" {
		return true, nil
	}
	return false, nil
}`
	code = strings.Replace(code, s1, s2, -1)
	s1 = `func isEvenFloat32Err(num float32) (bool, error) {
	if num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return num%2 == 0, nil
}`
	s2 = `func isEvenFloat32Err(num float32) (bool, error) {
	if num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return int(num)%2 == 0, nil
}`
	code = strings.Replace(code, s1, s2, -1)
	s1 = `func isEvenFloat64Err(num float64) (bool, error) {
	if num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return num%2 == 0, nil
}`
	s2 = `func isEvenFloat64Err(num float64) (bool, error) {
	if num == 0 {
		return false, errors.New("Zero is not allowed")
	}
	return int(num)%2 == 0, nil
}`
	code = strings.Replace(code, s1, s2, -1)
	return code
} | internal/template/basic/filterptrtest.go | 0.541894 | 0.487307 | filterptrtest.go | starcoder
package ts
// CreateStreamPackets builds the packetized elementary stream for every
// sample of the given stream and appends the resulting packets to the
// fragment.
func CreateStreamPackets(streamInfo StreamInfo, samplesInfo []SampleInfo, fragment *FragmentData) {
	// For each sample
	for _, sample := range samplesInfo {
		// Build the raw elementary-stream bytes for this sample.
		elementaryStream := CreateElementaryStreamSrc(streamInfo, sample)
		// Allocate the packets needed to carry that many bytes.
		pes := createPackets(streamInfo, sample, uint32(len(elementaryStream)))
		// Copy the elementary stream into the packets' payloads.
		fillPackets(&pes, elementaryStream)
		// Append fragment to PES
		fragment.pes = append(fragment.pes, pes...)
	}
}
// CreateElementaryStreamSrc builds the elementary-stream bytes (sample header
// plus payload) for one sample. For video it emits an access-unit delimiter,
// SPS/PPS, then each NAL unit prefixed by a start code; for audio it emits an
// ADTS header followed by the raw sample bytes from mdat.
func CreateElementaryStreamSrc(stream StreamInfo, sample SampleInfo) ([]byte) {
	var data *Data
	// Equal DTS/CTS lets the header carry a single timestamp.
	sameTimeStamps := sample.DTS == sample.CTS
	// Create data holding the elementary stream
	streamSize, headerLength := getStreamSizeAndHeaderLength(stream, sample, sameTimeStamps)
	data = NewData(streamSize)
	pushSampleHeader(stream, sample, sameTimeStamps, streamSize, headerLength, data)
	if stream.isVideo() {
		// Push start slice
		data.PushAll([]byte{0x00, 0x00, 0x01, 0x09, 0xf0, 0x00})
		// Push the SPS data and PPS to be sure first picture and sequence have parameters
		pushSPSAndPPS(stream, data)
		// For each NAL Units
		for _, unit := range sample.NALUnits {
			// Packet start id code (0x000001 start-code prefix)
			data.PushUInt(1, 24)
			// Add the corresponding data
			stream.mdat.Offset = unit.mdatOffset
			stream.mdat.Size = unit.mdatSize
			data.PushAll(stream.mdat.ToBytes())
		}
	} else {
		// Audio: ADTS header then the raw sample bytes.
		pushADTSHeader(stream, sample.mdatSize, data)
		stream.mdat.Offset = sample.mdatOffset
		stream.mdat.Size = sample.mdatSize
		data.PushAll(stream.mdat.ToBytes())
	}
	return data.Data
}
// getStreamSizeAndHeaderLength computes the total elementary-stream size in
// bytes and the length of the optional PES header fields (PTS only, or
// PTS+DTS when the timestamps differ) for one sample.
func getStreamSizeAndHeaderLength(stream StreamInfo, sample SampleInfo, sameTimeStamps bool) (streamSize int, headerLength int) {
	// 5 bytes for the PTS; 5 more for the DTS when CTS != DTS.
	headerLength = 5
	if !sameTimeStamps {
		headerLength += 5
	}
	// Stream size with header: 9 fixed bytes precede the optional header
	// fields, then the sample payload follows.
	streamSize = 9 + headerLength + int(sample.size)
	if stream.isVideo() {
		// Access-unit delimiter NAL (00 00 01 09 f0 00) is 6 bytes.
		// (Originally computed as len of a slice literal — same constant.)
		streamSize += 6
		// Add PPS and SPS to be sure there are parameters for the first
		// pictures and sequences; each gets a 3-byte start-code prefix.
		streamSize += 3 + len(stream.avcC.SPSData)
		streamSize += 3 + len(stream.avcC.PPSData)
		// Each NAL unit's length field is replaced by a 3-byte start-code
		// prefix, so adjust by the difference.
		totalNALLengthSize := uint32(len(sample.NALUnits)) * stream.nalLengthSize
		startCodePrefixSize := uint32(len(sample.NALUnits)) * 3
		streamSize += int(startCodePrefixSize) - int(totalNALLengthSize)
	} else {
		streamSize += 7 // ADTS header
	}
	return
}
// pushADTSHeader writes the 7-byte ADTS frame header in front of an audio
// frame of the given length (CRC absent, MPEG-4 signalling).
// NOTE(review): the profile (1 = AAC-LC) and sampling-frequency index (3 =
// 48 kHz) are hard-coded — confirm they match the actual track config.
func pushADTSHeader(stream StreamInfo, frameLength uint32, data *Data) {
	data.PushUInt(0xFFFF, 12) // SyncWord
	data.PushUInt(0, 1) // ID: 0 for MPEG-4, 1 for MPEG-2
	data.PushUInt(0, 2) // Layer
	data.PushUInt(1, 1) // CRC Absent checksum
	data.PushUInt(1, 2) // Profile: Using only Low complexity
	data.PushUInt(3, 4) // Sampling frequency: 48 000 khz
	data.PushUInt(0, 1) // Private bit
	data.PushUInt(uint32(stream.Audio.NumberOfChannels), 3) // Channel configuration: Number of channels
	data.PushUInt(0, 1) // Original
	data.PushUInt(0, 1) // Home
	data.PushUInt(0, 1 ) // Copyright identification bit
	data.PushUInt(0, 1) // CopyRight identification start
	data.PushUInt(frameLength + 7, 13 ) // Frame length + ADT
	data.PushUInt(0x7FF, 11) // ADTS buffer fullness: variable rate
	data.PushUInt(0, 2) // Number of raw blocks in frame
}
// pushSPSAndPPS writes the H.264 sequence and picture parameter sets from
// the avcC box, each preceded by a 3-byte start-code prefix, so decoders
// have the parameter sets before the first picture.
func pushSPSAndPPS(stream StreamInfo, data *Data) {
	data.PushUInt(1, 24) // Start code prefix
	data.PushAll(stream.avcC.SPSData) // id 103, Sequence parameter set
	data.PushUInt(1, 24) // Start code prefix
	data.PushAll(stream.avcC.PPSData) // id 104, Picture parameter set
}
// pushSampleHeader writes the PES packet header for a sample: start code,
// stream id (0xE0 video / 0xC0 audio), packet length, flag bytes, and the
// PTS (and DTS, when the timestamps differ).
func pushSampleHeader(stream StreamInfo, sample SampleInfo, sameTimeStamps bool, streamSize int, headerLength int, data *Data) {
	// If CTS needed: '10' = PTS only, '11' = PTS + DTS.
	flagPTSCode := uint32(0x02)
	if !sameTimeStamps {
		flagPTSCode = 0x03
	}
	data.PushUInt(1, 24) // Packet start id code
	if stream.isVideo() {
		data.PushUInt(224, 8) // Pes stream (0xE0)
		// Video leaves the packet-length field 0 (unbounded, permitted for video).
		data.PushUInt(0, 16) // Stream size
	} else {
		data.PushUInt(192, 8) // Pes stream (0xC0)
		data.PushUInt(uint32(streamSize - 6), 16) // Stream size - 6 bytes before
	}
	data.PushUInt(0x2, 2) // '10'
	data.PushUInt(0, 2) // PES_Scrambling_control
	data.PushUInt(0, 1) // PES_Priority
	data.PushUInt(1, 1) // data alignment indicator
	data.PushUInt(0, 1) // copyright
	data.PushUInt(0, 1) // original or copy
	data.PushUInt(flagPTSCode, 2) // PTS and DTS flag
	data.PushUInt(0, 1) // ESCR flag
	data.PushUInt(0, 1) // ESCR Rate flag
	data.PushUInt(0, 1) // DSM_trick_mode_flag
	data.PushUInt(0, 1) // additional_copy_info_flag
	data.PushUInt(0, 1) // PES_CRC_flag
	data.PushUInt(0, 1) // PES_extension_flag
	data.PushUInt(uint32(headerLength), 8) // Header length
	data.PushUInt(flagPTSCode,4) // PTS and DTS flag
	pushTimestamp(sample.CTS, data)
	if !sameTimeStamps {
		data.PushUInt(1, 4) // '0001' marker before the DTS field
		pushTimestamp(sample.DTS, data)
	}
}
// pushTimestamp writes a 33-bit PTS/DTS value in the MPEG PES layout:
// three groups of 3, 15 and 15 bits, each followed by a marker bit.
func pushTimestamp(timestamp uint64, data *Data) {
	data.PushUInt64(timestamp >> 30, 3) // timestamp [32..30]
	data.PushUInt(1, 1) // marker_bit
	data.PushUInt64(timestamp >> 15, 15) // timestamp [29..15]
	data.PushUInt(1, 1) // marker_bit
	data.PushUInt64(timestamp, 15) // timestamp [14..0]
	data.PushUInt(1, 1) // marker_bit
}
// createPackets allocates the list of TS packets needed to carry an
// elementary stream of the given size, then stuffs the trailing packet(s)
// so each one is exactly 188 bytes.
func createPackets(info StreamInfo, sample SampleInfo, elementaryStreamSize uint32) (packets []PES) {
	// The first packet carries the PES header (and PCR/RAP when needed).
	first := createFirstPacket(info, sample)
	// Work out how many extra packets are needed and how to stuff the tail.
	stuffingCase, sizeToStuff, lastPESSize, neededPackets := getStuffingCase(first.EmptySize, elementaryStreamSize)
	packets = make([]PES, neededPackets+1)
	initPacketList(neededPackets, first, info.PID, &packets)
	// Pad the final packet(s) via adaptation-field stuffing.
	stuffLastPackets(stuffingCase, &packets, sizeToStuff, lastPESSize)
	return packets
}
// createFirstPacket builds the leading TS packet for a sample: it carries
// the PCR when the sample has one, marks random-access points, and opens
// an adaptation field plus payload.
func createFirstPacket(info StreamInfo, sample SampleInfo) (firstPacket *PES) {
	firstPacket = NewStartStream(info.PID, info.streamType)
	if sample.hasPCR {
		firstPacket.setPCR(PCR{BaseMediaDecodeTime: sample.PCR})
	}
	// Key frames and every audio sample are random-access points.
	if sample.IsIframe() || info.isAudio() {
		firstPacket.RandomAccessIndicator = 1
	}
	firstPacket.setAdaptationControl(true, true)
	// Remaining payload room after the TS header and adaptation field.
	firstPacket.Payload.EmptySize = uint32(188 - firstPacket.HeaderAndAdaptationSize())
	return firstPacket
}
// getStuffingCase decides how the tail of the packet list is stuffed.
// restingSize is the payload room left in the first packet.
// Returns:
//   stuffingCase      - 0: no stuffing needed, 1: stuff the last packet,
//                       2: split across the last two packets
//   sizeToStuff       - number of stuffing bytes for the last packet
//   lastPacketPESSize - payload bytes carried by the last packet
//   neededPackets     - extra packets required beyond the first one
func getStuffingCase(restingSize uint32, elementaryStreamSize uint32) (stuffingCase int, sizeToStuff uint32, lastPacketPESSize uint32, neededPackets uint32){
	// If there are only one packet needed
	if restingSize >= elementaryStreamSize {
		stuffingCase = 1
		lastPacketPESSize = elementaryStreamSize
		sizeToStuff = 188 - 4 - elementaryStreamSize // + First byte in header
	} else {
		// Compute number of needed packets to be filled with elementary stream
		neededPackets = RoundDivision32(elementaryStreamSize - restingSize, 184)
		// Get stream size in the last packet
		lastPacketPESSize = (elementaryStreamSize - restingSize) % 184
		sizeToStuff = (184 - lastPacketPESSize) % 184
		// If there are still something to write
		// (sizeToStuff == 0 means the stream exactly fills the packets:
		// stuffingCase stays 0 and no stuffing is applied)
		if sizeToStuff != 0 {
			stuffingCase = 1
			// If there is enough space to write payload and adaptation field in this last packet
			if sizeToStuff < uint32(AdaptationField{}.Size()) {
				// We need another packet
				neededPackets++
				stuffingCase = 2
			}
		}
	}
	return
}
// initPacketList fills the packet slice: slot 0 receives the prepared
// first packet, the remaining slots get fresh payload-only packets for the
// same PID with a full 184-byte payload budget.
func initPacketList(neededPackets uint32, firstPacket *PES, pid uint16, packets *[]PES) {
	(*packets)[0] = *firstPacket
	for idx := uint32(1); idx <= neededPackets; idx++ {
		packet := NewStream(pid)
		packet.setAdaptationControl(false, true)
		packet.Payload.EmptySize = 184
		(*packets)[idx] = *packet
	}
}
// stuffLastPackets pads the trailing packet(s) with adaptation-field
// stuffing so every TS packet comes out at exactly 188 bytes.
//
// Case 1: the last packet has room for both the stuffing and its payload.
// Case 2: the leftover space was too small for an adaptation field, so an
// extra packet was allocated and the payload splits across the last two.
//
// (Go switch cases do not fall through, so the original trailing `break;`
// statements were redundant C-style noise and have been removed.)
func stuffLastPackets(stuffingCase int, packets *[]PES, sizeToStuff uint32, lastPacketPESSize uint32) {
	switch stuffingCase {
	case 1:
		lastPacket := &(*packets)[len(*packets)-1]
		// Fill last packet adaptation field with the stuffing bytes.
		lastPacket.setAdaptationControl(true, true)
		lastPacket.setTotalAdaptationSize(byte(sizeToStuff))
		lastPacket.Payload.EmptySize = lastPacketPESSize
	case 2:
		secLastPacket := &(*packets)[len(*packets)-2]
		lastPacket := &(*packets)[len(*packets)-1]
		// Add adaptation fields to the last two packets and split the
		// remaining payload between them.
		secLastPacket.setAdaptationControl(true, true)
		secLastPacket.EmptySize = secLastPacket.EmptySize - uint32(AdaptationField{}.Size())
		restingPES := lastPacketPESSize - secLastPacket.EmptySize
		lastPacket.setAdaptationControl(true, true)
		lastPacket.setTotalAdaptationSize(byte(lastPacket.EmptySize - restingPES))
		lastPacket.Payload.EmptySize = restingPES
	}
}
func fillPackets(packets *[]PES, elementaryStream []byte) {
offset := uint32(0)
finalSize := uint32(len(elementaryStream))
packetId := uint32(0)
var extractedSize uint32
var payloadSize uint32
// While there is data left
for offset != finalSize {
// Get the corresponding part of the packet
payloadSize = uint32((*packets)[packetId].Payload.EmptySize)
extractedSize = Min32(payloadSize, finalSize - offset)
// Register the payload
(*packets)[packetId].Data = elementaryStream[offset:offset+extractedSize]
// Go to next packet
offset += extractedSize
packetId++
}
} | src/ts/CreateStreamPackets.go | 0.603581 | 0.435121 | CreateStreamPackets.go | starcoder |
package cmd
import (
"errors"
"fmt"
"strings"
"github.com/JosephLai241/shift/database"
"github.com/JosephLai241/shift/timesheet"
"github.com/JosephLai241/shift/utils"
"github.com/spf13/cobra"
)
// amendCmd represents the amend command.
// It rewrites the clock-in or clock-out message of a recorded shift; the
// target shift can be located via the -d/-m/-y flags.
var amendCmd = &cobra.Command{
	Use: `amend (in|out) "NEW MESSAGE"`,
	Short: "Amend a shift's clock-in or clock-out message",
	Long: `
_
___ _____ ___ ___ _| |
| .'| | -_| | . |
|__,|_|_|_|___|_|_|___|
Use this command to amend a recorded shift's clock-in or clock-out
message. This command is fairly versatile - you can search for records
based on a day of the week or date, month, and/or year.
Using amend without additional commands or flags will display a
table containing shifts recorded for the current day.
There are three optional flags you can use: the '-d', '-m',
and '-y' flags. These flags denote the target day of the week or date,
month, and year respectively. The default value for all of these
flags is the current day of the week/date, month, and year.
Combine these flags to to do a deep search for a particular
shift or shifts.
You can search for shifts on a different day or date by using the '-d'
flag, which accepts a day of the week (ie. Monday) or a date
(ie. 07-14-2021). The accepted date formats are:
- MM-DD-YYYY
- MM/DD/YYYY
You can search for shifts in a different month by using the
'-m' flag, which accepts a month (ie. July). If this is the only
provided flag, a search will be done for the current day within
the provided month.
Finally, you can search for shifts in a different year by using
the '-y' flag, which accepts a year (ie. 2021). Like the '-m'
flag, a search will be done for the current day and month within
the provided year if this is the only provided flag.
You can combine the '-d', '-m', and/or '-y' flags to do a deep
search for a particular shift or shifts.
`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println(utils.AmendArt)
		// Validate positional args before touching any storage backend.
		checkArgs(args)
		// Normalise the clock event ("in"/"out") to lower case.
		args[0] = strings.ToLower(args[0])
		dayOrDate, month, year := utils.FormatFlags(cmd)
		// Dispatch to the timesheet (file) backend or the database backend.
		utils.CRUD(
			func() { timesheet.Amend(args, dayOrDate, month, year) },
			func() { database.Amend(args, dayOrDate, month, year) },
		)
	},
}
// Add the `amend` command and its sub-flags to the base command.
// All three flags default to "now" so a bare `amend` targets today.
func init() {
	rootCmd.AddCommand(amendCmd)
	amendCmd.Flags().StringP(
		"dayordate", "d",
		utils.CurrentDate,
		"Search records on a day of the week or date",
	)
	amendCmd.Flags().StringP(
		"month", "m",
		utils.CurrentMonth,
		"Search records in a month",
	)
	amendCmd.Flags().StringP(
		"year", "y",
		utils.CurrentYear,
		"Search records in a year",
	)
}
// Check all input for the `amend` command.
func checkArgs(args []string) {
if len(args) < 1 {
utils.CheckError("Command error", errors.New("`amend` requires in or out"))
} else if len(args) < 2 {
utils.CheckError("Command error", errors.New("`amend` requires a new message"))
} else {
utils.BoldBlue.Printf("New message: %s\n", args[len(args)-1])
}
} | cmd/amend.go | 0.655667 | 0.419529 | amend.go | starcoder |
package main
// inccidentTemplate is a Go text/template that renders the JSON body of a
// Microsoft Teams message for an incident notification: a markdown summary
// plus an Adaptive Card attachment. Sections guarded by
// {{if (ne .MessageStatus "Resolved")}} are omitted for resolved incidents.
// NOTE(review): the name is misspelled ("inccident" -> "incident") but is
// deliberately left unchanged so existing references keep compiling.
const inccidentTemplate = `{
{{.MessageTarget}}
"markdown": "<blockquote class='{{.MessageColor}}'> {{.Emoji}} {{.MessageStatus}} <br/>
<b>Check Name:</b> {{.CheckName}}
{{if (ne .MessageStatus "Resolved") }}
<b>Execution Time:</b> {{.CheckExecutionTime}} <br/>
{{end}}
<b>Entity:</b> {{.EntityName}} <br/>
{{if (ne .MessageStatus "Resolved") }}
<b>Check output:</b> {{.CheckOutput}} <br/>
<b>History:</b> {{.History}} <br/>
{{end}}</blockquote>",
"attachments": [
{
"contentType": "application/vnd.microsoft.card.adaptive",
"content": {
"type": "AdaptiveCard",
"version": "1.0",
"body": [
{
"type": "Container",
"items": [{
"type": "ColumnSet",
"columns": [{
"type": "Column",
"width": "100px",
"items": [{
"type": "TextBlock",
"text": "{{.Emoji}} {{.MessageStatus}}",
"size": "Medium",
"isSubtle": true
}]
},
{
"type": "Column",
"width": "300px",
"items": [{
"type": "TextBlock",
"text": "**Check Name**: [{{.CheckName}}](foo)"
}]
}
],
"horizontalAlignment": "Left"
}
],
"spacing": "Medium",
"horizontalAlignment": "Left",
"style": "default"
},
{
"type": "ColumnSet",
"columns": [{
"type": "Column",
"width": "5px",
"items": [{
"type": "Image",
"altText": "",
"url": "{{.BucketName}}/{{.MessageColor}}.png",
"spacing": "Medium",
"backgroundColor": "green"
}],
"spacing": "None",
"horizontalAlignment": "Center",
"backgroundImage": {
"url": "{{.BucketName}}/{{.MessageColor}}.png",
"fillMode": "RepeatVertically",
"horizontalAlignment": "Center"
}
}
,{
"type": "Column",
"width": "stretch",
"items": [{
"type": "ColumnSet",
"columns": [{
"type": "Column",
"width": "stretch",
"items": [{
"type": "FactSet",
"facts": [
{
"title": "**Entity:** ",
"value": "[{{.EntityName}}](foo)"
}
{{if (ne .MessageStatus "Resolved") }}
,
{
"title": "Time",
"value": "{{.CheckExecutionTime}}"
},
{
"title": "History",
"value": "{{.History}}"
}
{{end}}
]
}]
}]
}]
}
]
}
{{if (ne .MessageStatus "Resolved") }}
,
{
"type": "Container",
"items": [{
"type": "Container",
"items": [{
"type": "ColumnSet",
"columns": [{
"type": "Column",
"width": "stretch",
"items": [{
"type": "TextBlock",
"text": "**Check Output**: {{.CheckOutput}}",
"wrap": true,
"color": "Attention",
"separator": true,
"horizontalAlignment": "Left",
"size": "Small"
}]
}]
}]
}]
}
{{end}}
]
}
}
]
}`
package rate
import (
"math"
"sync"
"time"
)
// hz is the fraction of a second represented by one nanosecond
// (1 / time.Second). NOTE(review): appears unused in this file —
// confirm no other file in the package references it before removing.
const hz float64 = 1.0 / float64(time.Second)
// Estimator is a rate estimator using exponential decay. It is not
// thread-safe.
type Estimator struct {
	interval time.Duration // decay time constant
	seconds  float64       // interval expressed in seconds
	value    float64       // exponentially decayed accumulator
	base     float64       // -1/seconds, pre-computed decay exponent base
	time     time.Time     // instant of the last advance
	running  bool          // whether the estimator is currently active
}
// Init initialises a rate estimator with the given time constant.
// NOTE(review): Init does not reset value or running, so re-initialising a
// live estimator keeps its accumulated state — confirm this is intended.
func (e *Estimator) Init(interval time.Duration) {
	e.interval = interval
	e.time = time.Now()
	// Pre-compute the interval in seconds and the negative decay base used
	// by advance: value *= exp(-elapsed/interval).
	e.seconds = float64(interval) / float64(time.Second)
	e.base = -1.0 / e.seconds
}
// Start starts a rate estimator. Starting an already-running estimator is
// a no-op; otherwise the clock is reset to now so no decay is applied for
// the stopped period.
func (e *Estimator) Start() {
	if !e.running {
		e.time = time.Now()
		e.running = true
	}
}

// Stop stops a rate estimator. While stopped, Accumulate is a no-op and
// Estimate returns the last decayed value without further decay.
func (e *Estimator) Stop() {
	e.running = false
}

// Time returns the time at which the estimator was advanced.
func (e *Estimator) Time() time.Time {
	return e.time
}
// advance applies exponential decay to the accumulated value for the time
// elapsed since the last update and moves the clock to now.
// It must only be called while the estimator is running.
func (e *Estimator) advance(now time.Time) {
	if !e.running {
		panic("Cannot advance stopped rate estimator")
	}
	delay := now.Sub(e.time)
	e.time = now
	// Ignore zero elapsed time or a clock that went backwards.
	if delay <= time.Duration(0) {
		return
	}
	seconds := float64(delay) * (1 / float64(time.Second))
	// Exponential decay with time constant e.interval.
	e.value = e.value * math.Exp(e.base*seconds)
}
// accumulate adds value to the decayed accumulator, clamping at zero so a
// negative contribution cannot drive the accumulator below 0.
// NOTE(review): the now parameter is currently unused — callers advance
// the clock before calling; confirm before changing the signature.
func (e *Estimator) accumulate(value int, now time.Time) {
	if !e.running {
		return
	}
	e.value += float64(value)
	if e.value < 0 {
		e.value = 0
	}
}

// rate converts an accumulated value into a per-second rate.
func (e *Estimator) rate(value float64) float64 {
	return float64(value) / e.seconds
}
// Estimate returns an estimate of the current rate (per second).
// When running, the accumulator is decayed up to the present first.
func (e *Estimator) Estimate() float64 {
	if e.running {
		e.advance(time.Now())
	}
	return e.rate(e.value)
}
// Accumulate notifies the estimator that the given number of bytes has
// been sent or received. A no-op while the estimator is stopped.
func (e *Estimator) Accumulate(value int) {
	if e.running {
		now := time.Now()
		// Decay the existing accumulator first, then add the new bytes.
		e.advance(now)
		e.accumulate(value, now)
	}
}
// Allow returns true if sending or receiving the given number of bytes
// would not exceed the given target (per-second rate).
// It first tests against the stale accumulator — which can only
// over-estimate — to avoid the cost of advancing the decay; only on a
// miss does it decay and re-check.
func (e *Estimator) Allow(value int, target float64) bool {
	if (e.value+float64(value)) <= target*e.seconds {
		return true
	}
	if e.running {
		e.advance(time.Now())
		if (e.value + float64(value)) <= target*e.seconds {
			return true
		}
	}
	return false
}
// AtomicEstimator is a thread-safe rate estimator. It wraps an Estimator,
// serialising every call with an embedded mutex.
type AtomicEstimator struct {
	sync.Mutex
	e Estimator
}

// Init initialises the estimator with the given time constant.
func (e *AtomicEstimator) Init(interval time.Duration) {
	e.Lock()
	e.e.Init(interval)
	e.Unlock()
}

// Start starts the estimator.
func (e *AtomicEstimator) Start() {
	e.Lock()
	e.e.Start()
	e.Unlock()
}

// Stop stops the estimator.
func (e *AtomicEstimator) Stop() {
	e.Lock()
	e.e.Stop()
	e.Unlock()
}

// Time returns the time at which the estimator was last advanced.
func (e *AtomicEstimator) Time() time.Time {
	e.Lock()
	v := e.e.Time()
	e.Unlock()
	return v
}

// Estimate returns an estimate of the current rate (per second).
func (e *AtomicEstimator) Estimate() float64 {
	e.Lock()
	v := e.e.Estimate()
	e.Unlock()
	return v
}

// Accumulate records that value bytes were sent or received.
func (e *AtomicEstimator) Accumulate(value int) {
	e.Lock()
	e.e.Accumulate(value)
	e.Unlock()
}

// Allow reports whether value more bytes would stay within target.
func (e *AtomicEstimator) Allow(value int, target float64) bool {
	e.Lock()
	v := e.e.Allow(value, target)
	e.Unlock()
	return v
}
package card
import (
"context"
"github.com/ianeinser/xendit-go"
)
/* Token */

// CreateToken creates a new card token.
func CreateToken(data *CreateTokenParams) (*xendit.Token, *xendit.Error) {
	return CreateTokenWithContext(context.Background(), data)
}

// CreateTokenWithContext creates a new card token with the given context.
func CreateTokenWithContext(ctx context.Context, data *CreateTokenParams) (*xendit.Token, *xendit.Error) {
	client, err := getClient()
	if err != nil {
		return nil, err
	}
	return client.CreateTokenWithContext(ctx, data)
}
/* Authentication */

// CreateAuthentication creates a new authentication using a token id.
func CreateAuthentication(data *CreateAuthenticationParams) (*xendit.Authentication, *xendit.Error) {
	return CreateAuthenticationWithContext(context.Background(), data)
}

// CreateAuthenticationWithContext creates a new authentication with the
// given context.
func CreateAuthenticationWithContext(ctx context.Context, data *CreateAuthenticationParams) (*xendit.Authentication, *xendit.Error) {
	client, err := getClient()
	if err != nil {
		return nil, err
	}
	return client.CreateAuthenticationWithContext(ctx, data)
}
/* Charge */

// CreateCharge creates a new card charge.
func CreateCharge(data *CreateChargeParams) (*xendit.CardCharge, *xendit.Error) {
	return CreateChargeWithContext(context.Background(), data)
}

// CreateChargeWithContext creates a new card charge with the given context.
func CreateChargeWithContext(ctx context.Context, data *CreateChargeParams) (*xendit.CardCharge, *xendit.Error) {
	client, err := getClient()
	if err != nil {
		return nil, err
	}
	return client.CreateChargeWithContext(ctx, data)
}

// CaptureCharge captures a card charge.
func CaptureCharge(data *CaptureChargeParams) (*xendit.CardCharge, *xendit.Error) {
	return CaptureChargeWithContext(context.Background(), data)
}

// CaptureChargeWithContext captures a card charge with the given context.
func CaptureChargeWithContext(ctx context.Context, data *CaptureChargeParams) (*xendit.CardCharge, *xendit.Error) {
	client, err := getClient()
	if err != nil {
		return nil, err
	}
	return client.CaptureChargeWithContext(ctx, data)
}

// GetCharge gets a card charge.
func GetCharge(data *GetChargeParams) (*xendit.CardCharge, *xendit.Error) {
	return GetChargeWithContext(context.Background(), data)
}

// GetChargeWithContext gets a card charge with the given context.
func GetChargeWithContext(ctx context.Context, data *GetChargeParams) (*xendit.CardCharge, *xendit.Error) {
	client, err := getClient()
	if err != nil {
		return nil, err
	}
	return client.GetChargeWithContext(ctx, data)
}

// CreateRefund creates a card refund.
// (The previous comment incorrectly said it "gets a card charge".)
func CreateRefund(data *CreateRefundParams) (*xendit.CardRefund, *xendit.Error) {
	return CreateRefundWithContext(context.Background(), data)
}

// CreateRefundWithContext creates a card refund with the given context.
func CreateRefundWithContext(ctx context.Context, data *CreateRefundParams) (*xendit.CardRefund, *xendit.Error) {
	client, err := getClient()
	if err != nil {
		return nil, err
	}
	return client.CreateRefundWithContext(ctx, data)
}
/* Authorization */

// ReverseAuthorization reverses a card authorization.
func ReverseAuthorization(data *ReverseAuthorizationParams) (*xendit.CardReverseAuthorization, *xendit.Error) {
	return ReverseAuthorizationWithContext(context.Background(), data)
}

// ReverseAuthorizationWithContext reverses a card authorization with the
// given context.
func ReverseAuthorizationWithContext(ctx context.Context, data *ReverseAuthorizationParams) (*xendit.CardReverseAuthorization, *xendit.Error) {
	client, err := getClient()
	if err != nil {
		return nil, err
	}
	return client.ReverseAuthorizationWithContext(ctx, data)
}

// getClient builds a card API client from the package-level Xendit options.
// The error result is always nil in the current implementation.
func getClient() (*Client, *xendit.Error) {
	return &Client{
		Opt: &xendit.Opt,
		APIRequester: xendit.GetAPIRequester(),
	}, nil
}
package main
// help prints the help message and exits.
import (
"fmt"
"os"
"strings"
)
// help generates the help message.
// With an empty HelpArg it prints the top-level help; otherwise it loads
// the named recipe and prints its full description. Always exits with 0.
func help(opts CliOptions) {
	if opts.HelpArg == "" {
		helpTop()
	} else {
		// generate the help for a recipe
		recipe := loadRecipe(opts.HelpArg)
		fmt.Printf("Help for %v - %v\n", recipe.Name, recipe.File)
		fmt.Printf("%v\n", recipe.Full)
	}
	os.Exit(0)
}
// helpTop generates the top level help.
// The message template uses indexed verbs: %[1]v = program base name,
// %[2]v = upper-cased base name, %[3]v = script directory, %[4]v = the
// list of built-in environment variables. Always exits with 0.
func helpTop() {
	msg := `
USAGE
%[1]v [OPTIONS] <RECIPE> [RECIPE_OPTIONS]
DESCRIPTION
%[1]v runs recipes that perform complex build tasks that normally require
lots of different steps or arcane combinations of command line options.
Sounds a lot like scripts, right? Why not just create scripts to wrap the
complex commands. You can and should use scripts to wrap complex functions.
Scripts are great.
However, when there is a profileration of many scripts for a class of tasks
(like building SW), it is sometimes difficult to find the one that you want.
That is where recipes come in. They are not meant to replace scripts or
other tools. Instead they are meant to help organize them in a way that
makes it easy to find the functionality that you need.
Recipes are simply wrappers for steps that live in a central place with
brief and full descriptions to make it easier to find them and simple
support variables to allow them to be customized a bit.
Recipes can be created by anyone and shared with everyone or kept private.
The syntax for using recipes is very simple:
%[1]v [OPTIONS] <recipe> [RECIPE_OPTIONS]
Here is how you list all recipes along with a brief description:
$ %[1]v --list
Here is how you get detailed information for a specific recipe:
$ %[1]v help <recipe>
Here is how you run a recipe:
$ %[1]v <recipe> <recipe-options>
Each recipe is defined by an INI file with three parts: the description,
the variables and the steps. These sections are described in detail in
the next section.
MOTIVATION
The motivation for developing this tool came from working with a legacy
build system that had many tasks and each task required a dozen or more
steps. This first approach was to write scripts to wrap the steps but that
quickly got out of hand with dozens of them so this approach was developed
which made things much easier. It also hid the implementation so that it
could be improved.
RECIPES
Recipes are the heart of the system. Each recipe defines a sequence of
commands for performing an operation.
Recipes are defined by INI files with a .ini extension.
Blank lines are ignored.
Lines that start with a '#' as the first non-whitespace character are
comment lines that are ignored.
Multi-line strings can be specified by delimiting them with """.
Includes are allowed. An include is defined by they keyword 'include'
followed by a filename. Include statements can appear anywhere in the
file. They act just like paste operations and can be used to share code.
Include files can include other files. Include files must not have a .ini
extension. The recommended extension is .inc but anything will work.
Recipes have three sections:
[description] Fields that describe the recipe.
[variable] Defines variables for the recipe.
[steo] Defines the recipe steps.
The description section contains two variable: brief and full. Brief is a
one line description of the recipe. Full is a full multiline description.
You can use """ """ syntax for the full description.
The variable section defines variables that the user can change. Each
variable has a name and an optional value separated by an equals '=' sign.
Variable names can only be composed of lower case letters, digits,
underscores and dashes. They cannot start with a digit or a dash.
Variables are referenced using shell syntax: ${name}. Note that the braces
are not optional. If the variables are assigned a value, that is the default
value. If they are not assigned a value, then they are required.
Variable names appear as options on the command line. That means that
if you define a variable named "foo", an option named --foo will be
generated to set that variable. Here are some sample declarations of
variables to make this clearer.
# defines two variables, required and optional.
# they appear as --required <value> and --optional <value> on the
# command line.
[variable]
required =
option = default
The step section defines the steps taken. It is very simple and does not
support looping or conditionals. That is because it is only meant to handle
high level operations that deal with running multiple scripts in order. For
lower level stuff that requires looping or conditionals, it makes more sense
to use a script. Note that you can embed an anonymous script if you don't
want to create an external one explicitly.
Each entry in the step section is defined like this:
step = <directive> <data>
The directive tells %[1]v what to do. The following directives are
available.
cd <dir> Change the working dir for all subsequent steps.
export X=Y Define an env var for all subsequent steps.
exec <cmd> Execute a command, stop if it fails.
exec-no-exit <cmd> Exexute a command, continue if it fails.
info <msg> Print a message to the log.
must-exist-dir <dir> Fail if <dir> does not exist.
Shorthand for
step = exec /bin/bash -c "[ -d <dir>] && exit 0 || exit 1"
must-exist-file <file> Fail if <file> does not exist.
Shorthand for
step = exec /bin/bash -c "[ -f <file>] && exit 0 || exit 1"
must-not-exist-dir <dir> Fail if <dir> exists.
Shorthand for
step = exec /bin/bash -c "[ ! -d <dir>] && exit 0 || exit 1"
must-not-exist-file <file> Fail if <file> exists.
Shorthand for
step = exec /bin/bash -c "[ ! -f <file>] && exit 0 || exit 1"
script Embed an anonymous, in-line script.
You can use any scripting language.
They are generated dynamically in %[3]v.
You can change a variable setting by
specifying a line of the form:
###export <variable> = <value>
Here is an example recipe. It is named list-files.ini so you can refer to it
as "list-files" on the command line.
# This is an example recipe.
[description]
brief = "List files in a directory."
full = """
USAGE
list-files [OPTIONS]
DESCRIPTION
Lists files in a directory.
OPTIONS
--dir DIR Override the default directory.
"""
[variable]
dir = /tmp
[step]
step = must-exist-dir ${dir}
step = info "ls command"
step = ls -l ${dir}
step = info """
# ================================================================
# run anonymous bash and python scripts
# ================================================================
"""
step = script """#!/bin/bash
echo "bash script - ${dir}"
"""
step = script """#!/usr/bin/env python
print("python script - {}".format("${dir}"))
"""
# Reset the value of the dir variable from an anonymous script.
# It will be /var for all subsequent steps.
step = script """#!/bin/bash
echo "###export dir=/var"
"""
# dir will be /var
step = exec ls -l ${dir}
step = info done
Note the use of '"""' to delimit multi-line strings for the full description
and the anonymous script.
You would run this recipe like this:
$ %[1]v list-files
To print the help, do this:
$ %[1]v help list-files
To list a different directory, do this:
$ %[1]v list-files --dir /var/run
ENVIRONMENT VARIABLES
When a recipe is run there are environment variables that are made available
to it by %[1]v. The list of environment variables is shown below.
%[4]v
To use an environment variable just reference it like a normal variable.
Here is an example: ${%[2]v_USERNAME}.
CALLING OTHER RECIPES
You can use ${%[2]v_EXE} to call other recipes like this:
# Call other another recipe.
step = exec ${%[2]v_EXE} --arg1 arg1
Use this approach with caution because you could end up with infinite
recursion for a recipe that calls itself.
OPTIONS
-h, --help On-line help. Same as "%[1]v help".
-f FILE, --flatten FILE
Flatten a recipe into a file.
-l, --list List the available recipes with a brief description.
-q, --quiet Run quietly. Only error messages are printed.
If -q and -v are not specified, only ERROR and WARNING
messages are printed.
--no-banner Turn off the step banner in verbose mode.
-r DIR, --recipes DIR
The path to the recipes directory.
--run <cmd> <args> Run a command. Used for internal testing.
-t, --tee Log all messages to a unique log file as well as stdout.
It saves having to create a unique file name for each run
using the command line tee tool.
The output file name is
%[1]v-<YYYYMMDD>-<hhmmss>-<username>.log
-v, --verbose Increase the level of verbosity.
It can be specified multiple times.
-v --> print INFO and banner messages
-v -v --> print INFO, banner and DEBUG messages
You always want to use -v when running recipes.
-V, --version Print the program version and exit.
EXAMPLES
$ # Example 1: Get help.
$ %[1]v help
$ # Example 2: List all available recipes.
$ %[1]v --list
$ # Example 3: Get help about a recipe.
$ %[1]v help <recipe>
$ # Example 4: Show your local configuration.
$ %[1]v -v
$ # Example 5: Run a recipe with automatic logging.
$ # Provide a recipe specific option. The options
$ # are different for each recipe.
$ %[1]v -v -t <recipe> --foo bar
$ # Example 6: Run a local recipe file.
$ %[1]v -v ./myrecipe.ini
$ # Example 7: Use a local recipe repository.
$ %[1]v -v -r ~/my/recipes myrecipe1
`
	// Get the built-in environment variables.
	evs := []string{}
	ub := strings.ToUpper(Context.Base)
	ubu := ub + "_"
	for _, e := range os.Environ() {
		// Only expose variables in the program's own namespace (<BASE>_*).
		if strings.HasPrefix(e, ubu) {
			evs = append(evs, e)
		}
	}
	// Print the message and exit.
	fmt.Printf(msg, Context.Base, ub, Context.ScriptDir, strings.Join(evs, "\n "))
	os.Exit(0)
}
package binpack2d
// List of different heuristic rules that can be used when deciding where to place a new rectangle.
const (
	RULE_BEST_SHORT_SIDE_FIT = iota // minimise the shorter leftover side
	RULE_BEST_LONG_SIDE_FIT         // minimise the longer leftover side
	RULE_BEST_AREA_FIT              // minimise the leftover free area
	RULE_BOTTOM_LEFT                // prefer the lowest, leftmost position
	RULE_CONTACT_POINT              // maximise contact with placed rects/borders
	num_rules                       // sentinel: count of rules, not a rule itself
)
// The Rectangle structure defines position and size of a rectangle.
type Rectangle struct { X, Y, W, H int }

// The Packer structure defines a single rectangular bin.
// freeRects and usedRects together partition the bin area.
type Packer struct {
	width, height int         // bin dimension
	usedRects     []Rectangle // list of occupied space
	freeRects     []Rectangle // list of free space
}
// Create creates a new empty BinPacker structure of given dimension.
func Create(width, height int) *Packer {
	packer := Packer{
		usedRects: make([]Rectangle, 0),
		freeRects: make([]Rectangle, 0),
	}
	// Reset clamps the dimensions and seeds the free-rectangle list.
	packer.Reset(width, height)
	return &packer
}
// Reset removes all rectangles in the packer object and sets the bin size
// to the given dimension. Negative dimensions are clamped to zero.
func (p *Packer) Reset(width, height int) {
	if width < 0 {
		width = 0
	}
	if height < 0 {
		height = 0
	}
	p.width, p.height = width, height
	p.Clear()
}
// Clear removes all items from the list of used rectangles and re-seeds
// the free list with a single rectangle covering the whole bin.
func (p *Packer) Clear() {
	p.usedRects = p.usedRects[:0]
	p.freeRects = p.freeRects[:0]
	addRect(&p.freeRects, len(p.freeRects), Rectangle{0, 0, p.width, p.height})
}
// GetWidth returns the width of the current bin.
// (The Get prefix is un-idiomatic Go but kept for API compatibility.)
func (p *Packer) GetWidth() int {
	return p.width
}

// GetHeight returns the height of the current bin.
func (p *Packer) GetHeight() int {
	return p.height
}

// GetUsedRectanglesLength returns the number of rectangles stored in the current bin.
func (p *Packer) GetUsedRectanglesLength() int {
	return len(p.usedRects)
}
// GetUsedRectangle returns the stored rectangle at the specified index.
// Returns an empty rectangle if the index is out of range.
func (p *Packer) GetUsedRectangle(index int) Rectangle {
	// Off-by-one fix: index == len(p.usedRects) is already out of range,
	// so the guard must be >= (the original "> len" allowed a panic).
	if index < 0 || index >= len(p.usedRects) {
		return Rectangle{}
	}
	return p.usedRects[index]
}
// ShrinkBin attempts to shrink the current bin as much as possible. Use "binary" to specify whether to reduce dimensions by a fixed 50% per iteration.
// After shrinking, all stored rectangles are translated so the occupied
// area starts at the origin. No-op when the bin holds no rectangles.
func (p *Packer) ShrinkBin(binary bool) {
	if len(p.usedRects) == 0 { return }
	minX, minY, maxX, maxY := 1 << 30, 1 << 30, -(1 << 30), -(1 << 30)
	// finding borders: the bounding box of all used rectangles
	for i := 0; i < len(p.usedRects); i++ {
		if p.usedRects[i].X < minX { minX = p.usedRects[i].X }
		if p.usedRects[i].Y < minY { minY = p.usedRects[i].Y }
		if p.usedRects[i].X + p.usedRects[i].W > maxX { maxX = p.usedRects[i].X + p.usedRects[i].W }
		if p.usedRects[i].Y + p.usedRects[i].H > maxY { maxY = p.usedRects[i].Y + p.usedRects[i].H }
	}
	newWidth, newHeight := maxX - minX, maxY - minY
	if binary {
		// attempt to shrink to the next lower power-of-two fraction: halve
		// each current dimension while the content still fits.
		curWidth, curHeight := p.width, p.height
		for newWidth <= (curWidth >> 1) {
			curWidth >>= 1
		}
		newWidth = curWidth
		for newHeight <= (curHeight >> 1) {
			curHeight >>= 1
		}
		newHeight = curHeight
	}
	// adjusting rectangle positions: translate everything so the content
	// starts at (0, 0) after the shrink
	if (newWidth != p.width || newHeight != p.height) && (minX > 0 || minY > 0) {
		for idx := 0; idx < len(p.freeRects); idx++ {
			p.freeRects[idx].X -= minX
			p.freeRects[idx].Y -= minY
		}
		for idx := 0; idx < len(p.usedRects); idx++ {
			p.usedRects[idx].X -= minX
			p.usedRects[idx].Y -= minY
		}
	}
	p.width = newWidth
	p.height = newHeight
}
// Insert inserts a single rectangle to the bin by using the specified packing rule.
// Returns the packed Rectangle structure, or sets "ok" to false if no fit could
// be found or the rule is unknown.
func (p *Packer) Insert(width, height, rule int) (rect Rectangle, ok bool) {
	switch rule {
	case RULE_BEST_SHORT_SIDE_FIT:
		rect = p.findPositionForNewNodeBestShortSideFit(width, height, nil)
	case RULE_BOTTOM_LEFT:
		rect = p.findPositionForNewNodeBottomLeft(width, height, nil)
	case RULE_CONTACT_POINT:
		rect = p.findPositionForNewNodeContactPoint(width, height, nil)
	case RULE_BEST_LONG_SIDE_FIT:
		rect = p.findPositionForNewNodeBestLongSideFit(width, height, nil)
	case RULE_BEST_AREA_FIT:
		rect = p.findPositionForNewNodeBestAreaFit(width, height, nil)
	default:
		return Rectangle{}, false
	}
	// A zero height signals that no free rectangle could host the request.
	if rect.H == 0 {
		return rect, false
	}
	// Delegate to placeRect instead of duplicating its split/prune/store
	// sequence inline (the previous copy was identical line for line).
	p.placeRect(rect)
	return rect, true
}
// GetOccupancy computes the ratio of used surface area to the total bin area.
// Returns 0 for a zero-area bin instead of dividing by zero.
func (p *Packer) GetOccupancy() float32 {
	totalArea := int64(p.width) * int64(p.height)
	if totalArea == 0 {
		return 0
	}
	var usedSurfaceArea int64
	for i := range p.usedRects {
		// Convert before multiplying so large rectangles cannot overflow
		// the native int multiplication (the original converted the product).
		usedSurfaceArea += int64(p.usedRects[i].W) * int64(p.usedRects[i].H)
	}
	return float32(usedSurfaceArea) / float32(totalArea)
}
// Used internally. Computes the placement score for placing the given rectangle with the given method.
// width and height specify the rectangle dimension.
// rule specifies the placement rule.
// rect identifies where the rectangle would be placed if it were placed.
// pri and sec return the primary and secondary placement score.
// ok returns whether the rectangle fits into the bin.
func (p *Packer) scoreRect(width, height, rule int) (rect Rectangle, pri, sec int, ok bool) {
	pri, sec = 1<<30, 1<<30
	// The find* helpers report their scores through this slice. The original
	// code passed a throwaway []int{pri, sec} and never read the results back,
	// so callers always received 1<<30 for both scores.
	scores := []int{pri, sec}
	switch rule {
	case RULE_BEST_SHORT_SIDE_FIT:
		rect = p.findPositionForNewNodeBestShortSideFit(width, height, scores)
	case RULE_BOTTOM_LEFT:
		rect = p.findPositionForNewNodeBottomLeft(width, height, scores)
	case RULE_CONTACT_POINT:
		rect = p.findPositionForNewNodeContactPoint(width, height, scores)
	case RULE_BEST_LONG_SIDE_FIT:
		rect = p.findPositionForNewNodeBestLongSideFit(width, height, scores)
	case RULE_BEST_AREA_FIT:
		rect = p.findPositionForNewNodeBestAreaFit(width, height, scores)
	default:
		rect = Rectangle{}
		return
	}
	pri, sec = scores[0], scores[1]
	// cannot fit the current rectangle
	if rect.H == 0 {
		pri, sec = 1<<30, 1<<30
	} else {
		ok = true
	}
	return
}
// Used internally. Places the given rectangle into the bin: every free
// rectangle it intersects is split, redundant free entries are pruned, and
// the rectangle is appended to the used list.
func (p *Packer) placeRect(rect Rectangle) {
	// Only the entries present at entry are examined; splitFreeNode may
	// append new (non-intersecting) remainders which must not be revisited.
	size := len(p.freeRects)
	for i := 0; i < size; {
		if p.splitFreeNode(i, rect) {
			removeRect(&p.freeRects, i)
			size--
		} else {
			i++
		}
	}
	p.pruneFree()
	addRect(&p.usedRects, len(p.usedRects), rect)
}
// Used internally. Computes the placement score for the "CP" variant: the
// total edge length the candidate placement shares with the bin borders and
// with already-placed rectangles (higher is better).
func (p *Packer) contactPointScoreNode(x, y, width, height int) int {
	score := 0
	if x == 0 || x+width == p.width {
		score += height
	}
	if y == 0 || y+height == p.height {
		score += width
	}
	for i := range p.usedRects {
		used := p.usedRects[i]
		// Vertically adjacent edges contribute their overlapping height.
		if used.X == x+width || used.X+used.W == x {
			score += commonIntervalLength(used.Y, used.Y+used.H, y, y+height)
		}
		// Horizontally adjacent edges contribute their overlapping width.
		if used.Y == y+height || used.Y+used.H == y {
			score += commonIntervalLength(used.X, used.X+used.W, x, x+width)
		}
	}
	return score
}
// Used internally. Implementing RULE_BOTTOM_LEFT packing rule: pick the
// position with the lowest top edge, breaking ties by the smallest X.
// When bestPos is non-nil it receives {topSideY, x} of the winner.
func (p *Packer) findPositionForNewNodeBottomLeft(width, height int, bestPos []int) Rectangle {
	if bestPos == nil {
		bestPos = make([]int, 2)
	}
	bestPos[0] = 1 << 30
	var bestNode Rectangle
	for i := range p.freeRects {
		free := p.freeRects[i]
		// Only the upright (non-flipped) orientation is considered.
		if free.W < width || free.H < height {
			continue
		}
		topSideY := free.Y + height
		if topSideY < bestPos[0] || (topSideY == bestPos[0] && free.X < bestPos[1]) {
			bestNode = Rectangle{free.X, free.Y, width, height}
			bestPos[0], bestPos[1] = topSideY, free.X
		}
	}
	return bestNode
}
// Used internally. Implementing RULE_BEST_SHORT_SIDE_FIT packing rule:
// minimize the smaller leftover side, breaking ties on the larger one.
// When bestFit is non-nil it receives {shortSideFit, longSideFit} of the winner.
func (p *Packer) findPositionForNewNodeBestShortSideFit(width, height int, bestFit []int) Rectangle {
	if bestFit == nil {
		bestFit = make([]int, 2)
	}
	bestFit[0] = 1 << 30
	var bestNode Rectangle
	for i := range p.freeRects {
		free := p.freeRects[i]
		// Only the upright (non-flipped) orientation is considered.
		if free.W < width || free.H < height {
			continue
		}
		dw := free.W - width
		if dw < 0 {
			dw = -dw
		}
		dh := free.H - height
		if dh < 0 {
			dh = -dh
		}
		short, long := dw, dw
		if dh < short {
			short = dh
		}
		if dh > long {
			long = dh
		}
		if short < bestFit[0] || (short == bestFit[0] && long < bestFit[1]) {
			bestNode = Rectangle{free.X, free.Y, width, height}
			bestFit[0], bestFit[1] = short, long
		}
	}
	return bestNode
}
// Used internally. Implementing RULE_BEST_LONG_SIDE_FIT packing rule:
// minimize the larger leftover side, breaking ties on the smaller one.
// When bestFit is non-nil it receives {shortSideFit, longSideFit} of the winner.
func (p *Packer) findPositionForNewNodeBestLongSideFit(width, height int, bestFit []int) Rectangle {
	if bestFit == nil {
		bestFit = make([]int, 2)
	}
	bestFit[1] = 1 << 30
	var bestNode Rectangle
	for i := range p.freeRects {
		free := p.freeRects[i]
		// Only the upright (non-flipped) orientation is considered.
		if free.W < width || free.H < height {
			continue
		}
		dw := free.W - width
		if dw < 0 {
			dw = -dw
		}
		dh := free.H - height
		if dh < 0 {
			dh = -dh
		}
		short, long := dw, dw
		if dh < short {
			short = dh
		}
		if dh > long {
			long = dh
		}
		if long < bestFit[1] || (long == bestFit[1] && short < bestFit[0]) {
			bestNode = Rectangle{free.X, free.Y, width, height}
			bestFit[0], bestFit[1] = short, long
		}
	}
	return bestNode
}
// Used internally. Implementing RULE_BEST_AREA_FIT packing rule: minimize the
// leftover area, breaking ties on the smaller leftover side.
// When bestFit is non-nil it receives {areaFit, shortSideFit} of the winner.
func (p *Packer) findPositionForNewNodeBestAreaFit(width, height int, bestFit []int) Rectangle {
	if bestFit == nil {
		bestFit = make([]int, 2)
	}
	bestFit[0] = 1 << 30
	var bestNode Rectangle
	for i := range p.freeRects {
		free := p.freeRects[i]
		areaFit := free.W*free.H - width*height
		// Only the upright (non-flipped) orientation is considered.
		if free.W < width || free.H < height {
			continue
		}
		dw := free.W - width
		if dw < 0 {
			dw = -dw
		}
		dh := free.H - height
		if dh < 0 {
			dh = -dh
		}
		short := dw
		if dh < short {
			short = dh
		}
		if areaFit < bestFit[0] || (areaFit == bestFit[0] && short < bestFit[1]) {
			bestNode = Rectangle{free.X, free.Y, width, height}
			bestFit[0], bestFit[1] = areaFit, short
		}
	}
	return bestNode
}
// Used internally. Implementing RULE_CONTACT_POINT packing rule: maximize the
// shared edge length with the bin borders and already-placed rectangles.
// When bestScore is non-nil its first element receives the winning score.
func (p *Packer) findPositionForNewNodeContactPoint(width, height int, bestScore []int) Rectangle {
	if bestScore == nil {
		bestScore = make([]int, 2)
	}
	bestScore[0] = -1
	var bestNode Rectangle
	for i := range p.freeRects {
		free := p.freeRects[i]
		// Only the upright (non-flipped) orientation is considered.
		if free.W < width || free.H < height {
			continue
		}
		if score := p.contactPointScoreNode(free.X, free.Y, width, height); score > bestScore[0] {
			bestNode = Rectangle{free.X, free.Y, width, height}
			bestScore[0] = score
		}
	}
	return bestNode
}
// Used internally. Returns true if the free node was split.
// The intersecting free rectangle at freeIdx is replaced by up to four
// remainders (above, below, left of and right of usedNode); the remainders
// are appended to freeRects and the caller removes the original entry.
func (p *Packer) splitFreeNode(freeIdx int, usedNode Rectangle) bool {
	// Test with SAT if the rectangles even intersect.
	// Strict inequalities: merely touching rectangles do not count.
	if usedNode.X >= p.freeRects[freeIdx].X + p.freeRects[freeIdx].W ||
		usedNode.X + usedNode.W <= p.freeRects[freeIdx].X ||
		usedNode.Y >= p.freeRects[freeIdx].Y + p.freeRects[freeIdx].H ||
		usedNode.Y + usedNode.H <= p.freeRects[freeIdx].Y {
		return false
	}
	if usedNode.X < p.freeRects[freeIdx].X + p.freeRects[freeIdx].W && usedNode.X + usedNode.W > p.freeRects[freeIdx].X {
		// New node at the top side of the used node.
		if usedNode.Y > p.freeRects[freeIdx].Y && usedNode.Y < p.freeRects[freeIdx].Y + p.freeRects[freeIdx].H {
			newNode := Rectangle{p.freeRects[freeIdx].X,
				p.freeRects[freeIdx].Y,
				p.freeRects[freeIdx].W,
				usedNode.Y - p.freeRects[freeIdx].Y}
			addRect(&p.freeRects, len(p.freeRects), newNode)
		}
		// New node at the bottom side of the used node.
		if usedNode.Y + usedNode.H < p.freeRects[freeIdx].Y + p.freeRects[freeIdx].H {
			newNode := Rectangle{p.freeRects[freeIdx].X,
				usedNode.Y + usedNode.H,
				p.freeRects[freeIdx].W,
				p.freeRects[freeIdx].Y + p.freeRects[freeIdx].H - (usedNode.Y + usedNode.H)}
			addRect(&p.freeRects, len(p.freeRects), newNode)
		}
	}
	if usedNode.Y < p.freeRects[freeIdx].Y + p.freeRects[freeIdx].H && usedNode.Y + usedNode.H > p.freeRects[freeIdx].Y {
		// New node at the left side of the used node.
		if usedNode.X > p.freeRects[freeIdx].X && usedNode.X < p.freeRects[freeIdx].X + p.freeRects[freeIdx].W {
			newNode := Rectangle{p.freeRects[freeIdx].X,
				p.freeRects[freeIdx].Y,
				usedNode.X - p.freeRects[freeIdx].X,
				p.freeRects[freeIdx].H}
			addRect(&p.freeRects, len(p.freeRects), newNode)
		}
		// New node at the right side of the used node.
		if usedNode.X + usedNode.W < p.freeRects[freeIdx].X + p.freeRects[freeIdx].W {
			newNode := Rectangle{usedNode.X + usedNode.W,
				p.freeRects[freeIdx].Y,
				p.freeRects[freeIdx].X + p.freeRects[freeIdx].W - (usedNode.X + usedNode.W),
				p.freeRects[freeIdx].H}
			addRect(&p.freeRects, len(p.freeRects), newNode)
		}
	}
	return true
}
// Used internally. Goes through the free rectangle list and removes any redundant entries.
// An entry is redundant when it is fully contained in another free rectangle;
// overlapping but non-contained entries are deliberately kept.
func (p *Packer) pruneFree() {
	// Go through each pair and remove any rectangle that is redundant.
	// size1/size2 mirror len(p.freeRects) and are decremented manually on each
	// removal; the indexes are stepped back so the element shifted into the
	// removed slot is not skipped.
	for i, size1 := 0, len(p.freeRects); i < size1; i++ {
		for j, size2 := i+1, len(p.freeRects); j < size2; j++ {
			if (isContainedIn(p.freeRects[i], p.freeRects[j])) {
				// rect i vanished, so the inner scan over it is void;
				// restart the outer loop at the shifted-in element.
				removeRect(&p.freeRects, i)
				i--
				size1--
				size2--
				break;
			}
			if (isContainedIn(p.freeRects[j], p.freeRects[i])) {
				removeRect(&p.freeRects, j)
				j--
				size1--
				size2--
			}
		}
	}
}
// Used internally. Returns 0 if the two intervals i1 and i2 are disjoint, or
// the length of their overlap otherwise.
func commonIntervalLength(i1start, i1end, i2start, i2end int) int {
	start, end := i1start, i1end
	if i2start > start {
		start = i2start
	}
	if i2end < end {
		end = i2end
	}
	if end < start {
		return 0
	}
	return end - start
}
// Used internally. Returns true if a is contained in b (edges may coincide).
func isContainedIn(a, b Rectangle) bool {
	if a.X < b.X || a.Y < b.Y {
		return false
	}
	return a.X+a.W <= b.X+b.W && a.Y+a.H <= b.Y+b.H
}
// Used internally. Adds a new rectangle to the rectangle list at the specified
// position, shifting later entries right. index is clamped into [0, len(*list)].
func addRect(list *[]Rectangle, index int, rect Rectangle) {
	if index < 0 {
		index = 0
	}
	if index > len(*list) {
		index = len(*list)
	}
	// Grow by one, then use the built-in copy instead of the previous
	// element-by-element shift loop.
	*list = append(*list, Rectangle{})
	copy((*list)[index+1:], (*list)[index:])
	(*list)[index] = rect
}
// Used internally. Removes the rectangle from the given list at the specified position.
func removeRect(list *[]Rectangle, index int) {
if index < 0 { index = 0 }
if index >= len(*list) { index = len(*list) }
for pos := index + 1; pos < len(*list); pos++ {
(*list)[pos - 1] = (*list)[pos]
}
*list = (*list)[:len(*list) - 1]
} | binpack2d.go | 0.914972 | 0.732687 | binpack2d.go | starcoder |
package connect
import "encoding/json"
// HistogramType selects a fixed-length sampling window for histogram queries.
type HistogramType string

const (
	HistogramOneDay   HistogramType = "HistogramOneDay"   // Data interval: 5min, Max samples: 288
	HistogramTwoHours HistogramType = "HistogramTwoHours" // Data interval: 20sec, Max samples: 360
	HistogramOneWeek  HistogramType = "HistogramOneWeek"  // Data interval: 30min, Max samples: 336
	HistogramOneMonth HistogramType = "HistogramOneMonth" // Data interval: 2h, Max samples: 372
)
// HistogramIntervalType selects the sampling interval for incremental
// histogram queries (see SystemHealthGetInc).
type HistogramIntervalType string

const (
	HistogramInterval5m  HistogramIntervalType = "HistogramInterval5m"  // Data interval: 5min, Max samples: 288, Length 1day
	HistogramInterval20s HistogramIntervalType = "HistogramInterval20s" // Data interval: 20sec, Max samples: 360, Length 2Hours
	HistogramInterval30m HistogramIntervalType = "HistogramInterval30m" // Data interval: 30min, Max samples: 336, Length 1Week
	HistogramInterval2h  HistogramIntervalType = "HistogramInterval2h"  // Data interval: 2h, Max samples: 372, Length 1Month
)
// PercentHistogram - 0-100%, sample count and rate depends on requested type and is the same as in ActiveHost histogram
type PercentHistogram []float64
// SystemHealthData carries cpu/memory usage histograms plus current disk
// capacity figures, as returned by the SystemHealth API.
type SystemHealthData struct {
	Cpu         PercentHistogram `json:"cpu"`
	Memory      PercentHistogram `json:"memory"`
	MemoryTotal float64          `json:"memoryTotal"` // memory histogram has to have fixed maximum to this value
	DiskTotal   float64          `json:"diskTotal"`   // total number of bytes on data partition (install dir on windows)
	DiskFree    float64          `json:"diskFree"`    // free bytes on the same partition
}
// SystemHealthGet retrieves the system health histograms for the requested
// histogram window via the SystemHealth.get RPC.
func (s *ServerConnection) SystemHealthGet(histogramType HistogramType) (*SystemHealthData, error) {
	params := struct {
		Type HistogramType `json:"type"`
	}{Type: histogramType}
	data, err := s.CallRaw("SystemHealth.get", params)
	if err != nil {
		return nil, err
	}
	var response struct {
		Result struct {
			Data SystemHealthData `json:"data"`
		} `json:"result"`
	}
	err = json.Unmarshal(data, &response)
	return &response.Result.Data, err
}
// SystemHealthGetInc -
func (s *ServerConnection) SystemHealthGetInc(histogramIntervalType HistogramIntervalType, startSampleTime DateTimeStamp) (*SystemHealthData, *DateTimeStamp, error) {
params := struct {
Type HistogramIntervalType `json:"type"`
StartSampleTime DateTimeStamp `json:"startSampleTime"`
}{histogramIntervalType, startSampleTime}
data, err := s.CallRaw("SystemHealth.getInc", params)
if err != nil {
return nil, nil, err
}
systemHealthData := struct {
Result struct {
Data SystemHealthData `json:"data"`
SampleTime DateTimeStamp `json:"sampleTime"`
} `json:"result"`
}{}
err = json.Unmarshal(data, &systemHealthData)
return &systemHealthData.Result.Data, &systemHealthData.Result.SampleTime, err
} | systemHealth.go | 0.693473 | 0.649064 | systemHealth.go | starcoder |
package ecs
// Mask is the format of the bitmask: a 64-bit word where bit i corresponds
// to the component/entity ID i.
type Mask uint64
// MaskTotalBits is the size of Mask in bits.
const MaskTotalBits = 64
// nibbleToBitsSet maps every 4-bit value to its population count (number of set bits).
var nibbleToBitsSet = [16]uint{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4}
// NewMask creates a new bitmask from a list of IDs
// If any ID is bigger or equal MaskTotalBits, it'll not be added to the mask
// (the shift then yields a zero mask, so the OR below is a no-op).
func NewMask(ids ...ID) Mask {
	var m Mask
	for _, id := range ids {
		m |= Mask(1) << id
	}
	return m
}
// Get reports if bit index defined by ID is true or false
// The return will be always false for bit >= MaskTotalBits.
func (e Mask) Get(bit ID) bool {
	mask := Mask(1) << bit
	// For bit >= MaskTotalBits the shift yields mask == 0, and the original
	// comparison e&mask == mask degenerated to 0 == 0, wrongly returning true
	// in violation of the documented contract. Guard against the zero mask.
	return mask != 0 && e&mask == mask
}
// Set sets the state of bit index to true or false
// This function has no effect for bit >= MaskTotalBits
// (the shifted mask is zero, so both branches are no-ops).
func (e *Mask) Set(bit ID, value bool) {
	mask := Mask(1) << bit
	if value {
		*e |= mask
	} else {
		*e &^= mask
	}
}
// Reset change the state of all bits to false.
func (e *Mask) Reset() {
	*e = 0
}
// Contains reports if other mask is a subset of this mask,
// i.e. no bit set in other is missing from e.
func (e Mask) Contains(other Mask) bool {
	return other&^e == 0
}
// TotalBitsSet returns how many bits are set in this mask,
// summing the precomputed popcount of each 4-bit nibble.
func (e Mask) TotalBitsSet() uint {
	total := uint(0)
	for ; e != 0; e >>= 4 {
		total += nibbleToBitsSet[e&0xf]
	}
	return total
}
// NextBitSet returns the index of the next bit set in range [startingFromBit, MaskTotalBits]
// If no bit set is found within this range, the return is MaskTotalBits
// The offset at startingFromBit is checked too, so remember to use the last index found + 1 to find the next bit set
func (e Mask) NextBitSet(startingFromBit uint) uint {
	count := startingFromBit
	// Discard everything below the starting offset.
	e >>= count
	if e == 0 {
		return MaskTotalBits
	}
	if e&1 != 0 {
		return count
	}
	// Binary search for the lowest set bit: each step that finds the low
	// half empty shifts it away and adds its width to the running offset.
	count += 1
	if e&0xffffffff == 0 {
		e >>= 32
		count += 32
	}
	if e&0xffff == 0 {
		e >>= 16
		count += 16
	}
	if e&0xff == 0 {
		e >>= 8
		count += 8
	}
	if e&0xf == 0 {
		e >>= 4
		count += 4
	}
	if e&0x3 == 0 {
		e >>= 2
		count += 2
	}
	// count was pre-incremented above; undo that when the low bit is set.
	count -= uint(e & 1)
	return count
}
package twiml
import (
"fmt"
"regexp"
"strings"
)
// Validate aggregates the results of individual validation functions and
// returns true when all validation functions pass (vacuously true for none).
func Validate(vf ...bool) bool {
	for _, ok := range vf {
		if !ok {
			return false
		}
	}
	return true
}
// OneOf validates that a field is one of the options provided.
func OneOf(field string, options ...string) bool {
	for _, option := range options {
		if option == field {
			return true
		}
	}
	return false
}
// IntBetween validates that a field is an integer between high and low (inclusive).
func IntBetween(field int, high int, low int) bool {
	return low <= field && field <= high
}
// Required validates that a field is not the empty string.
func Required(field string) bool {
	return field != ""
}
// OneOfOpt validates that a field is one of the options provided or the
// empty string (for optional fields).
func OneOfOpt(field string, options ...string) bool {
	return field == "" || OneOf(field, options...)
}
// AllowedMethod validates that a method is either of type GET or POST
// (or empty string, which callers default to POST).
func AllowedMethod(field string) bool {
	switch field {
	case "", "GET", "POST":
		return true
	default:
		return false
	}
}
// numericRe matches non-empty strings consisting solely of ASCII digits.
// Compiled once at package init instead of on every call; the pattern is a
// valid constant, so the per-call error branch of the original was dead code.
var numericRe = regexp.MustCompile("^[0-9]+$")

// Numeric validates that a string contains only digits 0-9 (and is non-empty).
func Numeric(field string) bool {
	return numericRe.MatchString(field)
}
// numericOrWaitRe matches non-empty strings of digits 0-9 and the wait key 'w'.
// Compiled once at package init instead of on every call; the pattern is a
// valid constant, so the per-call error branch of the original was dead code.
var numericOrWaitRe = regexp.MustCompile("^[0-9w]+$")

// NumericOrWait validates that a string contains only digits 0-9 or the wait key 'w'.
func NumericOrWait(field string) bool {
	return numericOrWaitRe.MatchString(field)
}
// NumericOpt validates that the field is numeric or the empty string
// (for optional fields).
func NumericOpt(field string) bool {
	return field == "" || Numeric(field)
}
// AllowedLanguage validates that the combination of speaker and language is allowable
func AllowedLanguage(speaker string, language string) bool {
switch speaker {
case Man, Woman:
return OneOfOpt(language, English, French, German, Spanish, EnglishUK)
case Alice:
return OneOfOpt(language,
DanishDenmark,
GermanGermany,
EnglishAustralia,
EnglishCanada,
EnglishUK,
EnglishIndia,
EnglishUSA,
SpanishCatalan,
SpanishSpain,
SpanishMexico,
FinishFinland,
FrenchCanada,
FrenchFrance,
ItalianItaly,
JapaneseJapan,
KoreanKorea,
NorwegianNorway,
DutchNetherlands,
PolishPoland,
PortugueseBrazil,
PortuguesePortugal,
RussianRussia,
SwedishSweden,
ChineseMandarin,
ChineseCantonese,
ChineseTaiwanese,
)
default:
return OneOfOpt(language, English, French, German, Spanish, EnglishUK)
}
}
// constructCallbackEventValidator builds a regexp that accepts any
// space-separated sequence of the given callback event names.
func constructCallbackEventValidator(eventNames []string) *regexp.Regexp {
	eventPatterns := make([]string, 0, len(eventNames))
	for _, eventName := range eventNames {
		eventPatterns = append(eventPatterns, fmt.Sprintf("%s\\s?", eventName))
	}
	return regexp.MustCompile(fmt.Sprintf("^(%s)+$", strings.Join(eventPatterns, "|")))
}
var (
	// SipCallbackEvents validator for Sip TwiML block
	SipCallbackEvents = constructCallbackEventValidator([]string{"initiated", "ringing", "answered", "completed"})
	// ConferenceCallbackEvents validator for Conference TwiML block
	ConferenceCallbackEvents = constructCallbackEventValidator([]string{"start", "end", "join", "leave", "mute", "hold", "speaker"})
)
// AllowedCallbackEvent validates that the CallbackEvent is one of the allowed options
func AllowedCallbackEvent(events string, callbackValidator *regexp.Regexp) bool {
if events == "" {
return true
}
return callbackValidator.MatchString(events)
} | validate.go | 0.690246 | 0.423041 | validate.go | starcoder |
package diffence
// defaultRulesJSON is the built-in detection ruleset, embedded as a raw JSON
// array. Each rule object carries:
//   part        - which portion of the path to test ("filename", "extension" or "path")
//   type        - "match" for exact equality, "regex" for a pattern match
//   pattern     - the literal string or regular expression to apply
//   caption     - short human-readable description of the finding
//   description - optional longer explanation (null when absent)
const (
	defaultRulesJSON = `[
{
"part": "filename",
"type": "regex",
"pattern": "\\A.*_rsa\\z",
"caption": "Private SSH key",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A.*_dsa\\z",
"caption": "Private SSH key",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A.*_ed25519\\z",
"caption": "Private SSH key",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A.*_ecdsa\\z",
"caption": "Private SSH key",
"description": null
},
{
"part": "path",
"type": "regex",
"pattern": "\\.?ssh/config\\z",
"caption": "SSH configuration file",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "pem",
"caption": "Potential cryptographic private key",
"description": null
},
{
"part": "extension",
"type": "regex",
"pattern": "\\Akey(pair)?\\z",
"caption": "Potential cryptographic private key",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "pkcs12",
"caption": "Potential cryptographic key bundle",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "pfx",
"caption": "Potential cryptographic key bundle",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "p12",
"caption": "Potential cryptographic key bundle",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "asc",
"caption": "Potential cryptographic key bundle",
"description": null
},
{
"part": "filename",
"type": "match",
"pattern": "otr.private_key",
"caption": "Pidgin OTR private key",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?(bash_|zsh_|z)?history\\z",
"caption": "Shell command history file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?mysql_history\\z",
"caption": "MySQL client command history file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?psql_history\\z",
"caption": "PostgreSQL client command history file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?pgpass\\z",
"caption": "PostgreSQL password file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?irb_history\\z",
"caption": "Ruby IRB console history file",
"description": null
},
{
"part": "path",
"type": "regex",
"pattern": "\\.?purple\\/accounts\\.xml\\z",
"caption": "Pidgin chat client account configuration file",
"description": null
},
{
"part": "path",
"type": "regex",
"pattern": "\\.?xchat2?\\/servlist_?\\.conf\\z",
"caption": "Hexchat/XChat IRC client server list configuration file",
"description": null
},
{
"part": "path",
"type": "regex",
"pattern": "\\.?irssi\\/config\\z",
"caption": "Irssi IRC client configuration file",
"description": null
},
{
"part": "path",
"type": "regex",
"pattern": "\\.?recon-ng\\/keys\\.db\\z",
"caption": "Recon-ng web reconnaissance framework API key database",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?dbeaver-data-sources.xml\\z",
"caption": "DBeaver SQL database manager configuration file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?muttrc\\z",
"caption": "Mutt e-mail client configuration file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?s3cfg\\z",
"caption": "S3cmd configuration file",
"description": null
},
{
"part": "path",
"type": "regex",
"pattern": "\\.?aws/credentials\\z",
"caption": "AWS CLI credentials file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?trc\\z",
"caption": "T command-line Twitter client configuration file",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "ovpn",
"caption": "OpenVPN client configuration file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?gitrobrc\\z",
"caption": "Well, this is awkward... Gitrob configuration file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?(bash|zsh)rc\\z",
"caption": "Shell configuration file",
"description": "Shell configuration files might contain information such as server hostnames, passwords and API keys."
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?(bash_|zsh_)?profile\\z",
"caption": "Shell profile configuration file",
"description": "Shell configuration files might contain information such as server hostnames, passwords and API keys."
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?(bash_|zsh_)?aliases\\z",
"caption": "Shell command alias configuration file",
"description": "Shell configuration files might contain information such as server hostnames, passwords and API keys."
},
{
"part": "filename",
"type": "match",
"pattern": "secret_token.rb",
"caption": "Ruby On Rails secret token configuration file",
"description": "If the Rails secret token is known, it can allow for remote code execution. (http://www.exploit-db.com/exploits/27527/)"
},
{
"part": "filename",
"type": "match",
"pattern": "omniauth.rb",
"caption": "OmniAuth configuration file",
"description": "The OmniAuth configuration file might contain client application secrets."
},
{
"part": "filename",
"type": "match",
"pattern": "carrierwave.rb",
"caption": "Carrierwave configuration file",
"description": "Can contain credentials for online storage systems such as Amazon S3 and Google Storage."
},
{
"part": "filename",
"type": "match",
"pattern": "schema.rb",
"caption": "Ruby On Rails database schema file",
"description": "Contains information on the database schema of a Ruby On Rails application."
},
{
"part": "filename",
"type": "match",
"pattern": "database.yml",
"caption": "Potential Ruby On Rails database configuration file",
"description": "Might contain database credentials."
},
{
"part": "filename",
"type": "match",
"pattern": "settings.py",
"caption": "Django configuration file",
"description": "Might contain database credentials, online storage system credentials, secret keys, etc."
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A(.*)?config(\\.inc)?\\.php\\z",
"caption": "PHP configuration file",
"description": "Might contain credentials and keys."
},
{
"part": "extension",
"type": "match",
"pattern": "kdb",
"caption": "KeePass password manager database file",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "agilekeychain",
"caption": "1Password password manager database file",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "keychain",
"caption": "Apple Keychain database file",
"description": null
},
{
"part": "extension",
"type": "regex",
"pattern": "\\Akey(store|ring)\\z",
"caption": "GNOME Keyring database file",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "log",
"caption": "Log file",
"description": "Log files might contain information such as references to secret HTTP endpoints, session IDs, user information, passwords and API keys."
},
{
"part": "extension",
"type": "match",
"pattern": "pcap",
"caption": "Network traffic capture file",
"description": null
},
{
"part": "extension",
"type": "regex",
"pattern": "\\Asql(dump)?\\z",
"caption": "SQL dump file",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "gnucash",
"caption": "GnuCash database file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "backup",
"caption": "Contains word: backup",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "dump",
"caption": "Contains word: dump",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "password",
"caption": "Contains word: password",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "credential",
"caption": "Contains word: credential",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "secret",
"caption": "Contains word: secret",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "private.*key",
"caption": "Contains words: private, key",
"description": null
},
{
"part": "filename",
"type": "match",
"pattern": "jenkins.plugins.publish_over_ssh.BapSshPublisherPlugin.xml",
"caption": "Jenkins publish over SSH plugin file",
"description": null
},
{
"part": "filename",
"type": "match",
"pattern": "credentials.xml",
"caption": "Potential Jenkins credentials file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?htpasswd\\z",
"caption": "Apache htpasswd file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A(\\.|_)?netrc\\z",
"caption": "Configuration file for auto-login process",
"description": "Might contain username and password."
},
{
"part": "extension",
"type": "match",
"pattern": "kwallet",
"caption": "KDE Wallet Manager database file",
"description": null
},
{
"part": "filename",
"type": "match",
"pattern": "LocalSettings.php",
"caption": "Potential MediaWiki configuration file",
"description": null
},
{
"part": "extension",
"type": "match",
"pattern": "tblk",
"caption": "Tunnelblick VPN configuration file",
"description": null
},
{
"part": "path",
"type": "regex",
"pattern": "\\.?gem/credentials\\z",
"caption": "Rubygems credentials file",
"description": "Might contain API key for a rubygems.org account."
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A*\\.pubxml(\\.user)?\\z",
"caption": "Potential MSBuild publish profile",
"description": null
},
{
"part": "filename",
"type": "match",
"pattern": "Favorites.plist",
"caption": "Sequel Pro MySQL database manager bookmark file",
"description": null
},
{
"part": "filename",
"type": "match",
"pattern": "configuration.user.xpl",
"caption": "Little Snitch firewall configuration file",
"description": "Contains traffic rules for applications"
},
{
"part": "extension",
"type": "match",
"pattern": "dayone",
"caption": "Day One journal file",
"description": null
},
{
"part": "filename",
"type": "match",
"pattern": "journal.txt",
"caption": "Potential jrnl journal file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?tugboat\\z",
"caption": "Tugboat DigitalOcean management tool configuration",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?git-credentials\\z",
"caption": "git-credential-store helper credentials file",
"description": null
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?gitconfig\\z",
"caption": "Git configuration file",
"description": null
},
{
"part": "filename",
"type": "match",
"pattern": "knife.rb",
"caption": "Chef Knife configuration file",
"description": "Might contain references to Chef servers"
},
{
"part": "path",
"type": "regex",
"pattern": "\\.?chef/(.*)\\.pem\\z",
"caption": "Chef private key",
"description": "Can be used to authenticate against Chef servers"
},
{
"part": "filename",
"type": "match",
"pattern": "proftpdpasswd",
"caption": "cPanel backup ProFTPd credentials file",
"description": "Contains usernames and password hashes for FTP accounts"
},
{
"part": "filename",
"type": "match",
"pattern": "robomongo.json",
"caption": "Robomongo MongoDB manager configuration file",
"description": "Might contain credentials for MongoDB databases"
},
{
"part": "filename",
"type": "match",
"pattern": "filezilla.xml",
"caption": "FileZilla FTP configuration file",
"description": "Might contain credentials for FTP servers"
},
{
"part": "filename",
"type": "match",
"pattern": "recentservers.xml",
"caption": "FileZilla FTP recent servers file",
"description": "Might contain credentials for FTP servers"
},
{
"part": "filename",
"type": "match",
"pattern": "ventrilo_srv.ini",
"caption": "Ventrilo server configuration file",
"description": "Might contain passwords"
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?dockercfg\\z",
"caption": "Docker configuration file",
"description": "Might contain credentials for public or private Docker registries"
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?npmrc\\z",
"caption": "NPM configuration file",
"description": "Might contain credentials for NPM registries"
},
{
"part": "filename",
"type": "match",
"pattern": "terraform.tfvars",
"caption": "Terraform variable config file",
"description": "Might contain credentials for terraform providers"
},
{
"part": "filename",
"type": "regex",
"pattern": "\\A\\.?env\\z",
"caption": "Environment configuration file",
"description": null
}
]
`
)
package mvt
import (
"log"
"github.com/go-spatial/geom/cmp"
"github.com/go-spatial/geom/winding"
"github.com/go-spatial/geom"
)
// PrepareGeo converts the geometry's coordinates to tile pixel coordinates. tile should be the
// extent of the tile, in the same projection as geo. pixelExtent is the dimension of the
// (square) tile in pixels usually 4096, see DefaultExtent.
// This function treats the tile extent elements as left, top, right, bottom. This is fine
// when working with a north-positive projection such as lat/long (epsg:4326)
// and web mercator (epsg:3857), but a south-positive projection (ie. epsg:2054) or west-postive
// projection would then flip the geomtery. To properly render these coordinate systems, simply
// swap the X's or Y's in the tile extent.
//
// Unsupported geometry types (and a nil *geom.MultiPolygon) yield a nil result.
func PrepareGeo(geo geom.Geometry, tile *geom.Extent, pixelExtent float64) geom.Geometry {
	switch g := geo.(type) {
	case geom.Point:
		return preparept(g, tile, pixelExtent)
	case geom.MultiPoint:
		pts := g.Points()
		if len(pts) == 0 {
			return nil
		}
		mp := make(geom.MultiPoint, len(pts))
		for i, pt := range g {
			mp[i] = preparept(pt, tile, pixelExtent)
		}
		return mp
	case geom.LineString:
		return preparelinestr(g, tile, pixelExtent)
	case geom.MultiLineString:
		var ml geom.MultiLineString
		// preparelinestr may drop a line entirely (nil); only keep non-empty results.
		for _, l := range g.LineStrings() {
			nl := preparelinestr(l, tile, pixelExtent)
			if len(nl) > 0 {
				ml = append(ml, nl)
			}
		}
		return ml
	case geom.Polygon:
		return preparePolygon(g, tile, pixelExtent)
	case geom.MultiPolygon:
		var mp geom.MultiPolygon
		// Polygons that collapse to nothing in pixel space are skipped.
		for _, p := range g.Polygons() {
			np := preparePolygon(p, tile, pixelExtent)
			if len(np) > 0 {
				mp = append(mp, np)
			}
		}
		return mp
	case *geom.MultiPolygon:
		if g == nil {
			return nil
		}
		// Same as the value case above, but preserves the pointer form for the caller.
		var mp geom.MultiPolygon
		for _, p := range g.Polygons() {
			np := preparePolygon(p, tile, pixelExtent)
			if len(np) > 0 {
				mp = append(mp, np)
			}
		}
		return &mp
	}
	return nil
}
// preparept maps a single point into tile pixel space. X scales
// left-to-right across the tile extent; Y is measured down from the
// tile's top edge (screen-space convention).
func preparept(g geom.Point, tile *geom.Extent, pixelExtent float64) geom.Point {
	fracX := (g.X() - tile.MinX()) / tile.XSpan()
	fracY := (tile.MaxY() - g.Y()) / tile.YSpan()
	return geom.Point{fracX * pixelExtent, fracY * pixelExtent}
}
// preparelinestr converts the points of a linestring to tile pixel
// coordinates, dropping consecutive points that collapse onto the same
// pixel after precision truncation. It returns nil when fewer than two
// points remain, since that no longer constitutes a line.
func preparelinestr(g geom.LineString, tile *geom.Extent, pixelExtent float64) (ls geom.LineString) {
	pts := g
	if len(pts) < 2 {
		// Not enough points to make a line.
		return nil
	}
	ls = make(geom.LineString, 0, len(pts))
	for i := 0; i < len(pts); i++ {
		npt := preparept(pts[i], tile, pixelExtent)
		if i != 0 && cmp.HiCMP.GeomPointEqual(ls[len(ls)-1], npt) {
			// skip points that are equivalent due to precision truncation
			continue
		}
		// Reuse the already-computed point; the original recomputed
		// preparept for the same input here.
		ls = append(ls, npt)
	}
	if len(ls) < 2 {
		return nil
	}
	return ls
}
func preparePolygon(g geom.Polygon, tile *geom.Extent, pixelExtent float64) (p geom.Polygon) {
lines := geom.MultiLineString(g.LinearRings())
p = make(geom.Polygon, 0, len(lines))
if len(lines) == 0 {
return p
}
for _, line := range lines.LineStrings() {
if len(line) < 2 {
if debug {
// skip lines that have been reduced to less than 2 points.
log.Println("skipping line 2", line, len(line))
}
continue
}
ln := preparelinestr(line, tile, pixelExtent)
if cmp.HiCMP.GeomPointEqual(ln[0], ln[len(ln)-1]) {
// first and last is the same, need to remove the last point.
ln = ln[:len(ln)-1]
}
if len(ln) < 2 {
if debug {
// skip lines that have been reduced to less than 2 points.
log.Println("skipping line 2", line, len(ln))
}
continue
}
p = append(p, ln)
}
order := winding.Order{
YPositiveDown: false,
}
return geom.Polygon(order.RectifyPolygon([][][2]float64(p)))
} | vendor/github.com/go-spatial/geom/encoding/mvt/prepare.go | 0.684264 | 0.699819 | prepare.go | starcoder |
package block
import (
"github.com/df-mc/dragonfly/server/block/cube"
"github.com/df-mc/dragonfly/server/entity"
"github.com/df-mc/dragonfly/server/item"
"github.com/df-mc/dragonfly/server/world"
"github.com/df-mc/dragonfly/server/world/particle"
"github.com/go-gl/mathgl/mgl64"
"math/rand"
)
// DoubleFlower is a two block high flower consisting of an upper and lower part.
type DoubleFlower struct {
	transparent
	empty

	// UpperPart is set if the plant is the upper part.
	UpperPart bool

	// Type is the type of the double plant.
	Type DoubleFlowerType
}

// FlammabilityInfo returns the block's fire-spread characteristics
// (encouragement 60, flammability 100, destroyed by lava).
func (d DoubleFlower) FlammabilityInfo() FlammabilityInfo {
	return newFlammabilityInfo(60, 100, true)
}
// BoneMeal drops a copy of the flower as an item entity at the block's
// position with a small random horizontal velocity, and reports that the
// bone meal was consumed.
func (d DoubleFlower) BoneMeal(pos cube.Pos, w *world.World) bool {
	itemEntity := entity.NewItem(item.NewStack(d, 1), pos.Vec3Centre())
	itemEntity.SetVelocity(mgl64.Vec3{rand.Float64()*0.2 - 0.1, 0.2, rand.Float64()*0.2 - 0.1})
	w.AddEntity(itemEntity)
	return true
}
// NeighbourUpdateTick breaks the flower when its other half is missing or
// mismatched, or (for the lower part) when the block beneath no longer
// supports vegetation.
func (d DoubleFlower) NeighbourUpdateTick(pos, _ cube.Pos, w *world.World) {
	if d.UpperPart {
		// Upper half: require a matching lower half directly below.
		if bottom, ok := w.Block(pos.Side(cube.FaceDown)).(DoubleFlower); !ok || bottom.Type != d.Type || bottom.UpperPart {
			w.SetBlock(pos, nil, nil)
			w.AddParticle(pos.Vec3Centre(), particle.BlockBreak{Block: d})
		}
		return
	}
	// Lower half: require a matching upper half directly above.
	if upper, ok := w.Block(pos.Side(cube.FaceUp)).(DoubleFlower); !ok || upper.Type != d.Type || !upper.UpperPart {
		w.SetBlock(pos, nil, nil)
		w.AddParticle(pos.Vec3Centre(), particle.BlockBreak{Block: d})
		return
	}
	// Lower half also needs a supporting block underneath.
	if !supportsVegetation(d, w.Block(pos.Side(cube.FaceDown))) {
		w.SetBlock(pos, nil, nil)
		w.AddParticle(pos.Vec3Centre(), particle.BlockBreak{Block: d})
	}
}
// UseOnBlock places the lower half at the targeted position and the upper
// half directly above it, provided both positions are replaceable and the
// block below supports vegetation.
func (d DoubleFlower) UseOnBlock(pos cube.Pos, face cube.Face, _ mgl64.Vec3, w *world.World, user item.User, ctx *item.UseContext) bool {
	pos, _, used := firstReplaceable(w, pos, face, d)
	if !used {
		return false
	}
	// The block above must also be free for the upper half.
	if !replaceableWith(w, pos.Side(cube.FaceUp), d) {
		return false
	}
	if !supportsVegetation(d, w.Block(pos.Side(cube.FaceDown))) {
		return false
	}

	place(w, pos, d, user, ctx)
	place(w, pos.Side(cube.FaceUp), DoubleFlower{Type: d.Type, UpperPart: true}, user, ctx)
	return placed(ctx)
}
// BreakInfo reports that the flower breaks instantly with any tool and
// drops itself.
func (d DoubleFlower) BreakInfo() BreakInfo {
	return newBreakInfo(0, alwaysHarvestable, nothingEffective, oneOf(d))
}

// HasLiquidDrops reports that the flower drops its item when destroyed by
// flowing liquid.
func (d DoubleFlower) HasLiquidDrops() bool {
	return true
}

// EncodeItem returns the item ID and metadata value (the flower type) used
// over the network.
func (d DoubleFlower) EncodeItem() (name string, meta int16) {
	return "minecraft:double_plant", int16(d.Type.Uint8())
}

// EncodeBlock returns the block ID and state properties used over the
// network.
func (d DoubleFlower) EncodeBlock() (string, map[string]any) {
	return "minecraft:double_plant", map[string]any{"double_plant_type": d.Type.String(), "upper_block_bit": d.UpperPart}
}
// allDoubleFlowers ...
func allDoubleFlowers() (b []world.Block) {
for _, d := range DoubleFlowerTypes() {
b = append(b, DoubleFlower{Type: d, UpperPart: true})
b = append(b, DoubleFlower{Type: d, UpperPart: false})
}
return
} | server/block/double_flower.go | 0.651909 | 0.438785 | double_flower.go | starcoder |
package cvss3
import "fmt"
// BaseMetrics groups the eight CVSS v3 base metrics that together
// determine a vulnerability's base score.
type BaseMetrics struct {
	AttackVector
	AttackComplexity
	PrivilegesRequired
	UserInteraction
	Scope
	Confidentiality
	Integrity
	Availability
}
// AttackVector is the CVSS v3 Attack Vector (AV) base metric. The zero
// value means "undefined".
type AttackVector int

// Attack Vector values; index order matches the weight/code tables below.
const (
	AttackVectorNetwork AttackVector = iota + 1
	AttackVectorAdjecent
	AttackVectorLocal
	AttackVectorPhysical
)

// Lookup tables indexed by the metric value; index 0 is the undefined state.
var (
	weightsAttackVector = []float64{0, 0.85, 0.62, 0.55, 0.2}
	codeAttackVector = []string{"", "N", "A", "L", "P"}
)

// defined reports whether the metric was set to a non-zero value.
func (av AttackVector) defined() bool {
	return int(av) != 0
}

// weight returns the metric's numeric scoring weight.
func (av AttackVector) weight() float64 {
	return weightsAttackVector[av]
}

// String returns the single-letter vector code ("N", "A", "L" or "P").
func (av AttackVector) String() string {
	return codeAttackVector[av]
}

// parse sets the receiver from its vector code, returning an error for
// unrecognized input.
func (av *AttackVector) parse(str string) error {
	idx, found := findIndex(str, codeAttackVector)
	if found {
		*av = AttackVector(idx)
		return nil
	}
	return fmt.Errorf("illegal attack vector code %s", str)
}
// AttackComplexity is the CVSS v3 Attack Complexity (AC) base metric. The
// zero value means "undefined".
type AttackComplexity int

// Attack Complexity values; index order matches the tables below.
const (
	AttackComplexityLow AttackComplexity = iota + 1
	AttackComplexityHigh
)

// Lookup tables indexed by the metric value; index 0 is the undefined state.
var (
	weightsAttackComplexity = []float64{0, 0.77, 0.44}
	codeAttackComplexity = []string{"", "L", "H"}
)

// defined reports whether the metric was set to a non-zero value.
func (ac AttackComplexity) defined() bool {
	return int(ac) != 0
}

// weight returns the metric's numeric scoring weight.
func (ac AttackComplexity) weight() float64 {
	return weightsAttackComplexity[ac]
}

// String returns the single-letter vector code ("L" or "H").
func (ac AttackComplexity) String() string {
	return codeAttackComplexity[ac]
}

// parse sets the receiver from its vector code, returning an error for
// unrecognized input.
func (ac *AttackComplexity) parse(str string) error {
	idx, found := findIndex(str, codeAttackComplexity)
	if found {
		*ac = AttackComplexity(idx)
		return nil
	}
	return fmt.Errorf("illegal attack complexity code %s", str)
}
// PrivilegesRequired is the CVSS v3 Privileges Required (PR) base metric.
// The zero value means "undefined".
type PrivilegesRequired int

// Privileges Required values; index order matches the tables below.
const (
	PrivilegesRequiredNone PrivilegesRequired = iota + 1
	PrivilegesRequiredLow
	PrivilegesRequiredHigh
)

// PR is the only base metric whose weight depends on whether Scope is
// changed, hence the two weight rows keyed by that boolean.
var (
	weightsPrivilegesRequired = map[bool][]float64{
		false: {0, 0.85, 0.62, 0.27},
		true: {0, 0.85, 0.68, 0.5},
	}
	codePrivilegesRequired = []string{"", "N", "L", "H"}
)

// defined reports whether the metric was set to a non-zero value.
func (pr PrivilegesRequired) defined() bool {
	return int(pr) != 0
}

// weight returns the scoring weight, selected by whether the scope changed.
func (pr PrivilegesRequired) weight(scopeChanged bool) float64 {
	return weightsPrivilegesRequired[scopeChanged][pr]
}

// String returns the single-letter vector code ("N", "L" or "H").
func (pr PrivilegesRequired) String() string {
	return codePrivilegesRequired[pr]
}

// parse sets the receiver from its vector code, returning an error for
// unrecognized input.
func (pr *PrivilegesRequired) parse(str string) error {
	idx, found := findIndex(str, codePrivilegesRequired)
	if found {
		*pr = PrivilegesRequired(idx)
		return nil
	}
	return fmt.Errorf("illegal privileges required code %s", str)
}
// UserInteraction is the CVSS v3 User Interaction (UI) base metric. The
// zero value means "undefined".
type UserInteraction int

// User Interaction values; index order matches the tables below.
const (
	UserInteractionNone UserInteraction = iota + 1
	UserInteractionRequired
)

// Lookup tables indexed by the metric value; index 0 is the undefined state.
var (
	weightsUserInteraction = []float64{0, 0.85, 0.62}
	codeUserInteraction = []string{"", "N", "R"}
)

// defined reports whether the metric was set to a non-zero value.
func (ui UserInteraction) defined() bool {
	return int(ui) != 0
}

// weight returns the metric's numeric scoring weight.
func (ui UserInteraction) weight() float64 {
	return weightsUserInteraction[ui]
}

// String returns the single-letter vector code ("N" or "R").
func (ui UserInteraction) String() string {
	return codeUserInteraction[ui]
}

// parse sets the receiver from its vector code, returning an error for
// unrecognized input.
func (ui *UserInteraction) parse(str string) error {
	idx, found := findIndex(str, codeUserInteraction)
	if found {
		*ui = UserInteraction(idx)
		return nil
	}
	return fmt.Errorf("illegal user interaction code %s", str)
}
// Scope is the CVSS v3 Scope (S) base metric. The zero value means
// "undefined".
type Scope int

// Scope values; index order matches the tables below.
const (
	ScopeUnchanged Scope = iota + 1
	ScopeChanged
)

// NOTE(review): Scope carries no direct weight in the v3 formula (it
// instead selects branches/PR weights), so all entries here are zero —
// confirm this table is intentionally a placeholder.
var (
	weightsScope = []float64{0, 0.0, 0.0}
	codeScope = []string{"", "U", "C"}
)

// defined reports whether the metric was set to a non-zero value.
func (s Scope) defined() bool {
	return int(s) != 0
}

// weight returns the metric's numeric scoring weight (always zero here).
func (s Scope) weight() float64 {
	return weightsScope[s]
}

// String returns the single-letter vector code ("U" or "C").
func (s Scope) String() string {
	return codeScope[s]
}

// parse sets the receiver from its vector code, returning an error for
// unrecognized input.
func (s *Scope) parse(str string) error {
	idx, found := findIndex(str, codeScope)
	if found {
		*s = Scope(idx)
		return nil
	}
	return fmt.Errorf("illegal scope code %s", str)
}
// Confidentiality is the CVSS v3 Confidentiality impact (C) base metric.
// The zero value means "undefined".
type Confidentiality int

// Confidentiality values; index order matches the tables below.
const (
	ConfidentialityHigh Confidentiality = iota + 1
	ConfidentialityLow
	ConfidentialityNone
)

// Lookup tables indexed by the metric value; index 0 is the undefined state.
var (
	weightsConfidentiality = []float64{0, 0.56, 0.22, 0.0}
	codeConfidentiality = []string{"", "H", "L", "N"}
)

// defined reports whether the metric was set to a non-zero value.
func (c Confidentiality) defined() bool {
	return int(c) != 0
}

// weight returns the metric's numeric scoring weight.
func (c Confidentiality) weight() float64 {
	return weightsConfidentiality[c]
}

// String returns the single-letter vector code ("H", "L" or "N").
func (c Confidentiality) String() string {
	return codeConfidentiality[c]
}

// parse sets the receiver from its vector code, returning an error for
// unrecognized input.
func (c *Confidentiality) parse(str string) error {
	idx, found := findIndex(str, codeConfidentiality)
	if found {
		*c = Confidentiality(idx)
		return nil
	}
	return fmt.Errorf("illegal confidentiality code %s", str)
}
// Integrity is the CVSS v3 Integrity impact (I) base metric. The zero
// value means "undefined".
type Integrity int

// Integrity values; index order matches the tables below.
const (
	IntegrityHigh Integrity = iota + 1
	IntegrityLow
	IntegrityNone
)

// Lookup tables indexed by the metric value; index 0 is the undefined state.
var (
	weightsIntegrity = []float64{0, 0.56, 0.22, 0.0}
	codeIntegrity = []string{"", "H", "L", "N"}
)

// defined reports whether the metric was set to a non-zero value.
func (i Integrity) defined() bool {
	return int(i) != 0
}

// weight returns the metric's numeric scoring weight.
func (i Integrity) weight() float64 {
	return weightsIntegrity[i]
}

// String returns the single-letter vector code ("H", "L" or "N").
func (i Integrity) String() string {
	return codeIntegrity[i]
}

// parse sets the receiver from its vector code, returning an error for
// unrecognized input.
func (i *Integrity) parse(str string) error {
	idx, found := findIndex(str, codeIntegrity)
	if found {
		*i = Integrity(idx)
		return nil
	}
	return fmt.Errorf("illegal integrity code %s", str)
}
// Availability is the CVSS v3 Availability impact (A) base metric. The
// zero value means "undefined".
type Availability int

// Availability values; index order matches the tables below.
const (
	AvailabilityHigh Availability = iota + 1
	AvailabilityLow
	AvailabilityNone
)

// Lookup tables indexed by the metric value; index 0 is the undefined state.
var (
	weightsAvailability = []float64{0, 0.56, 0.22, 0.0}
	codeAvailability = []string{"", "H", "L", "N"}
)

// defined reports whether the metric was set to a non-zero value.
func (a Availability) defined() bool {
	return int(a) != 0
}

// weight returns the metric's numeric scoring weight.
func (a Availability) weight() float64 {
	return weightsAvailability[a]
}

// String returns the single-letter vector code ("H", "L" or "N").
func (a Availability) String() string {
	return codeAvailability[a]
}

// parse sets the receiver from its vector code, returning an error for
// unrecognized input.
func (a *Availability) parse(str string) error {
	idx, found := findIndex(str, codeAvailability)
	if found {
		*a = Availability(idx)
		return nil
	}
	return fmt.Errorf("illegal availability code %s", str)
}
package validator
import (
"fmt"
"math/big"
"sort"
"strings"
"github.com/hyperledger/burrow/crypto"
)
// big0 is a shared zero constant; not referenced in this file's visible
// scope — presumably used elsewhere in the package.
var big0 = big.NewInt(0)

// A Validator multiset - can be used to capture the global state of validators or as an accumulator each block
type Set struct {
	// powers maps validator address to its current voting power.
	powers map[crypto.Address]*big.Int
	// publicKeys retains the addressable identity for each known address.
	publicKeys map[crypto.Address]crypto.Addressable
	// totalPower is the running sum of all powers in the set.
	totalPower *big.Int
	// trim, when set, deletes entries whose power is set to zero.
	trim bool
}
// newSet builds an empty Set with initialized maps and a zero total power.
func newSet() *Set {
	return &Set{
		totalPower: new(big.Int),
		powers: make(map[crypto.Address]*big.Int),
		publicKeys: make(map[crypto.Address]crypto.Addressable),
	}
}

// Create a new Validators which can act as an accumulator for validator power changes
func NewSet() *Set {
	return newSet()
}

// Like Set but removes entries when power is set to 0 this make Count() == CountNonZero() and prevents a set from leaking
// but does mean that a zero will not be iterated over when performing an update which is necessary in Ring
func NewTrimSet() *Set {
	s := newSet()
	s.trim = true
	return s
}
// Implements Writer, but will never error
func (vs *Set) SetPower(id crypto.PublicKey, power *big.Int) error {
	vs.ChangePower(id, power)
	return nil
}

// Add the power of a validator and returns the flow into that validator
func (vs *Set) ChangePower(id crypto.PublicKey, power *big.Int) *big.Int {
	address := id.GetAddress()
	// Calculate flow into this validator (positive means in, negative means out)
	flow := vs.Flow(id, power)
	vs.totalPower.Add(vs.totalPower, flow)
	if vs.trim && power.Sign() == 0 {
		// Trim mode: a zero power removes the validator entirely.
		delete(vs.publicKeys, address)
		delete(vs.powers, address)
	} else {
		// Store a defensive copy so callers cannot mutate our record.
		vs.publicKeys[address] = crypto.NewAddressable(id)
		vs.powers[address] = new(big.Int).Set(power)
	}
	return flow
}
// TotalPower returns a copy of the aggregate voting power of the set.
func (vs *Set) TotalPower() *big.Int {
	return new(big.Int).Set(vs.totalPower)
}

// Returns the maximum allowable flow whilst ensuring the majority of validators are non-byzantine after the transition
// So need at most ceiling((Total Power)/3) - 1, in integer division we have ceiling(X*p/q) = (p(X+1)-1)/q
// For p = 1 just X/q so we want (Total Power)/3 - 1
func (vs *Set) MaxFlow() *big.Int {
	max := vs.TotalPower()
	return max.Sub(max.Div(max, big3), big1)
}

// Returns the flow that would be induced by a validator power change
func (vs *Set) Flow(id crypto.PublicKey, power *big.Int) *big.Int {
	return new(big.Int).Sub(power, vs.GetPower(id.GetAddress()))
}
// Returns the power of id but only if it is set
func (vs *Set) MaybePower(id crypto.Address) *big.Int {
	if vs.powers[id] == nil {
		return nil
	}
	// Return a copy so the internal record cannot be mutated.
	return new(big.Int).Set(vs.powers[id])
}

// Version of Power to match interface
func (vs *Set) Power(id crypto.Address) (*big.Int, error) {
	return vs.GetPower(id), nil
}

// Error free version of Power
func (vs *Set) GetPower(id crypto.Address) *big.Int {
	// Unknown addresses report zero power rather than nil.
	if vs.powers[id] == nil {
		return new(big.Int)
	}
	return new(big.Int).Set(vs.powers[id])
}
// Returns an error if the Sets are not equal describing which part of their structures differ
func (vs *Set) Equal(vsOther *Set) error {
	if vs.Size() != vsOther.Size() {
		return fmt.Errorf("set size %d != other set size %d", vs.Size(), vsOther.Size())
	}
	// Stop iteration IFF we find a non-matching validator
	return vs.IterateValidators(func(id crypto.Addressable, power *big.Int) error {
		otherPower := vsOther.GetPower(id.GetAddress())
		if otherPower.Cmp(power) != 0 {
			return fmt.Errorf("set power %d != other set power %d", power, otherPower)
		}
		return nil
	})
}
// Iterates over validators sorted by address
//
// Iteration stops early and returns the first non-nil error produced by
// iter. A nil receiver is treated as an empty set. The power passed to
// iter is a copy, safe to retain or mutate.
func (vs *Set) IterateValidators(iter func(id crypto.Addressable, power *big.Int) error) error {
	if vs == nil {
		return nil
	}
	// Sort addresses so iteration order is deterministic.
	addresses := make(crypto.Addresses, 0, len(vs.powers))
	for address := range vs.powers {
		addresses = append(addresses, address)
	}
	sort.Sort(addresses)
	for _, address := range addresses {
		err := iter(vs.publicKeys[address], new(big.Int).Set(vs.powers[address]))
		if err != nil {
			return err
		}
	}
	return nil
}
// Flush writes every validator's power into output.
// NOTE(review): the backend Reader parameter is never used here — confirm
// whether it is required by an interface or can be dropped.
func (vs *Set) Flush(output Writer, backend Reader) error {
	return vs.IterateValidators(func(id crypto.Addressable, power *big.Int) error {
		return output.SetPower(id.GetPublicKey(), power)
	})
}

// CountNonZero returns the number of validators with non-zero power.
// The error from IterateValidators is ignored; the callback only ever
// returns nil, so no error can occur.
func (vs *Set) CountNonZero() int {
	var count int
	vs.IterateValidators(func(id crypto.Addressable, power *big.Int) error {
		if power.Sign() != 0 {
			count++
		}
		return nil
	})
	return count
}

// Size returns the number of entries stored, including zero-power ones
// when the set is not trimmed.
func (vs *Set) Size() int {
	return len(vs.publicKeys)
}
// Validators returns the set as a slice of persistable Validator messages,
// sorted by address. A nil receiver yields nil.
// NOTE(review): power.Uint64() truncates powers that exceed uint64 range —
// confirm powers are bounded elsewhere.
func (vs *Set) Validators() []*Validator {
	if vs == nil {
		return nil
	}
	pvs := make([]*Validator, 0, vs.Size())
	vs.IterateValidators(func(id crypto.Addressable, power *big.Int) error {
		pvs = append(pvs, &Validator{PublicKey: id.GetPublicKey(), Power: power.Uint64()})
		return nil
	})
	return pvs
}

// UnpersistSet rebuilds a Set from its persisted Validator slice form.
func UnpersistSet(pvs []*Validator) *Set {
	vs := NewSet()
	for _, pv := range pvs {
		vs.ChangePower(pv.PublicKey, new(big.Int).SetUint64(pv.Power))
	}
	return vs
}
// String returns a human-readable summary: total power, entry count, and
// the per-validator breakdown.
func (vs *Set) String() string {
	return fmt.Sprintf("Validators{TotalPower: %v; Count: %v; %v}", vs.TotalPower(), vs.Size(),
		vs.Strings())
}

// Strings returns a comma-separated "address->power" listing, sorted by
// address.
func (vs *Set) Strings() string {
	strs := make([]string, 0, vs.Size())
	vs.IterateValidators(func(id crypto.Addressable, power *big.Int) error {
		strs = append(strs, fmt.Sprintf("%v->%v", id.GetAddress(), power))
		return nil
	})
	return strings.Join(strs, ", ")
}
package main
import (
"math"
"math/rand"
. "github.com/hborntraeger/pt/pt"
)
// offset returns a small random displacement in the XZ plane: uniform
// direction around the circle, normally-distributed distance with the
// given standard deviation.
func offset(stdev float64) Vector {
	angle := rand.Float64() * 2 * math.Pi
	dist := rand.NormFloat64() * stdev
	return Vector{math.Cos(angle) * dist, 0, math.Sin(angle) * dist}
}
// intersects reports whether shape's bounding box overlaps the bounding
// box of any shape already in the scene.
func intersects(scene *Scene, shape Shape) bool {
	candidate := shape.BoundingBox()
	for _, existing := range scene.Shapes {
		if candidate.Intersects(existing.BoundingBox()) {
			return true
		}
	}
	return false
}
func main() {
	scene := Scene{}
	scene.Color = White
	// Stone materials: glossy black and white spheres.
	black := GlossyMaterial(HexColor(0x111111), 1.5, Radians(45))
	white := GlossyMaterial(HexColor(0xFFFFFF), 1.6, Radians(20))
	// Place each black stone as a squashed sphere at its grid position,
	// jittering slightly and retrying until it doesn't overlap a
	// previously placed shape.
	for _, p := range blackPositions {
		for {
			m := Scale(Vector{0.48, 0.2, 0.48}).Translate(Vector{p[0] - 9.5, 0, p[1] - 9.5})
			m = m.Translate(offset(0.02))
			shape := NewTransformedShape(NewSphere(Vector{}, 1, black), m)
			if intersects(&scene, shape) {
				continue
			}
			scene.Add(shape)
			break
		}
	}
	// Same placement procedure for the white stones.
	for _, p := range whitePositions {
		for {
			m := Scale(Vector{0.48, 0.2, 0.48}).Translate(Vector{p[0] - 9.5, 0, p[1] - 9.5})
			m = m.Translate(offset(0.02))
			shape := NewTransformedShape(NewSphere(Vector{}, 1, white), m)
			if intersects(&scene, shape) {
				continue
			}
			scene.Add(shape)
			break
		}
	}
	// Draw the 19x19 board grid as thin black cubes just below stone level.
	for i := 0; i < 19; i++ {
		x := float64(i) - 9.5
		m := 0.015
		scene.Add(NewCube(Vector{x - m, -1, -9.5}, Vector{x + m, -0.195, 8.5}, black))
		scene.Add(NewCube(Vector{-9.5, -1, x - m}, Vector{8.5, -0.195, x + m}, black))
	}
	// Wood-textured board surface beneath the grid.
	material := GlossyMaterial(HexColor(0xEFECCA), 1.2, Radians(30))
	material.Texture = GetTexture("examples/wood.jpg")
	scene.Add(NewCube(Vector{-12, -12, -12}, Vector{12, -0.2, 12}, material))
	// Environment map provides the scene lighting.
	scene.Texture = GetTexture("examples/courtyard_ccby/courtyard_8k.png")
	camera := LookAt(Vector{-0.5, 5, 5}, Vector{-0.5, 0, 0.5}, Vector{0, 1, 0}, 50)
	sampler := NewSampler(4, 4)
	renderer := NewRenderer(&scene, &camera, sampler, 2560/2, 1440/2)
	// Render progressively, writing a numbered frame after each pass.
	renderer.IterativeRender("out%03d.png", 1000)
}
// blackPositions holds the {column, row} coordinates of the black stones;
// values appear to index a 19x19 Go board (0-18 on each axis) — main()
// centres them by subtracting 9.5.
var blackPositions = [][]float64{
	{7, 3}, {14, 17}, {14, 4}, {18, 4}, {0, 7}, {5, 8}, {11, 5}, {10, 7}, {7, 6}, {6, 10}, {12, 6}, {3, 2}, {5, 11}, {7, 5}, {14, 15}, {12, 11}, {8, 12}, {4, 15}, {2, 11}, {9, 9}, {10, 3}, {6, 17}, {7, 2}, {14, 5}, {13, 3}, {13, 16}, {3, 6}, {1, 10}, {4, 1}, {10, 9}, {5, 17}, {12, 7}, {3, 5}, {2, 7}, {5, 10}, {10, 10}, {5, 7}, {7, 4}, {12, 4}, {8, 13}, {9, 8}, {15, 17}, {3, 10}, {4, 13}, {2, 13}, {8, 16}, {12, 3}, {17, 5}, {13, 2}, {15, 3}, {2, 3}, {6, 5}, {11, 7}, {16, 5}, {11, 8}, {14, 7}, {15, 6}, {1, 7}, {5, 9}, {10, 11}, {6, 6}, {4, 18}, {7, 14}, {17, 3}, {4, 9}, {10, 12}, {6, 3}, {16, 7}, {14, 14}, {16, 18}, {3, 13}, {1, 13}, {2, 10}, {7, 9}, {13, 1}, {12, 15}, {4, 3}, {5, 2}, {10, 2},
}

// whitePositions holds the {column, row} coordinates of the white stones,
// in the same coordinate system as blackPositions.
var whitePositions = [][]float64{
	{16, 6}, {16, 9}, {13, 4}, {1, 6}, {0, 10}, {3, 7}, {1, 11}, {8, 5}, {6, 7}, {5, 5}, {15, 11}, {13, 7}, {18, 9}, {2, 6}, {7, 10}, {15, 14}, {13, 10}, {17, 18}, {7, 15}, {5, 14}, {3, 18}, {15, 16}, {14, 8}, {12, 8}, {7, 13}, {1, 15}, {8, 9}, {6, 14}, {12, 2}, {17, 6}, {18, 5}, {17, 11}, {9, 7}, {6, 4}, {5, 4}, {6, 11}, {11, 9}, {13, 6}, {18, 6}, {0, 8}, {8, 3}, {4, 6}, {9, 2}, {4, 17}, {14, 12}, {13, 9}, {18, 11}, {3, 15}, {4, 8}, {2, 8}, {12, 9}, {16, 17}, {8, 10}, {9, 11}, {17, 7}, {16, 11}, {14, 10}, {3, 9}, {1, 9}, {8, 7}, {2, 14}, {9, 6}, {5, 3}, {14, 16}, {5, 16}, {16, 8}, {13, 5}, {8, 4}, {4, 7}, {5, 6}, {11, 2}, {12, 5}, {15, 8}, {2, 9}, {9, 15}, {8, 1}, {4, 4}, {16, 15}, {12, 10}, {13, 11}, {2, 16}, {4, 14}, {5, 15}, {10, 1}, {6, 8}, {6, 12}, {17, 9}, {8, 8},
}
package schemax
import "sync"
/*
ObjectClassCollection describes all ObjectClasses-based types:
- *SuperiorObjectClasses
- *AuxiliaryObjectClasses

It is also the interface returned by the New*ObjectClasses constructors.
*/
type ObjectClassCollection interface {
	// Get returns the *ObjectClass instance retrieved as a result
	// of a term search, based on Name or OID. If no match is found,
	// nil is returned.
	Get(interface{}) *ObjectClass

	// Index returns the *ObjectClass instance stored at the nth
	// index within the receiver, or nil.
	Index(int) *ObjectClass

	// Equal performs a deep-equal between the receiver and the
	// interface ObjectClassCollection provided.
	Equal(ObjectClassCollection) bool

	// Set returns an error instance based on an attempt to add
	// the provided *ObjectClass instance to the receiver.
	Set(*ObjectClass) error

	// Contains returns the index number and presence boolean that
	// reflects the result of a term search within the receiver.
	Contains(interface{}) (int, bool)

	// String returns a properly-delimited sequence of string
	// values, either as a Name or OID, for the receiver type.
	String() string

	// Label returns the field name associated with the interface
	// types, or a zero string if no label is appropriate.
	Label() string

	// IsZero returns a boolean value indicative of whether the
	// receiver is considered zero, or undefined.
	IsZero() bool

	// Len returns an integer value indicative of the current
	// number of elements stored within the receiver.
	Len() int

	// SetSpecifier assigns a string value to all definitions within
	// the receiver. This value is used in cases where a definition
	// type name (e.g.: attributetype, objectclass, etc.) is required.
	// This value will be displayed at the beginning of the definition
	// value during the unmarshal or unsafe stringification process.
	SetSpecifier(string)

	// SetUnmarshaler assigns the provided DefinitionUnmarshaler
	// signature to all definitions within the receiver. The provided
	// function shall be executed during the unmarshal or unsafe
	// stringification process.
	SetUnmarshaler(DefinitionUnmarshaler)
}
/*
Kind is an unsigned 8-bit integer that describes the "kind" of ObjectClass definition bearing this type. Only one distinct Kind value may be set for any given ObjectClass definition, and must be set explicitly (no default is implied).
*/
type Kind uint8

// badKind is the zero value and represents an unset/invalid kind; the
// three RFC 4512 object class kinds follow.
const (
	badKind Kind = iota
	Abstract
	Structural
	Auxiliary
)

/*
IsZero returns a boolean value indicative of whether the receiver is undefined.
*/
func (r Kind) IsZero() bool {
	return r == badKind
}
/*
ObjectClass conforms to the specifications of RFC4512 Section 4.1.1. Boolean values, e.g: 'OBSOLETE', are supported internally and are not explicit fields.
*/
type ObjectClass struct {
	OID OID
	Name Name
	Description Description
	SuperClass ObjectClassCollection
	Kind Kind
	Must AttributeTypeCollection
	May AttributeTypeCollection
	Extensions Extensions
	flags definitionFlags // boolean qualifiers such as OBSOLETE
	ufn DefinitionUnmarshaler // user-supplied unmarshal function
	spec string // optional definition type name prefix (see SetSpecifier)
	info []byte // arbitrary user-assigned information (see SetInfo)
}

/*
Type returns the formal name of the receiver in order to satisfy signature requirements of the Definition interface type.
*/
func (r *ObjectClass) Type() string {
	return `ObjectClass`
}
/*
ObjectClasses is a thread-safe collection of *ObjectClass slice instances.
*/
type ObjectClasses struct {
	mutex *sync.Mutex // guards slice for Contains/Index/Set
	slice collection
	macros *Macros // optional OID macro resolver (see SetMacros)
}

/*
StructuralObjectClass is a type alias of *ObjectClass intended for use solely within instances of NameForm within its "OC" field.
*/
type StructuralObjectClass struct {
	*ObjectClass
}

/*
SuperiorObjectClasses contains an embedded *ObjectClasses instance. This type alias is meant to reside within the SUP field of an objectClass definition.
*/
type SuperiorObjectClasses struct {
	*ObjectClasses
}

/*
AuxiliaryObjectClasses contains an embedded *ObjectClasses instance. This type alias is meant to reside within the AUX field of a dITContentRule definition.
*/
type AuxiliaryObjectClasses struct {
	*ObjectClasses
}
/*
SetMacros assigns the *Macros instance to the receiver, allowing subsequent OID resolution capabilities during the addition of new slice elements.
*/
func (r *ObjectClasses) SetMacros(macros *Macros) {
	r.macros = macros
}

/*
SetSpecifier is a convenience method that executes the SetSpecifier method in iterative fashion for all definitions within the receiver.
*/
func (r *ObjectClasses) SetSpecifier(spec string) {
	for i := 0; i < r.Len(); i++ {
		r.Index(i).SetSpecifier(spec)
	}
}

/*
SetUnmarshaler is a convenience method that executes the SetUnmarshaler method in iterative fashion for all definitions within the receiver.
*/
func (r *ObjectClasses) SetUnmarshaler(fn DefinitionUnmarshaler) {
	for i := 0; i < r.Len(); i++ {
		r.Index(i).SetUnmarshaler(fn)
	}
}
/*
String is an unsafe convenience wrapper for Unmarshal(r). If an error is encountered, an empty string definition is returned. If reliability and error handling are important, use Unmarshal.
*/
func (r ObjectClass) String() (def string) {
	// Any unmarshal error is deliberately discarded here.
	def, _ = r.unmarshal()
	return
}

/*
SetSpecifier assigns a string value to the receiver, useful for placement into configurations that require a type name (e.g.: objectclass). This will be displayed at the beginning of the definition value during the unmarshal or unsafe stringification process.
*/
func (r *ObjectClass) SetSpecifier(spec string) {
	r.spec = spec
}
/*
String is a stringer method that returns the string-form of the receiver instance. Unrecognized kinds yield a zero string.
*/
func (r Kind) String() (k string) {
	switch r {
	case Abstract:
		k = `ABSTRACT`
	case Structural:
		k = `STRUCTURAL`
	case Auxiliary:
		k = `AUXILIARY`
	}
	return // zero string for any other value
}
/*
Contains is a thread-safe method that returns a collection slice element index integer and a presence-indicative boolean value based on a term search conducted within the receiver.
*/
func (r ObjectClasses) Contains(x interface{}) (int, bool) {
	r.mutex.Lock()
	defer r.mutex.Unlock()

	// When a macro table is present, attempt to resolve x (e.g. a macro
	// name) to an OID before searching.
	if !r.macros.IsZero() {
		if oid, resolved := r.macros.Resolve(x); resolved {
			return r.slice.contains(oid)
		}
	}
	return r.slice.contains(x)
}

/*
Index is a thread-safe method that returns the nth collection slice element if defined, else nil. This method supports use of negative indices which should be used with special care.
*/
func (r ObjectClasses) Index(idx int) *ObjectClass {
	r.mutex.Lock()
	defer r.mutex.Unlock()

	// A failed assertion (out-of-range or wrong type) yields nil.
	assert, _ := r.slice.index(idx).(*ObjectClass)
	return assert
}
/*
Get combines Contains and Index method executions to return an entry based on a term search conducted within the receiver. A failed search yields nil.
*/
func (r ObjectClasses) Get(x interface{}) *ObjectClass {
	if idx, found := r.Contains(x); found {
		return r.Index(idx)
	}
	return nil
}
/*
Len is a thread-safe method that returns the effective length of the receiver slice collection.

NOTE(review): unlike Contains/Index, this method does not acquire the
mutex — confirm whether slice.len() is safe without it.
*/
func (r ObjectClasses) Len() int {
	return r.slice.len()
}

/*
IsZero returns a boolean value indicative of whether the receiver is considered empty or uninitialized.
*/
func (r *ObjectClasses) IsZero() bool {
	if r != nil {
		return r.slice.isZero()
	}
	return r == nil
}

/*
IsZero returns a boolean value indicative of whether the receiver is considered empty or uninitialized.
*/
func (r *ObjectClass) IsZero() bool {
	return r == nil
}
/*
Set is a thread-safe append method that returns an error instance indicative of whether the append operation failed in some manner. Uniqueness is enforced for new elements based on Object Identifier and not the effective Name of the definition, if defined.

NOTE(review): a nil x would panic on x.OID below — confirm callers never
pass nil.
*/
func (r *ObjectClasses) Set(x *ObjectClass) error {
	// Duplicate OIDs are silently ignored rather than rejected.
	if _, exists := r.Contains(x.OID); exists {
		return nil //silent
	}

	r.mutex.Lock()
	defer r.mutex.Unlock()

	return r.slice.append(x)
}

/*
SetInfo assigns the byte slice to the receiver. This is a user-leveraged field intended to allow arbitrary information (documentation?) to be assigned to the definition.
*/
func (r *ObjectClass) SetInfo(info []byte) {
	r.info = info
}

/*
Info returns the assigned informational byte slice instance stored within the receiver.
*/
func (r *ObjectClass) Info() []byte {
	return r.info
}

/*
SetUnmarshaler assigns the provided DefinitionUnmarshaler signature value to the receiver. The provided function shall be executed during the unmarshal or unsafe stringification process.
*/
func (r *ObjectClass) SetUnmarshaler(fn DefinitionUnmarshaler) {
	r.ufn = fn
}
/*
Equal performs a deep-equal between the receiver and the provided collection type.
*/
func (r ObjectClasses) Equal(x ObjectClassCollection) bool {
	return r.slice.equal(x.(*ObjectClasses).slice)
}

/*
Equal performs a deep-equal between the receiver and the provided definition type.

Description text is ignored. Accepts either *ObjectClass or
*StructuralObjectClass; any other type compares unequal.
*/
func (r *ObjectClass) Equal(x interface{}) (equals bool) {

	var z *ObjectClass
	switch tv := x.(type) {
	case *ObjectClass:
		z = tv
	case *StructuralObjectClass:
		z = tv.ObjectClass
	default:
		return
	}

	// Two nils are equal; one nil is not.
	if z.IsZero() && r.IsZero() {
		equals = true
		return
	} else if z.IsZero() || r.IsZero() {
		return
	}

	if !z.Name.Equal(r.Name) {
		return
	}

	if r.Kind != z.Kind {
		return
	}

	if r.flags != z.flags {
		return
	}

	if !r.Must.Equal(z.Must) {
		return
	}

	if !r.May.Equal(z.May) {
		return
	}

	// SuperClass is only compared when both sides define one.
	if !z.SuperClass.IsZero() && !r.SuperClass.IsZero() {
		if !r.SuperClass.Equal(z.SuperClass) {
			return
		}
	}

	equals = r.Extensions.Equal(z.Extensions)

	return
}
/*
NewObjectClasses returns an initialized instance of ObjectClasses cast as an ObjectClassCollection.
*/
func NewObjectClasses() ObjectClassCollection {
	// *ObjectClasses statically satisfies ObjectClassCollection, so the
	// former interface{} box and runtime type assertion were redundant.
	return &ObjectClasses{
		mutex: &sync.Mutex{},
		slice: make(collection, 0, 0),
	}
}

/*
NewSuperiorObjectClasses returns an initialized instance of SuperiorObjectClasses cast as an ObjectClassCollection.
*/
func NewSuperiorObjectClasses() ObjectClassCollection {
	return &SuperiorObjectClasses{&ObjectClasses{
		mutex: &sync.Mutex{},
		slice: make(collection, 0, 0),
	}}
}

/*
NewAuxiliaryObjectClasses returns an initialized instance of AuxiliaryObjectClasses cast as an ObjectClassCollection.
*/
func NewAuxiliaryObjectClasses() ObjectClassCollection {
	return &AuxiliaryObjectClasses{&ObjectClasses{
		mutex: &sync.Mutex{},
		slice: make(collection, 0, 0),
	}}
}
// newKind normalizes x (Kind, string name, uint or non-negative int code)
// into a Kind, returning badKind for anything unrecognized. String
// comparison is case-insensitive.
func newKind(x interface{}) Kind {
	switch tv := x.(type) {
	case Kind:
		return newKind(tv.String())
	case string:
		switch toLower(tv) {
		case toLower(Abstract.String()):
			return Abstract
		case toLower(Structural.String()):
			return Structural
		case toLower(Auxiliary.String()):
			return Auxiliary
		}
	case uint:
		switch tv {
		case 0x1:
			return Abstract
		case 0x2:
			return Structural
		case 0x3:
			return Auxiliary
		}
	case int:
		// Negative ints fall through to badKind.
		if tv >= 0 {
			return newKind(uint(tv))
		}
	}

	return badKind
}

// is reports whether the receiver equals the provided kind.
func (r Kind) is(x Kind) bool {
	return r == x
}

/*
is returns a boolean value indicative of whether the provided interface value is either a Kind or a definitionFlags AND is enabled within the receiver.
*/
func (r ObjectClass) is(b interface{}) bool {
	switch tv := b.(type) {
	case definitionFlags:
		return r.flags.is(tv)
	case Kind:
		return r.Kind.is(tv)
	}

	return false
}
func (r *ObjectClass) validateKind() (err error) {
if newKind(r.Kind.String()) == badKind {
err = invalidObjectClassKind
}
return
}
/*
Validate returns an error that reflects any fatal condition observed regarding the receiver configuration.
*/
func (r *ObjectClass) Validate() (err error) {
return r.validate()
}
// validate verifies the receiver is non-zero and that its flags, kind,
// names and description each pass their respective checks, returning the
// first error encountered (nil when all checks pass).
func (r *ObjectClass) validate() (err error) {
	if r.IsZero() {
		return raise(isZero, "%T.validate", r)
	}
	// run each validation step in order; stop at the first failure
	for _, check := range []func() error{
		func() error { return validateFlag(r.flags) },
		func() error { return r.validateKind() },
		func() error { return validateNames(r.Name.strings()...) },
		func() error { return validateDesc(r.Description) },
	} {
		if err = check(); err != nil {
			return
		}
	}
	return
}
// getMay collects the effective set of permitted (MAY) attribute types for
// the receiver: every type in the provided collection m, every non-zero MAY
// type declared by each superior class, and every non-zero MAY type declared
// by the receiver itself.
func (r *ObjectClass) getMay(m AttributeTypeCollection) (ok PermittedAttributeTypes) {
	// Seed the result with the caller-provided collection; bail out on the
	// first element that is not an *AttributeType.
	for _, atr := range m.(*AttributeTypes).slice {
		at, assert := atr.(*AttributeType)
		if !assert {
			return
		}
		ok.Set(at)
	}
	// Inherit MAY types from every superior class.
	if !r.SuperClass.IsZero() {
		for i := 0; i < r.SuperClass.Len(); i++ {
			oc := r.SuperClass.Index(i) // was Index(0): only the first superior was consulted
			if oc.IsZero() {
				continue
			}
			for j := 0; j < oc.May.Len(); j++ {
				may := oc.May.Index(j)
				if may.IsZero() {
					continue // was inverted: only zero (invalid) entries were recorded
				}
				ok.Set(may)
			}
		}
	}
	// Finally include the receiver's own MAY types.
	if !r.May.IsZero() {
		for i := 0; i < r.May.Len(); i++ {
			may := r.May.Index(i) // was Index(0): the first entry was repeated Len() times
			if may.IsZero() {
				continue
			}
			ok.Set(may)
		}
	}
	return
}
// getMust collects the effective set of required (MUST) attribute types for
// the receiver: every type in the provided collection m, every non-zero MUST
// type declared by each superior class, and every non-zero MUST type declared
// by the receiver itself.
func (r *ObjectClass) getMust(m RequiredAttributeTypes) (req RequiredAttributeTypes) {
	// Seed the result with the caller-provided collection; bail out on the
	// first element that is not an *AttributeType.
	for _, atr := range m.slice {
		at, ok := atr.(*AttributeType)
		if !ok {
			return
		}
		req.Set(at)
	}
	// Inherit MUST types from every superior class.
	if !r.SuperClass.IsZero() {
		for i := 0; i < r.SuperClass.Len(); i++ {
			oc := r.SuperClass.Index(i) // was Index(0): only the first superior was consulted
			if oc.IsZero() {
				continue
			}
			for j := 0; j < oc.Must.Len(); j++ {
				must := oc.Must.Index(j)
				if must.IsZero() {
					continue // was inverted: only zero (invalid) entries were recorded
				}
				req.Set(must)
			}
		}
	}
	// Finally include the receiver's own MUST types.
	if !r.Must.IsZero() {
		for i := 0; i < r.Must.Len(); i++ {
			must := r.Must.Index(i) // was Index(0): the first entry was repeated Len() times
			if must.IsZero() {
				continue
			}
			req.Set(must)
		}
	}
	return
}
/*
String returns a properly-delimited sequence of string values, either as a Name or OID, for the receiver type. It delegates to the underlying slice's stringer.
*/
func (r ObjectClasses) String() string {
	return r.slice.ocs_oids_string()
}
/*
String returns a properly-delimited sequence of string values, either as a Name or OID, for the receiver type. It delegates to the underlying slice's stringer.
*/
func (r SuperiorObjectClasses) String() string {
	return r.slice.ocs_oids_string()
}
/*
String returns a properly-delimited sequence of string values, either as a Name or OID, for the receiver type. It delegates to the underlying slice's stringer.
*/
func (r AuxiliaryObjectClasses) String() string {
	return r.slice.ocs_oids_string()
}
// unmarshal renders the receiver as its definition string. The receiver is
// validated first; a user-supplied unmarshal function (ufn), when present,
// takes precedence over the package's basic single-line renderer.
func (r *ObjectClass) unmarshal() (string, error) {
	if err := r.validate(); err != nil {
		err = raise(invalidUnmarshal, err.Error())
		return ``, err
	}
	if r.ufn != nil {
		return r.ufn(r)
	}
	return r.unmarshalBasic()
}
/*
Map is a convenience method that returns a map[string][]string instance containing the effective contents of the receiver.
*/
func (r *ObjectClass) Map() (def map[string][]string) {
	// An invalid receiver yields a nil map.
	if err := r.Validate(); err != nil {
		return
	}
	def = make(map[string][]string, 14)
	def[`RAW`] = []string{r.String()}
	def[`OID`] = []string{r.OID.String()}
	def[`KIND`] = []string{r.Kind.String()}
	def[`TYPE`] = []string{r.Type()}
	if len(r.info) > 0 {
		def[`INFO`] = []string{string(r.info)}
	}
	if !r.Name.IsZero() {
		def[`NAME`] = make([]string, 0)
		for i := 0; i < r.Name.Len(); i++ {
			def[`NAME`] = append(def[`NAME`], r.Name.Index(i))
		}
	}
	if len(r.Description) > 0 {
		def[`DESC`] = []string{r.Description.String()}
	}
	// For SUP, MUST and MAY entries the first Name is preferred, falling
	// back to the OID when no name is set.
	if !r.SuperClass.IsZero() {
		def[`SUP`] = make([]string, 0)
		for i := 0; i < r.SuperClass.Len(); i++ {
			sup := r.SuperClass.Index(i)
			term := sup.Name.Index(0)
			if len(term) == 0 {
				term = sup.OID.String()
			}
			def[`SUP`] = append(def[`SUP`], term)
		}
	}
	if !r.Must.IsZero() {
		def[`MUST`] = make([]string, 0)
		for i := 0; i < r.Must.Len(); i++ {
			must := r.Must.Index(i)
			term := must.Name.Index(0)
			if len(term) == 0 {
				term = must.OID.String()
			}
			def[`MUST`] = append(def[`MUST`], term)
		}
	}
	if !r.May.IsZero() {
		def[`MAY`] = make([]string, 0)
		for i := 0; i < r.May.Len(); i++ {
			// local renamed from the misleading `must` to `may`
			may := r.May.Index(i)
			term := may.Name.Index(0)
			if len(term) == 0 {
				term = may.OID.String()
			}
			def[`MAY`] = append(def[`MAY`], term)
		}
	}
	if !r.Extensions.IsZero() {
		for k, v := range r.Extensions {
			def[k] = v
		}
	}
	if r.Obsolete() {
		def[`OBSOLETE`] = []string{`TRUE`}
	}
	return def
}
/*
ObjectClassUnmarshaler is a package-included function that honors the signature of the first class (closure) DefinitionUnmarshaler type.

The purpose of this function, and similar user-devised ones, is to unmarshal a definition with specific formatting included, such as linebreaks, leading specifier declarations and indenting.
*/
func ObjectClassUnmarshaler(x interface{}) (def string, err error) {
	r, isOC := x.(*ObjectClass)
	if !isOC {
		err = raise(unexpectedType,
			"Bad type for unmarshal (%T)", x)
		return
	}
	if r.IsZero() {
		err = raise(isZero, "%T is nil", r)
		return
	}
	const (
		whsp = ` `    // single space between label and value
		idnt = "\n\t" // newline + tab before each field
	)
	head, tail := `(`, `)`
	// a leading specifier, when set, prefixes the opening parenthesis
	if len(r.spec) > 0 {
		head = r.spec + whsp + head
	}
	def = head + whsp + r.OID.String()
	if !r.Name.IsZero() {
		def += idnt + r.Name.Label() + whsp + r.Name.String()
	}
	if !r.Description.IsZero() {
		def += idnt + r.Description.Label() + whsp + r.Description.String()
	}
	if r.Obsolete() {
		def += idnt + Obsolete.String()
	}
	if !r.SuperClass.IsZero() {
		def += idnt + r.SuperClass.Label() + whsp + r.SuperClass.String()
	}
	// Kind is mandatory and never zero
	def += idnt + r.Kind.String()
	if !r.Must.IsZero() {
		def += idnt + r.Must.Label() + whsp + r.Must.String()
	}
	if !r.May.IsZero() {
		def += idnt + r.May.Label() + whsp + r.May.String()
	}
	if !r.Extensions.IsZero() {
		def += idnt + r.Extensions.String()
	}
	def += whsp + tail
	return
}
// unmarshalBasic renders the receiver as a single-line, space-delimited,
// parenthesized definition: "( OID [NAME ...] [DESC ...] [OBSOLETE]
// [SUP ...] KIND [MUST ...] [MAY ...] [extensions] )".
func (r *ObjectClass) unmarshalBasic() (def string, err error) {
	var (
		WHSP string = ` `
		head string = `(`
		tail string = `)`
	)
	// a leading specifier, when set, prefixes the opening parenthesis
	if len(r.spec) > 0 {
		head = r.spec + WHSP + head
	}
	def += head + WHSP + r.OID.String()
	if !r.Name.IsZero() {
		def += WHSP + r.Name.Label()
		def += WHSP + r.Name.String()
	}
	if !r.Description.IsZero() {
		def += WHSP + r.Description.Label()
		def += WHSP + r.Description.String()
	}
	if r.Obsolete() {
		def += WHSP + Obsolete.String()
	}
	if !r.SuperClass.IsZero() {
		def += WHSP + r.SuperClass.Label()
		def += WHSP + r.SuperClass.String()
	}
	// Kind will never be zero
	def += WHSP + r.Kind.String()
	if !r.Must.IsZero() {
		def += WHSP + r.Must.Label()
		def += WHSP + r.Must.String()
	}
	if !r.May.IsZero() {
		def += WHSP + r.May.Label()
		def += WHSP + r.May.String()
	}
	if !r.Extensions.IsZero() {
		def += WHSP + r.Extensions.String()
	}
	def += WHSP + tail
	return
} | oc.go | 0.770637 | 0.420034 | oc.go | starcoder |
package main
import (
"fmt"
"math"
)
// Describe2Der describes 2D shapes: anything with an area and a perimeter.
type Describe2Der interface {
	area() float64
	perim() float64
}
// Describe3Der describes 3D shapes: anything with a volume and a surface area.
type Describe3Der interface {
	volume() float64
	surface() float64
}
// Circle description (radius only).
type Circle struct {
	radius float64
}
// Rectangle description (width and height).
type Rectangle struct {
	width float64
	height float64
}
// Triangle description (three side lengths a, b, c).
type Triangle struct {
	a float64
	b float64
	c float64
}
// Cylinder description (base radius and height).
type Cylinder struct {
	radius float64
	height float64
}
// Circle area: πr².
func (c Circle) area() float64 {
	return math.Pi * math.Pow(c.radius, 2)
}
// Circle circumference (will call it perim for this example): 2πr.
func (c Circle) perim() float64 {
	return 2 * math.Pi * c.radius
}
// Rectangle area: width × height.
func (r Rectangle) area() float64 {
	return r.width * r.height
}
// Rectangle perimeter: 2(width + height).
func (r Rectangle) perim() float64 {
	return (r.width + r.height) * 2
}
// Triangle area via Heron's formula: sqrt(s(s-a)(s-b)(s-c)) with s the
// semiperimeter. The original multiplied (s-a) three times, which yields
// the wrong area for any non-equilateral triangle.
func (t Triangle) area() float64 {
	s := ((t.a + t.b + t.c) / 2)
	return math.Sqrt(s * (s - t.a) * (s - t.b) * (s - t.c))
}
// Triangle perimeter: sum of the three sides.
func (t Triangle) perim() float64 {
	return t.a + t.b + t.c
}
// Cylinder volume: πr²h.
func (c Cylinder) volume() float64 {
	return math.Pi * math.Pow(c.radius, 2) * c.height
}
// Cylinder surface area: lateral area 2πrh plus the two circular caps 2πr².
func (c Cylinder) surface() float64 {
	return (2 * math.Pi * c.radius * c.height) + (2 * math.Pi * math.Pow(c.radius, 2))
}
func main() {
	// Concrete shape values; retained so their fields remain reachable.
	circle1 := Circle{5}
	rectangle1 := Rectangle{5, 3}
	// Interface values holding the concrete shapes; only the interface
	// methods are reachable through these.
	var circle1er Describe2Der = circle1
	var rectangle1er Describe2Der = rectangle1
	var triangle1er Describe2Der = Triangle{3, 4, 5}
	var cylinder1er Describe3Der = Cylinder{radius: 5, height: 3}
	// Shape properties are obtained through the interface methods.
	areaCircle1 := circle1er.area()
	circCircle1 := circle1er.perim()
	areaRectangle1 := rectangle1er.area()
	perimRectangle1 := rectangle1er.perim()
	areaTriangle1 := triangle1er.area()
	perimTriangle1 := triangle1er.perim()
	volumeCylinder1 := cylinder1er.volume()
	surfaceCylinder1 := cylinder1er.surface()
	fmt.Println(areaCircle1, circCircle1)
	fmt.Println(areaRectangle1, perimRectangle1)
	fmt.Println(areaTriangle1, perimTriangle1)
	fmt.Println(volumeCylinder1, surfaceCylinder1)
	// Struct fields are not reachable through the interface values, so the
	// concrete values (or the literal dimensions) are used here. The
	// original passed circle1er to %.2f, which formats the whole struct.
	fmt.Printf("Circle1 (radius %.2f) area is %10.3f, circumference is %10.3f\n",
		circle1.radius, areaCircle1, circCircle1)
	fmt.Printf("Rectangle1 (width %.2f, height %.2f) area is %10.3f, perimeter is %10.3f\n",
		rectangle1.width, rectangle1.height, areaRectangle1, perimRectangle1)
	fmt.Printf("Triangle1 (a %.2f, b %.2f, c %.2f) area is %10.3f, perimeter is %10.3f\n",
		3.0, 4.0, 5.0, areaTriangle1, perimTriangle1)
	fmt.Printf("Cylinder1 (radius %.2f, height %.2f) vol is %10.3f, surface area is %10.3f\n",
		5.0, 3.0, volumeCylinder1, surfaceCylinder1)
	// An interface value cannot be indexed (the original circle1er["radius"]
	// does not compile); a type assertion recovers the concrete type.
	fmt.Printf("%v\n", circle1er.(Circle).radius)
} | software/development/languages/go-cheat-sheet/src/function-method-interface-package-example/interface/interfaces.go | 0.853974 | 0.4474 | interfaces.go | starcoder |
Marching Squares Quadtree
Convert an SDF2 boundary to a set of line segments.
Uses quadtree space subdivision.
*/
//-----------------------------------------------------------------------------
package render
import (
"math"
"sync"
"github.com/deadsy/sdfx/sdf"
)
//-----------------------------------------------------------------------------
// square identifies one quadtree cell: v is the cell origin in integer grid
// units of the finest resolution, and n is the subdivision level, giving a
// side length of 1 << n grid units.
type square struct {
	v sdf.V2i // origin of square as integers
	n uint    // level of square, size = 1 << n
}
//-----------------------------------------------------------------------------
// Evaluate the SDF2 via a distance cache to avoid repeated evaluations.
type dcache2 struct {
	origin     sdf.V2              // origin of the overall bounding square
	resolution float64             // size of smallest quadtree square
	hdiag      []float64           // lookup table of square half diagonals, indexed by level
	s          sdf.SDF2            // the SDF2 to be rendered
	cache      map[sdf.V2i]float64 // cache of distances
	lock       sync.RWMutex        // guards the cache during reads/writes
}
// newDcache2 builds a distance cache for SDF2 s whose bounding square has
// its minimum corner at origin, with the given finest-cell resolution and n
// quadtree levels. The half-diagonal of a square at each level is
// precomputed for the emptiness test.
func newDcache2(s sdf.SDF2, origin sdf.V2, resolution float64, n uint) *dcache2 {
	dc := &dcache2{
		origin:     origin,
		resolution: resolution,
		hdiag:      make([]float64, n),
		s:          s,
		cache:      make(map[sdf.V2i]float64),
	}
	// precompute the square half-diagonal length per level
	for level := range dc.hdiag {
		cells := 1 << uint(level)
		side := float64(cells) * dc.resolution
		dc.hdiag[level] = 0.5 * math.Sqrt(2.0*side*side)
	}
	return dc
}
// read returns the cached distance for vi; found reports a cache hit.
func (dc *dcache2) read(vi sdf.V2i) (float64, bool) {
	dc.lock.RLock()
	dist, found := dc.cache[vi]
	dc.lock.RUnlock()
	return dist, found
}
// write stores the distance for vi in the cache.
func (dc *dcache2) write(vi sdf.V2i, dist float64) {
	dc.lock.Lock()
	dc.cache[vi] = dist
	dc.lock.Unlock()
}
// evaluate returns the world-space position of grid point vi together with
// the SDF2 distance there, consulting the distance cache first. Because
// read and write take separate locks, concurrent callers may both miss and
// evaluate the same point; they compute identical values, so the duplicate
// work is harmless.
func (dc *dcache2) evaluate(vi sdf.V2i) (sdf.V2, float64) {
	v := dc.origin.Add(vi.ToV2().MulScalar(dc.resolution))
	// do we have it in the cache?
	dist, found := dc.read(vi)
	if found {
		return v, dist
	}
	// evaluate the SDF2
	dist = dc.s.Evaluate(v)
	// write it to the cache
	dc.write(vi, dist)
	return v, dist
}
// isEmpty returns true if the square contains no SDF surface: when the
// distance sampled at the square's center is at least the center-to-corner
// half diagonal, the surface cannot cross the square. Requires c.n >= 1
// (c.n - 1 would underflow the uint otherwise).
func (dc *dcache2) isEmpty(c *square) bool {
	// evaluate the SDF2 at the center of the square
	s := 1 << (c.n - 1) // half side
	_, d := dc.evaluate(c.v.AddScalar(s))
	// compare to the center/corner distance
	return math.Abs(d) >= dc.hdiag[c.n]
}
// Process a square: squares at the finest level (n == 1) are converted to
// line segments via marching squares and sent on output; larger squares
// that may contain surface are subdivided into four children.
func (dc *dcache2) processSquare(c *square, output chan<- *Line) {
	if !dc.isEmpty(c) {
		if c.n == 1 {
			// this square is at the required resolution; sample its
			// four corners (side length is 2 grid units at level 1)
			c0, d0 := dc.evaluate(c.v.Add(sdf.V2i{0, 0}))
			c1, d1 := dc.evaluate(c.v.Add(sdf.V2i{2, 0}))
			c2, d2 := dc.evaluate(c.v.Add(sdf.V2i{2, 2}))
			c3, d3 := dc.evaluate(c.v.Add(sdf.V2i{0, 2}))
			corners := [4]sdf.V2{c0, c1, c2, c3}
			values := [4]float64{d0, d1, d2, d3}
			// output the line(s) for this square
			for _, l := range msToLines(corners, values, 0) {
				output <- l
			}
		} else {
			// process the four sub squares at the next level down
			n := c.n - 1
			s := 1 << n
			// TODO - turn these into throttled go-routines
			dc.processSquare(&square{c.v.Add(sdf.V2i{0, 0}), n}, output)
			dc.processSquare(&square{c.v.Add(sdf.V2i{s, 0}), n}, output)
			dc.processSquare(&square{c.v.Add(sdf.V2i{s, s}), n}, output)
			dc.processSquare(&square{c.v.Add(sdf.V2i{0, s}), n}, output)
		}
	}
}
//-----------------------------------------------------------------------------
// marchingSquaresQuadtree generates line segments for an SDF2 using quadtree subdivision.
func marchingSquaresQuadtree(s sdf.SDF2, resolution float64, output chan<- *Line) {
	// Scale the bounding box about the center to make sure the boundaries
	// aren't on the object surface.
	bb := s.BoundingBox()
	bb = bb.ScaleAboutCenter(1.01)
	longAxis := bb.Size().MaxComponent()
	// We want to test the smallest squares (side == resolution) for emptiness
	// so the level = 0 square is at half resolution.
	resolution = 0.5 * resolution
	// how many levels does the quadtree need to span the long axis?
	levels := uint(math.Ceil(math.Log2(longAxis/resolution))) + 1
	// create the distance cache
	dc := newDcache2(s, bb.Min, resolution, levels)
	// process the quadtree, start at the top level
	dc.processSquare(&square{sdf.V2i{0, 0}, levels - 1}, output)
}
//----------------------------------------------------------------------------- | render/march2x.go | 0.692954 | 0.558086 | march2x.go | starcoder |
package braille
import (
"image"
"image/color"
"github.com/borkshop/bork/internal/bitmap"
"github.com/borkshop/bork/internal/cops/display"
)
// Margin is the typical margin of skipped bits necessary to make Braille
// line art look straight: one bit column and two bit rows are skipped
// between adjacent display cells. Pass as the margin argument to DrawBitmap.
var Margin = image.Point{1, 2}
// BitmapAt returns the braille glyph that corresponds to the 2x4 grid at
// the given point in a bitmap, or the empty string when no dot in the cell
// is set.
func BitmapAt(src *bitmap.Bitmap, sp image.Point) string {
	// Unicode braille dot bit offsets, indexed by [column][row] relative
	// to sp (dots 1,2,3,7 form the left column; 4,5,6,8 the right).
	bits := [2][4]rune{
		{0x01, 0x02, 0x04, 0x40},
		{0x08, 0x10, 0x20, 0x80},
	}
	var r rune
	for dx := 0; dx < 2; dx++ {
		for dy := 0; dy < 4; dy++ {
			if src.At(sp.X+dx, sp.Y+dy) {
				r |= bits[dx][dy]
			}
		}
	}
	if r == 0 {
		return ""
	}
	// braille patterns start at U+2800; the dot bits select the glyph
	return string(0x2800 + r)
}
// DrawBitmap draws a braille bitmap onto a display, setting the foreground
// color for any cells with a present braille character. Skips over pixels in
// the given margin between cells. Passing braille.Margin drops pixels between
// cells to preserve the appearance of straight lines. Passing image.ZP
// preserves the entire image, but will render discontinuities in the margin
// between cells.
func DrawBitmap(dst *display.Display, r image.Rectangle, src *bitmap.Bitmap, sp image.Point, m image.Point, fg color.Color) {
	// clip the target rectangle to the display; nothing to draw when empty
	r = r.Intersect(dst.Bounds())
	if r.Empty() {
		return
	}
	w, h := r.Dx(), r.Dy()
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			// each display cell consumes a 2x4 block of bits plus the
			// configured margin in the source bitmap
			pt := image.Pt(x*(2+m.X), y*(4+m.Y)).Add(sp)
			dx := r.Min.X + x
			dy := r.Min.Y + y
			t := BitmapAt(src, pt)
			if t != "" {
				dst.Text.Set(dx, dy, t)
				dst.Foreground.Set(dx, dy, fg)
			}
		}
	}
}
// Bounds takes a rectangle describing cells on a display to the cells of a
// braille bitmap covering the cells of the display.
// Accepts a margin, bits to skip over between cells. Passing braille.Margin
// drops some pixels to achieve the possibility of rendering straighter lines.
// Passing image.ZP covers every bit of the bitmap, though all readable fonts
// will draw a margin between braille characters.
func Bounds(r image.Rectangle, m image.Point) image.Rectangle {
	w, h := r.Dx(), r.Dy()
	// each cell spans (2+m.X) x (4+m.Y) bits; the trailing margin is trimmed
	return image.Rectangle{
		r.Min,
		r.Min.Add(image.Pt(w*(2+m.X), h*(4+m.Y))).Sub(m),
	}
} | internal/cops/braille/braille.go | 0.736969 | 0.580382 | braille.go | starcoder |
package xstring
import (
"fmt"
"reflect"
"strconv"
"strings"
)
// Version returns the package version string.
func Version() string {
	return "0.3.0"
}
// Author returns the package author attribution.
func Author() string {
	return "[<NAME>](https://www.likexian.com/)"
}
// License returns the package license statement.
func License() string {
	return "Licensed under the Apache License 2.0"
}
// IsLetter reports whether s is an ASCII english letter (a-z or A-Z).
func IsLetter(s uint8) bool {
	c := s | 0x20 // fold to lowercase
	return 'a' <= c && c <= 'z'
}
// IsLetters reports whether every rune of s is an english letter. Note that
// each rune is truncated to its low byte before the check, matching the
// single-byte IsLetter test; the empty string yields true.
func IsLetters(s string) bool {
	return strings.IndexFunc(s, func(r rune) bool {
		return !IsLetter(uint8(r))
	}) < 0
}
// IsNumeric reports whether s parses as a (possibly signed, possibly
// floating-point or exponential) number.
func IsNumeric(s string) bool {
	if _, err := strconv.ParseFloat(s, 64); err != nil {
		return false
	}
	return true
}
// Reverse returns s with its runes in reverse order (multi-byte runes are
// kept intact).
func Reverse(s string) string {
	runes := []rune(s)
	for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 {
		runes[i], runes[j] = runes[j], runes[i]
	}
	return string(runes)
}
// ToString converts v to its string representation: integers in base 10,
// booleans as "true"/"false", floats with exactly two decimal places, and
// anything unrecognized via fmt's %v formatting.
func ToString(v interface{}) string {
	switch tv := v.(type) {
	case string:
		return tv
	case []byte:
		return string(tv)
	case bool:
		return strconv.FormatBool(tv)
	case int:
		return strconv.FormatInt(int64(tv), 10)
	case int8:
		return strconv.FormatInt(int64(tv), 10)
	case int16:
		return strconv.FormatInt(int64(tv), 10)
	case int32:
		return strconv.FormatInt(int64(tv), 10)
	case int64:
		return strconv.FormatInt(tv, 10)
	case uint:
		return strconv.FormatUint(uint64(tv), 10)
	case uint8:
		return strconv.FormatUint(uint64(tv), 10)
	case uint16:
		return strconv.FormatUint(uint64(tv), 10)
	case uint32:
		return strconv.FormatUint(uint64(tv), 10)
	case uint64:
		return strconv.FormatUint(tv, 10)
	case float32:
		return strconv.FormatFloat(float64(tv), 'f', 2, 64)
	case float64:
		return strconv.FormatFloat(tv, 'f', 2, 64)
	default:
		return fmt.Sprintf("%v", v)
	}
}
// Join concatenates the elements of a slice or array (or a pointer to one)
// into a single string separated by sep. Scalars (and unsupported kinds)
// are rendered directly via ToString; a nil pointer yields "".
func Join(v interface{}, sep string) string {
	vv := reflect.ValueOf(v)
	if vv.Kind() == reflect.Ptr || vv.Kind() == reflect.Interface {
		if vv.IsNil() {
			return ""
		}
		vv = vv.Elem()
	}
	switch vv.Kind() {
	case reflect.Slice, reflect.Array:
		as := make([]string, 0, vv.Len())
		for i := 0; i < vv.Len(); i++ {
			// Unwrap the reflect.Value so ToString's type switch applies.
			// Previously the reflect.Value itself was passed and always
			// fell through to the default %v case, bypassing e.g. the
			// two-decimal float formatting.
			as = append(as, ToString(vv.Index(i).Interface()))
		}
		return strings.Join(as, sep)
	default:
		return ToString(v)
	}
}
// Expand replaces {var} of string s based on the value map m
// For example, Expand("i am {name}", map[string]interface{}{"name": "<NAME>"}).
// Placeholders missing from m render as %!key(MISSING), mimicking fmt's
// missing-argument notation.
func Expand(s string, m map[string]interface{}) string {
	var i, j int
	var buf []byte
	for {
		// i is the index of the last '{' in the first run of braces, so
		// stacked braces keep all but the innermost one literal
		i = LastInIndex(s, "{")
		if i < 0 {
			break
		}
		// j is the offset of the matching '}' after i; j <= 0 means the
		// placeholder is unterminated or empty, so expansion stops
		j = strings.Index(s[i+1:], "}")
		if j <= 0 {
			break
		}
		buf = append(buf, s[:i]...)
		key := s[i+1 : i+1+j]
		if v, ok := m[key]; ok {
			buf = append(buf, fmt.Sprint(v)...)
		} else {
			buf = append(buf, []byte(fmt.Sprintf("%%!%s(MISSING)", key))...)
		}
		// continue after the consumed placeholder
		s = s[i+1+j+1:]
	}
	buf = append(buf, s...)
	s = string(buf)
	return s
}
// LastInIndex returns the index of the last byte in the first contiguous
// run of f's first byte within s, e.g. LastInIndex("{{{{{{{{{{name}", "{")
// returns the index of the final '{' of the leading run. Returns -1 when f
// does not occur in s.
func LastInIndex(s, f string) int {
	i := strings.Index(s, f)
	if i < 0 {
		return i
	}
	for j := i + 1; j < len(s); j++ {
		if s[j] != f[0] {
			return j - 1
		}
	}
	// The run extends to the end of s. The original returned the FIRST
	// index of the run here rather than the last.
	return len(s) - 1
} | xstring/xstring.go | 0.742888 | 0.405625 | xstring.go | starcoder |
package specs
import (
"testing"
"github.com/Fs02/grimoire"
"github.com/Fs02/grimoire/c"
"github.com/Fs02/grimoire/errors"
"github.com/stretchr/testify/assert"
)
// Query tests query specifications without join.
func Query(t *testing.T, repo grimoire.Repo) {
	// prepare test data: six users with mixed ages/genders and three
	// addresses belonging to the first user
	user := User{Name: "name1", Gender: "male", Age: 10}
	repo.From(users).MustSave(&user)
	repo.From(users).MustSave(&User{Name: "name2", Gender: "male", Age: 20})
	repo.From(users).MustSave(&User{Name: "name3", Gender: "male", Age: 30})
	repo.From(users).MustSave(&User{Name: "name4", Gender: "female", Age: 40})
	repo.From(users).MustSave(&User{Name: "name5", Gender: "female", Age: 50})
	repo.From(users).MustSave(&User{Name: "name6", Gender: "female", Age: 60})
	repo.From(addresses).MustSave(&Address{Address: "address1", UserID: &user.ID})
	repo.From(addresses).MustSave(&Address{Address: "address2", UserID: &user.ID})
	repo.From(addresses).MustSave(&Address{Address: "address3", UserID: &user.ID})
	// one query per supported condition/modifier; each is executed through
	// both All and One by run()
	tests := []grimoire.Query{
		repo.From(users).Where(c.Eq(id, user.ID)),
		repo.From(users).Where(c.Eq(name, "name1")),
		repo.From(users).Where(c.Eq(age, 10)),
		repo.From(users).Where(c.Eq(id, user.ID), c.Eq(name, "name1")),
		repo.From(users).Where(c.Eq(id, user.ID), c.Eq(name, "name1"), c.Eq(age, 10)),
		repo.From(users).Where(c.Eq(id, user.ID)).OrWhere(c.Eq(name, "name1")),
		repo.From(users).Where(c.Eq(id, user.ID)).OrWhere(c.Eq(name, "name1"), c.Eq(age, 10)),
		repo.From(users).Where(c.Eq(id, user.ID)).OrWhere(c.Eq(name, "name1")).OrWhere(c.Eq(age, 10)),
		repo.From(users).Where(c.Ne(gender, "male")),
		repo.From(users).Where(c.Gt(age, 59)),
		repo.From(users).Where(c.Gte(age, 60)),
		repo.From(users).Where(c.Lt(age, 11)),
		repo.From(users).Where(c.Lte(age, 10)),
		repo.From(users).Where(c.Nil(note)),
		repo.From(users).Where(c.NotNil(name)),
		repo.From(users).Where(c.In(id, 1, 2, 3)),
		repo.From(users).Where(c.Nin(id, 1, 2, 3)),
		repo.From(users).Where(c.Like(name, "name%")),
		repo.From(users).Where(c.NotLike(name, "noname%")),
		repo.From(users).Where(c.Fragment("id > 0")),
		repo.From(users).Where(c.Not(c.Eq(id, 1), c.Eq(name, "name1"), c.Eq(age, 10))),
		repo.From(users).Order(c.Asc(name)),
		repo.From(users).Order(c.Desc(name)),
		repo.From(users).Order(c.Asc(name), c.Desc(age)),
		repo.From(users).Group("gender").Select("COUNT(id)"),
		repo.From(users).Group("age").Having(c.Gt(age, 10)).Select("COUNT(id)"),
		repo.From(users).Limit(5),
		repo.From(users).Limit(5).Offset(5),
		repo.From(users).Find(1),
		repo.From(users).Select("name").Find(1),
		repo.From(users).Select("name", "age").Find(1),
		repo.From(users).Distinct().Find(1),
	}
	run(t, tests)
}
// QueryJoin tests query specifications with join.
func QueryJoin(t *testing.T, repo grimoire.Repo) {
	// joins with implicit and explicit conditions, plus join modes
	tests := []grimoire.Query{
		repo.From(addresses).Join(users),
		repo.From(addresses).Join(users, c.Eq(c.I("addresses.user_id"), c.I("users.id"))),
		repo.From(addresses).Join(users).Find(1),
		repo.From(addresses).Join(users).Where(c.Eq(address, "address1")),
		repo.From(addresses).Join(users).Where(c.Eq(address, "address1")).Order(c.Asc(name)),
		repo.From(addresses).JoinWith("LEFT JOIN", users),
		repo.From(addresses).JoinWith("LEFT OUTER JOIN", users),
	}
	run(t, tests)
}
// QueryNotFound tests query specifications when no result is found.
func QueryNotFound(t *testing.T, repo grimoire.Repo) {
	t.Run("NotFound", func(t *testing.T) {
		user := User{}
		// finding a non-existent id must yield a NotFound error
		err := repo.From("users").Find(0).One(&user)
		assert.NotNil(t, err)
		assert.Equal(t, errors.NotFound, err.(errors.Error).Kind())
	})
}
// run executes every query twice — via All (expecting at least one row) and
// via One — registering subtests named after the generated SQL statement.
func run(t *testing.T, queries []grimoire.Query) {
	for _, query := range queries {
		statement, _ := builder.Find(query)
		t.Run("All|"+statement, func(t *testing.T) {
			var result []User
			assert.Nil(t, query.All(&result))
			assert.NotEqual(t, 0, len(result))
		})
	}
	for _, query := range queries {
		statement, _ := builder.Find(query)
		t.Run("One|"+statement, func(t *testing.T) {
			var result User
			assert.Nil(t, query.One(&result))
		})
	}
} | adapter/specs/query.go | 0.562777 | 0.512022 | query.go | starcoder |
package geom
//Polygon is a two-dimensional geometry representing a polygon, stored as a
//list of rings (LineStrings)
type Polygon []LineString
//PolygonZ is a three-dimensional geometry representing a polygon
type PolygonZ []LineStringZ
//PolygonM is a two-dimensional geometry representing a polygon, with an additional value defined on each vertex
type PolygonM []LineStringM
//PolygonZM is a three-dimensional geometry representing a polygon, with an additional value defined on each vertex
type PolygonZM []LineStringZM
//Envelope returns a 2D envelope spanning every ring of the polygon
func (p Polygon) Envelope() *Envelope {
	e := NewEnvelope()
	for _, ls := range p {
		e.Extend(ls.Envelope())
	}
	return e
}
//Envelope returns a 2D envelope spanning every ring of the polygon
func (p PolygonZ) Envelope() *Envelope {
	e := NewEnvelope()
	for _, ls := range p {
		e.Extend(ls.Envelope())
	}
	return e
}
//EnvelopeZ returns a 3D envelope spanning every ring of the polygon
func (p PolygonZ) EnvelopeZ() *EnvelopeZ {
	e := NewEnvelopeZ()
	for _, ls := range p {
		e.Extend(ls.EnvelopeZ())
	}
	return e
}
//Envelope returns a 2D envelope spanning every ring of the polygon
func (p PolygonM) Envelope() *Envelope {
	e := NewEnvelope()
	for _, ls := range p {
		e.Extend(ls.Envelope())
	}
	return e
}
//EnvelopeM returns a measured envelope spanning every ring of the polygon
func (p PolygonM) EnvelopeM() *EnvelopeM {
	e := NewEnvelopeM()
	for _, ls := range p {
		e.Extend(ls.EnvelopeM())
	}
	return e
}
//Envelope returns a 2D envelope spanning every ring of the polygon
func (p PolygonZM) Envelope() *Envelope {
	e := NewEnvelope()
	for _, ls := range p {
		e.Extend(ls.Envelope())
	}
	return e
}
//EnvelopeZ returns a 3D envelope spanning every ring of the polygon
func (p PolygonZM) EnvelopeZ() *EnvelopeZ {
	e := NewEnvelopeZ()
	for _, ls := range p {
		e.Extend(ls.EnvelopeZ())
	}
	return e
}
//EnvelopeM returns a measured envelope spanning every ring of the polygon
func (p PolygonZM) EnvelopeM() *EnvelopeM {
	e := NewEnvelopeM()
	for _, ls := range p {
		e.Extend(ls.EnvelopeM())
	}
	return e
}
//EnvelopeZM returns a 3D measured envelope spanning every ring of the polygon
func (p PolygonZM) EnvelopeZM() *EnvelopeZM {
	e := NewEnvelopeZM()
	for _, ls := range p {
		e.Extend(ls.EnvelopeZM())
	}
	return e
}
//Clone returns a copy of the polygon.
//NOTE(review): despite the original "deep copy" wording, this returns a
//pointer to a copy of the slice header only — the underlying rings (and
//their points) are shared with the receiver, so mutating one via Iterate
//mutates the other. Confirm whether callers rely on true isolation.
func (p Polygon) Clone() Geometry {
	return &p
}
//Clone returns a copy of the polygon (shallow; see note on Polygon.Clone)
func (p PolygonZ) Clone() Geometry {
	return &p
}
//Clone returns a copy of the polygon (shallow; see note on Polygon.Clone)
func (p PolygonM) Clone() Geometry {
	return &p
}
//Clone returns a copy of the polygon (shallow; see note on Polygon.Clone)
func (p PolygonZM) Clone() Geometry {
	return &p
}
//Iterate walks over the rings of the polygon, passing each ring's points to
//f (which may modify them in situ); stops at the first error
func (p Polygon) Iterate(f func([]Point) error) error {
	for i := range p {
		if err := p[i].Iterate(f); err != nil {
			return err
		}
	}
	return nil
}
//Iterate walks over the points (and can modify in situ) the polygon,
//stopping at the first error
func (p PolygonZ) Iterate(f func([]Point) error) error {
	for i := range p {
		if err := p[i].Iterate(f); err != nil {
			return err
		}
	}
	return nil
}
//Iterate walks over the points (and can modify in situ) the polygon,
//stopping at the first error
func (p PolygonM) Iterate(f func([]Point) error) error {
	for i := range p {
		if err := p[i].Iterate(f); err != nil {
			return err
		}
	}
	return nil
}
//Iterate walks over the points (and can modify in situ) the polygon,
//stopping at the first error
func (p PolygonZM) Iterate(f func([]Point) error) error {
	for i := range p {
		if err := p[i].Iterate(f); err != nil {
			return err
		}
	}
	return nil
} | polygon.go | 0.87724 | 0.736306 | polygon.go | starcoder |
package internal
import (
"math/rand"
"sort"
"github.com/onsi-experimental/ginkgo/v2/types"
)
// GroupedSpecIndices is an ordered list of groups of spec indices.
type GroupedSpecIndices []SpecIndices
// SpecIndices holds the indices of specs within a Specs collection.
type SpecIndices []int
// OrderSpecs shuffles and partitions specs, returning the parallelizable
// groups followed by the serial-only groups (the latter is empty when not
// running in parallel).
func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
	/*
		Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
		order for a given seed across test runs.
		By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
		experience - specs within a given container run in the order they appear in the file.
		Developers can set -randomizeAllSpecs to shuffle _all_ specs.
		In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
		Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
	*/
	// Seed a new random source based on the configured random seed.
	r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
	// Decide how to group specs for shuffling. By default we shuffle top-level containers,
	// but setting --randomize-all-specs causes us to shuffle all specs (except for Ordered specs)
	nodeTypesToGroup := types.NodeTypesForContainerAndIt
	if suiteConfig.RandomizeAllSpecs {
		nodeTypesToGroup = types.NodeTypeIt
	}
	// Go through all specs and build the permutable groups. These are groupings that can be shuffled.
	// Along the way we extract sort keys to ensure a consistent order of specs before we permute them.
	permutableGroups := map[uint]SpecIndices{}
	groupIsMarkedOrdered := map[uint]bool{}
	groupSortKeys := map[uint]string{}
	groupIDs := []uint{}
	for idx, spec := range specs {
		groupingNode := spec.Nodes.FirstNodeMarkedOrdered()
		if groupingNode.IsZero() {
			// If a spec is not in an ordered container...
			// ...we group based on the first node with a nodetype satisfying `nodeTypesToGroup`
			groupingNode = spec.Nodes.FirstNodeWithType(nodeTypesToGroup)
		} else {
			// If a spec is in an ordered container...
			// ...we group based on the outermost ordered container
			groupIsMarkedOrdered[groupingNode.ID] = true
		}
		// we've figured out which group we're in, so we add this spec's index to the group.
		permutableGroups[groupingNode.ID] = append(permutableGroups[groupingNode.ID], idx)
		// and, while we're at it, extract the sort key for this group if we haven't already.
		if groupSortKeys[groupingNode.ID] == "" {
			groupSortKeys[groupingNode.ID] = groupingNode.CodeLocation.String()
			groupIDs = append(groupIDs, groupingNode.ID)
		}
	}
	// now sort the groups by the sort key. We use the grouping node's code location and break ties using group ID
	sort.SliceStable(groupIDs, func(i, j int) bool {
		keyA := groupSortKeys[groupIDs[i]]
		keyB := groupSortKeys[groupIDs[j]]
		if keyA == keyB {
			return groupIDs[i] < groupIDs[j]
		} else {
			return keyA < keyB
		}
	})
	// now permute the sorted group IDs and build the ordered Groups
	orderedGroups := GroupedSpecIndices{}
	permutation := r.Perm(len(groupIDs))
	for _, j := range permutation {
		if groupIsMarkedOrdered[groupIDs[j]] {
			// If the group is marked ordered, we preserve the grouping to ensure ordered specs always run on the same Ginkgo process
			orderedGroups = append(orderedGroups, permutableGroups[groupIDs[j]])
		} else {
			// If the group is _not_ marked ordered, we expand the grouping (it has served its purpose for permutation), in order to allow parallelizing across the specs in the group.
			for _, idx := range permutableGroups[groupIDs[j]] {
				orderedGroups = append(orderedGroups, SpecIndices{idx})
			}
		}
	}
	// If we're running in series, we're done.
	if suiteConfig.ParallelTotal == 1 {
		return orderedGroups, GroupedSpecIndices{}
	}
	// We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
	// The parallelizable groups will run across all Ginkgo processes...
	// ...the serial groups will only run on Process #1 after all other processes have exited.
	parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
	for _, specIndices := range orderedGroups {
		if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
			serialGroups = append(serialGroups, specIndices)
		} else {
			parallelizableGroups = append(parallelizableGroups, specIndices)
		}
	}
	return parallelizableGroups, serialGroups
} | internal/ordering.go | 0.645679 | 0.421016 | ordering.go | starcoder |
package segmentation
import (
"fmt"
"github.com/miguelfrde/image-segmentation/disjointset"
"github.com/miguelfrde/image-segmentation/graph"
"sort"
"time"
)
/**
 * Performs the image segmentation using the "Graph Based Segmentation"
 * algorithm. It uses sigma to apply a gaussian filter with it to the image
 * to smooth it before running the algorithm.
 * k and minSize are the algorithm parameters. For more information on this
 * algorithm refer to either my report which link is on the repo's README or
 * to: http://cs.brown.edu/~pff/papers/seg-ijcv.pdf
 */
func (s *Segmenter) SegmentGBS(sigma, k float64, minSize int) {
	s.smoothImage(sigma)
	s.buildGraph()
	fmt.Printf("segment... ")
	start := time.Now()
	// one disjoint-set component per graph vertex to start with
	s.resultset = disjointset.New(s.graph.TotalVertices())
	// every singleton region's threshold starts at k (T(c) = k/|c|, |c| = 1)
	threshold_vals := make([]float64, s.graph.TotalVertices(), s.graph.TotalVertices())
	for v := 0; v < s.graph.TotalVertices(); v++ {
		threshold_vals[v] = k
	}
	// sort edges (per EdgeList's ordering) before the greedy merge passes
	edges := s.graph.Edges()
	sort.Sort(edges)
	s.gbsMergeFromThreshold(edges, threshold_vals, k)
	s.gbsMergeSmallRegions(edges, minSize)
	fmt.Println(time.Since(start))
	fmt.Println("Components:", s.resultset.Components())
}
/**
* Computes the threshold used by the GBS algorithm.
* T(c) = k/|c|
*/
func threshold(set *disjointset.DisjointSet, k float64, u int) float64 {
	// T(c) = k / |c|, where |c| is the size of u's component.
	componentSize := set.Size(u)
	return k / float64(componentSize)
}
/**
* Performs the union of the regions to which the endpoints of an edge belong to if that
* edge's weight is less than the thresholds of both regions.
*/
func (s *Segmenter) gbsMergeFromThreshold(edges graph.EdgeList, thresholds []float64, k float64) {
	for _, edge := range edges {
		// Representatives of the regions containing each endpoint.
		u := s.resultset.Find(edge.U())
		v := s.resultset.Find(edge.V())
		uok := edge.Weight() <= thresholds[u]
		vok := edge.Weight() <= thresholds[v]
		if !s.resultset.Connected(u, v) && uok && vok {
			s.resultset.Union(u, v)
			// The union may have changed the representative; look it up once
			// and store the merged region's new threshold under it.
			root := s.resultset.Find(u)
			thresholds[root] = edge.Weight() + threshold(s.resultset, k, root)
		}
	}
}
/**
* Performs the merge of the regions to which the endpoints of an edge belong to if
* any of these regions is less than the minimum size for all regions.
*/
func (s *Segmenter) gbsMergeSmallRegions(edges graph.EdgeList, minSize int) {
	// Post-processing pass over the (already weight-sorted) edges: any region
	// smaller than minSize is absorbed into the neighbor joined by the
	// cheapest remaining edge.
	for _, edge := range edges {
		u := s.resultset.Find(edge.U())
		v := s.resultset.Find(edge.V())
		// u != v means the endpoints lie in different regions.
		if u != v && (s.resultset.Size(u) < minSize || s.resultset.Size(v) < minSize) {
			s.resultset.Union(u, v)
		}
	}
} | segmentation/gbs.go | 0.844569 | 0.491639 | gbs.go | starcoder |
package arrow
import (
"reflect"
)
// typeEqualsConfig collects the settings applied by TypeEqualOption values
// before a comparison runs.
type typeEqualsConfig struct {
	metadata bool // when set, metadata must match as well (see CheckMetadata)
}
// TypeEqualOption is a functional option type used for configuring type
// equality checks.
type TypeEqualOption func(*typeEqualsConfig)
// CheckMetadata returns a TypeEqualOption that makes TypeEqual also require
// metadata equality in addition to structural type equality. It only makes
// sense for STRUCT type.
func CheckMetadata() TypeEqualOption {
	return func(cfg *typeEqualsConfig) { cfg.metadata = true }
}
// TypeEqual checks if two DataType are the same, optionally checking metadata
// equality for STRUCT types.
func TypeEqual(left, right DataType, opts ...TypeEqualOption) bool {
	// Collect the functional options into a config.
	var cfg typeEqualsConfig
	for _, opt := range opts {
		opt(&cfg)
	}
	// Fast paths: nil handling and mismatched type IDs.
	switch {
	case left == nil || right == nil:
		return left == nil && right == nil
	case left.ID() != right.ID():
		return false
	}
	// Nested types are compared element-wise (recursively, propagating opts);
	// anything else falls back to reflect.DeepEqual.
	switch l := left.(type) {
	case ExtensionType:
		return l.ExtensionEquals(right.(ExtensionType))
	case *ListType:
		if !TypeEqual(l.Elem(), right.(*ListType).Elem(), opts...) {
			return false
		}
		if cfg.metadata {
			return l.Meta.Equal(right.(*ListType).Meta)
		}
		return l.NullableElem == right.(*ListType).NullableElem
	case *FixedSizeListType:
		if !TypeEqual(l.Elem(), right.(*FixedSizeListType).Elem(), opts...) {
			return false
		}
		if cfg.metadata {
			return l.Meta.Equal(right.(*FixedSizeListType).Meta)
		}
		return l.n == right.(*FixedSizeListType).n && l.NullableElem == right.(*FixedSizeListType).NullableElem
	case *StructType:
		r := right.(*StructType)
		switch {
		case len(l.fields) != len(r.fields):
			return false
		case !reflect.DeepEqual(l.index, r.index):
			return false
		}
		// Field-by-field comparison; metadata is only checked when requested.
		for i := range l.fields {
			leftField, rightField := l.fields[i], r.fields[i]
			switch {
			case leftField.Name != rightField.Name:
				return false
			case leftField.Nullable != rightField.Nullable:
				return false
			case !TypeEqual(leftField.Type, rightField.Type, opts...):
				return false
			case cfg.metadata && !leftField.Metadata.Equal(rightField.Metadata):
				return false
			}
		}
		return true
	default:
		return reflect.DeepEqual(left, right)
	}
} | go/arrow/compare.go | 0.739705 | 0.405272 | compare.go | starcoder |
package strset
import (
"encoding/json"
"sort"
)
// Set represents a set of unique strings.
// Elements are stored sorted in ascending order so membership tests and
// insertions can use binary search (sort.SearchStrings).
type Set struct{ items []string }
// New creates an empty set whose backing slice is pre-allocated with
// capacity for size elements.
func New(size int) *Set {
	items := make([]string, 0, size)
	return &Set{items: items}
}
// Use turns a slice into a set, re-using the underlying slice.
// WARNING: this function is destructive and will mutate the passed slice.
func Use(vv ...string) *Set {
	sort.Strings(vv)
	// Drop adjacent duplicates so the set invariant (sorted + unique) holds
	// even when the caller passes repeated values.
	out := vv[:0]
	for _, v := range vv {
		if len(out) == 0 || v != out[len(out)-1] {
			out = append(out, v)
		}
	}
	return &Set{items: out}
}
// Copy replaces the contents of s with the elements of x,
// reusing s's backing array when it is large enough.
func (s *Set) Copy(x *Set) {
	src := x.items // capture first so s.Copy(s) stays a no-op
	s.items = s.items[:0]
	s.items = append(s.items, src...)
}
// Len returns the set length.
func (s *Set) Len() int { return len(s.items) }
// Clear removes all elements from the set s.
// The backing array is retained, so subsequent Adds reuse its capacity.
func (s *Set) Clear() { s.items = s.items[:0] }
// Equals reports whether the sets s and t contain exactly the same
// elements. Because both are kept sorted, a positional compare suffices.
func (s *Set) Equals(t *Set) bool {
	if len(s.items) != len(t.items) {
		return false
	}
	for i := range s.items {
		if s.items[i] != t.items[i] {
			return false
		}
	}
	return true
}
// Add inserts v into the set, reporting whether the set grew
// (false means v was already present).
func (s *Set) Add(v string) bool {
	pos := sort.SearchStrings(s.items, v)
	if pos == len(s.items) {
		// v sorts after every current element: plain append.
		s.items = append(s.items, v)
		return true
	}
	if s.items[pos] == v {
		return false
	}
	// Grow by one slot and shift the tail right to make room at pos.
	s.items = append(s.items, "")
	copy(s.items[pos+1:], s.items[pos:])
	s.items[pos] = v
	return true
}
// Remove deletes v from the set, reporting whether the set shrank.
func (s *Set) Remove(v string) bool {
	pos := sort.SearchStrings(s.items, v)
	if pos >= len(s.items) || s.items[pos] != v {
		return false
	}
	// Shift the tail left over the removed slot and trim the length.
	n := copy(s.items[pos:], s.items[pos+1:])
	s.items = s.items[:pos+n]
	return true
}
// Has reports whether v is an element of the set s.
func (s *Set) Has(v string) bool {
	if pos := sort.SearchStrings(s.items, v); pos < len(s.items) {
		return s.items[pos] == v
	}
	return false
}
// Intersection sets s to the intersection x ∩ y.
// Called with s aliasing x or y (see IntersectionWith): ix/iy capture the
// original slice headers before Clear truncates s, and Add writes results
// back in sorted order at indices that appear to be already consumed —
// NOTE(review): this aliasing safety is subtle; verify before restructuring.
func (s *Set) Intersection(x, y *Set) {
	// Scan the smaller operand, binary-searching the larger one.
	ix, iy := x.items, y.items
	if len(iy) < len(ix) {
		ix, iy = iy, ix
	}
	s.Clear()
	var offset int
	var ok bool
	for _, v := range ix {
		// offset only moves forward through iy, shrinking each search window.
		if offset, ok = index(iy, v, offset); ok {
			s.Add(v)
		}
	}
}
// IntersectionWith sets s to the intersection s ∩ x.
// Relies on Intersection tolerating s as one of its operands.
func (s *Set) IntersectionWith(x *Set) {
	s.Intersection(s, x)
}
// Intersects reports whether s ∩ x ≠ ∅.
func (s *Set) Intersects(x *Set) bool {
	// Scan the smaller slice, binary-searching the larger one.
	si, xi := s.items, x.items
	sn, xn := len(si), len(xi)
	if xn < sn {
		si, xi = xi, si
		sn, xn = xn, sn
	}
	// Quick reject: an empty operand, or entirely disjoint value ranges.
	if sn == 0 || si[0] > xi[xn-1] || xi[0] > si[sn-1] {
		return false
	}
	offset := 0
	for _, v := range si {
		if pos, ok := index(xi, v, offset); ok {
			return true
		} else if pos >= xn {
			// v (and everything after it in the sorted si) sorts past xi's end.
			return false
		} else {
			offset = pos
		}
	}
	return false
}
// Union sets s to the union x ∪ y.
// The slice headers are captured before Clear, so calling with s aliasing
// x or y appears safe (results are re-added in sorted order) —
// NOTE(review): verify this aliasing assumption before restructuring.
func (s *Set) Union(x, y *Set) {
	xi, yi := x.items, y.items
	s.Clear()
	for _, v := range xi {
		s.Add(v)
	}
	for _, v := range yi {
		s.Add(v)
	}
}
// UnionWith merges every element of x into s, reporting whether s grew.
func (s *Set) UnionWith(x *Set) bool {
	grew := false
	for _, v := range x.items {
		if s.Add(v) {
			grew = true
		}
	}
	return grew
}
// Slice returns the string slice
// NOTE: this exposes the set's internal storage; mutating the returned
// slice breaks the sorted/unique invariant.
func (s *Set) Slice() []string { return s.items }
// MarshalJSON encodes the set as JSON
// A nil (never-populated) set encodes as JSON null rather than [].
func (s *Set) MarshalJSON() ([]byte, error) { return json.Marshal(s.items) }
// UnmarshalJSON decodes a JSON array of strings into the set,
// replacing any existing contents.
func (s *Set) UnmarshalJSON(data []byte) error {
	var elems []string
	if err := json.Unmarshal(data, &elems); err != nil {
		return err
	}
	*s = *Use(elems...)
	return nil
}
// index binary-searches vs for v starting at offset, returning the candidate
// position and whether v was actually found there. vs must be sorted
// ascending from offset onward.
func index(vs []string, v string, offset int) (int, bool) {
	pos := sort.SearchStrings(vs[offset:], v) + offset
	return pos, pos < len(vs) && vs[pos] == v
} | strset.go | 0.808786 | 0.446314 | strset.go | starcoder |
package question
const (
	// Label holds the string label denoting the question type in the database.
	Label = "question"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldHash holds the string denoting the hash vertex property in the database.
	FieldHash = "hash"
	// FieldTitle holds the string denoting the title vertex property in the database.
	FieldTitle = "title"
	// FieldDescription holds the string denoting the description vertex property in the database.
	FieldDescription = "description"
	// FieldMetadata holds the string denoting the metadata vertex property in the database.
	FieldMetadata = "metadata"
	// FieldValidator holds the string denoting the validator vertex property in the database.
	FieldValidator = "validator"
	// FieldAnonymous holds the string denoting the anonymous vertex property in the database.
	FieldAnonymous = "anonymous"
	// EdgeAnswers holds the string denoting the answers edge name in mutations.
	EdgeAnswers = "answers"
	// EdgeInput holds the string denoting the input edge name in mutations.
	EdgeInput = "input"
	// EdgeFlow holds the string denoting the flow edge name in mutations.
	EdgeFlow = "flow"
	// Table holds the table name of the question in the database.
	Table = "questions"
	// AnswersTable is the table the holds the answers relation/edge.
	AnswersTable = "answers"
	// AnswersInverseTable is the table name for the Answer entity.
	// It exists in this package in order to avoid circular dependency with the "answer" package.
	AnswersInverseTable = "answers"
	// AnswersColumn is the table column denoting the answers relation/edge.
	AnswersColumn = "question_answers"
	// InputTable is the table the holds the input relation/edge.
	InputTable = "inputs"
	// InputInverseTable is the table name for the Input entity.
	// It exists in this package in order to avoid circular dependency with the "input" package.
	InputInverseTable = "inputs"
	// InputColumn is the table column denoting the input relation/edge.
	InputColumn = "question_input"
	// FlowTable is the table the holds the flow relation/edge.
	FlowTable = "questions"
	// FlowInverseTable is the table name for the Flow entity.
	// It exists in this package in order to avoid circular dependency with the "flow" package.
	FlowInverseTable = "flows"
	// FlowColumn is the table column denoting the flow relation/edge.
	FlowColumn = "flow_questions"
)
// Columns holds all SQL columns for question fields.
// The order mirrors the Field* constant declarations above.
var Columns = []string{
	FieldID,
	FieldHash,
	FieldTitle,
	FieldDescription,
	FieldMetadata,
	FieldValidator,
	FieldAnonymous,
}
// ForeignKeys holds the SQL foreign-keys that are owned by the Question type.
// "flow_questions" is the FK column backing the flow edge (see FlowColumn).
var ForeignKeys = []string{
	"flow_questions",
}
var (
	// TitleValidator is a validator for the "title" field. It is called by the builders before save.
	TitleValidator func(string) error
	// DefaultAnonymous holds the default value on creation for the anonymous field.
	DefaultAnonymous bool
) | ent/question/question.go | 0.511961 | 0.428592 | question.go | starcoder |
package leaves
import (
"bufio"
"fmt"
"os"
"github.com/FateFaker/leaves/internal/pickle"
"github.com/FateFaker/leaves/transformation"
)
// lgTreeFromSklearnDecisionTreeRegressor converts a pickled sklearn decision
// tree into the internal lgTree representation. Leaf values are transformed
// as value*scale + base. sklearn marks leaves with LeftChild < 0.
func lgTreeFromSklearnDecisionTreeRegressor(tree pickle.SklearnDecisionTreeRegressor, scale float64, base float64) (lgTree, error) {
	t := lgTree{}
	// no support for categorical features in sklearn trees
	t.nCategorical = 0
	// Count leaves vs. internal nodes; a proper binary tree has
	// numLeaves == numNodes + 1.
	numLeaves := 0
	numNodes := 0
	for _, n := range tree.Tree.Nodes {
		if n.LeftChild < 0 {
			numLeaves++
		} else {
			numNodes++
		}
	}
	if numLeaves-1 != numNodes {
		return t, fmt.Errorf("unexpected number of leaves (%d) and nodes (%d)", numLeaves, numNodes)
	}
	if numNodes == 0 {
		// special case
		// we mimic decision rule but left and right childs lead to the same result
		t.nodes = make([]lgNode, 0, 1)
		node := numericalNode(0, 0, 0.0, 0)
		node.Flags |= leftLeaf
		node.Flags |= rightLeaf
		node.Left = uint32(len(t.leafValues))
		node.Right = uint32(len(t.leafValues))
		t.nodes = append(t.nodes, node)
		t.leafValues = append(t.leafValues, tree.Tree.Values[0]*scale+base)
		return t, nil
	}
	// Numerical only
	// createNode converts one sklearn internal node, resolving leaf children
	// immediately (their transformed values are appended to t.leafValues).
	createNode := func(idx int) (lgNode, error) {
		node := lgNode{}
		refNode := &tree.Tree.Nodes[idx]
		missingType := uint8(0)
		defaultType := uint8(0)
		node = numericalNode(uint32(refNode.Feature), missingType, refNode.Threshold, defaultType)
		if tree.Tree.Nodes[refNode.LeftChild].LeftChild < 0 {
			node.Flags |= leftLeaf
			node.Left = uint32(len(t.leafValues))
			t.leafValues = append(t.leafValues, tree.Tree.Values[refNode.LeftChild]*scale+base)
		}
		if tree.Tree.Nodes[refNode.RightChild].LeftChild < 0 {
			node.Flags |= rightLeaf
			node.Right = uint32(len(t.leafValues))
			t.leafValues = append(t.leafValues, tree.Tree.Values[refNode.RightChild]*scale+base)
		}
		return node, nil
	}
	// Iterative DFS: the two stacks track the current path in the original
	// (sklearn) index space and in the converted t.nodes index space.
	origNodeIdxStack := make([]uint32, 0, numNodes)
	convNodeIdxStack := make([]uint32, 0, numNodes)
	visited := make([]bool, tree.Tree.NNodes)
	t.nodes = make([]lgNode, 0, numNodes)
	node, err := createNode(0)
	if err != nil {
		return t, err
	}
	t.nodes = append(t.nodes, node)
	origNodeIdxStack = append(origNodeIdxStack, 0)
	convNodeIdxStack = append(convNodeIdxStack, 0)
	for len(origNodeIdxStack) > 0 {
		convIdx := convNodeIdxStack[len(convNodeIdxStack)-1]
		// Descend into the right subtree first (unless it is a leaf already
		// handled by createNode, or it was visited on a previous pass).
		if t.nodes[convIdx].Flags&rightLeaf == 0 {
			origIdx := tree.Tree.Nodes[origNodeIdxStack[len(origNodeIdxStack)-1]].RightChild
			if !visited[origIdx] {
				node, err := createNode(origIdx)
				if err != nil {
					return t, err
				}
				t.nodes = append(t.nodes, node)
				convNewIdx := len(t.nodes) - 1
				convNodeIdxStack = append(convNodeIdxStack, uint32(convNewIdx))
				origNodeIdxStack = append(origNodeIdxStack, uint32(origIdx))
				visited[origIdx] = true
				t.nodes[convIdx].Right = uint32(convNewIdx)
				continue
			}
		}
		// Then the left subtree, same rules.
		if t.nodes[convIdx].Flags&leftLeaf == 0 {
			origIdx := tree.Tree.Nodes[origNodeIdxStack[len(origNodeIdxStack)-1]].LeftChild
			if !visited[origIdx] {
				node, err := createNode(origIdx)
				if err != nil {
					return t, err
				}
				t.nodes = append(t.nodes, node)
				convNewIdx := len(t.nodes) - 1
				convNodeIdxStack = append(convNodeIdxStack, uint32(convNewIdx))
				origNodeIdxStack = append(origNodeIdxStack, uint32(origIdx))
				visited[origIdx] = true
				t.nodes[convIdx].Left = uint32(convNewIdx)
				continue
			}
		}
		// Both subtrees handled: pop back up.
		origNodeIdxStack = origNodeIdxStack[:len(origNodeIdxStack)-1]
		convNodeIdxStack = convNodeIdxStack[:len(convNodeIdxStack)-1]
	}
	return t, nil
}
// SKEnsembleFromReader reads sklearn tree ensemble model from `reader`
// (a pickled sklearn.ensemble.GradientBoostingClassifier).
// NOTE(review): the loadTransformation parameter is currently ignored;
// the returned Ensemble always uses TransformRaw.
func SKEnsembleFromReader(reader *bufio.Reader, loadTransformation bool) (*Ensemble, error) {
	e := &lgEnsemble{name: "sklearn.ensemble.GradientBoostingClassifier"}
	decoder := pickle.NewDecoder(reader)
	res, err := decoder.Decode()
	if err != nil {
		return nil, fmt.Errorf("error while decoding: %s", err.Error())
	}
	gbdt := pickle.SklearnGradientBoosting{}
	err = pickle.ParseClass(&gbdt, res)
	if err != nil {
		return nil, fmt.Errorf("error while parsing gradient boosting class: %s", err.Error())
	}
	// Binary classification is represented with a single raw output group.
	e.nRawOutputGroups = gbdt.NClasses
	if e.nRawOutputGroups == 2 {
		e.nRawOutputGroups = 1
	}
	e.MaxFeatureIdx = gbdt.MaxFeatures - 1
	nTrees := gbdt.NEstimators
	if nTrees == 0 {
		return nil, fmt.Errorf("no trees in file")
	}
	// NOTE(review): the message below has a typo ("Estimatoers") and a
	// missing closing parenthesis.
	if gbdt.NEstimators*e.nRawOutputGroups != len(gbdt.Estimators) {
		return nil, fmt.Errorf("unexpected number of trees (NEstimators = %d, nRawOutputGroups = %d, len(Estimatoers) = %d", gbdt.NEstimators, e.nRawOutputGroups, len(gbdt.Estimators))
	}
	scale := gbdt.LearningRate
	// The initial estimator's prior is folded (as `base`) into the first
	// round of trees, one entry per output group.
	base := make([]float64, e.nRawOutputGroups)
	if gbdt.InitEstimator.Name == "LogOddsEstimator" {
		for i := 0; i < e.nRawOutputGroups; i++ {
			base[i] = gbdt.InitEstimator.Prior[0]
		}
	} else if gbdt.InitEstimator.Name == "PriorProbabilityEstimator" {
		if len(gbdt.InitEstimator.Prior) != len(base) {
			return nil, fmt.Errorf("len(gbdt.InitEstimator.Prior) != len(base)")
		}
		base = gbdt.InitEstimator.Prior
	} else {
		return nil, fmt.Errorf("unknown initial estimator \"%s\"", gbdt.InitEstimator.Name)
	}
	// Trees are laid out round-major: all output groups of estimator 0,
	// then estimator 1, and so on.
	e.Trees = make([]lgTree, 0, gbdt.NEstimators*gbdt.NClasses)
	for i := 0; i < gbdt.NEstimators; i++ {
		for j := 0; j < e.nRawOutputGroups; j++ {
			treeNum := i*e.nRawOutputGroups + j
			tree, err := lgTreeFromSklearnDecisionTreeRegressor(gbdt.Estimators[treeNum], scale, base[j])
			if err != nil {
				return nil, fmt.Errorf("error while creating %d tree: %s", treeNum, err.Error())
			}
			e.Trees = append(e.Trees, tree)
		}
		// Only the first round of trees carries the prior; zero it afterwards.
		for k := range base {
			base[k] = 0.0
		}
	}
	return &Ensemble{e, &transformation.TransformRaw{e.nRawOutputGroups}}, nil
}
// SKEnsembleFromFile reads sklearn tree ensemble model from pickle file
// by opening it and delegating to SKEnsembleFromReader.
func SKEnsembleFromFile(filename string, loadTransformation bool) (*Ensemble, error) {
	reader, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	// Close the file when parsing finishes; read errors surface from the decoder.
	defer reader.Close()
	bufReader := bufio.NewReader(reader)
	return SKEnsembleFromReader(bufReader, loadTransformation)
} | skensemble_io.go | 0.640074 | 0.435241 | skensemble_io.go | starcoder |
package value
import (
"math/big"
)
// sin returns sin(v) evaluated at the context's precision via floatSin.
// domain: (−∞, ∞)
// range: [-1, +1]
func sin(c Context, v Value) Value {
	return evalFloatFunc(c, v, floatSin)
}
// cos returns cos(v) evaluated at the context's precision via floatCos.
// domain: (−∞, ∞)
// range: [-1, +1]
func cos(c Context, v Value) Value {
	return evalFloatFunc(c, v, floatCos)
}
// tan computes tangent as sin(x)/cos(x), mutating the big.Float obtained
// from floatSelf in place.
// domain: (−∞, ∞)
// range: (−∞, ∞)
func tan(c Context, v Value) Value {
	x := floatSelf(c, v).(BigFloat).Float
	if x.IsInf() {
		Errorf("tangent of infinity")
	}
	// tan is odd: evaluate on |x| and restore the sign at the end.
	negate := false
	if x.Sign() < 0 {
		x.Neg(x)
		negate = true
	}
	// Reduce the argument to [0, 2π) before the series evaluations.
	twoPiReduce(c, x)
	num := floatSin(c, x)
	den := floatCos(c, x)
	if den.Sign() == 0 {
		Errorf("tangent is infinite")
	}
	num.Quo(num, den)
	if negate {
		num.Neg(num)
	}
	return BigFloat{num}.shrink()
}
// floatSin computes sin(x) using argument reduction and a Taylor series.
// NOTE: x is mutated in place (sign flip and 2π reduction).
func floatSin(c Context, x *big.Float) *big.Float {
	if x.IsInf() {
		Errorf("sine of infinity")
	}
	// sin is odd: evaluate on |x| and restore the sign afterwards.
	negate := false
	if x.Sign() < 0 {
		x.Neg(x)
		negate = true
	}
	twoPiReduce(c, x)
	// sin(x) = x - x³/3! + x⁵/5! - ...
	// First term to compute in loop will be -x³/3!
	factorial := newFloat(c).SetInt64(6)
	result := sincos("sin", c, 3, x, newFloat(c).Set(x), 3, factorial)
	if negate {
		result.Neg(result)
	}
	return result
}
// floatCos computes cos(x) using argument reduction and a Taylor series.
// NOTE: x is mutated in place by the 2π reduction (cos is even, so no
// sign handling is needed).
func floatCos(c Context, x *big.Float) *big.Float {
	if x.IsInf() {
		Errorf("cosine of infinity")
	}
	twoPiReduce(c, x)
	// cos(x) = 1 - x²/2! + x⁴/4! - ...
	// First term to compute in loop will be -x²/2!.
	factorial := newFloat(c).Set(floatTwo)
	return sincos("cos", c, 2, x, newFloat(c).SetInt64(1), 2, factorial)
}
// sincos iterates a sin or cos Taylor series until the accumulator stops
// changing at the working precision (per newLoop/loop.done).
//   index     - power of x in the first loop term (3 for sin, 2 for cos)
//   z         - accumulator seeded with the zeroth term (x for sin, 1 for cos)
//   exp       - exponent whose factorial is currently held in factorial
//   factorial - exp!, with its sign alternated inside the loop
func sincos(name string, c Context, index int, x *big.Float, z *big.Float, exp uint64, factorial *big.Float) *big.Float {
	// term starts as x**index.
	term := newFloat(c).Set(floatOne)
	for j := 0; j < index; j++ {
		term.Mul(term, x)
	}
	xN := newFloat(c).Set(term)
	x2 := newFloat(c).Mul(x, x)
	n := newFloat(c)
	for loop := newLoop(c.Config(), name, x, 4); ; {
		// Invariant: factorial holds -1ⁿ*exponent!.
		factorial.Neg(factorial)
		term.Quo(term, factorial)
		z.Add(z, term)
		if loop.done(z) {
			break
		}
		// Advance x**index (multiply by x²).
		term.Mul(xN, x2)
		xN.Set(term)
		// Advance factorial.
		factorial.Mul(factorial, n.SetUint64(exp+1))
		factorial.Mul(factorial, n.SetUint64(exp+2))
		exp += 2
	}
	return z
}
// twoPiReduce guarantees x < 2π; x is known to be >= 0 coming in.
// The reduction happens in place on x.
func twoPiReduce(c Context, x *big.Float) {
	// TODO: Is there an easy better algorithm?
	twoPi := newFloat(c).Set(floatTwo)
	twoPi.Mul(twoPi, floatPi)
	// Do something clever(er) if it's large.
	if x.Cmp(newFloat(c).SetInt64(1000)) > 0 {
		// Build a ladder of multiples 16·2π, 16²·2π, ... that stays below x.
		multiples := make([]*big.Float, 0, 100)
		sixteen := newFloat(c).SetInt64(16)
		multiple := newFloat(c).Set(twoPi)
		for {
			multiple.Mul(multiple, sixteen)
			if x.Cmp(multiple) < 0 {
				break
			}
			multiples = append(multiples, newFloat(c).Set(multiple))
		}
		// From the right, subtract big multiples.
		for i := len(multiples) - 1; i >= 0; i-- {
			multiple := multiples[i]
			for x.Cmp(multiple) >= 0 {
				x.Sub(x, multiple)
			}
		}
	}
	// Final pass: plain repeated subtraction of 2π.
	for x.Cmp(twoPi) >= 0 {
		x.Sub(x, twoPi)
	}
} | value/sin.go | 0.672009 | 0.542197 | sin.go | starcoder |
package hbook
import (
"bytes"
"encoding/gob"
"io"
"math"
"github.com/go-hep/dtypes"
"github.com/go-hep/rio"
)
// H1D is a 1-dim histogram with weighted entries.
type H1D struct {
	bins []Bin1D // in-range bins; aliases allbins[2:] (see NewH1D)
	allbins []Bin1D // [0]=underflow, [1]=overflow, [2:]=in-range bins (see Fill)
	axis Axis // binning of the x-axis
	entries int64 // number of entries for this histogram
	ann Annotation // Annotations for this histogram (title, labels,...)
}
// NewH1D returns a 1-dim histogram with nbins bins between low and high.
// The backing slice holds nbins+2 bins: the first two are reserved for
// underflow/overflow and bins is re-sliced to cover the in-range part.
func NewH1D(nbins int, low, high float64) *H1D {
	h := &H1D{
		bins: nil,
		allbins: make([]Bin1D, nbins+2),
		axis: NewEvenBinAxis(nbins, low, high),
		entries: 0,
		ann: make(Annotation),
	}
	h.bins = h.allbins[2:]
	return h
}
// Name returns the name of this histogram, if any.
// It panics when the "name" annotation is absent or not a string.
func (h *H1D) Name() string {
	return h.ann["name"].(string)
}
// Annotation returns the annotations attached to this histogram
func (h *H1D) Annotation() Annotation {
	return h.ann
}
// Rank returns the number of dimensions for this histogram (always 1).
func (h *H1D) Rank() int {
	return 1
}
// Axis returns the axis of this histogram.
func (h *H1D) Axis() Axis {
	return h.axis
}
// Entries returns the number of entries in this histogram
func (h *H1D) Entries() int64 {
	return h.entries
}
// Fill fills this histogram with x and weight w.
// Out-of-range samples are routed to the underflow (allbins[0]) or
// overflow (allbins[1]) bin; in-range samples go to their mapped bin.
func (h *H1D) Fill(x, w float64) {
	switch idx := h.axis.CoordToIndex(x); idx {
	case UnderflowBin:
		h.allbins[0].fill(x, w)
	case OverflowBin:
		h.allbins[1].fill(x, w)
	default:
		h.bins[idx].fill(x, w)
	}
	h.entries++
}
// Value returns the content of the idx-th bin.
// idx indexes the in-range bins only; the content is the bin's sum of weights.
func (h *H1D) Value(idx int) float64 {
	return h.bins[idx].sw
}
// Len returns the number of bins for this histogram
func (h *H1D) Len() int {
	return h.Axis().Bins()
}
// XY returns the x,y values for the i-th bin:
// x is the bin's lower edge, y its summed weight.
func (h *H1D) XY(i int) (float64, float64) {
	x := float64(h.Axis().BinLowerEdge(i))
	y := h.Value(i)
	return x, y
}
// DataRange implements the gonum/plot.DataRanger interface.
// x bounds come from the axis edges; y bounds are scanned from bin contents.
func (h *H1D) DataRange() (xmin, xmax, ymin, ymax float64) {
	axis := h.Axis()
	nbins := h.Len()
	xmin = float64(axis.BinLowerEdge(0))
	xmax = float64(axis.BinUpperEdge(nbins - 1))
	// Seed with extreme sentinels so any real bin content replaces them.
	ymin = +math.MaxFloat64
	ymax = -math.MaxFloat64
	for i := 0; i < nbins; i++ {
		y := h.Value(i)
		if y < ymin {
			ymin = y
		}
		if y > ymax {
			ymax = y
		}
	}
	return xmin, xmax, ymin, ymax
}
// Mean returns the weighted mean of this histogram's in-range bins:
// sum(w*x) / sum(w).
func (h *H1D) Mean() float64 {
	var sumwc, sumw float64
	for i := range h.bins {
		sumwc += h.bins[i].swc
		sumw += h.bins[i].sw
	}
	return sumwc / sumw
}
// RMS returns the root mean squared of this histogram, computed from the
// weighted first and second moments of the in-range bins.
func (h *H1D) RMS() float64 {
	var sumwc, sumw, sumwc2 float64
	for i := range h.bins {
		b := &h.bins[i]
		sumwc += b.swc
		sumw += b.sw
		// Skip empty bins to avoid dividing by a zero weight.
		if b.sw != 0. {
			sumwc2 += b.swc * b.swc / b.sw
		}
	}
	invw := 1. / sumw
	return math.Sqrt(invw * (sumwc2 - (sumwc*sumwc)*invw))
}
// Max returns the maximum bin content (sum of weights) of this histogram.
func (h *H1D) Max() float64 {
	max := math.Inf(-1)
	for i := range h.bins {
		if sw := h.bins[i].sw; sw > max {
			max = sw
		}
	}
	return max
}
// Min returns the minimum bin content (sum of weights) of this histogram.
func (h *H1D) Min() float64 {
	min := math.Inf(1)
	for i := range h.bins {
		if sw := h.bins[i].sw; sw < min {
			min = sw
		}
	}
	return min
}
// MarshalBinary implements encoding.BinaryMarshaler using the rio encoding.
func (h *H1D) MarshalBinary() ([]byte, error) {
	buf := new(bytes.Buffer)
	err := h.RioMarshal(buf)
	return buf.Bytes(), err
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler using the rio encoding.
func (h *H1D) UnmarshalBinary(data []byte) error {
	buf := bytes.NewReader(data)
	return h.RioUnmarshal(buf)
}
// GobEncode implements gob.GobEncoder; the payload is the rio encoding.
func (h *H1D) GobEncode() ([]byte, error) {
	buf := new(bytes.Buffer)
	err := h.RioMarshal(buf)
	return buf.Bytes(), err
}
// GobDecode implements gob.GobDecoder; the payload is the rio encoding.
func (h *H1D) GobDecode(data []byte) error {
	buf := bytes.NewReader(data)
	return h.RioUnmarshal(buf)
}
// RioMarshal gob-encodes the histogram fields to w, in the fixed order
// allbins, axis, entries, annotations.
func (h *H1D) RioMarshal(w io.Writer) error {
	enc := gob.NewEncoder(w)
	for _, v := range []interface{}{h.allbins, &h.axis, h.entries, h.ann} {
		if err := enc.Encode(v); err != nil {
			return err
		}
	}
	return nil
}
// RioUnmarshal gob-decodes the histogram fields from r, mirroring the
// field order written by RioMarshal.
func (h *H1D) RioUnmarshal(r io.Reader) error {
	dec := gob.NewDecoder(r)
	if err := dec.Decode(&h.allbins); err != nil {
		return err
	}
	// Re-establish the in-range view over the freshly decoded storage.
	h.bins = h.allbins[2:]
	for _, p := range []interface{}{&h.axis, &h.entries, &h.ann} {
		if err := dec.Decode(p); err != nil {
			return err
		}
	}
	return nil
}
// RioVersion implements rio.Streamer and reports the stream-format version (0).
func (h *H1D) RioVersion() rio.Version {
	return 0
}
// check various interfaces
// These compile-time assertions fail the build if *H1D stops satisfying them.
var _ Object = (*H1D)(nil)
var _ Histogram = (*H1D)(nil)
// serialization interfaces
var _ rio.Marshaler = (*H1D)(nil)
var _ rio.Unmarshaler = (*H1D)(nil)
var _ rio.Streamer = (*H1D)(nil)
// init registers *H1D with gob and the dtypes registry so values can be
// (de)serialized through interface-typed fields.
func init() {
	gob.Register((*H1D)(nil))
	dtypes.Register((*H1D)(nil))
}
// EOF | h1d.go | 0.766905 | 0.51818 | h1d.go | starcoder |
package handlers
import (
"fmt"
"math"
"strconv"
"strings"
)
const (
	earthRadius = 6378137.0 // WGS84 equatorial radius, meters
	// NOTE(review): π·R is half the Earth's circumference, so the name is off
	// by 2×; initialResolution = 2·π·R/256 still equals the full
	// circumference divided by 256, i.e. meters/pixel of a 256px tile at zoom 0.
	earthCircumference = math.Pi * earthRadius
	initialResolution = 2 * earthCircumference / 256
	dpi uint8 = 96
)
// tileCoord identifies a single web-mercator tile: zoom level z and the
// tile indices x (longitude axis) and y (latitude axis), each in [0, 2^z).
type tileCoord struct {
	z uint8
	x, y uint64
}

// tileCoordFromString parses and returns tileCoord coordinates and an optional
// extension from the three parameters. The parameter z is interpreted as the
// web mercator zoom level, it is supposed to be an unsigned integer that will
// fit into 8 bit. The parameters x and y are interpreted as longitude and
// latitude tile indices for that zoom level, both are supposed be integers in
// the integer interval [0,2^z). Additionally, y may also have an optional
// filename extension (e.g. "42.png") which is removed before parsing the
// number, and returned, too. In case an error occurred during parsing or if the
// values are not in the expected interval, the returned error is non-nil.
func tileCoordFromString(z, x, y string) (tc tileCoord, ext string, err error) {
	var z64 uint64
	if z64, err = strconv.ParseUint(z, 10, 8); err != nil {
		err = fmt.Errorf("cannot parse zoom level: %v", err)
		return
	}
	tc.z = uint8(z64)
	const (
		errMsgParse = "cannot parse %s coordinate axis: %v"
		errMsgOOB   = "%s coordinate (%d) is out of bounds for zoom level %d"
	)
	if tc.x, err = strconv.ParseUint(x, 10, 64); err != nil {
		// The x branch used to say "first" while y said "y"; name the axis
		// consistently in both messages.
		err = fmt.Errorf(errMsgParse, "x", err)
		return
	}
	if tc.x >= (1 << z64) {
		err = fmt.Errorf(errMsgOOB, "x", tc.x, tc.z)
		return
	}
	// Strip an optional filename extension from y before parsing the index.
	s := y
	if l := strings.LastIndex(s, "."); l >= 0 {
		s, ext = s[:l], s[l:]
	}
	if tc.y, err = strconv.ParseUint(s, 10, 64); err != nil {
		err = fmt.Errorf(errMsgParse, "y", err)
		return
	}
	if tc.y >= (1 << z64) {
		err = fmt.Errorf(errMsgOOB, "y", tc.y, tc.z)
		return
	}
	return
}
// calcScaleResolution returns the map-scale denominator and the ground
// resolution (meters per pixel) for a web-mercator zoom level at the given dpi.
func calcScaleResolution(zoomLevel uint8, dpi uint8) (float64, float64) {
	var denom = 1 << zoomLevel // 2^zoom; NOTE(review): overflows int for zoomLevel > 62
	resolution := initialResolution / float64(denom)
	scale := float64(dpi) * 39.37 * resolution // 39.37 in/m
	return scale, resolution
} | handlers/tile.go | 0.728941 | 0.477981 | tile.go | starcoder |
package unit
import (
"fmt"
"math"
"github.com/brettbuddin/shaden/dsp"
"github.com/brettbuddin/shaden/graph"
)
// InMode is a mode of processing of an In.
type InMode int
// InModes
const (
Block InMode = iota
Sample
)
const controlPeriod = 64
// In is a unit input
type In struct {
name string
mode InMode
normal, constant dsp.Valuer
frame, normalFrame []float64
unit *Unit
source *Out
node *graph.Node
controlLastF float64
controlLastI int
}
// NewIn returns a new input
func NewIn(name string, v dsp.Valuer, frameSize int) *In {
f := make([]float64, frameSize)
in := &In{
name: name,
frame: f,
normalFrame: f,
}
in.setNormal(v)
return in
}
// Read reads a specific sample from the input frame
func (in *In) Read(i int) float64 {
if isSourceControlRate(in) {
return in.frame[0]
}
if in.mode == Sample {
size := len(in.frame)
i = (i - 1 + size) % size
}
return in.frame[i]
}
// ReadSlow reads a specific sample from the input frame at a slow rate
func (in *In) ReadSlow(i int, f func(float64) float64) float64 {
if i%controlPeriod == 0 {
in.controlLastF = f(in.Read(i))
}
return in.controlLastF
}
// ReadSlowInt reads a specific sample from the input frame at a slow rate
func (in *In) ReadSlowInt(i int, f func(float64) int) int {
if i%controlPeriod == 0 {
in.controlLastI = f(in.Read(i))
}
return in.controlLastI
}
// Fill fills the internal frame with a specific constant value
func (in *In) Fill(v dsp.Valuer) {
for i := range in.frame {
in.frame[i] = v.Float64()
}
in.constant = v
}
// Write writes a sample to the internal buffer
func (in *In) Write(i int, v float64) {
in.frame[i] = v
}
// Couple assigns the internal frame of this input to the frame of an output; binding them together. This in-of-itself
// does not define the connection. That is controlled by the the Nodes and Graph.
func (in *In) Couple(out Output) {
o := out.Out()
in.source = o
in.frame = o.frame
}
// HasSource returns whether or not we have an inbound connection
func (in *In) HasSource() bool {
return in.source != nil
}
// Source returns the inbound connection if it has one
func (in *In) Source() *Out {
return in.source
}
// Constant is the constant value that's filling the buffer.
func (in *In) Constant() dsp.Valuer {
return in.constant
}
// Reset disconnects an input from an output (if a connection has been established) and fills the frame with the normal
// constant value
func (in *In) Reset() {
in.source = nil
in.frame = in.normalFrame
in.constant = in.normal
in.Fill(in.normal)
}
// SetMode sets the processing mode for this input.
func (in *In) SetMode(mode InMode) {
in.mode = mode
}
// ExternalNeighborCount returns the count of neighboring nodes outside of the parent Unit
func (in *In) ExternalNeighborCount() int {
return in.node.InNeighborCount()
}
func (in *In) setNormal(v dsp.Valuer) {
in.normal = v
in.Fill(v)
}
func (in *In) String() string {
return fmt.Sprintf("%s/%s", in.unit.ID, in.name)
}
func isSourceControlRate(in *In) bool {
return in.HasSource() && in.source.Rate() == RateControl
}
func ident(v float64) float64 { return v }
func clamp(min, max float64) func(float64) float64 {
return func(v float64) float64 {
return dsp.Clamp(v, min, max)
}
}
func identInt(v float64) int { return int(v) }
func clampInt(min, max float64) func(float64) int {
return func(v float64) int {
return int(dsp.Clamp(v, min, max))
}
}
func minInt(min float64) func(float64) int {
return func(v float64) int {
return int(math.Max(v, min))
}
}
func modInt(mod int) func(float64) int {
return func(v float64) int {
return (int(v) + mod) % mod
}
} | unit/in.go | 0.777131 | 0.499207 | in.go | starcoder |
package obj
import (
"github.com/deadsy/sdfx/sdf"
"github.com/ivanpointer/pterosphera/render"
)
// BTU Defines the dimensions of a single BTU (Ball Transfer Unit)
type BTU struct {
// BaseR is the radius of the base of the BTU.
BaseR float64
// BaseH is the height of the base (stem) of the BTU.
BaseH float64
// HeadR is the radius of the head of the BTU.
HeadR float64
// HeadH is the height of the head of the BTU.
HeadH float64
// BallR is the radius of the ball within the BTU.
BallR float64
// TotalH is the total height of the BTU (used to calculate where to put the ball).
TotalH float64
}
// BTURender defines the render parameters for rendering a BTU.
type BTURender struct {
// Settings are the general render settings for this project.
Settings render.RenderSettings
}
// Render renders a single BTU
func (b *BTU) Render(r BTURender) (sdf.SDF3, error) {
// Render the base
base, err := sdf.Cylinder3D(b.BaseH, b.BaseR, 0)
if err != nil {
return nil, err
}
// Render the head
head, err := sdf.Cylinder3D(b.HeadH, b.HeadR, 0)
if err != nil {
return nil, err
}
head = sdf.Transform3D(head, sdf.Translate3d(sdf.V3{X: 0, Y: 0, Z: ((b.BaseH + b.HeadH) / 2) - r.Settings.WeldShift}))
// Render the ball
ball, err := sdf.Sphere3D(b.BallR)
if err != nil {
return nil, err
}
ballZ := b.TotalH - (((b.BaseH + b.HeadH) / 2) + b.BallR)
ball = sdf.Transform3D(ball, sdf.Translate3d(sdf.V3{X: 0, Y: 0, Z: ballZ}))
// Weld all the pieces together
btu := sdf.Union3D(base, head)
btu = sdf.Union3D(btu, ball)
// For dev purposes, put a bead on the "north" side of the BTU
const beadR = 0.5
bead, err := sdf.Sphere3D(beadR)
if err != nil {
return nil, err
}
bead = sdf.Transform3D(bead, sdf.Translate3d(sdf.V3{X: b.HeadR - beadR - 0.1, Y: 0, Z: (b.BaseH + b.HeadH + beadR) / 2}))
btu = sdf.Union3D(btu, bead)
// Move the whole model to recenter it
btu = sdf.Transform3D(btu, sdf.Translate3d(sdf.V3{Z: (b.TotalH - b.BaseH) / -2}))
// Return what we built
return btu, nil
}
// RenderPeg renders a peg version of a BTU (used for cutting holes), using a total peg height.
// Note: the peg base will always be the same length, the given height (h) determines the height of the head portion of the BTU.
func (b *BTU) RenderPeg(h float64, r BTURender) (sdf.SDF3, error) {
// Render the base
base, err := sdf.Cylinder3D(b.BaseH, b.BaseR+r.Settings.WeldShift, 0)
if err != nil {
return nil, err
}
// Render the head, to the length given
headH := h - b.BaseH + r.Settings.WeldShift
head, err := sdf.Cylinder3D(headH, b.HeadR+r.Settings.WeldShift, 0)
if err != nil {
return nil, err
}
head = sdf.Transform3D(head, sdf.Translate3d(sdf.V3{X: 0, Y: 0, Z: ((b.BaseH + headH) / 2) - r.Settings.WeldShift}))
// Weld the head and base together
peg := sdf.Union3D(base, head)
// Done
return peg, nil
} | go_sdx/obj/btu.go | 0.672117 | 0.415195 | btu.go | starcoder |
package ternary
// Str returns onTrue when cond holds, otherwise onFalse (both strings).
func Str(cond bool, onTrue string, onFalse string) string {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrInt returns onTrue (string) when cond holds, otherwise onFalse (int).
func StrInt(cond bool, onTrue string, onFalse int) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrInt8 returns onTrue (string) when cond holds, otherwise onFalse (int8).
func StrInt8(cond bool, onTrue string, onFalse int8) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrInt16 returns onTrue (string) when cond holds, otherwise onFalse (int16).
func StrInt16(cond bool, onTrue string, onFalse int16) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrInt32 returns onTrue (string) when cond holds, otherwise onFalse (int32).
func StrInt32(cond bool, onTrue string, onFalse int32) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrInt64 returns onTrue (string) when cond holds, otherwise onFalse (int64).
func StrInt64(cond bool, onTrue string, onFalse int64) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrFloat32 returns onTrue (string) when cond holds, otherwise onFalse (float32).
func StrFloat32(cond bool, onTrue string, onFalse float32) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrFloat64 returns onTrue (string) when cond holds, otherwise onFalse (float64).
func StrFloat64(cond bool, onTrue string, onFalse float64) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrCmplx64 returns onTrue (string) when cond holds, otherwise onFalse (complex64).
func StrCmplx64(cond bool, onTrue string, onFalse complex64) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrCmplx128 returns onTrue (string) when cond holds, otherwise onFalse (complex128).
func StrCmplx128(cond bool, onTrue string, onFalse complex128) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrUint returns onTrue (string) when cond holds, otherwise onFalse (uint).
func StrUint(cond bool, onTrue string, onFalse uint) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrUint8 returns onTrue (string) when cond holds, otherwise onFalse (uint8).
func StrUint8(cond bool, onTrue string, onFalse uint8) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrUint16 returns onTrue (string) when cond holds, otherwise onFalse (uint16).
func StrUint16(cond bool, onTrue string, onFalse uint16) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrUint32 returns onTrue (string) when cond holds, otherwise onFalse (uint32).
func StrUint32(cond bool, onTrue string, onFalse uint32) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrUint64 returns onTrue (string) when cond holds, otherwise onFalse (uint64).
func StrUint64(cond bool, onTrue string, onFalse uint64) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrUintptr returns onTrue (string) when cond holds, otherwise onFalse (uintptr).
func StrUintptr(cond bool, onTrue string, onFalse uintptr) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrBool returns onTrue (string) when cond holds, otherwise onFalse (bool).
func StrBool(cond bool, onTrue string, onFalse bool) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrIface returns onTrue (string) when cond holds, otherwise onFalse (any value).
func StrIface(cond bool, onTrue string, onFalse interface{}) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
// StrError returns onTrue (string) when cond holds, otherwise onFalse (error).
func StrError(cond bool, onTrue string, onFalse error) interface{} {
	if !cond {
		return onFalse
	}
	return onTrue
}
package query
import (
"github.com/grafana-tools/sdk"
)
// Option represents an option that can be used to configure a query.
// Each Option mutates the query it is applied to.
type Option func(query *Query)
// SortOrder represents the ordering method applied to values.
type SortOrder int

const (
	// None will preserve the results ordering as returned by the query.
	None SortOrder = iota
	// AlphabeticalAsc will sort the results by ascending alphabetical order.
	AlphabeticalAsc
	// AlphabeticalDesc will sort the results by descending alphabetical order.
	AlphabeticalDesc
	// NumericalAsc will sort the results by ascending numerical order.
	NumericalAsc
	// NumericalDesc will sort the results by descending numerical order.
	NumericalDesc
	// AlphabeticalNoCaseAsc will sort the results by ascending alphabetical order, case-insensitive.
	AlphabeticalNoCaseAsc
	// AlphabeticalNoCaseDesc will sort the results by descending alphabetical order, case-insensitive.
	AlphabeticalNoCaseDesc
)
// RefreshInterval represents the interval at which the results of a query will
// be refreshed.
type RefreshInterval int

// The constants are explicitly typed as RefreshInterval (they were previously
// untyped ints), matching the SortOrder constants above and the parameter type
// of Refresh().
const (
	// Never will prevent the results from being refreshed.
	Never RefreshInterval = iota
	// DashboardLoad will refresh the results every time the dashboard is loaded.
	DashboardLoad
	// TimeChange will refresh the results every time the time interval changes.
	TimeChange
)
// Query represents a "query" templated variable.
type Query struct {
	// Builder is the underlying Grafana SDK template variable that the
	// Option functions mutate; it holds the final serialized configuration.
	Builder sdk.TemplateVar
}
// New creates a new "query" templated variable.
// The refresh mode defaults to DashboardLoad; pass Refresh(...) to override it.
func New(name string, options ...Option) *Query {
	q := &Query{Builder: sdk.TemplateVar{
		Name:  name,
		Label: name,
		Type:  "query",
	}}
	// Apply the default refresh first so any user-supplied option can override it.
	Refresh(DashboardLoad)(q)
	for _, opt := range options {
		opt(q)
	}
	return q
}
// DataSource sets the data source to be used by the query.
func DataSource(source string) Option {
	return func(q *Query) {
		q.Builder.Datasource = &source
	}
}
// Request defines the query to be executed.
func Request(request string) Option {
	return func(q *Query) {
		q.Builder.Query = request
	}
}
// Sort defines the order in which the values will be sorted.
func Sort(order SortOrder) Option {
	return func(q *Query) {
		q.Builder.Sort = int(order)
	}
}
// Refresh defines the interval in which the values will be refreshed.
func Refresh(refresh RefreshInterval) Option {
	return func(q *Query) {
		// The SDK models the refresh mode as a flag plus a pointer to the value.
		interval := int64(refresh)
		q.Builder.Refresh = sdk.BoolInt{Flag: true, Value: &interval}
	}
}
// Regex defines a filter allowing to filter the values returned by the request/query.
func Regex(regex string) Option {
	return func(q *Query) {
		q.Builder.Regex = regex
	}
}
// Label sets the label of the variable.
func Label(label string) Option {
	return func(q *Query) {
		q.Builder.Label = label
	}
}
// HideLabel ensures that this variable's label will not be displayed.
func HideLabel() Option {
	return func(q *Query) {
		q.Builder.Hide = 1 // hide mode 1: label hidden, variable still shown
	}
}
// Hide ensures that the variable will not be displayed.
func Hide() Option {
	return func(q *Query) {
		q.Builder.Hide = 2 // hide mode 2: variable hidden entirely
	}
}
// Multi allows several values to be selected.
func Multi() Option {
	return func(q *Query) {
		q.Builder.Multi = true
	}
}
// IncludeAll adds an option to allow all values to be selected.
func IncludeAll() Option {
	return func(q *Query) {
		// "$__all" is Grafana's built-in value matching every result.
		allOption := sdk.Option{Text: "All", Value: "$__all"}
		q.Builder.IncludeAll = true
		q.Builder.Options = append(q.Builder.Options, allOption)
	}
}
// DefaultAll selects "All" values by default.
func DefaultAll() Option {
return func(query *Query) {
query.Builder.Current = sdk.Current{Text: "All", Value: "$_all"}
}
} | vendor/github.com/K-Phoen/grabana/variable/query/query.go | 0.81468 | 0.455683 | query.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.