code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package models
// weekPredictor handles doing predictions for a potential phase permutation
// of a week once the phase pattern is finalized.
type weekPredictor struct {
	// Ticker holds the known prices for the week being analyzed.
	Ticker *PriceTicker
	// Pattern is the price pattern this permutation belongs to.
	Pattern PricePattern
	// PatternPhases is the finalized sequence of phases for this permutation.
	PatternPhases []PatternPhase

	// Value cache

	// patternWeight is the probability weight of the price pattern given last
	// week's pattern.
	patternWeight float64
	// patternPermutationCount is the number of possible phase permutations
	// for this pattern (see setup).
	patternPermutationCount int

	// Variables

	// binWidth is the total bin width (likelihood score) for this week.
	binWidth float64
	// pricesKnown is set to true if there are any known prices in the ticker.
	pricesKnown bool
	// result is the prediction being built; set to nil by buildWeek when a
	// known price rules this permutation out.
	result *PotentialWeek
}
// increaseBinWidth adds amount to the running bin width for this week.
func (predictor *weekPredictor) increaseBinWidth(amount float64) {
	predictor.binWidth += amount
}
// addPeriodBinWidth comes up with a score for how likely this period is to
// match the ticker. We call this score the bin "width".
//
// pricePeriod is the period being scored; knownPrice is the ticker's price
// for that period, with 0 meaning "unknown".
func (predictor *weekPredictor) addPeriodBinWidth(
	pricePeriod PricePeriod,
	knownPrice int,
) {
	// If this price is unknown (0) then we can't make probability estimates
	// with it.
	if knownPrice == 0 {
		return
	}
	// Otherwise remember that we know a price.
	predictor.pricesKnown = true

	// Get the min and max prices for this period.
	prices := predictor.result.Prices[pricePeriod]

	// Get the number of possible bell values (how many sides on this dice?).
	// We need to add one since this is an inclusive range.
	periodRange := prices.MaxPrice() - prices.GuaranteedPrice() + 1

	// Now compute the likelihood of any particular price in this bracket
	// occurring divided by the total number of prices. For many combinations
	// the minimum and maximum prices are far less likely to occur because of
	// how the price math is implemented. We divide by the period range to get
	// the likelihood that this price would occur in this range relative to
	// other ranges.
	priceChance := prices.PriceChance(knownPrice)
	periodWidth := 0.0
	if priceChance != 0.0 {
		periodWidth = priceChance / float64(periodRange)
	}

	// Weight it by the likelihood of this pattern occurring in the first
	// place.
	periodWidth *= predictor.patternWeight
	// Add it to the total likelihood of this week permutation happening.
	predictor.increaseBinWidth(periodWidth)
}
// buildWeek walks every phase of the pattern, projecting a potential price
// range for each price period and accumulating this week's bin width. If any
// known ticker price is impossible for this permutation, predictor.result is
// set to nil and prediction stops early.
func (predictor *weekPredictor) buildWeek() {
	ticker := predictor.Ticker
	result := predictor.result

	// The current week's price period.
	var pricePeriod PricePeriod
	// The current sub period of the phase.
	var phasePeriod int

	// Loop through each phase of the pattern.
	for _, thisPhase := range predictor.PatternPhases {
		// Loop through the number of periods in this phase's length.
		for phasePeriod = 0; phasePeriod < thisPhase.Length(); phasePeriod++ {
			// Get the projected price for this price period, according to
			// the phase.
			potentialPeriod := thisPhase.PotentialPeriod(pricePeriod, phasePeriod)

			// If this is not a valid price, we set the result to nil and
			// stop making predictions.
			knownPrice := ticker.Prices[pricePeriod]
			if !potentialPeriod.IsValidPrice(knownPrice) {
				predictor.result = nil
				return
			}

			result.Prices = append(result.Prices, potentialPeriod)

			// We want to find the highest minimum for this potential week
			// and use that as the week's guaranteed minimum.
			result.updatePriceRangeFromPrices(potentialPeriod, pricePeriod)
			result.Future.updatePriceRangeFromPrices(potentialPeriod, pricePeriod)
			result.Spikes.updateSpikeFromPeriod(
				potentialPeriod.PricePeriod, potentialPeriod.Spikes,
			)

			// Now get the probability width that this week will happen.
			predictor.addPeriodBinWidth(pricePeriod, knownPrice)

			// Increment the overall price period.
			pricePeriod++
		}
	}
}
// finalizeWidth normalizes the accumulated bin width and stores it on the
// result as this permutation's chance.
func (predictor *weekPredictor) finalizeWidth() {
	// If we had known prices, then we have a more informed bin width, and can
	// keep it. We check this flag rather than checking for a bin width of 0,
	// as patterns that COULD happen, but are vanishingly unlikely, will have
	// an effective width of 0.
	if !predictor.pricesKnown {
		predictor.binWidth = predictor.patternWeight
	}

	// Now weight each week by the number of possible weeks for this pattern.
	// As we knock out possible phase combinations for a pattern, the
	// likelihood of this pattern goes down.
	predictor.binWidth /= float64(predictor.patternPermutationCount)

	// Use this bin width as our chance for now.
	predictor.result.chance = predictor.binWidth
}
// setup initializes the result value and caches the pattern's base chance
// and permutation count before prediction begins.
func (predictor *weekPredictor) setup() {
	predictor.result = &PotentialWeek{
		Analysis: NewAnalysis(predictor.Ticker),
		Spikes: &SpikeRangeAll{
			big:   new(SpikeRange),
			small: new(SpikeRange),
			any:   new(SpikeRange),
		},
	}
	// The probability weight of this pattern given last week's pattern.
	predictor.patternWeight = predictor.Pattern.BaseChance(
		predictor.Ticker.PreviousPattern,
	)
	predictor.patternPermutationCount = predictor.Pattern.PermutationCount()
}
// Predict runs the full prediction for this permutation and returns the
// resulting potential week (nil when the ticker's known prices rule this
// permutation out) along with its bin width.
func (predictor *weekPredictor) Predict() (
	potentialWeek *PotentialWeek, binWidth float64,
) {
	predictor.setup()
	predictor.buildWeek()
	// Only finalize when buildWeek did not invalidate the permutation.
	if predictor.result != nil {
		predictor.finalizeWidth()
	}
	return predictor.result, predictor.binWidth
} | models/predictorWeek.go | 0.842345 | 0.672264 | predictorWeek.go | starcoder |
package emu
import (
"log"
"gitlab.com/akita/mgpusim/insts"
"gitlab.com/akita/mgpusim/kernels"
"gitlab.com/akita/util/ca"
)
// A Wavefront in the emu package is a wrapper for the kernels.Wavefront.
// It carries the architectural state needed to emulate execution.
type Wavefront struct {
	*kernels.Wavefront

	// pid is the process ID this wavefront runs under.
	pid ca.PID

	Completed  bool
	AtBarrier  bool
	inst       *insts.Inst // instruction currently being executed
	scratchpad Scratchpad  // scratch memory used during emulation

	PC       uint64 // program counter
	Exec     uint64 // EXEC mask register
	SCC      byte   // scalar condition code
	VCC      uint64 // vector condition code
	M0       uint32 // M0 register
	SRegFile []byte // raw scalar register file storage
	VRegFile []byte // raw vector register file storage
	LDS      []byte // local data share backing storage
}
// NewWavefront returns the Wavefront that wraps the nativeWf.
func NewWavefront(nativeWf *kernels.Wavefront) *Wavefront {
	wf := new(Wavefront)
	wf.Wavefront = nativeWf
	// 102 scalar registers, 4 bytes each.
	wf.SRegFile = make([]byte, 4*102)
	// 256 vector registers, 4 bytes each, for each of 64 lanes.
	wf.VRegFile = make([]byte, 4*64*256)
	wf.scratchpad = make([]byte, 4096)
	return wf
}
// Inst returns the instruction that the wavefront is executing.
func (wf *Wavefront) Inst() *insts.Inst {
	return wf.inst
}

// Scratchpad returns the scratchpad that is associated with the wavefront.
func (wf *Wavefront) Scratchpad() Scratchpad {
	return wf.scratchpad
}

// PID returns the wavefront's pid.
func (wf *Wavefront) PID() ca.PID {
	return wf.pid
}

// SRegValue returns s(i)'s value.
func (wf *Wavefront) SRegValue(i int) uint32 {
	// Each scalar register occupies 4 bytes of the register file.
	return insts.BytesToUint32(wf.SRegFile[i*4 : i*4+4])
}

// VRegValue returns the value of v(i) of a certain lane.
func (wf *Wavefront) VRegValue(lane int, i int) uint32 {
	// Each lane owns 256 registers * 4 bytes = 1024 bytes of the file.
	offset := lane*1024 + i*4
	return insts.BytesToUint32(wf.VRegFile[offset : offset+4])
}
// ReadReg returns the raw bytes held by the given register. regCount is the
// number of consecutive registers read; laneID selects the lane for vector
// registers.
//nolint:gocyclo
func (wf *Wavefront) ReadReg(reg *insts.Reg, regCount int, laneID int) []byte {
	numBytes := reg.ByteSize
	if regCount >= 2 {
		numBytes *= regCount
	}

	// There are some concerns in terms of reading VCC and EXEC (64 or 32?
	// And how to decide?)
	value := make([]byte, numBytes)
	switch {
	case reg.IsSReg():
		offset := reg.RegIndex() * 4
		copy(value, wf.SRegFile[offset:offset+numBytes])
	case reg.IsVReg():
		// 256 registers * 4 bytes per lane.
		offset := laneID*256*4 + reg.RegIndex()*4
		copy(value, wf.VRegFile[offset:offset+numBytes])
	case reg.RegType == insts.SCC:
		value[0] = wf.SCC
	case reg.RegType == insts.VCC:
		copy(value, insts.Uint64ToBytes(wf.VCC))
	case reg.RegType == insts.VCCLO && regCount == 1:
		copy(value, insts.Uint32ToBytes(uint32(wf.VCC)))
	case reg.RegType == insts.VCCHI && regCount == 1:
		copy(value, insts.Uint32ToBytes(uint32(wf.VCC>>32)))
	case reg.RegType == insts.VCCLO && regCount == 2:
		copy(value, insts.Uint64ToBytes(wf.VCC))
	case reg.RegType == insts.EXEC:
		copy(value, insts.Uint64ToBytes(wf.Exec))
	case reg.RegType == insts.EXECLO && regCount == 2:
		copy(value, insts.Uint64ToBytes(wf.Exec))
	case reg.RegType == insts.M0:
		copy(value, insts.Uint32ToBytes(wf.M0))
	default:
		log.Panicf("Register type %s not supported", reg.Name)
	}

	return value
}
// WriteReg returns the raw register value
//nolint:gocyclo
func (wf *Wavefront) WriteReg(
reg *insts.Reg,
regCount int,
laneID int,
data []byte,
) {
numBytes := reg.ByteSize
if regCount >= 2 {
numBytes *= regCount
}
if reg.IsSReg() {
offset := reg.RegIndex() * 4
copy(wf.SRegFile[offset:offset+numBytes], data)
} else if reg.IsVReg() {
offset := laneID*256*4 + reg.RegIndex()*4
copy(wf.VRegFile[offset:offset+numBytes], data)
} else if reg.RegType == insts.SCC {
wf.SCC = data[0]
} else if reg.RegType == insts.VCC {
wf.VCC = insts.BytesToUint64(data)
} else if reg.RegType == insts.VCCLO && regCount == 2 {
wf.VCC = insts.BytesToUint64(data)
} else if reg.RegType == insts.VCCLO && regCount == 1 {
wf.VCC &= uint64(0x00000000ffffffff)
wf.VCC |= uint64(insts.BytesToUint32(data))
} else if reg.RegType == insts.VCCHI && regCount == 1 {
wf.VCC &= uint64(0xffffffff00000000)
wf.VCC |= uint64(insts.BytesToUint32(data)) << 32
} else if reg.RegType == insts.EXEC {
wf.Exec = insts.BytesToUint64(data)
} else if reg.RegType == insts.EXECLO && regCount == 2 {
wf.Exec = insts.BytesToUint64(data)
} else if reg.RegType == insts.M0 {
wf.M0 = insts.BytesToUint32(data)
} else {
log.Panicf("Register type %s not supported", reg.Name)
}
} | emu/wavefront.go | 0.595493 | 0.414543 | wavefront.go | starcoder |
package jps
// Node identifies a single cell in the grid by its row and column.
type Node struct {
	row int
	col int
}

// GetCol returns the column value for a node; in terms of x and y this is x.
func (n *Node) GetCol() int { return n.col }

// GetRow returns the row value for a node; in terms of x and y this is y.
func (n *Node) GetRow() int { return n.row }

// GetNode returns the node object for a row, column value.
func GetNode(row, col int) Node {
	return Node{row, col}
}

// equals reports whether two nodes refer to the same grid cell.
func (n *Node) equals(otherNode *Node) bool {
	return n.row == otherNode.row && n.col == otherNode.col
}
// blocked reports whether the cell at (row+dRow, col+dCol) is outside the
// matrix bounds or holds an obstacle (value 1).
func blocked(row, col, dRow, dCol int, matrix [][]uint8) bool {
	r, c := row+dRow, col+dCol
	if r < 0 || r >= len(matrix) {
		return true
	}
	if c < 0 || c >= len(matrix[0]) {
		return true
	}
	return matrix[r][c] == 1
}
// dblock reports whether both orthogonal cells behind a diagonal step into
// (row, col) from direction (dRow, dCol) are obstacles, i.e. the diagonal is
// squeezed between two walls. Callers must ensure the probed cells are in
// bounds.
func dblock(row, col, dRow, dCol int, matrix [][]uint8) bool {
	return matrix[row-dRow][col] == 1 && matrix[row-dRow][col-dCol] == 1
}
// direction returns the unit step (dRow, dCol) leading from the parent cell
// (pRow, pCol) toward (row, col); each component is -1, 0 or 1.
func direction(row, col, pRow, pCol int) (int, int) {
	dRow, dCol := 0, 0
	switch {
	case row > pRow:
		dRow = 1
	case row < pRow:
		dRow = -1
	}
	switch {
	case col > pCol:
		dCol = 1
	case col < pCol:
		dCol = -1
	}
	return dRow, dCol
}
// getSign returns -1 for negative numbers and 1 otherwise (zero included).
func getSign(num int) int {
	if num < 0 {
		return -1
	}
	return 1
}
// getAllUnblockedNeighbours collects every in-bounds, non-obstacle cell in
// the 8-neighbourhood of (row, col).
func getAllUnblockedNeighbours(row, col int, matrix [][]uint8) []*Node {
	neighbours := make([]*Node, 0)
	for dRow := -1; dRow <= 1; dRow++ {
		for dCol := -1; dCol <= 1; dCol++ {
			if dRow == 0 && dCol == 0 {
				continue // skip the cell itself
			}
			if blocked(row, col, dRow, dCol, matrix) {
				continue
			}
			neighbours = append(neighbours, &Node{row: row + dRow, col: col + dCol})
		}
	}
	return neighbours
}
// nodeNeighbours returns the pruned neighbour set of (row, col) for jump
// point search. For the start node (parent == nil) all unblocked neighbours
// are returned; otherwise neighbours are pruned based on the direction of
// travel from the parent, keeping natural neighbours plus any forced
// neighbours caused by adjacent obstacles.
func nodeNeighbours(row, col int, parent *Node, matrix [][]uint8) []*Node {
	if parent == nil {
		return getAllUnblockedNeighbours(row, col, matrix) // returning all unblocked nodes for start node
	}

	neighbours := make([]*Node, 0)
	dRow, dCol := direction(row, col, parent.row, parent.col)

	if dRow != 0 && dCol != 0 {
		// Diagonal travel: keep both orthogonal components and the diagonal
		// itself, plus forced neighbours behind blocking cells.
		if !blocked(row, col, 0, dCol, matrix) {
			neighbours = append(neighbours, &Node{row, col + dCol})
		}
		if !blocked(row, col, dRow, 0, matrix) {
			neighbours = append(neighbours, &Node{row + dRow, col})
		}
		if (!blocked(row, col, 0, dCol, matrix) || !blocked(row, col, dRow, 0, matrix)) &&
			!blocked(row, col, dRow, dCol, matrix) {
			neighbours = append(neighbours, &Node{row + dRow, col + dCol})
		}
		if blocked(row, col, -dRow, 0, matrix) {
			neighbours = append(neighbours, &Node{row - dRow, col + dCol})
		}
		if blocked(row, col, 0, -dCol, matrix) {
			neighbours = append(neighbours, &Node{row + dRow, col - dCol})
		}
		return neighbours
	}

	if dRow == 0 {
		// Horizontal travel.
		if !blocked(row, col, dRow, 0, matrix) {
			if !blocked(row, col, 0, dCol, matrix) {
				neighbours = append(neighbours, &Node{row, col + dCol})
			}
			if blocked(row, col, 1, 0, matrix) {
				neighbours = append(neighbours, &Node{row + 1, col + dCol})
			}
			if blocked(row, col, -1, 0, matrix) {
				neighbours = append(neighbours, &Node{row - 1, col + dCol})
			}
		}
		return neighbours
	}

	// Vertical travel. BUG FIX: the original nested the identical condition
	// !blocked(row, col, dRow, 0, matrix) twice here (copy-paste artifact);
	// the redundant inner check has been removed without changing behavior.
	if !blocked(row, col, dRow, 0, matrix) {
		neighbours = append(neighbours, &Node{row + dRow, col})
		if blocked(row, col, 0, 1, matrix) {
			neighbours = append(neighbours, &Node{row + dRow, col + 1})
		}
		if blocked(row, col, 0, -1, matrix) {
			neighbours = append(neighbours, &Node{row + dRow, col - 1})
		}
	}
	return neighbours
}
// jump recursively probes from (row, col) in direction (dRow, dCol) and
// returns the next jump point, the goal, or nil when the ray hits a wall or
// leaves the grid before reaching either.
func jump(row, col, dRow, dCol int, matrix [][]uint8, goal *Node) *Node {
	nRow := row + dRow
	nCol := col + dCol

	// Stop when the next cell is out of bounds or an obstacle.
	if blocked(nRow, nCol, 0, 0, matrix) {
		return nil
	}

	// The goal itself is always a jump point.
	if nRow == goal.row && nCol == goal.col {
		return &Node{row: nRow, col: nCol}
	}

	if dRow != 0 && dCol != 0 {
		// Diagonal probe: a forced neighbour beside an obstacle makes this
		// cell a jump point.
		if (!blocked(nRow, nCol, -dRow, dCol, matrix) && blocked(nRow, nCol, -dRow, 0, matrix)) ||
			(!blocked(nRow, nCol, dRow, -dCol, matrix) && blocked(nRow, nCol, 0, -dCol, matrix)) {
			return &Node{row: nRow, col: nCol}
		}
		// A diagonal cell is also a jump point when either orthogonal
		// component probe finds one.
		if jump(nRow, nCol, dRow, 0, matrix, goal) != nil || jump(nRow, nCol, 0, dCol, matrix, goal) != nil {
			return &Node{row: nRow, col: nCol}
		}
		// Stop when both cells behind the diagonal are walls.
		if dblock(nRow, nCol, dRow, dCol, matrix) {
			return nil
		}
	} else {
		if dRow != 0 {
			// Vertical probe: look for forced neighbours to either side.
			if (!blocked(nRow, nCol, dRow, 1, matrix) && blocked(nRow, nCol, 0, 1, matrix)) ||
				(!blocked(nRow, nCol, dRow, -1, matrix) && blocked(nRow, nCol, 0, -1, matrix)) {
				return &Node{row: nRow, col: nCol}
			}
		} else {
			// Horizontal probe: look for forced neighbours above or below.
			if (!blocked(nRow, nCol, 1, dCol, matrix) && blocked(nRow, nCol, 1, 0, matrix)) ||
				(!blocked(nRow, nCol, -1, dCol, matrix) && blocked(nRow, nCol, -1, 0, matrix)) {
				return &Node{row: nRow, col: nCol}
			}
		}
	}

	// Nothing interesting here; keep probing in the same direction.
	return jump(nRow, nCol, dRow, dCol, matrix, goal)
}
// identifySuccessors expands current by finding the jump points reachable
// from each pruned neighbour, using parentNodeMap to recover the direction
// of travel into current.
func identifySuccessors(current Node, parentNodeMap map[Node]Node, matrix [][]uint8, goal Node) []Node {
	successors := make([]Node, 0)

	// The start node has no parent recorded, leaving parent nil.
	var parent *Node
	if p, exist := parentNodeMap[current]; exist {
		parent = &p
	}

	neighbours := nodeNeighbours(current.row, current.col, parent, matrix)
	for _, cell := range neighbours {
		dRow := cell.row - current.row
		dCol := cell.col - current.col
		jumpPoint := jump(current.row, current.col, dRow, dCol, matrix, &goal)
		if jumpPoint != nil {
			successors = append(successors, *jumpPoint)
		}
	}
	return successors
} | jumpPoint.go | 0.723505 | 0.603026 | jumpPoint.go | starcoder |
package require
import (
"reflect"
"strconv"
)
// Int converts the given number or string value to int.
// If conversion is not possible it returns the given default value, or 0 if
// no default value is specified.
func Int(num interface{}, defaultValue ...int) int {
	def := 0
	if len(defaultValue) > 0 {
		def = defaultValue[0]
	}
	// Idiom fix: bind the concrete value in the type switch instead of
	// re-asserting in every branch.
	switch n := num.(type) {
	case byte:
		return int(n)
	case int:
		return n
	case int32:
		return int(n)
	case int64:
		return int(n)
	case float32:
		return int(n)
	case float64:
		return int(n)
	case string:
		v, err := strconv.Atoi(n)
		if err != nil {
			return def
		}
		return v
	default:
		return def
	}
}
// Int32 converts the given number or string value to int32.
// If conversion is not possible it returns the given default value, or 0 if
// no default value is specified.
func Int32(num interface{}, defaultValue ...int32) int32 {
	var def int32
	if len(defaultValue) > 0 {
		def = defaultValue[0]
	}
	switch num.(type) {
	case byte:
		return int32(num.(byte))
	case int:
		return int32(num.(int))
	case int32:
		return num.(int32)
	case int64:
		return int32(num.(int64))
	case float32:
		return int32(num.(float32))
	case float64:
		return int32(num.(float64))
	case string:
		// Parse into 32 bits so out-of-range strings fail and fall back to
		// the default.
		n, err := strconv.ParseInt(num.(string), 10, 32)
		if err != nil {
			return def
		}
		return int32(n)
	default:
		return def
	}
}

// Int64 converts the given number or string value to int64.
// If conversion is not possible it returns the given default value, or 0 if
// no default value is specified.
func Int64(num interface{}, defaultValue ...int64) int64 {
	var def int64
	if len(defaultValue) > 0 {
		def = defaultValue[0]
	}
	switch num.(type) {
	case byte:
		return int64(num.(byte))
	case int:
		return int64(num.(int))
	case int32:
		return int64(num.(int32))
	case int64:
		return num.(int64)
	case float32:
		return int64(num.(float32))
	case float64:
		return int64(num.(float64))
	case string:
		n, err := strconv.ParseInt(num.(string), 10, 64)
		if err != nil {
			return def
		}
		return n
	default:
		return def
	}
}
// Float32 converts the given number or string value to float32.
// If conversion is not possible it returns the given default value, or 0 if
// no default value is specified.
func Float32(num interface{}, defaultValue ...float32) float32 {
	var def float32
	if len(defaultValue) > 0 {
		def = defaultValue[0]
	}
	switch n := num.(type) {
	case byte:
		return float32(n)
	case int:
		return float32(n)
	case int32:
		return float32(n)
	case int64:
		return float32(n)
	case float32:
		return n
	case float64:
		return float32(n)
	case string:
		// BUG FIX: strconv.ParseFloat's second argument is the bit size and
		// must be 32 or 64; the original passed 10. Use 32 so string values
		// are rounded to float32 precision.
		v, err := strconv.ParseFloat(n, 32)
		if err != nil {
			return def
		}
		return float32(v)
	default:
		return def
	}
}
// Float64 converts the given number or string value to float64.
// If conversion is not possible it returns the given default value, or 0 if
// no default value is specified.
func Float64(num interface{}, defaultValue ...float64) float64 {
	var def float64
	if len(defaultValue) > 0 {
		def = defaultValue[0]
	}
	switch n := num.(type) {
	case byte:
		return float64(n)
	case int:
		return float64(n)
	case int32:
		return float64(n)
	case int64:
		return float64(n)
	case float32:
		return float64(n)
	case float64:
		return n
	case string:
		// BUG FIX: strconv.ParseFloat's second argument is the bit size and
		// must be 32 or 64; the original passed 10. Use 64 for full float64
		// precision.
		v, err := strconv.ParseFloat(n, 64)
		if err != nil {
			return def
		}
		return v
	default:
		return def
	}
}
// Byte converts the given number or string value to byte.
// If conversion is not possible it returns the given default value, or 0 if
// no default value is specified.
func Byte(num interface{}, defaultValue ...byte) byte {
	var def byte
	if len(defaultValue) > 0 {
		def = defaultValue[0]
	}
	switch num.(type) {
	case byte:
		return num.(byte)
	case int:
		return byte(num.(int))
	case int32:
		return byte(num.(int32))
	case int64:
		return byte(num.(int64))
	case float32:
		return byte(num.(float32))
	case float64:
		return byte(num.(float64))
	case string:
		// Parse as an unsigned 8-bit value so out-of-range strings fail and
		// fall back to the default.
		n, err := strconv.ParseUint(num.(string), 10, 8)
		if err != nil {
			return def
		}
		return byte(n)
	default:
		return def
	}
}
// Number converts the given number value to the given number type.
// If conversion is not possible it returns 0.
func Number(value interface{}, kind reflect.Kind) interface{} {
	switch kind {
	case reflect.Uint8:
		return Byte(value)
	case reflect.Int:
		return Int(value)
	case reflect.Int32:
		return Int32(value)
	case reflect.Int64:
		return Int64(value)
	case reflect.Float32:
		return Float32(value)
	case reflect.Float64:
		return Float64(value)
	default:
		// Unsupported kinds yield an untyped zero (boxed as int).
		return 0
	}
}
// SupportedNumberTypes lists the reflect kinds this package can convert.
var SupportedNumberTypes = []reflect.Kind{
	reflect.Uint8, reflect.Int32, reflect.Int64,
	reflect.Int, reflect.Float32, reflect.Float64,
}

// IsNumber checks if the given value's type is one of the supported number
// types: byte, int, int32, int64, float32, float64.
func IsNumber(i interface{}) bool {
	kind := reflect.TypeOf(i).Kind()
	for _, supported := range SupportedNumberTypes {
		if kind == supported {
			return true
		}
	}
	return false
}
package dst
// Gaussian ratio distribution.
import (
"math"
)
// GearyHinkleyTransformation transforms the ratio of two normally
// distributed variables so that the transformed variable T approximately has
// a standard Gaussian distribution. The approximation is good if Y is
// unlikely to assume negative values. See Hinkley (1969).
//
// X = N(μX, σ²X), Y = N(μY, σ²Y), Z = X/Y, ρ is the correlation of X and Y.
// http://en.wikipedia.org/wiki/Ratio_distribution#A_transformation_to_Gaussianity
func GearyHinkleyTransformation(z, μX, σX, μY, σY, ρ float64) float64 {
	numerator := μY*z - μX
	denominator := math.Sqrt(σY*σY*z*z - 2*ρ*σX*σY*z + σX*σX)
	return numerator / denominator
}
// phi is the standard normal CDF, Φ(x) = (1 + erf(x/√2)) / 2.
// erf and sqrt2 are expected to be defined elsewhere in this package.
func phi(x float64) float64 {
	return ((1.0 / 2.0) * (1 + erf((x)/(sqrt2))))
}

// GaussianRatioNoCorrPDF returns the PDF of the ratio Z = X/Y of two
// uncorrelated Gaussian variables X = N(μX, σ²X) and Y = N(μY, σ²Y).
func GaussianRatioNoCorrPDF(μX, σX, μY, σY float64) func(z float64) float64 {
	return func(z float64) float64 {
		σ2X := σX * σX
		σ2Y := σY * σY
		μ2X := μX * μX
		μ2Y := μY * μY
		// Intermediate terms a(z), b(z) and c(z) of the closed-form ratio
		// density; π is a package-level constant.
		a := math.Sqrt(z*z/σ2X + 1/σ2Y)
		b := μX*z/σ2X + μY/σ2Y
		c := math.Exp(b*b/(2*a*a) - (μ2X/σ2X+μ2Y/σ2Y)/2)
		return b*c/((a*a*a)*math.Sqrt(2*π)*σX*σY)*(2*phi(b/a)-1) + math.Exp(-(μ2X/σ2X+μ2Y/σ2Y)/2)/(a*a*π*σX*σY)
	}
}
// GaussianRatioNoCorrPDFAt evaluates the PDF of the Gaussian ratio
// distribution of uncorrelated variables at x.
func GaussianRatioNoCorrPDFAt(μX, σX, μY, σY, x float64) float64 {
	return GaussianRatioNoCorrPDF(μX, σX, μY, σY)(x)
}
// GaussianRatioPDF returns the PDF of the ratio Z = X/Y of two correlated
// Gaussian variables as a Cauchy density with location α = ρσX/σY and scale
// β = (σX/σY)·√(1-ρ²).
//
// NOTE(review): μX and μY are accepted but unused, so this is the zero-mean
// form of the ratio distribution — confirm that is intended.
func GaussianRatioPDF(μX, σX, μY, σY, ρ float64) func(z float64) float64 {
	return func(z float64) float64 {
		α := ρ * σX / σY
		β := (σX / σY) * math.Sqrt(1-ρ*ρ)
		// BUG FIX: the Cauchy density is β / (π·[(z-α)² + β²]); the original
		// wrote π·(z-α)² + β², omitting π on the β² term, so the density did
		// not integrate to 1.
		return β / (π * ((z-α)*(z-α) + β*β))
	}
}
// GaussianRatioPDFAt evaluates the PDF of the Gaussian ratio distribution of
// correlated variables at x.
func GaussianRatioPDFAt(μX, σX, μY, σY, ρ, x float64) float64 {
	return GaussianRatioPDF(μX, σX, μY, σY, ρ)(x)
}
// GaussianRatioApproxCDF returns the approximation of CDF of Gaussian Ratio distribution of correlated variables.
func GaussianRatioApproxCDF(μX, σX, μY, σY, ρ float64) func(z float64) float64 {
// Hinkley 1969:636, Eq. 5
return func(w float64) float64 {
σ2X := σX * σX
σ2Y := σY * σY
a := math.Sqrt(w*w*σ2X - 2*ρ*w/(σX*σY) + (1 / σ2Y))
// Hinkley 1969:636, Eq. 2
t1 := μY*w - μX
t2 := σX * σY * a
return phi(t1 / t2)
}
} | dst/ratio_gaussian.go | 0.906911 | 0.70791 | ratio_gaussian.go | starcoder |
package tarantula
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"time"
)
/* Seal Structure:
|---- mac ----| |------|
|--------- aes-ctr -------|
[ iv ] [ exp ] [ mac ] [ data ]
0 16 24 56 56 + len(data)
|---------------- seal -----------|
- modifying the iv results in a mac mismatch
- modifying the ciphertext results in a mac mismatch
- identifying the mac secret requires finding the ciphertext key
- ctr is not considered vulnerable to known plaintext attack
*/
// Seal uses SHA256 in a HMAC configuration to authenticate data, and then
// encrypts it using AES-CTR with a random IV. This protects the sealed data
// from modification or analysis by the recipient. An expiry time is also
// sealed into the data structure, limiting the viability of the seal.
//
// auth keys the HMAC; key must be a valid AES key length (16, 24 or 32
// bytes); exp is sealed as a Unix timestamp.
func Seal(auth, key, data []byte, exp time.Time) ([]byte, error) {
	hash := hmac.New(sha256.New, auth)
	// Layout (see diagram above): [0:16] IV, [16:24] expiry, [24:56] HMAC,
	// [56:] payload.
	seal := make([]byte, len(data)+56)

	// Read our IV.
	_, err := rand.Read(seal[:16])
	if err != nil {
		return nil, err
	}

	// Apply our stamp.
	binary.LittleEndian.PutUint64(seal[16:], uint64(exp.Unix()))

	// Copy in our plaintext.
	copy(seal[56:], data)

	// HMAC everything before and after the HMAC field; the IV stays in the
	// clear but is covered by the MAC.
	hash.Write(seal[0:24])
	hash.Write(seal[56:])
	copy(seal[24:56], hash.Sum(nil))

	// Encrypt our HMAC and PLAINTEXT.
	b, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	cipher.NewCTR(b, seal[:16]).XORKeyStream(seal[16:], seal[16:])

	return seal, nil
}
// Unseal reverses the operations in Seal to decrypt the enclosed data,
// verify its integrity and check timeliness against time.Now(). The error
// result MUST BE CHECKED in all cases, as Unseal will always return what it
// believes to be the decrypted but not authenticated data.
func Unseal(auth, key, seal []byte) ([]byte, error) {
	if len(seal) < 56 {
		return seal, SEAL_UNDERFLOW
	}

	// We copy off the seal because we're about to manipulate it destructively.
	seal = append(make([]byte, 0, len(seal)), seal...)
	hash := hmac.New(sha256.New, auth)

	// Decrypt our HMAC and PLAINTEXT.
	b, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	cipher.NewCTR(b, seal[:16]).XORKeyStream(seal[16:], seal[16:])

	// Extract our timestamp.
	expiry := binary.LittleEndian.Uint64(seal[16:24])

	// Compare our HMAC.
	// NOTE(review): bytes.Compare is not a constant-time comparison;
	// hmac.Equal is the conventional choice for comparing MACs.
	hash.Write(seal[0:24])
	hash.Write(seal[56:])
	if bytes.Compare(seal[24:56], hash.Sum(nil)) != 0 {
		return nil, SEAL_MISMATCH
	}

	// Compare our expiry.
	if time.Now().After(time.Unix(int64(expiry), 0)) {
		return nil, SEAL_EXPIRED
	}

	return seal[56:], nil
}
var SEAL_UNDERFLOW = errors.New("sealed data too small to be valid")
var SEAL_MISMATCH = errors.New("sealed hmac did not match")
var SEAL_EXPIRED = errors.New("sealed data has expired") | vendor/github.com/swdunlop/tarantula-go/seal.go | 0.709221 | 0.441553 | seal.go | starcoder |
package tuner
// tuner.go is a texel tuning implementation for Blunder.
import (
"blunder/engine"
"bufio"
"fmt"
"math"
"os"
"strings"
)
const (
	// DataFile is the path of the training-data file (one FEN fragment plus
	// a quoted game result per line; see loadPositions).
	DataFile = ""
	// NumCores is the number of goroutines used to compute the MSE.
	NumCores = 4
	// NumWeights is the total length of the flat weight vector.
	NumWeights = 786
	// KPrecision is the number of refinement iterations used by findK.
	KPrecision = 10

	// Game outcomes, scored from white's perspective.
	Draw     float64 = 0.5
	WhiteWin float64 = 1.0
	BlackWin float64 = 0.0

	// NumPositions is the maximum number of positions loaded for tuning.
	NumPositions float64 = 400000.0
	// K is the sigmoid scaling constant used when tuning.
	K float64 = 1.65
)

// A Position holds data concerning a position loaded from the training
// file. Each position consists of a position board object and the outcome
// of the game the position was from.
type Position struct {
	Pos     engine.Position
	Outcome float64
}

// Positions holds the positions loaded from the training file.
var Positions = loadPositions()

// Answers collects the partial error sums computed in parallel by the MSE
// workers.
var Answers = make(chan float64)

// IgnoreWeights marks which weights should be skipped when tuning.
var IgnoreWeights = make([]bool, len(Weights))
// setIgnoredWeights marks the half-open weight range [from, to) as ignored
// by the tuner.
func setIgnoredWeights(from, to int) {
	for idx := from; idx < to; idx++ {
		IgnoreWeights[idx] = true
	}
}
// Weights holds the weights to be adjusted during the tuning process.
var Weights []int16 = loadWeights()
// loadWeights snapshots the engine's current evaluation terms into a flat
// weight vector. Layout: 6 middlegame piece-square tables of 64 entries,
// 6 endgame tables, then the piece values and mobility terms.
func loadWeights() (weights []int16) {
	weights = make([]int16, NumWeights)
	copy(weights[0:64], engine.PSQT_MG[engine.Pawn][:])
	copy(weights[64:128], engine.PSQT_MG[engine.Knight][:])
	copy(weights[128:192], engine.PSQT_MG[engine.Bishop][:])
	copy(weights[192:256], engine.PSQT_MG[engine.Rook][:])
	copy(weights[256:320], engine.PSQT_MG[engine.Queen][:])
	copy(weights[320:384], engine.PSQT_MG[engine.King][:])
	copy(weights[384:448], engine.PSQT_EG[engine.Pawn][:])
	copy(weights[448:512], engine.PSQT_EG[engine.Knight][:])
	copy(weights[512:576], engine.PSQT_EG[engine.Bishop][:])
	copy(weights[576:640], engine.PSQT_EG[engine.Rook][:])
	copy(weights[640:704], engine.PSQT_EG[engine.Queen][:])
	copy(weights[704:768], engine.PSQT_EG[engine.King][:])
	copy(weights[768:773], engine.PieceValueMG[:])
	copy(weights[773:778], engine.PieceValueEG[:])
	copy(weights[778:782], engine.PieceMobilityMG[:])
	copy(weights[782:786], engine.PieceMobilityEG[:])
	return weights
}
// loadPositions loads up to NumPositions positions from the training set
// file. Each line holds a FEN fragment in fields 0-1 and a quoted game
// result in field 5, which is mapped to WhiteWin, BlackWin or Draw.
func loadPositions() (positions []Position) {
	file, err := os.Open(DataFile)
	if err != nil {
		panic(err)
	}
	// BUG FIX: the file handle was previously never released.
	defer file.Close()

	reader := bufio.NewReader(file)
	scanner := bufio.NewScanner(reader)

	for positionCount := 0; scanner.Scan() && positionCount < int(NumPositions); positionCount++ {
		line := scanner.Text()
		fields := strings.Fields(line)

		// Rebuild a full FEN; castling/en-passant/clock fields are fixed.
		fen := fields[0] + " " + fields[1] + " - - 0 1"
		result := fields[5]

		outcome := Draw
		if result == "\"1-0\";" {
			outcome = WhiteWin
		} else if result == "\"0-1\";" {
			outcome = BlackWin
		}

		var pos engine.Position
		pos.LoadFEN(fen)
		positions = append(positions, Position{Pos: pos, Outcome: outcome})
	}

	// BUG FIX: surface scanner failures instead of silently truncating the
	// training set.
	if err := scanner.Err(); err != nil {
		panic(err)
	}

	fmt.Printf("Done loading %d positions...\n", int(NumPositions))
	return positions
}
// mapWeightsToParameters writes the current weight vector back into the
// engine's evaluation terms (inverse of loadWeights; same layout).
func mapWeightsToParameters() {
	copy(engine.PSQT_MG[engine.Pawn][:], Weights[0:64])
	copy(engine.PSQT_MG[engine.Knight][:], Weights[64:128])
	copy(engine.PSQT_MG[engine.Bishop][:], Weights[128:192])
	copy(engine.PSQT_MG[engine.Rook][:], Weights[192:256])
	copy(engine.PSQT_MG[engine.Queen][:], Weights[256:320])
	copy(engine.PSQT_MG[engine.King][:], Weights[320:384])
	copy(engine.PSQT_EG[engine.Pawn][:], Weights[384:448])
	copy(engine.PSQT_EG[engine.Knight][:], Weights[448:512])
	copy(engine.PSQT_EG[engine.Bishop][:], Weights[512:576])
	copy(engine.PSQT_EG[engine.Rook][:], Weights[576:640])
	copy(engine.PSQT_EG[engine.Queen][:], Weights[640:704])
	copy(engine.PSQT_EG[engine.King][:], Weights[704:768])
	copy(engine.PieceValueMG[:], Weights[768:773])
	copy(engine.PieceValueEG[:], Weights[773:778])
	copy(engine.PieceMobilityMG[:], Weights[778:782])
	copy(engine.PieceMobilityEG[:], Weights[782:786])
}

// evaluate scores the position from the training set file.
func evaluate(pos engine.Position) int16 {
	score := engine.EvaluatePos(&pos)
	// For texel tuning, we always score a position from white's perspective.
	if pos.SideToMove == engine.Black {
		return -score
	}
	return score
}

// processor computes the squared-error sum for Positions[start:end] using
// the sigmoid with scaling constant K, and sends the partial sum on Answers.
func processor(start, end int, K float64) {
	var errorSum float64
	for i := start; i < end; i++ {
		score := float64(evaluate(Positions[i].Pos))
		sigmoid := 1 / (1 + math.Pow(10, -K*score/400))
		errorSum += math.Pow(Positions[i].Outcome-sigmoid, 2)
	}
	Answers <- errorSum
}
// meanSquaredError applies the current weights to the engine and calculates
// the mean square error between predicted sigmoid scores and actual game
// outcomes, split across NumCores workers. Credit to the author of Zahak
// for this parallelized implementation.
func meanSquaredError(K float64) float64 {
	mapWeightsToParameters()

	var errorSum float64
	batchSize := len(Positions) / NumCores

	for i := 0; i < NumCores; i++ {
		start := i * batchSize
		end := (i + 1) * batchSize
		// The final worker also takes the division remainder.
		if i == NumCores-1 {
			end = len(Positions)
		}
		go processor(start, end, K)
	}

	for i := 0; i < NumCores; i++ {
		ans := <-Answers
		errorSum += ans
	}

	return errorSum / float64(len(Positions))
}

// findK searches for the sigmoid scaling constant K that minimizes the mean
// squared error, narrowing the search window by a factor of ten for
// KPrecision iterations.
func findK() float64 {
	start, end, step := float64(0), float64(10), float64(1)
	err := float64(0)
	curr := start
	best := meanSquaredError(start)

	for i := 0; i < KPrecision; i++ {
		curr = start - step
		for curr < end {
			curr = curr + step
			err = meanSquaredError(curr)
			if err <= best {
				best = err
				start = curr
			}
		}

		fmt.Printf("Best K of %f on iteration %d\n", start, i)

		// Narrow the window around the best K found so far.
		end = start + step
		start = start - step
		step = step / 10.0
	}
	return start
}
// tune runs a coordinate-descent local search over the weight vector: each
// weight is nudged by +1 and then -1, keeping any change that lowers the
// mean squared error, until a full pass produces no improvement.
func tune() {
	numParams := len(Weights)
	bestError := meanSquaredError(K)
	improved := true

	for iteration := 1; improved; iteration++ {
		improved = false
		for weightIdx := 0; weightIdx < numParams; weightIdx++ {
			if IgnoreWeights[weightIdx] {
				continue
			}

			// Try nudging the weight up first.
			Weights[weightIdx] += 1
			newError := meanSquaredError(K)

			if newError < bestError {
				bestError = newError
				improved = true
			} else {
				// Otherwise try nudging it down (net -1 from the original).
				Weights[weightIdx] -= 2

				// Keep material/mobility terms (indexes >= 768) positive.
				if weightIdx >= 768 && Weights[weightIdx] <= 0 {
					Weights[weightIdx] += 1
					continue
				}

				newError = meanSquaredError(K)
				if newError < bestError {
					bestError = newError
					improved = true
				} else {
					// Neither direction helped; restore the weight.
					Weights[weightIdx] += 1
				}
			}
		}

		fmt.Printf("Iteration %d complete...\n", iteration)
		fmt.Printf("Best error: %.15f\n", bestError)

		// Periodically dump the current parameters.
		if iteration%10 == 0 {
			printParameters()
		}
	}
	fmt.Println("Done tuning!")
}
// prettyPrintPSQT prints a piece-square table as an 8x8 grid.
func prettyPrintPSQT(psqt [64]int16) {
	fmt.Print("\n")
	for sq, value := range psqt {
		// Start a new rank every eight squares.
		if sq%8 == 0 {
			fmt.Println()
		}
		fmt.Print(value, ", ")
	}
	fmt.Print("\n")
}
// printParameters dumps every piece-square table plus the material and
// mobility terms currently held by the engine.
func printParameters() {
	prettyPrintPSQT(engine.PSQT_MG[engine.Pawn])
	prettyPrintPSQT(engine.PSQT_MG[engine.Knight])
	prettyPrintPSQT(engine.PSQT_MG[engine.Bishop])
	prettyPrintPSQT(engine.PSQT_MG[engine.Rook])
	prettyPrintPSQT(engine.PSQT_MG[engine.Queen])
	prettyPrintPSQT(engine.PSQT_MG[engine.King])
	prettyPrintPSQT(engine.PSQT_EG[engine.Pawn])
	prettyPrintPSQT(engine.PSQT_EG[engine.Knight])
	prettyPrintPSQT(engine.PSQT_EG[engine.Bishop])
	prettyPrintPSQT(engine.PSQT_EG[engine.Rook])
	prettyPrintPSQT(engine.PSQT_EG[engine.Queen])
	prettyPrintPSQT(engine.PSQT_EG[engine.King])
	fmt.Println(engine.PieceValueMG)
	fmt.Println(engine.PieceValueEG)
	fmt.Println(engine.PieceMobilityMG)
	fmt.Println(engine.PieceMobilityEG)
}
// RunTuner runs the texel tuning loop, writes the tuned weights back into
// the engine's evaluation parameters, and optionally prints them.
func RunTuner(verbose bool) {
	// K := findK()
	// fmt.Println("Best K is:", K)
	tune()
	mapWeightsToParameters()
	if verbose {
		printParameters()
	}
} | tuner/tuner.go | 0.720565 | 0.460107 | tuner.go | starcoder |
package c
import (
"image/color"
"math/big"
)
type ComplexBigFloat struct {
Real *big.Float
Imag *big.Float
}
func (lhs *ComplexBigFloat) Add(rhs *ComplexBigFloat) *ComplexBigFloat {
return &ComplexBigFloat{
Real: new(big.Float).Add(
lhs.Real,
rhs.Real,
),
Imag: new(big.Float).Add(
lhs.Imag,
rhs.Imag,
)}
}
// Sub returns the complex difference lhs - rhs as a newly allocated value;
// neither operand is modified.
func (lhs *ComplexBigFloat) Sub(rhs *ComplexBigFloat) *ComplexBigFloat {
	re := new(big.Float).Sub(lhs.Real, rhs.Real)
	im := new(big.Float).Sub(lhs.Imag, rhs.Imag)
	return &ComplexBigFloat{Real: re, Imag: im}
}
// Mul returns the complex product lhs * rhs as a newly allocated value.
func (lhs *ComplexBigFloat) Mul(rhs *ComplexBigFloat) *ComplexBigFloat {
	/**
	(a + bi) * (c + di)
	= (ac - bd) + i(bc + ad)
	*/
	a := lhs.Real
	b := lhs.Imag
	c := rhs.Real
	d := rhs.Imag
	return &ComplexBigFloat{
		Real: new(big.Float).Sub(
			new(big.Float).Mul(a, c),
			new(big.Float).Mul(b, d),
		),
		Imag: new(big.Float).Add(
			new(big.Float).Mul(b, c),
			new(big.Float).Mul(a, d),
		)}
}
// Div returns the complex quotient lhs / rhs as a newly allocated value.
// If the denominator |rhs|^2 is zero or infinite, lhs is returned unchanged.
func (lhs *ComplexBigFloat) Div(rhs *ComplexBigFloat) *ComplexBigFloat {
	/**
	(a + bi)/(c + di)
	= (ac + bd)/(c^2 + d^2) + i(bc - ad)/(c^2 + d^2)
	*/
	a := lhs.Real
	b := lhs.Imag
	c := rhs.Real
	d := rhs.Imag
	// |rhs|^2 = c^2 + d^2
	denominator := new(big.Float).Add(
		new(big.Float).Mul(c, c),
		new(big.Float).Mul(d, d),
	)
	// real-part numerator: ac + bd
	fr := new(big.Float).Add(
		new(big.Float).Mul(a, c),
		new(big.Float).Mul(b, d),
	)
	fi := new(big.Float).Sub(
		new(big.Float).Mul(b, c),
		new(big.Float).Mul(a, d),
	)
	// Do nothing when the denominator is zero or not a finite number
	// (original note: 0または有限数でない場合は何もしない).
	if denominator.IsInf() || denominator.Cmp(new(big.Float)) == 0 {
		return lhs
	}
	return &ComplexBigFloat{
		Real: new(big.Float).Quo(fr, denominator),
		Imag: new(big.Float).Quo(fi, denominator),
	}
}
// Abs returns the modulus |lhs| = sqrt(Real^2 + Imag^2) as a new big.Float.
// It is computed in the scaled form p*sqrt(1+(q/p)^2) with p = max(|Real|,
// |Imag|) so the intermediate squares cannot overflow.
func (lhs ComplexBigFloat) Abs() *big.Float {
	zero := new(big.Float)
	if lhs.Real.IsInf() || lhs.Imag.IsInf() {
		// A modulus is non-negative, so an infinite component yields +Inf.
		// (Bug fix: the original called SetInf(true), which per math/big
		// produces -Inf.)
		return new(big.Float).SetInf(false)
	}
	p, q := new(big.Float).Abs(lhs.Real), new(big.Float).Abs(lhs.Imag)
	if p.Cmp(q) < 0 {
		p, q = q, p
	}
	if p.Cmp(zero) == 0 {
		// Both components are zero.
		return zero
	}
	q = new(big.Float).Quo(q, p)
	return new(big.Float).
		Mul(p, new(big.Float).
			Sqrt(
				new(big.Float).Add(
					big.NewFloat(1),
					new(big.Float).Mul(q, q),
				),
			),
		)
}
// fBigfloat evaluates f(x) = x^4 - 1. The constant 1 is represented as the
// complex value 1 + 0i, reproducing the original computation that only
// subtracted from the real part.
func fBigfloat(x *ComplexBigFloat) *ComplexBigFloat {
	unit := &ComplexBigFloat{
		Real: big.NewFloat(1),
		Imag: big.NewFloat(0),
	}
	x4 := x.Mul(x).Mul(x).Mul(x)
	return x4.Sub(unit)
}
// dfBigfloat evaluates the derivative f'(x) = 4x^3.
func dfBigfloat(x *ComplexBigFloat) *ComplexBigFloat {
	coeff := &ComplexBigFloat{
		Real: big.NewFloat(4),
		Imag: big.NewFloat(0),
	}
	return coeff.Mul(x).Mul(x).Mul(x)
}
func NewtonBigFloat(z *ComplexBigFloat) color.Color {
const iterations = 30
const contrast = 15
for n := uint8(0); n < iterations; n++ {
// ニュートン法のアルゴリズムは右記を参照. https://algorithm.joho.info/mathematics/newton-method-program/
// 漸化式 αn - f(αn) / df(αn)
z = z.Sub(fBigfloat(z).Div(dfBigfloat(z)))
// z^4 - 1 = 0のとりうる解は、z = ±1 または z = ±iなので
// それぞれの解とzの差が0近似値となったものを正解値とする
// 1
one := &ComplexBigFloat{Real: big.NewFloat(1), Imag: big.NewFloat(0)}
// -1
none := &ComplexBigFloat{Real: big.NewFloat(-1), Imag: big.NewFloat(0)}
// i
i := &ComplexBigFloat{Real: big.NewFloat(0), Imag: big.NewFloat(1)}
// -i
ni := &ComplexBigFloat{Real: big.NewFloat(0), Imag: big.NewFloat(-1)}
if one.Sub(z).Abs().Cmp(big.NewFloat(epsilon)) < 0 {
return color.RGBA{R: contrast * n, G: 0, B: 0, A: 255}
} else if none.Sub(z).Abs().Cmp(big.NewFloat(epsilon)) < 0 {
return color.RGBA{R: 0, G: 0, B: contrast * n, A: 255}
} else if i.Sub(z).Abs().Cmp(big.NewFloat(epsilon)) < 0 {
return color.RGBA{R: 0, G: contrast * n, B: 0, A: 255}
} else if ni.Sub(z).Abs().Cmp(big.NewFloat(epsilon)) < 0 {
return color.RGBA{R: 0, G: contrast * n, B: contrast * n, A: 255}
}
}
return color.Black
} | ch03/ex08/c/float.go | 0.610221 | 0.416915 | float.go | starcoder |
package main
import (
"container/heap"
"fmt"
"math"
)
// ComputeTNR Compute Transit Node Routing.
// It runs the full TNR precomputation pipeline in order: transit-node
// selection, transit-to-transit distance table, Voronoi regions, and the
// local filter, then marks the graph as TNRed. It is a diagnostic no-op
// unless the graph has been contracted, has not been TNRed yet, and has
// at least transitCnt vertices.
func (graph *Graph) ComputeTNR(transitCnt int) {
	if !graph.contracted {
		fmt.Println("The graph has not contracted, run ComputeContractions first.")
		return
	}
	if graph.TNRed {
		fmt.Println("The graph has already calculated the TNR.")
		return
	}
	if len(graph.vertices) < transitCnt {
		fmt.Println("Too many transit nodes")
		return
	}
	graph.SelectTransitNodes(transitCnt)
	graph.ComputeDistanceTable(transitCnt)
	graph.ComputeVoronoiRegion()
	graph.ComputeLocalFilter()
	graph.TNRed = true
}
// SelectTransitNodes marks the transitCnt vertices with the highest
// contraction orders as transit nodes, collects them in graph.transitNodes,
// and gives every vertex a fresh transitPath map.
func (graph *Graph) SelectTransitNodes(transitCnt int) {
	total := len(graph.vertices)
	cutoff := total - transitCnt
	for _, vertex := range graph.vertices {
		if vertex.contractionOrder >= cutoff {
			vertex.isTransitNode = true
			graph.transitNodes = append(graph.transitNodes, vertex)
		}
		vertex.transitPath = make(map[int64]*Vertex)
	}
}
// ComputeDistanceTable Select the transit nodes and compute the Distance Table.
// For every ordered pair of transit nodes it stores the shortest-path
// distance in graph.tnrDistance, and records on each intermediate vertex
// of the path the next hop toward the destination transit node
// (transitPath).
func (graph *Graph) ComputeDistanceTable(transitCnt int) {
	if graph.tnrDistance == nil {
		graph.tnrDistance = make(map[int64]map[int64]float64)
	}
	for i := 0; i < transitCnt; i++ {
		for j := 0; j < transitCnt; j++ {
			// Lazily create the inner map for source transit node i.
			if _, ok := graph.tnrDistance[graph.transitNodes[i].id]; !ok {
				graph.tnrDistance[graph.transitNodes[i].id] = make(map[int64]float64)
			}
			if i == j {
				graph.tnrDistance[graph.transitNodes[i].id][graph.transitNodes[j].id] = 0
				continue
			}
			distance, path := graph.ShortestPathWithoutTNR(graph.transitNodes[i].name, graph.transitNodes[j].name)
			graph.tnrDistance[graph.transitNodes[i].id][graph.transitNodes[j].id] = distance
			// Record next-hop pointers along the path toward transit node
			// j. Stop at the first vertex that already has a pointer for
			// j: the remainder of the path was recorded by an earlier pair.
			for k := 0; k < len(path)-1; k++ {
				if _, ok := graph.vertices[graph.mapping[path[k]]].transitPath[graph.transitNodes[j].id]; ok {
					break
				}
				graph.vertices[graph.mapping[path[k]]].transitPath[graph.transitNodes[j].id] = graph.vertices[graph.mapping[path[k+1]]]
			}
			//fmt.Printf("between transit nodes %d and %d: %f, %v\n", graph.transitNodes[i].id, graph.transitNodes[j].id, distance, path)
		}
	}
}
// ComputeVoronoiRegion labels every vertex with the id of its nearest
// transit node (voronoiRegionID) by running a multi-source Dijkstra seeded
// with all transit nodes at distance 0, relaxing over incoming,
// non-shortcut edges.
func (graph *Graph) ComputeVoronoiRegion() {
	// Reset all vertices to "unreached".
	for i := 0; i < len(graph.vertices); i++ {
		graph.vertices[i].distance.distance = math.MaxFloat64
		graph.vertices[i].voronoiRegionID = -1
	}
	distanceHeap := &distanceHeap{}
	visited := make(map[int64]bool)
	heap.Init(distanceHeap)
	// Seed: every transit node is the center of its own region.
	for i := 0; i < len(graph.transitNodes); i++ {
		graph.transitNodes[i].distance.distance = 0
		graph.transitNodes[i].voronoiRegionID = graph.transitNodes[i].id
		heap.Push(distanceHeap, graph.transitNodes[i])
	}
	for distanceHeap.Len() != 0 {
		vertex := heap.Pop(distanceHeap).(*Vertex)
		if visited[vertex.id] {
			continue // stale heap entry; the vertex was settled earlier
		}
		visited[vertex.id] = true
		// Relax incoming edges: a predecessor improved through this vertex
		// inherits this vertex's region label.
		for i := 0; i < len(vertex.inwardEdges); i++ {
			if !vertex.inwardEdges[i].isShortcut {
				if vertex.distance.distance+vertex.inwardEdges[i].weight < vertex.inwardEdges[i].from.distance.distance {
					vertex.inwardEdges[i].from.distance.distance = vertex.distance.distance + vertex.inwardEdges[i].weight
					heap.Push(distanceHeap, vertex.inwardEdges[i].from)
					vertex.inwardEdges[i].from.voronoiRegionID = vertex.voronoiRegionID
				}
			}
		}
	}
}
// ComputeLocalFilter Calculate the local filter (access nodes + sub-transit-node sets).
// For every vertex, in both forward and backward direction, it computes:
//   - the set of Voronoi regions touched before reaching a transit node
//     (the "search space", used as the locality filter), and
//   - the access nodes (the transit nodes first reached) with their exact
//     shortest-path distances.
// Vertices are processed in decreasing contraction order so that the
// upward search can splice in the already-computed results of
// higher-ordered vertices it encounters.
func (graph *Graph) ComputeLocalFilter() {
	contractionMaxHeap := &contractionMaxHeap{}
	heap.Init(contractionMaxHeap)
	// Reset all per-vertex TNR state and enqueue every vertex.
	for v := 0; v < len(graph.vertices); v++ {
		graph.vertices[v].forwardSearchSpace = nil
		graph.vertices[v].forwardAccessNodeDistance = nil
		graph.vertices[v].forwardTNRed = false
		graph.vertices[v].backwardSearchSpace = nil
		graph.vertices[v].backwardAccessNodeDistance = nil
		graph.vertices[v].backwardTNRed = false
		heap.Push(contractionMaxHeap, graph.vertices[v])
	}
	for contractionMaxHeap.Len() != 0 {
		sourceVertex := heap.Pop(contractionMaxHeap).(*Vertex)
		if !sourceVertex.forwardTNRed {
			//fmt.Printf("forward TNR vertex %d\n", sourceVertex.id)
			sourceVertex.forwardSearchSpace = make(map[int64]bool)
			sourceVertex.forwardAccessNodeDistance = make(map[int64]float64)
			// find access node
			searchHeap := &forwardSearchHeap{}
			heap.Init(searchHeap)
			heap.Push(searchHeap, &QueryVertex{
				id:               sourceVertex.id,
				forwardDistance:  0,
				backwardDistance: 0,
				isTransitNode:    sourceVertex.isTransitNode,
			})
			// Tentative upward-search distances, indexed by vertex id.
			distance := make([]float64, len(graph.vertices), len(graph.vertices))
			for i := 0; i < len(graph.vertices); i++ {
				distance[i] = math.MaxFloat64
			}
			distance[sourceVertex.id] = 0
			for searchHeap.Len() != 0 {
				queryVertex := heap.Pop(searchHeap).(*QueryVertex)
				// relax
				if !queryVertex.isTransitNode {
					// Non-transit vertex: record its region in the search
					// space, then either reuse a previously computed
					// result or continue relaxing upward edges.
					sourceVertex.forwardSearchSpace[graph.vertices[queryVertex.id].voronoiRegionID] = true
					// check if visited this node
					if graph.vertices[queryVertex.id].forwardTNRed {
						//fmt.Printf("met forward-TNRed vertex %d\n", queryVertex.id)
						// Splice in the finished vertex's search space and
						// mark its access nodes as candidates (-1 is a
						// placeholder, replaced with exact distances below).
						for k := range graph.vertices[queryVertex.id].forwardSearchSpace {
							sourceVertex.forwardSearchSpace[k] = true
						}
						for k := range graph.vertices[queryVertex.id].forwardAccessNodeDistance {
							sourceVertex.forwardAccessNodeDistance[k] = -1
						}
					} else {
						// Relax only edges to higher contraction orders
						// (the CH upward graph).
						for i := 0; i < len(graph.vertices[queryVertex.id].outwardEdges); i++ {
							outEdge := graph.vertices[queryVertex.id].outwardEdges[i]
							if graph.vertices[queryVertex.id].contractionOrder < outEdge.to.contractionOrder {
								if distance[outEdge.to.id] > distance[queryVertex.id]+outEdge.weight {
									distance[outEdge.to.id] = distance[queryVertex.id] + outEdge.weight
									heap.Push(searchHeap, &QueryVertex{
										id:               outEdge.to.id,
										forwardDistance:  distance[queryVertex.id] + outEdge.weight,
										backwardDistance: 0,
										isTransitNode:    outEdge.to.isTransitNode,
									})
								}
							}
						}
					}
				} else {
					// Transit node reached: candidate access node; the
					// search does not continue past it.
					sourceVertex.forwardAccessNodeDistance[queryVertex.id] = -1
				}
			}
			// Replace the -1 placeholders with exact distances.
			for k := range sourceVertex.forwardAccessNodeDistance {
				sourceVertex.forwardAccessNodeDistance[k], _ = graph.ShortestPathWithoutTNR(sourceVertex.name, graph.vertices[k].name)
			}
			// delete invalid access node: k2 is redundant when some other
			// access node k1 reaches it at least as cheaply via the table.
			accessNodeMask := make(map[int64]bool)
			for k1, d1 := range sourceVertex.forwardAccessNodeDistance {
				for k2, d2 := range sourceVertex.forwardAccessNodeDistance {
					if k1 == k2 {
						continue
					}
					if d1+graph.tnrDistance[k1][k2] <= d2 {
						accessNodeMask[k2] = true // mask j since it won't be the solution
					}
				}
			}
			for k := range accessNodeMask {
				delete(sourceVertex.forwardAccessNodeDistance, k)
			}
			sourceVertex.forwardTNRed = true
		}
		if !sourceVertex.backwardTNRed {
			//fmt.Printf("backward TNR vertex %d\n", sourceVertex.id)
			// Mirror of the forward pass, following inward edges instead.
			sourceVertex.backwardSearchSpace = make(map[int64]bool)
			sourceVertex.backwardAccessNodeDistance = make(map[int64]float64)
			// find access node
			searchHeap := &backwardSearchHeap{}
			heap.Init(searchHeap)
			heap.Push(searchHeap, &QueryVertex{
				id:               sourceVertex.id,
				forwardDistance:  0,
				backwardDistance: 0,
				isTransitNode:    sourceVertex.isTransitNode,
			})
			distance := make([]float64, len(graph.vertices), len(graph.vertices))
			for i := 0; i < len(graph.vertices); i++ {
				distance[i] = math.MaxFloat64
			}
			distance[sourceVertex.id] = 0
			for searchHeap.Len() != 0 {
				queryVertex := heap.Pop(searchHeap).(*QueryVertex)
				// relax
				if !queryVertex.isTransitNode {
					sourceVertex.backwardSearchSpace[graph.vertices[queryVertex.id].voronoiRegionID] = true
					// check if visited this node
					if graph.vertices[queryVertex.id].backwardTNRed {
						//fmt.Printf("met backward-TNRed vertex %d\n", queryVertex.id)
						for k := range graph.vertices[queryVertex.id].backwardSearchSpace {
							sourceVertex.backwardSearchSpace[k] = true
						}
						for k := range graph.vertices[queryVertex.id].backwardAccessNodeDistance {
							sourceVertex.backwardAccessNodeDistance[k] = -1
						}
					} else {
						for i := 0; i < len(graph.vertices[queryVertex.id].inwardEdges); i++ {
							inEdge := graph.vertices[queryVertex.id].inwardEdges[i]
							if graph.vertices[queryVertex.id].contractionOrder < inEdge.from.contractionOrder {
								if distance[inEdge.from.id] > distance[queryVertex.id]+inEdge.weight {
									distance[inEdge.from.id] = distance[queryVertex.id] + inEdge.weight
									heap.Push(searchHeap, &QueryVertex{
										id:               inEdge.from.id,
										forwardDistance:  0,
										backwardDistance: distance[queryVertex.id] + inEdge.weight,
										isTransitNode:    inEdge.from.isTransitNode,
									})
								}
							}
						}
					}
				} else {
					sourceVertex.backwardAccessNodeDistance[queryVertex.id] = -1
				}
			}
			// Note the reversed argument order: distances are measured
			// toward sourceVertex here.
			for k := range sourceVertex.backwardAccessNodeDistance {
				sourceVertex.backwardAccessNodeDistance[k], _ = graph.ShortestPathWithoutTNR(graph.vertices[k].name, sourceVertex.name)
			}
			// delete invalid access node
			accessNodeMask := make(map[int64]bool)
			for k1, d1 := range sourceVertex.backwardAccessNodeDistance {
				for k2, d2 := range sourceVertex.backwardAccessNodeDistance {
					if k1 == k2 {
						continue
					}
					if d1+graph.tnrDistance[k2][k1] <= d2 {
						accessNodeMask[k2] = true // mask j since it won't be the solution
					}
				}
			}
			for k := range accessNodeMask {
				delete(sourceVertex.backwardAccessNodeDistance, k)
			}
		}
		// NOTE(review): unlike the forward branch (flag set inside the
		// if-block), backwardTNRed is set unconditionally here. Harmless
		// (it is already true when the branch is skipped) but inconsistent
		// — confirm intent.
		sourceVertex.backwardTNRed = true
	}
}
package heap
import "github.com/lxzan/dao"
// MinHeap is a comparator producing min-heap ordering: smaller values
// compare Less and therefore sort toward the root.
func MinHeap[T dao.Comparable[T]](a, b T) dao.Ordering {
	switch {
	case a > b:
		return dao.Greater
	case a < b:
		return dao.Less
	default:
		return dao.Equal
	}
}
// MaxHeap is a comparator producing max-heap ordering by negating MinHeap.
// This relies on dao.Less and dao.Greater being numeric negatives of each
// other and dao.Equal being zero — confirm in package dao.
func MaxHeap[T dao.Comparable[T]](a, b T) dao.Ordering {
	return -1 * MinHeap(a, b)
}
// New returns an empty heap with the given initial capacity and comparator.
func New[T any](cap int, cmp func(a, b T) dao.Ordering) *Heap[T] {
	h := &Heap[T]{Cmp: cmp}
	h.Data = make([]T, 0, cap)
	return h
}
// Init heapifies arr in place under cmp and returns a heap that takes
// ownership of the slice. Each element is sifted up in turn, so the
// construction costs O(n log n).
func Init[T any](arr []T, cmp func(a, b T) dao.Ordering) *Heap[T] {
	h := &Heap[T]{Data: arr, Cmp: cmp}
	for i, n := 1, len(arr); i < n; i++ {
		h.Up(i)
	}
	return h
}
// Heap is a binary heap over Data ordered by Cmp: the element at index 0
// never compares Greater than its children.
type Heap[T any] struct {
	Data []T                       // backing slice, kept in heap order
	Cmp  func(a, b T) dao.Ordering // comparator; dao.Less means a sorts toward the root
}
// Len returns the number of elements currently stored in the heap.
func (this Heap[T]) Len() int {
	return len(this.Data)
}
// Swap exchanges the elements at indices i and j.
func (this *Heap[T]) Swap(i, j int) {
	this.Data[i], this.Data[j] = this.Data[j], this.Data[i]
}
// Push appends the given elements one by one, restoring heap order after
// each insertion.
func (this *Heap[T]) Push(eles ...T) {
	for i := range eles {
		this.Data = append(this.Data, eles[i])
		this.Up(len(this.Data) - 1)
	}
}
// Up sifts the element at index i toward the root while it compares Less
// than its parent.
func (this *Heap[T]) Up(i int) {
	for i > 0 {
		parent := (i - 1) / 2
		if this.Cmp(this.Data[i], this.Data[parent]) != dao.Less {
			return
		}
		this.Swap(i, parent)
		i = parent
	}
}
// Pop removes and returns the root element (the minimum under the heap's
// comparator). The caller must ensure the heap is non-empty: Pop on an
// empty heap panics on the Data[0] access.
func (this *Heap[T]) Pop() T {
	var n = this.Len()
	var result = this.Data[0]
	// Move the last element to the root, shrink, then restore heap order.
	this.Data[0] = this.Data[n-1]
	this.Data = this.Data[:n-1]
	this.Down(0, n-1)
	return result
}
// Down sifts the element at index i toward the leaves within Data[:n].
// Unlike the textbook version it handles the two children sequentially:
// if the left child compares Less it is swapped up and its subtree fixed,
// then the (possibly new) value at i is checked against the right child
// the same way. The value ending at i is the smallest of the three, so
// the heap invariant is restored.
func (this *Heap[T]) Down(i, n int) {
	var j = 2*i + 1 // left child
	if j < n && this.Cmp(this.Data[j], this.Data[i]) == dao.Less {
		this.Swap(i, j)
		this.Down(j, n)
	}
	var k = 2*i + 2 // right child
	if k < n && this.Cmp(this.Data[k], this.Data[i]) == dao.Less {
		this.Swap(i, k)
		this.Down(k, n)
	}
}
// Sort performs an in-place heapsort on Data and returns it. With a
// min-heap comparator the result is in descending order (successive
// minima are swapped to the back of the slice). The heap invariant is
// destroyed, so the heap must not be used for Push/Pop afterwards without
// re-initializing.
func (this *Heap[T]) Sort() []T {
	var n = this.Len()
	if n >= 2 {
		for i := n - 1; i >= 2; i-- {
			this.Swap(0, i)
			this.Down(0, i)
		}
		// Two elements remain and the root is the smaller one, so a final
		// swap completes the descending order.
		this.Swap(0, 1)
	}
	return this.Data
}
// Find searches the heap for an element that compares Equal to target,
// using the heap order to prune subtrees (see do_find). It returns the
// matching element and true, or the zero value and false.
func (this *Heap[T]) Find(target T) (result T, exist bool) {
	// Guard: do_find reads Data[0] unconditionally, so an empty heap
	// would panic without this check.
	if this.Len() == 0 {
		return result, false
	}
	var q = find_param[T]{
		Length: this.Len(),
		Target: target,
		Result: result,
		Exist:  false,
	}
	this.do_find(0, &q)
	return q.Result, q.Exist
}
// find_param carries the state of a Find traversal through do_find.
type find_param[T any] struct {
	Length int  // number of elements in the heap
	Target T    // value being searched for
	Result T    // matching element; valid only when Exist is true
	Exist  bool // whether a match has been found
}
// do_find is a depth-first search for q.Target rooted at index i. It uses
// the heap ordering to prune: a child's subtree is only visited when the
// child does not already compare Greater than the target (everything
// below it would compare at least as Greater).
func (this Heap[T]) do_find(i int, q *find_param[T]) {
	// Stop as soon as an earlier branch found the target.
	if q.Exist {
		return
	}
	if this.Cmp(this.Data[i], q.Target) == dao.Equal {
		q.Result = this.Data[i]
		q.Exist = true
		return
	}
	var j = 2*i + 1 // left child
	if j < q.Length && this.Cmp(this.Data[j], q.Target) != dao.Greater {
		this.do_find(j, q)
	}
	var k = 2*i + 2 // right child
	if k < q.Length && this.Cmp(this.Data[k], q.Target) != dao.Greater {
		this.do_find(k, q)
	}
}
package board
import (
"fmt"
"math"
"math/rand"
"time"
"github.com/pkg/errors"
)
// r is the package-wide random source. It is assigned by setRand (invoked
// from Fill) and must be initialized before any randomized helper runs.
var r *rand.Rand

// Board is the representation of the game's
// playing field
// Cells hold single-character strings: " " for empty, "g" for a gopher,
// and "o" for a hole (as written by Fill).
type Board [][]string

// positions bundles one gopher/hole placement: (gx, gy) is the gopher
// cell, (hx, hy) the hole cell. gx == -1 signals that no placement was
// found.
type positions struct {
	gx int
	gy int
	hx int
	hy int
}
// New returns an x-by-y board with every cell initialized to a single
// space. Dimensions below 4 are clamped up to the minimum size of 4.
func New(x, y int) Board {
	rows := defaultDim(x)
	cols := defaultDim(y)
	board := make(Board, rows)
	for i := 0; i < rows; i++ {
		row := make([]string, cols)
		for j := 0; j < cols; j++ {
			row[j] = " "
		}
		board[i] = row
	}
	return board
}
// defaultDim clamps a requested dimension to the minimum board size of 4.
func defaultDim(i int) int {
	if i >= 4 {
		return i
	}
	return 4
}
// Fill pseudorandomly places gopher ("g") / hole ("o") pairs on the board.
// d selects the difficulty level (0 leaves the board empty) and s, when
// positive, seeds the random source for deterministic generation.
func (b Board) Fill(d, s int) {
	if d == 0 {
		// difficulty 0 means an empty board
		return
	}
	setRand(s)
	pairs := difficulty(len(b), len(b[0]), d)
	for i := 0; i < pairs; i++ {
		p := b.findPositions()
		if p.gx == -1 {
			// no room for this pair; keep trying the remaining ones
			continue
		}
		b[p.gx][p.gy] = "g"
		b[p.hx][p.hy] = "o"
	}
}
// RemoveGophers blanks out every gopher ("g") cell on the board.
func (b Board) RemoveGophers() {
	for i := range b {
		for j := range b[i] {
			if b[i][j] == "g" {
				b[i][j] = " "
			}
		}
	}
}
// setRand sets the intended seed otherwise uses
// a time based seed
// It mutates the package-level source r; a seed <= 0 selects the
// time-based (non-deterministic) source.
func setRand(s int) {
	if s > 0 {
		r = rand.New(rand.NewSource(int64(s)))
		return
	}
	r = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// diffLevels maps a difficulty level to the fraction of the board that may
// be occupied: 0 = empty, 1 = medium (up to 30%), 2 = hard (up to 50%).
// Unknown levels fall through to 0.
var diffLevels = map[int]float64{0: 0.0, 1: 0.3, 2: 0.5}

// difficulty converts a difficulty level into a number of gopher/hole
// pairs for an x-by-y board; each pair occupies two cells, hence the
// division by 2 before rounding up.
func difficulty(x, y, d int) int {
	cells := float64(x * y)
	pairs := cells * diffLevels[d] / 2
	return int(math.Ceil(pairs))
}
// findPositions looks for a valid position to
// place a gopher and then finds a valid place to
// place a hole based on that gopher if one
// exists
// NOTE(review): when a gopher spot is found but no adjacent hole spot is,
// the method recurses with unchanged board state; gopherArea's bounded
// random sampling makes eventual termination likely but not guaranteed —
// confirm this cannot spin on pathological boards.
func (b Board) findPositions() positions {
	gx, gy := b.gopherArea()
	if gx == -1 {
		// no gopher spot at all; signal total failure
		return positions{-1, -1, -1, -1}
	}
	hx, hy := b.holeArea(gx, gy)
	if hx == -1 {
		// this gopher spot had no free neighbor; try a different spot
		return b.findPositions()
	}
	return positions{gx, gy, hx, hy}
}
// gopherArea picks a random empty cell with no gopher in its eight
// surrounding cells, using up to x*y random probes before giving up with
// (-1, -1). The sampling is probabilistic: a valid spot may exist even
// when -1 is returned.
func (b Board) gopherArea() (int, int) {
	// pick a gopher position and then make sure
	// it's empty AND its surrounding 8 squares
	// don't already contain a gopher otherwise,
	// look elsewhere.
	// Not an optimal solution.
	var x, y int
	xl := len(b)
	yl := len(b[0])
	for i := 0; i < xl*yl; i++ {
		x = r.Intn(xl)
		y = r.Intn(yl)
		if b[x][y] == " " && !b.surroundingGopher(x, y) {
			return x, y
		}
	}
	return -1, -1
}
// surroundingGopher reports whether any of the eight cells adjacent to
// (x, y) contains a gopher; off-board neighbors are ignored.
func (b Board) surroundingGopher(x, y int) bool {
	offsets := [8][2]int{
		// top row
		{-1, -1}, {0, -1}, {1, -1},
		// middle row
		{-1, 0}, {1, 0},
		// bottom row
		{-1, 1}, {0, 1}, {1, 1},
	}
	for _, off := range offsets {
		nx, ny := x+off[0], y+off[1]
		if b.withinBounds(nx, ny) && b[nx][ny] == "g" {
			return true
		}
	}
	return false
}
// Similar to gopherArea, except it checks for in
// bounds before checking for empty.
// Returns -1, -1 if no suitable space was found
func (b Board) holeArea(x, y int) (int, int) {
	// The hole is placed "randomly" in one of the four orthogonal
	// neighbors of the gopher, skipping spots that are off the board or
	// occupied. shuffleDirections yields the codes 1-4, used as 1-based
	// indices into pos:
	// 1 = (x, y+1)   2 = (x+1, y)   3 = (x, y-1)   4 = (x-1, y)
	di := shuffleDirections()
	for _, i := range di {
		pos := [][]int{
			[]int{x, y + 1},
			[]int{x + 1, y},
			[]int{x, y - 1},
			[]int{x - 1, y},
		}
		px := pos[i-1][0]
		py := pos[i-1][1]
		ok := b.canPlace(px, py)
		if !ok {
			continue
		}
		return px, py
	}
	return -1, -1
}
// shuffleDirections returns the direction codes 1-4 in random order, using
// the package-level source r (which setRand must have initialized first).
func shuffleDirections() []int {
	d := []int{1, 2, 3, 4}
	r.Shuffle(len(d), func(i, j int) {
		d[i], d[j] = d[j], d[i]
	})
	return d
}
// canPlace reports whether (x, y) is on the board and currently empty.
func (b Board) canPlace(x, y int) bool {
	return b.withinBounds(x, y) && b[x][y] == " "
}
// withinBounds reports whether (x, y) is a valid cell coordinate for this
// board.
func (b Board) withinBounds(x, y int) bool {
	return x >= 0 && x < len(b) && y >= 0 && y < len(b[0])
}
// Print writes each row of the board to stdout followed by a separator
// line; intended for debugging. (The loop variable r shadows the package
// random source here, which is harmless.)
func (b Board) Print() {
	for _, r := range b {
		fmt.Println(r)
	}
	fmt.Println("----------")
}
// WriteChar is the method used to add a
// character to a board
// It rejects writing "h", writes outside the board, and writes over a
// cell already holding "h"; writing a character a cell already contains
// is a silent no-op.
// NOTE(review): Fill marks holes with "o", yet this method guards "h" —
// confirm which character actually represents a hole.
func (b Board) WriteChar(input string, x, y int) error {
	if input == "h" {
		return errors.New("holes cannot be placed")
	}
	ca, err := b.CharAt(x, y)
	if err != nil {
		return errors.Errorf("(%d, %d) is out of bounds", x, y)
	} else if ca == "h" {
		return errors.New("holes cannot be overwritten")
	} else if input == ca {
		// no need to bother overwriting the same character
		return nil
	}
	b[x][y] = input
	return nil
}
// CharAt returns the character at a particular
// position if position is valid
func (b Board) CharAt(x, y int) (string, error) {
if !b.withinBounds(x, y) {
return "", errors.Errorf("(%d, %d) is out of bounds", x, y)
}
return b[x][y], nil
} | board/board.go | 0.746971 | 0.406155 | board.go | starcoder |
package layout
import (
"fmt"
"sort"
"strings"
)
// LayeredGraph is graph with dummy nodes such that there is no long edges.
// Short edge is between nodes in Layers next to each other.
// Long edge is between nodes in 1+ Layers between each other.
// Segment is either a short edge or a long edge.
// Top layer has lowest layer number.
// Long edges are subdivided by the entries in Dummy; Edges maps each real
// edge to its full chain of real and fake nodes.
type LayeredGraph struct {
	Segments map[[2]uint64]bool     // segment is an edge in layered graph, can be real edge or piece of fake edge
	Dummy    map[uint64]bool        // fake nodes
	NodeYX   map[uint64][2]int      // node -> {layer, ordering in layer}
	Edges    map[[2]uint64][]uint64 // real long/short edge -> {real, fake, fake, fake, real} nodes
}
// Layers returns the nodes grouped by layer (index = layer number, top
// layer first), with each layer sorted by the node's in-layer ordering.
func (g LayeredGraph) Layers() [][]uint64 {
	// Find the highest layer number to size the result.
	maxY := 0
	for _, yx := range g.NodeYX {
		if yx[0] > maxY {
			maxY = yx[0]
		}
	}
	layers := make([][]uint64, maxY+1)
	// Bucket every node in a single pass over NodeYX (the original
	// rescanned the whole map once per layer).
	for node, yx := range g.NodeYX {
		layers[yx[0]] = append(layers[yx[0]], node)
	}
	// Sort each layer by in-layer ordering.
	for y := range layers {
		nodes := layers[y]
		sort.Slice(nodes, func(i, j int) bool { return g.NodeYX[nodes[i]][1] < g.NodeYX[nodes[j]][1] })
	}
	return layers
}
// Validate checks that every segment points from a lower-numbered layer to
// a strictly higher-numbered one, returning an error for the first
// violation found.
func (g LayeredGraph) Validate() error {
	for segment := range g.Segments {
		fromLayer := g.NodeYX[segment[0]][0]
		toLayer := g.NodeYX[segment[1]][0]
		if fromLayer < toLayer {
			continue
		}
		return fmt.Errorf("edge(%v) is wrong direction, got from level(%d) to level(%d)", segment, fromLayer, toLayer)
	}
	return nil
}
// String renders the graph for debugging: the fake-node set, the sorted
// list of segments, and each layer's nodes one line per layer.
func (g LayeredGraph) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "fake nodes: %v\n", g.Dummy)
	var segs []string
	for s := range g.Segments {
		segs = append(segs, fmt.Sprintf("%d->%d", s[0], s[1]))
	}
	sort.Strings(segs)
	fmt.Fprintf(&sb, "segments: %s\n", strings.Join(segs, " "))
	for layerNum, nodes := range g.Layers() {
		line := ""
		for _, node := range nodes {
			line += fmt.Sprintf(" %d", node)
		}
		fmt.Fprintf(&sb, "%d: %s\n", layerNum, line)
	}
	return sb.String()
}
// IsInnerSegment tells when edge is between two Dummy nodes.
// Inner segments are the middle pieces of a subdivided long edge.
func (g LayeredGraph) IsInnerSegment(segment [2]uint64) bool {
	return g.Dummy[segment[0]] && g.Dummy[segment[1]]
}
// UpperNeighbors returns the nodes exactly one layer above node that share
// a segment ending at node.
func (g LayeredGraph) UpperNeighbors(node uint64) []uint64 {
	var neighbors []uint64
	for segment := range g.Segments {
		if segment[1] != node {
			continue
		}
		if g.NodeYX[segment[0]][0] == g.NodeYX[node][0]-1 {
			neighbors = append(neighbors, segment[0])
		}
	}
	return neighbors
}
// LowerNeighbors are nodes in lower layer that are connected to given node.
func (g LayeredGraph) LowerNeighbors(node uint64) []uint64 {
	var nodes []uint64
	for e := range g.Segments {
		if e[0] == node {
			if g.NodeYX[e[0]][0] == (g.NodeYX[e[1]][0] - 1) {
				// Bug fix: append the neighbor e[1]; the original
				// appended e[0], i.e. the queried node itself.
				nodes = append(nodes, e[1])
			}
		}
	}
	return nodes
}
// newLayersFrom returns a deep copy of src: a fresh outer slice whose
// layers do not alias src's backing arrays.
func newLayersFrom(src [][]uint64) [][]uint64 {
	dst := make([][]uint64, 0, len(src))
	for _, layer := range src {
		clone := make([]uint64, len(layer))
		copy(clone, layer)
		dst = append(dst, clone)
	}
	return dst
}
// copyLayers copies the contents of src into dst layer by layer; dst must
// already have layers at least as long as src's.
func copyLayers(dst, src [][]uint64) {
	for i, layer := range src {
		copy(dst[i], layer)
	}
}
package policyv1
import (
hash "hash"
)
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Policy) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Policy_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Metadata) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Metadata_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *ResourcePolicy) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_ResourcePolicy_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *ResourceRule) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_ResourceRule_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *PrincipalPolicy) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_PrincipalPolicy_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *PrincipalRule) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_PrincipalRule_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *PrincipalRule_Action) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_PrincipalRule_Action_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *DerivedRoles) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_DerivedRoles_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *RoleDef) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_RoleDef_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Condition) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Condition_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Match) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Match_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Match_ExprList) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Match_ExprList_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Schemas) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Schemas_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Schemas_IgnoreWhen) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Schemas_IgnoreWhen_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Schemas_Schema) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Schemas_Schema_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestSuite) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestSuite_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestTable) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestTable_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestTable_CheckInput) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestTable_CheckInput_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *TestTable_ExpectedItem) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_TestTable_ExpectedItem_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Test) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Test_hashpb_sum(m, hasher, ignore)
	}
}
// HashPB computes a hash of the message using the given hash function.
// It is a no-op when the receiver is nil.
// The ignore set must contain fully-qualified field names (pkg.msg.field) that should be ignored from the hash
func (m *Test_TestName) HashPB(hasher hash.Hash, ignore map[string]struct{}) {
	if m != nil {
		cerbos_policy_v1_Test_TestName_hashpb_sum(m, hasher, ignore)
	}
}
package application
/*
TX RULES
1. The sum of value in vout MUST be equal to the sum of value in vin with
deposit discounts applied.
2. The txHash MUST be correct for the transaction
3. All signatures must be valid
4. No conflicting transaction hashes are allowed
5. No conflicting UTXO IDs are allowed
6. All referenced UTXOs must be valid and not be spent OR reference a
deposit from Ethereum.
 9. DataStore deposit must be GTE to the correct amount based on data size
    and be mod zero datasize
10. If the transaction is being mined, a datastore issuedAt must be equal to
the current epoch
11. The datastore index MUST be 32 bytes
12. The chainID MUST be correct
13. All transactions MUST have more than one input and more than one output.
*/
/*
RULES:
THE SUM OF THE INPUTS MUST BE EQUAL TO THE SUM OF THE OUTPUTS.
ALL INPUTS MUST REFERENCE ANOTHER UTXO/DTXO
ALL OUTPUTS MUST NOT COLLIDE WITH ANY EXISTING OUTPUT IN HASH KEY
THE REWARD FOR CLEANING UP A DTXO IS ONE EPOCH, IT MAY BE CLAIMED
AT ANY TIME IN THE LAST EPOCH BY ANYONE
sync procedure - sync the latest snapshot trie
use the leaves of each subtree to ask for more
leaves
NOTE: this implies mined transactions must be indexed by leaf hashes
same with DTXO data
store transactions at prefixTx|hash(transaction)
store UTXO DTXO as prefixUTXO|hash(transaction)|hash(transaction|index)
store DTXO index as prefixUTXODataIndex|hash(transaction)|hash(transaction|index)
store pending tx as a blob with data included
use ref counting to prevent double inclusion
for pending transactions maintain a set of index keys stored as follows:
FOR DTXO containing tx:
Note at this time miner rewards are done through DTXO objects
as such all tx should contain a dtxo if they have a reward.
This logic will cause some valid tx to potentially be dropped across
epoch boundaries.
Time and epoch based sorting allows stale data to be dropped
back referencing allows data to be cleaned up when a tx is
consumed. Time references also allow txs to be consumed in a
FIFO manner of preference for initial deployment.
key:
prefixPendingTxIndex|epoch issued|ts received|hash(tx)
value:
Tx
key:
prefixPendingTxBackRef|hash(tx)
value:
prefixPendingTxIndex|epoch issued|ts received|hash(tx)
Mined transaction indexing:
key:
prefixMinedTxIndex|epoch mined|hash(tx)
value:
Tx
key:
prefixMinedTxBackRef|hash(tx)
value:
prefixMinedTxIndex|epoch issued|hash(tx)
UTXO indexing
key:
prefixUTXO|epoch expires|deposit value|hash(utxo)
*Note: deposit value is used here to do greedy garbage
collection. This allows the most space heavy objects to
be consumed in a preferential manner. Thus rational
actors will clean up the system in the most efficient
manner possible in order to maximize rewards.
value:
utxo/dtxo (note data not stored here)
key:
prefixUTXO|epoch expires|epoch mined|hash(utxo)
value:
prefixUTXO|epoch expires|epoch mined|hash(utxo)
key:
prefixUTXOTxRef|
must be indexed by hash
must be indexed by blocknumber and index
data must be indexed by expiration epoch
sync strategy:
0
/ \
1 8
/\ / \
2 5 9 10
/\ /\ /\ /\
3 4 6 7 11 12 13 14
*/ | application/doc.go | 0.519765 | 0.51312 | doc.go | starcoder |
package accessionnumbers
// Definition provides the accession number patterns and URIs for a single organization.
type Definition struct {
	// The name of the organization associated with this definition.
	OrganizationName string `json:"organization_name"`
	// The URL of the organization associated with this definition.
	OrganizationURL string `json:"organization_url"`
	// A valid URI template (RFC 6570) used to generate the URL for an object given its accession number.
	ObjectURL string `json:"object_url,omitempty"`
	// A valid URI template (RFC 6570) used to generate the IIIF manifest URL for an object given its accession number.
	IIIFManifest string `json:"iiif_manifest,omitempty"`
	// A valid URI template (RFC 6570) used to generate an OEmbed profile URL for an object given its accession number.
	OEmbedProfile string `json:"oembed_profile,omitempty"`
	// The set of patterns used to identify and extract accession numbers associated with an organization.
	Patterns []*Pattern `json:"patterns"`
}
// Pattern describes a single regular expression, with its tests, used to match one or more accession numbers.
type Pattern struct {
	// The name or label for a given pattern.
	Label string `json:"label"`
	// A valid regular expression string.
	Pattern string `json:"pattern"`
	// A dictionary containing zero or more tests for validating `Pattern`. Keys contain text to extract accession numbers from and values are the list of accession numbers expected to be found in the text, in the order that they are found.
	Tests map[string][]string `json:"tests"`
}
// Match records an accession number found in a body of text.
type Match struct {
	// The accession number found in a body of text.
	AccessionNumber string `json:"accession_number"`
	// The URL of the organization that the accession number is associated with. Wherever possible this should match the `OrganizationURL` property in a `Definition` struct.
	OrganizationURL string `json:"organization,omitempty"`
} | cmd/vendor/github.com/sfomuseum/go-accession-numbers/accessionnumbers.go | 0.628749 | 0.40592 | accessionnumbers.go | starcoder
package summarizer
import (
"context"
"regexp"
"github.com/GoogleCloudPlatform/testgrid/internal/result"
"github.com/GoogleCloudPlatform/testgrid/pb/state"
summarypb "github.com/GoogleCloudPlatform/testgrid/pb/summary"
"github.com/GoogleCloudPlatform/testgrid/pkg/summarizer/common"
)
const (
	// minRuns is passed to GetFlakiness as the minimum-run threshold;
	// 0 means no test is filtered out for having too few runs.
	minRuns = 0
)
var (
	// infraRegex matches failure messages that are a single run of word
	// characters; such messages are counted as infrastructure failures
	// rather than ordinary test failures (see categorizeFailure).
	infraRegex = regexp.MustCompile(`^\w+$`)
)
// flakinessAnalyzer turns per-test GridMetrics gathered over the
// [startDate, endDate] window of a dashboard tab into a HealthinessInfo.
type flakinessAnalyzer interface {
	GetFlakiness(gridMetrics []*common.GridMetrics, minRuns int, startDate int, endDate int, tab string) *summarypb.HealthinessInfo
}
// CalculateHealthiness extracts the test run data from each row (which represents a test)
// of the Grid, restricted to columns inside [startTime, endTime], and then analyzes it
// with the injected flakinessAnalyzer implementation (e.g. the one in the naive subdir).
func CalculateHealthiness(grid *state.Grid, analyzer flakinessAnalyzer, startTime int, endTime int, tab string) *summarypb.HealthinessInfo {
	metrics := parseGrid(grid, startTime, endTime)
	return analyzer.GetFlakiness(metrics, minRuns, startTime, endTime, tab)
}
// parseGrid flattens the grid (one dashboard tab) into one GridMetrics per
// test row, counting pass/fail/flaky results over the columns whose Started
// time lies in [startTime, endTime]; rows that accumulated no counted
// results are dropped from the returned slice.
func parseGrid(grid *state.Grid, startTime int, endTime int) []*common.GridMetrics {
	// Get the relevant data for flakiness from each Grid (which represents
	// a dashboard tab) as a list of GridMetrics structs
	// TODO (itsazhuhere@): consider refactoring/using summary.go's gridMetrics function
	// as it does very similar data collection.
	// Cancelling the context on return stops any workers result.Map started.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// We create maps because result.Map returns a map where we can access each result
	// through the test name, and at each instance we can increment our types.Result
	// using the same key. At the end we can filter out those types.Result that had
	// 0 of all counts.
	gridMetricsMap := make(map[string]*common.GridMetrics, 0)
	gridRows := make(map[string]*state.Row)
	for i, row := range grid.Rows {
		gridRows[row.Name] = grid.Rows[i]
	}
	// result.Map is written in a way that assumes each test/row name is unique
	rowResults := result.Map(ctx, grid.Rows)
	for i := 0; i < len(grid.Columns); i++ {
		// NOTE(review): skipping a column here without receiving from the
		// rowResults channels assumes the channels do not stream one value
		// per column; if they do, this skip desynchronizes column index i
		// from the received results — confirm result.Map's contract.
		if !isWithinTimeFrame(grid.Columns[i], startTime, endTime) {
			continue
		}
		for key, ch := range rowResults {
			// Lazily create the per-test accumulator on first sight.
			if _, ok := gridMetricsMap[key]; !ok {
				gridMetricsMap[key] = common.NewGridMetrics(key)
			}
			switch result.Coalesce(<-ch, result.IgnoreRunning) {
			case state.Row_NO_RESULT:
				continue
			case state.Row_FAIL:
				categorizeFailure(gridMetricsMap[key], gridRows[key].Messages[i])
			case state.Row_PASS:
				gridMetricsMap[key].Passed++
			case state.Row_FLAKY:
				getValueOfFlakyMetric(gridMetricsMap[key])
			}
		}
	}
	// Keep only tests that had at least one counted result.
	gridMetrics := make([]*common.GridMetrics, 0)
	for _, metric := range gridMetricsMap {
		if metric.Failed > 0 || metric.Passed > 0 || metric.FlakyCount > 0 {
			gridMetrics = append(gridMetrics, metric)
		}
	}
	return gridMetrics
}
// categorizeFailure tallies one failed run: messages that are a single
// word-character token (per infraRegex) count as infrastructure failures and
// are tracked per-message; everything else counts as an ordinary failure.
func categorizeFailure(resultCounts *common.GridMetrics, message string) {
	if message != "" && infraRegex.MatchString(message) {
		resultCounts.FailedInfraCount++
		resultCounts.InfraFailures[message]++
		return
	}
	resultCounts.Failed++
}
// getValueOfFlakyMetric records one flaky run on the given metrics, folding
// an assumed 50% flakiness into the running average (flakiness is expressed
// out of 100, i.e. 23 not .23).
func getValueOfFlakyMetric(gridMetrics *common.GridMetrics) {
	// TODO (itszhuhere@): add a way to get exact flakiness from a Row_FLAKY cell.
	// A flat 50 is used for now because:
	// a) gridMetrics.flakiness and .flakyCount are currently not used by anything
	// b) there's no easy way to get the exact flakiness measurement from prow or whatever else
	// c) GKE does not currently enable retry on flakes so it isn't as important right now
	const assumedFlakiness = 50.0
	gridMetrics.FlakyCount++
	// Incremental mean: mean += (newValue - mean) / newCount.
	gridMetrics.AverageFlakiness += (assumedFlakiness - gridMetrics.AverageFlakiness) / float64(gridMetrics.FlakyCount)
}
// isWithinTimeFrame reports whether the column's Started timestamp lies in
// the inclusive window [startTime, endTime].
// NOTE(review): Started's unit is not visible here — callers must pass
// startTime/endTime in the same unit. Confirm against state.Column.
func isWithinTimeFrame(column *state.Column, startTime, endTime int) bool {
	return column.Started >= float64(startTime) && column.Started <= float64(endTime)
} | pkg/summarizer/flakiness.go | 0.664867 | 0.472379 | flakiness.go | starcoder
Karplus Strong Oscillator Module
KS generally has a delay line buffer size that determines the fundamental frequency
of the sound. That has some practical problems. The delay line buffer is too
large for low frequencies and it makes it hard to provide fine resolution
control over the frequency. This implementation uses a fixed buffer size and
steps through it with a 32 bit phase value. The step size determines the
frequency of the sound. When the step position falls between samples we do
linear interpolation to get the output value. When we move beyond a sample
we do the low pass filtering on it (in this case simple averaging).
*/
//-----------------------------------------------------------------------------
package osc
import (
"github.com/deadsy/babi/core"
"github.com/deadsy/babi/utils/log"
)
//-----------------------------------------------------------------------------
// ksOscInfo describes the ports exposed by the Karplus Strong oscillator:
// three float inputs (gate, frequency, attenuation) routed to the ksPort*
// handlers below, and one audio output.
var ksOscInfo = core.ModuleInfo{
	Name: "ksOsc",
	In: []core.PortInfo{
		{"gate", "oscillator gate, attack(>0) or mute(=0)", core.PortTypeFloat, ksPortGate},
		{"frequency", "frequency (Hz)", core.PortTypeFloat, ksPortFrequency},
		{"attenuation", "attenuation (0..1)", core.PortTypeFloat, ksPortAttenuation},
	},
	Out: []core.PortInfo{
		{"out", "output", core.PortTypeAudio, nil},
	},
}
// Info returns the module information.
func (m *ksOsc) Info() *core.ModuleInfo {
	return &m.info
}
//-----------------------------------------------------------------------------
// Fixed-point phase parameters: the top ksDelayBits of the 32-bit phase
// index the delay line, the remaining ksFracBits are the fractional part
// used for linear interpolation.
const ksDelayBits = 5                    // log2 of the delay line length
const ksDelaySize = 1 << ksDelayBits     // number of samples in the delay line
const ksDelayMask = ksDelaySize - 1      // wraps an index into the delay line
const ksFracBits = 32 - ksDelayBits      // fractional bits of the phase
const ksFracMask = (1 << ksFracBits) - 1 // extracts the fractional phase bits
// ksFracScale converts the fractional phase bits to a value in [0,1).
// NOTE: this must be a floating-point constant. The previous expression
// `1 / (1 << ksFracBits)` performed untyped *integer* constant division and
// evaluated to 0, which silently disabled the linear interpolation term in
// Process (the output degenerated to y0 with no fractional blending).
const ksFracScale = 1.0 / (1 << ksFracBits)
// ksOsc is the per-voice state of the Karplus Strong oscillator.
type ksOsc struct {
	info core.ModuleInfo // module info
	rand *core.Rand32
	// noise source used to seed the delay line on gate attack (see ksPortGate)
	delay [ksDelaySize]float32 // delay line
	k float32 // attenuation and averaging constant 0 to 0.5
	freq float32 // base frequency
	x uint32 // phase position
	xstep uint32 // phase step per sample
}
// NewKarplusStrong returns a Karplus Strong oscillator module registered
// with the given synth.
func NewKarplusStrong(s *core.Synth) core.Module {
	log.Info.Printf("new osc")
	return s.Register(&ksOsc{
		info: ksOscInfo,
		rand: core.NewRand32(0),
	})
}
// Child returns the child modules; a Karplus Strong oscillator has none.
func (m *ksOsc) Child() []core.Module {
	return nil
}
// Stop performs any cleanup of a module; nothing is needed here.
func (m *ksOsc) Stop() {
}
//-----------------------------------------------------------------------------
// Port Events
// ksPortGate handles the "gate" port: a value > 0 (attack) seeds the delay
// line with zero-sum noise; a value of 0 (mute) clears the delay line.
func ksPortGate(cm core.Module, e *core.Event) {
	m := cm.(*ksOsc)
	gate := e.GetEventFloat().Val
	log.Info.Printf("gate %f", gate)
	if gate > 0 {
		// Initialise the delay buffer with random samples between -1 and 1.
		// The values should sum to zero so that multiple rounds of filtering
		// will make all values fall to zero.
		// NOTE(review): assumes core.Rand32.Float32 returns values roughly in
		// [-1, 1] — confirm against the core package.
		var sum float32
		for i := 0; i < ksDelaySize-1; i++ {
			val := m.rand.Float32()
			// Flip the sign when adding val would push the running sum
			// outside [-1, 1], keeping the final balancing sample bounded.
			x := sum + val
			if x > 1 || x < -1 {
				val = -val
			}
			sum += val
			m.delay[i] = val
		}
		// The last sample cancels the accumulated sum, so the buffer sums to zero.
		m.delay[ksDelaySize-1] = -sum
	} else {
		// Mute: zero the delay line so the output is silent.
		for i := 0; i < ksDelaySize; i++ {
			m.delay[i] = 0
		}
	}
}
// ksPortAttenuation handles the "attenuation" port: the input is clamped to
// [0,1] and halved to give the averaging constant k in [0, 0.5].
func ksPortAttenuation(cm core.Module, e *core.Event) {
	m := cm.(*ksOsc)
	a := core.Clamp(e.GetEventFloat().Val, 0, 1)
	log.Info.Printf("set attenuation %f", a)
	m.k = 0.5 * a
}
// ksPortFrequency handles the "frequency" port: the value is clamped to be
// non-negative and converted into the per-sample 32-bit phase step.
func ksPortFrequency(cm core.Module, e *core.Event) {
	m := cm.(*ksOsc)
	f := core.ClampLo(e.GetEventFloat().Val, 0)
	log.Info.Printf("set frequency %f Hz", f)
	m.freq = f
	m.xstep = uint32(f * core.FrequencyScale)
}
//-----------------------------------------------------------------------------
// Process runs the module DSP: each output sample linearly interpolates
// between the two delay-line samples bracketing the 32-bit phase, and once
// the phase steps past a sample that sample is averaged with its neighbour
// (scaled by k) — the classic Karplus Strong decay.
func (m *ksOsc) Process(buf ...*core.Buf) bool {
	out := buf[0]
	for i := 0; i < len(out); i++ {
		// Integer part of the phase selects the sample pair; the second
		// index wraps around the delay line.
		x0 := m.x >> ksFracBits
		x1 := (x0 + 1) & ksDelayMask
		y0 := m.delay[x0]
		y1 := m.delay[x1]
		// interpolate: fractional phase bits scaled to [0,1).
		// NOTE(review): this relies on ksFracScale being a non-zero
		// floating-point constant — see its declaration.
		out[i] = y0 + (y1-y0)*ksFracScale*float32(m.x&ksFracMask)
		// step the x position
		m.x += m.xstep
		// filter - once we have moved beyond the delay line index we
		// will average its amplitude with the next value.
		if x0 != (m.x >> ksFracBits) {
			m.delay[x0] = m.k * (y0 + y1)
		}
	}
	return true
}
//----------------------------------------------------------------------------- | module/osc/ks.go | 0.822225 | 0.659515 | ks.go | starcoder |
package assert
import (
http "net/http"
url "net/url"
time "time"
)
// NOTE(review): these one-line forwarders match testify's generated
// assertion_forward.go — presumably auto-generated via go:generate; if so,
// prefer regenerating over hand-editing. TODO confirm.
// Condition forwards to the package-level Condition assertion, using the
// receiver's captured TestingT.
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
	return Condition(a.t, comp, msgAndArgs...)
}
// Conditionf forwards to the package-level Conditionf assertion (printf-style message).
func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
	return Conditionf(a.t, comp, msg, args...)
}
// Contains forwards to the package-level Contains assertion.
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
	return Contains(a.t, s, contains, msgAndArgs...)
}
// Containsf forwards to the package-level Containsf assertion (printf-style message).
func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
	return Containsf(a.t, s, contains, msg, args...)
}
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...)
}
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
return Emptyf(a.t, object, msg, args...)
}
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Equal(a.t, expected, actual, msgAndArgs...)
}
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
return EqualError(a.t, theError, errString, msgAndArgs...)
}
func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
return EqualErrorf(a.t, theError, errString, msg, args...)
}
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return EqualValuesf(a.t, expected, actual, msg, args...)
}
func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return Equalf(a.t, expected, actual, msg, args...)
}
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
return Errorf(a.t, err, msg, args...)
}
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Exactly(a.t, expected, actual, msgAndArgs...)
}
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return Exactlyf(a.t, expected, actual, msg, args...)
}
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
return Fail(a.t, failureMessage, msgAndArgs...)
}
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
return FailNow(a.t, failureMessage, msgAndArgs...)
}
func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
return FailNowf(a.t, failureMessage, msg, args...)
}
func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
return Failf(a.t, failureMessage, msg, args...)
}
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
return False(a.t, value, msgAndArgs...)
}
func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
return Falsef(a.t, value, msg, args...)
}
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
return HTTPBodyContains(a.t, handler, method, url, values, str)
}
func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
return HTTPBodyContainsf(a.t, handler, method, url, values, str)
}
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
return HTTPBodyNotContains(a.t, handler, method, url, values, str)
}
func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
return HTTPBodyNotContainsf(a.t, handler, method, url, values, str)
}
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPError(a.t, handler, method, url, values)
}
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPErrorf(a.t, handler, method, url, values)
}
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPRedirect(a.t, handler, method, url, values)
}
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPRedirectf(a.t, handler, method, url, values)
}
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPSuccess(a.t, handler, method, url, values)
}
func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPSuccessf(a.t, handler, method, url, values)
}
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return Implements(a.t, interfaceObject, object, msgAndArgs...)
}
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
return Implementsf(a.t, interfaceObject, object, msg, args...)
}
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDelta(a.t, expected, actual, delta, msgAndArgs...)
}
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
}
func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
}
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
return InDeltaf(a.t, expected, actual, delta, msg, args...)
}
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
}
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
}
func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
}
func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
}
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return IsType(a.t, expectedType, object, msgAndArgs...)
}
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
return IsTypef(a.t, expectedType, object, msg, args...)
}
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
return JSONEq(a.t, expected, actual, msgAndArgs...)
}
func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
return JSONEqf(a.t, expected, actual, msg, args...)
}
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
return Len(a.t, object, length, msgAndArgs...)
}
func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
return Lenf(a.t, object, length, msg, args...)
}
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
return Nil(a.t, object, msgAndArgs...)
}
func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
return Nilf(a.t, object, msg, args...)
}
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
return NoError(a.t, err, msgAndArgs...)
}
func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
return NoErrorf(a.t, err, msg, args...)
}
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return NotContains(a.t, s, contains, msgAndArgs...)
}
func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
return NotContainsf(a.t, s, contains, msg, args...)
}
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
return NotEmpty(a.t, object, msgAndArgs...)
}
func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
return NotEmptyf(a.t, object, msg, args...)
}
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
return NotEqualf(a.t, expected, actual, msg, args...)
}
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
return NotNil(a.t, object, msgAndArgs...)
}
func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
return NotNilf(a.t, object, msg, args...)
}
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return NotPanics(a.t, f, msgAndArgs...)
}
func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
return NotPanicsf(a.t, f, msg, args...)
}
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return NotRegexp(a.t, rx, str, msgAndArgs...)
}
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
return NotRegexpf(a.t, rx, str, msg, args...)
}
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
return NotSubset(a.t, list, subset, msgAndArgs...)
}
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
return NotSubsetf(a.t, list, subset, msg, args...)
}
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
return NotZero(a.t, i, msgAndArgs...)
}
func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
return NotZerof(a.t, i, msg, args...)
}
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...)
}
func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
return PanicsWithValue(a.t, expected, f, msgAndArgs...)
}
func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
return PanicsWithValuef(a.t, expected, f, msg, args...)
}
func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
return Panicsf(a.t, f, msg, args...)
}
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return Regexp(a.t, rx, str, msgAndArgs...)
}
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
return Regexpf(a.t, rx, str, msg, args...)
}
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
return Subset(a.t, list, subset, msgAndArgs...)
}
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
return Subsetf(a.t, list, subset, msg, args...)
}
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
return True(a.t, value, msgAndArgs...)
}
func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
return Truef(a.t, value, msg, args...)
}
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
}
func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
return WithinDurationf(a.t, expected, actual, delta, msg, args...)
}
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
return Zero(a.t, i, msgAndArgs...)
}
func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
return Zerof(a.t, i, msg, args...)
} | assertion_forward.go | 0.785555 | 0.400486 | assertion_forward.go | starcoder |
package httptest
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"testing"
"github.com/sasalatart/batcoms/domain/battles"
"github.com/sasalatart/batcoms/domain/commanders"
"github.com/sasalatart/batcoms/domain/factions"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// AssertFailedGET asserts that issuing a GET request to the specified route renders the given
// status and error message
func AssertFailedGET(t *testing.T, route string, expectedStatus int, expectedMessage string) {
	t.Helper()
	resp, err := http.Get(route)
	require.NoErrorf(t, err, "Requesting %s", route)
	defer resp.Body.Close()
	assert.Equal(t, expectedStatus, resp.StatusCode, "Comparing status with expected value")
	AssertErrorMessage(t, resp, expectedMessage)
}
// AssertErrorMessage asserts that the given *http.Response contains the specified error message
func AssertErrorMessage(t *testing.T, res *http.Response, expectedMessage string) {
	t.Helper()
	body, err := ioutil.ReadAll(res.Body)
	require.NoError(t, err, "Reading from response body")
	assert.Equal(t, expectedMessage, string(body), "Comparing body with expected error message")
}
// AssertJSONFaction asserts that the given *http.Response contains the specified JSON-serialized
// factions.Faction
func AssertJSONFaction(t *testing.T, res *http.Response, expectedFaction factions.Faction) {
t.Helper()
factionFromBody := new(factions.Faction)
err := json.NewDecoder(res.Body).Decode(factionFromBody)
require.NoError(t, err, "Decoding body into faction struct")
assert.Equal(t, expectedFaction, *factionFromBody, "Comparing body with expected faction")
}
// AssertJSONFactions is like AssertJSONFaction, but for a slice of factions.Faction
func AssertJSONFactions(t *testing.T, res *http.Response, expectedFactions []factions.Faction) {
t.Helper()
factionsFromBody := new([]factions.Faction)
err := json.NewDecoder(res.Body).Decode(factionsFromBody)
require.NoError(t, err, "Decoding body into factions slice")
assert.Equal(t, expectedFactions, *factionsFromBody, "Comparing body with expected factions")
}
// AssertJSONCommander asserts that the given *http.Response contains the specified JSON-serialized
// commanders.Commander
func AssertJSONCommander(t *testing.T, res *http.Response, expectedCommander commanders.Commander) {
t.Helper()
commanderFromBody := new(commanders.Commander)
err := json.NewDecoder(res.Body).Decode(commanderFromBody)
require.NoError(t, err, "Decoding body into commander struct")
assert.Equal(t, expectedCommander, *commanderFromBody, "Comparing body with expected commander")
}
// AssertJSONCommanders is like AssertJSONCommander, but for a slice of commanders.Commander
func AssertJSONCommanders(t *testing.T, res *http.Response, expectedCommanders []commanders.Commander) {
t.Helper()
commandersFromBody := new([]commanders.Commander)
err := json.NewDecoder(res.Body).Decode(commandersFromBody)
require.NoError(t, err, "Decoding body into commanders slice")
assert.Equal(t, expectedCommanders, *commandersFromBody, "Comparing body with expected commanders")
}
// AssertJSONBattle asserts that the given *http.Response contains the specified JSON-serialized
// battles.Battle
func AssertJSONBattle(t *testing.T, res *http.Response, expectedBattle battles.Battle) {
t.Helper()
battleFromBody := new(battles.Battle)
err := json.NewDecoder(res.Body).Decode(battleFromBody)
require.NoError(t, err, "Decoding body into battle struct")
assert.Equal(t, expectedBattle, *battleFromBody, "Comparing body with expected battle")
}
// AssertJSONBattles is like AssertJSONBattle, but for a slice of battles.Battle
func AssertJSONBattles(t *testing.T, res *http.Response, expectedBattles []battles.Battle) {
t.Helper()
battlesFromBody := new([]battles.Battle)
err := json.NewDecoder(res.Body).Decode(battlesFromBody)
require.NoError(t, err, "Decoding body into battles slice")
assert.Equal(t, expectedBattles, *battlesFromBody, "Comparing body with expected battles")
}
// AssertHeaderPages asserts that the given *http.Response has the expected "x-pages" header value
func AssertHeaderPages(t *testing.T, res *http.Response, expectedPages int) {
	t.Helper()
	wantPages := fmt.Sprint(expectedPages)
	gotPages := res.Header.Get("x-pages")
	assert.Equal(t, wantPages, gotPages, "Comparing with the expected 'x-pages' header")
} | http/httptest/httptest.go | 0.653127 | 0.499146 | httptest.go | starcoder
// Author: <EMAIL>
package main
import (
"math"
"math/rand"
"time"
"github.com/go-daq/crc8"
)
// DefaultSampleRate is the sample rate of our simulation (150 kHz)
const DefaultSampleRate = 150.0 * 1000.0
// IOTBaudRate is the Baud rate of our protocol
const IOTBaudRate = 1.785 * 1000.0
// CarrierFreq is the carrier frequency of the Infrared signal (38 kHz)
const CarrierFreq = 38.0 * 1000.0
// IRRSignalDetectorFc is the low pass filter Fc for signal strength detector in IR Receiver
const IRRSignalDetectorFc = 15.0 * 1000.0
// IRRBandpassFc is the Fc for the bandpass filter in IR Receiver to filter out everything but the IR carrier signal
const IRRBandpassFc = CarrierFreq
// IRRRecoveryFc is the Fc of the output low pass filter in IR Receiver
const IRRRecoveryFc = 8.0 * 1000.0
// Crc8Poly is the polynomial for CRC8 calculation. We are using CRC-8-CCITT, x^8+x^2+x^1+1.
const Crc8Poly = 0x07
// CommandPing is the command byte for Ping command
const CommandPing = 0x01
// CommandGetTemp is the command byte for Get Temperature command
const CommandGetTemp = 0x10
// CommandGetHumidity is the command byte for Get Humidity command
const CommandGetHumidity = 0x11
// CommandGetCO2 is the command byte for Get CO2 reading command
const CommandGetCO2 = 0x12
// CommandGetSmokeDetector is the command byte for Get Smoke Detector status command
const CommandGetSmokeDetector = 0x13
// CommandSetTime is the command byte for Set Time command
const CommandSetTime = 0x20
// CommandSystemVersion is the command byte for Get System Version command
const CommandSystemVersion = 0x30
// CommandReply is the flag for indicating that the command byte is the reply,
// not the request: if this bit is set, then it's a reply.
const CommandReply = 0x80
// min returns the smaller of two ints.
func min(a int, b int) int {
	if a < b {
		return a
	}
	return b
}
// max returns the larger of two ints.
func max(a int, b int) int {
	if a < b {
		return b
	}
	return a
}
// SchmidtTrigger is a Schmidt Trigger module (conventionally spelled
// "Schmitt trigger"): a comparator with hysteresis.
type SchmidtTrigger struct {
	tHi float64
	tLo float64
	// tHi -> rising edge threshold (Low to high)
	// tLo -> falling edge threshold (high to low); see step
	state int
	// 0 for lo, 1 for hi
}
// NewSchmidtTrigger creates a trigger with the given high-to-low (tLo) and
// low-to-high (tHi) thresholds; the output starts in the low state.
func NewSchmidtTrigger(tLo float64, tHi float64) *SchmidtTrigger {
	st := &SchmidtTrigger{}
	st.tLo = tLo
	st.tHi = tHi
	st.state = 0
	return st
}
// step feeds one sample through the trigger and returns the output level:
// 1 while high, 0 while low. The state only flips when the input crosses
// the opposite threshold (hysteresis).
func (x *SchmidtTrigger) step(inV float64) int {
	switch {
	case x.state == 0 && inV > x.tHi:
		x.state = 1
	case x.state == 1 && inV < x.tLo:
		x.state = 0
	}
	return x.state
}
// IRTransmitter is an IR Transmitter module: it on/off-keys the input level
// with the 38kHz carrier (see step).
type IRTransmitter struct {
	txPhase float64
	// Phase of the 38kHz Carrier, between 0 and 1
	// NOTE(review): txPhase is initialized by NewIRTransmitter but never read
	// by step, which derives the carrier phase from sim.currentTime alone —
	// possibly an unfinished per-transmitter phase offset. Confirm intent.
	sim *Simulator
	// The parent simulator
}
// NewIRTransmitter will create a new IR transmitter attached to the given
// simulator, with a random initial carrier phase.
func NewIRTransmitter(s *Simulator) *IRTransmitter {
	tx := &IRTransmitter{sim: s}
	tx.txPhase = s.rand.Float64()
	return tx
}
// step modulates the input voltage onto the IR carrier for the current
// simulation instant and returns the emitted level.
func (t *IRTransmitter) step(inV float64) float64 {
	// Square-wave carrier derived from the simulator clock: high for the
	// first half of every carrier period, low for the second half.
	_, phaseFrac := math.Modf(t.sim.currentTime * CarrierFreq)
	carrier := 0.0
	if phaseFrac < 0.5 {
		carrier = 1.0
	}
	return carrier * inV
}
// IRReceiver is an IR Receiver module. It models the analog front end of a
// demodulating IR receiver: a gain-normalizing signal detector, two cascaded
// bandpass filter stages, an envelope detector with a recovery low pass
// filter, and a Schmidt trigger output stage.
type IRReceiver struct {
	sim *Simulator
	// The parent simulator
	dt float64
	// Inverse sample rate
	sdAlpha float64
	// Signal detector alpha, used in the low pass filter
	sdYi float64
	// Previous/Current output of signal strength detector
	bpf1x1 float64
	bpf1x2 float64
	bpf1y1 float64
	bpf1y2 float64
	bpf2x1 float64
	bpf2x2 float64
	bpf2y1 float64
	bpf2y2 float64
	// Bandpass filter delay lines: x[n-1], x[n-2], y[n-1], y[n-2].
	// There are 2 bandpass filters, connected in cascade.
	bpfQ float64
	bpfA0 float64
	bpfA1 float64
	bpfA2 float64
	bpfB1 float64
	bpfB2 float64
	// Bandpass filter coefficients (shared by both stages)
	rlpAlpha float64
	// Recovery Low Pass alpha
	rlpYi float64
	// Previous/Current output of recovery
	outIO *SchmidtTrigger
	// Output I/O stage that squares up the recovered envelope
}
// NewIRReceiver will create a new IR Receiver module attached to simulator s.
// It precomputes the single-pole low pass alphas for the signal detector and
// the recovery filter, and the biquad coefficients shared by the two
// cascaded bandpass stages.
func NewIRReceiver(s *Simulator) *IRReceiver {
	x := IRReceiver{
		sim:   s,
		outIO: NewSchmidtTrigger(1.0, 2.0),
	}
	// Initialize the low pass filters (standard RC discretization:
	// alpha = w*dt / (w*dt + 1) with w = 2*pi*Fc).
	x.dt = 1.0 / (s.sampleRate)
	x.sdAlpha = (2.0 * math.Pi * x.dt * IRRSignalDetectorFc) / (2.0*math.Pi*x.dt*IRRSignalDetectorFc + 1.0)
	x.sdYi = 0.0
	x.rlpAlpha = (2.0 * math.Pi * x.dt * IRRRecoveryFc) / (2.0*math.Pi*x.dt*IRRRecoveryFc + 1.0)
	x.rlpYi = 0.0
	/*
		We implement a typical second order bandpass filter with the following s-domain transfer function:
		(w/Q*s/(s^2+w/Q*s+w^2))
		The digital filter coefficients are computed with the following sage math script:
		var('w s z Q T')
		H = (w/Q*s/(s^2+w/Q*s+w^2)).subs(s=(2/T)*(1-z^-1)/(1+z^-1)).expand().simplify()
		p = lambda x: x.numerator().expand().simplify().coefficients(x=z, sparse=False)
		A, B = p(H.numerator()), p(H.denominator())
		A = [x/B[0] for x in A]
		B = [-x/B[0] for x in B]
		[A, B]
		The result is:
		c = (Q*T^2*w^2 + 2*T*w + 4*Q)
		a0 = 2*T*w/c
		a1 = 0
		a2 = -2*T*w/c = -a0
		b1 = -2*(Q*T^2*w^2 - 4*Q)/c
		b2 = -(Q*T^2*w^2 - 2*T*w + 4*Q)/c
	*/
	x.bpfQ = 0.5
	Q := x.bpfQ
	T := x.dt
	w := IRRBandpassFc * 2.0 * math.Pi // center frequency in rad/s
	c := Q*T*T*w*w + 2.0*T*w + 4.0*Q
	x.bpfA0 = 2.0 * T * w / c
	x.bpfA1 = 0.0
	x.bpfA2 = -x.bpfA0
	x.bpfB1 = -2.0 * (Q*T*T*w*w - 4.0*Q) / c
	x.bpfB2 = -(Q*T*T*w*w - 2.0*T*w + 4.0*Q) / c
	// Clear both biquad delay lines.
	x.bpf1x1 = 0.0
	x.bpf1x2 = 0.0
	x.bpf1y1 = 0.0
	x.bpf1y2 = 0.0
	x.bpf2x1 = 0.0
	x.bpf2x2 = 0.0
	x.bpf2y1 = 0.0
	x.bpf2y2 = 0.0
	return &x
}
// step feeds one input illuminance sample through the receiver chain and
// returns the demodulated logic level (0 or 1).
func (r *IRReceiver) step(inL float64) int {
	// Pass the input through a Automatic Gain Control Amplifier first:
	// a low pass filter tracks the current signal strength...
	r.sdYi = r.sdYi*(1.0-r.sdAlpha) + r.sdAlpha*inL
	// ...and the input is normalized against it.
	inLn := inL / r.sdYi
	// First bandpass stage (direct-form biquad).
	bpf1Out := inLn*r.bpfA0 + r.bpf1x1*r.bpfA1 + r.bpf1x2*r.bpfA2 + r.bpf1y1*r.bpfB1 + r.bpf1y2*r.bpfB2
	// Update the bandpass filter delay line.
	r.bpf1x2 = r.bpf1x1
	r.bpf1x1 = inLn
	r.bpf1y2 = r.bpf1y1
	r.bpf1y1 = bpf1Out
	// Inter-stage gain (presumably make-up gain for the filter's
	// passband loss — confirm against the filter design).
	bpf1Out = bpf1Out * (2.0*r.bpfQ + 1)
	// Second bandpass stage, identical coefficients.
	bpf2Out := bpf1Out*r.bpfA0 + r.bpf2x1*r.bpfA1 + r.bpf2x2*r.bpfA2 + r.bpf2y1*r.bpfB1 + r.bpf2y2*r.bpfB2
	// Update the bandpass filter delay line.
	r.bpf2x2 = r.bpf2x1
	r.bpf2x1 = bpf1Out
	r.bpf2y2 = r.bpf2y1
	r.bpf2y1 = bpf2Out
	bpf2Out = bpf2Out * (2.0*r.bpfQ + 1)
	// Envelope detection: rectify, smooth with the recovery low pass
	// filter, then square up the result with the Schmidt trigger.
	bpfOutAbs := math.Abs(bpf2Out)
	r.rlpYi = r.rlpYi*(1.0-r.rlpAlpha) + r.rlpAlpha*bpfOutAbs
	return r.outIO.step(r.rlpYi)
}
// BitStreamTx is a bit stream transmitter module. Given a bitstream slice it
// will output the corresponding value at the specified time.
type BitStreamTx struct {
	txData []int
	// Contains the data to transmit, position i holds the data to transmit at time
	// dt*i to dt*(i+1). That is, at time t, we transmit the value txData[floor(t*(1/dt))]
	txPos int
	// The position in txData from which we'll append data to transmit
	sim *Simulator
	// The parent simulator
	crcTable *crc8.Table
	// Precomputed CRC-8 table used when framing packets
}
// NewBitStreamTx will create a new bit stream transmitter module bound to
// sim. The transmit buffer covers the whole simulation (plus slack), and
// txPos starts slightly past zero so the stream begins with idle bits.
func NewBitStreamTx(sim *Simulator) *BitStreamTx {
	bufLen := int(sim.simulationTime*IOTBaudRate + 32)
	tx := new(BitStreamTx)
	tx.sim = sim
	tx.txData = make([]int, bufLen)
	tx.txPos = 2
	tx.crcTable = crc8.MakeTable(Crc8Poly)
	return tx
}
// appendData schedules the given bit values for transmission, starting no
// earlier than 3 baud intervals after the current simulation time. Bits that
// would fall past the end of the transmit buffer are silently dropped.
func (x *BitStreamTx) appendData(data []int) {
	// Baud slot corresponding to the current simulation time.
	nowSlot := int(x.sim.currentTime * IOTBaudRate)
	// Leave 3 baud between the current transmission and the next.
	x.txPos = max(nowSlot+3, x.txPos)
	limit := min(len(x.txData)-x.txPos, len(data))
	for i := 0; i < limit; i++ {
		x.txData[x.txPos+i] = data[i]
	}
	x.txPos += len(data) + 8 // Leave some space at the end
}
// appendPacket frames data as an IOT protocol packet and schedules it for
// transmission. The frame layout is
//
//	0x55 <length> <payload...> <CRC-8>
//
// where <length> counts the whole frame and the CRC covers everything before
// it. The frame is then serialized into individual bits, LSB first.
func (x *BitStreamTx) appendPacket(data []byte) {
	// One extra byte each for the 0x55 header, the length, and the CRC-8.
	res := make([]byte, len(data)+3)
	res[0] = 0x55
	res[1] = uint8(len(res))
	copy(res[2:], data)
	// CRC over header, length and payload — everything but the CRC slot.
	res[len(res)-1] = crc8.Checksum(res[:len(res)-1], x.crcTable)
	// Expand each byte into 8 bits, least significant bit first.
	resb := make([]int, len(res)*8)
	for i, element := range res {
		for j := 0; j < 8; j++ {
			if element&(1<<uint(j)) != 0 {
				resb[i*8+j] = 1
			}
		}
	}
	x.appendData(resb)
}
// step returns the bit scheduled for transmission at the current simulation
// time.
func (x *BitStreamTx) step() int {
	slot := int(x.sim.currentTime * IOTBaudRate)
	return x.txData[slot]
}
// Sampler is a sampler module: it will sample its input at the specified
// sample rate, downsampling the simulator's (faster) clock to one stored
// value per sample period.
type Sampler struct {
	sampledData []int
	// Sampled data, one entry per sample period
	nextSampleIndex int
	// The next index in sampledData that we are going to write to in next sample
	sampleRate float64
	// Sample rate in Hz
	sim *Simulator
	// The Simulator
}
// NewSampler will create a new sampler module with the specified sample
// rate. Storage is pre-allocated for the whole simulation, with a little
// slack to absorb rounding at the end.
func NewSampler(s *Simulator, sampleRate float64) *Sampler {
	capacity := int(s.simulationTime*sampleRate) + 32
	smp := new(Sampler)
	smp.sim = s
	smp.sampleRate = sampleRate
	smp.nextSampleIndex = 0
	smp.sampledData = make([]int, capacity)
	return smp
}
// step records val if the simulation clock has advanced past the time of the
// next sample slot; otherwise it does nothing.
func (s *Sampler) step(val int) {
	due := float64(s.nextSampleIndex) * (1.0 / s.sampleRate)
	if due >= s.sim.currentTime {
		return
	}
	s.sampledData[s.nextSampleIndex] = val
	s.nextSampleIndex++
}
// IOTProtocolDecoder is an IOT Protocol Decoder module: it takes an input
// bit stream, looks for the IOT Protocol header byte (0x55), decodes the
// frame, validates its CRC, and then calls the packet handler, which may
// choose to respond via tx.
type IOTProtocolDecoder struct {
	sim *Simulator
	// The simulator.
	// NOTE(review): sim is never assigned by NewIOTProtocolDecoder and is
	// not read in this file — confirm whether it is still needed.
	packetHandler func(*IOTProtocolDecoder, []byte)
	// Handler invoked with the payload of each CRC-valid packet
	tx *BitStreamTx
	// Transmitter for transmitting reply after receiving command
	sampler *Sampler
	// Sampler for sampling the incoming signal (at 5x the baud rate)
	nextIndex int
	// Next sample index to process
	state int
	// 0 Scanning for start byte
	// 1 Found start byte, waiting for it to finish
	packetLength int
	// The length of the packet, only valid when state is 1
	decodeBufferBit []int
	// Buffer to hold the bits to decode
	decodeBuffer []byte
	// Buffer to hold the decoded bytes
	crcTable *crc8.Table
	// CRC Table for calculating the CRC
}
// NewIOTProtocolDecoder will create a new IOT Protocol Decoder that samples
// the incoming bit stream at 5x the baud rate, decodes packets, and hands
// valid ones to packetHandler. tx is available to the handler for replies.
func NewIOTProtocolDecoder(s *Simulator, tx *BitStreamTx, packetHandler func(*IOTProtocolDecoder, []byte)) *IOTProtocolDecoder {
	dec := new(IOTProtocolDecoder)
	// We sample at 5x the baud rate and pick only the center sample of
	// each bit when decoding.
	dec.sampler = NewSampler(s, IOTBaudRate*5.0)
	dec.nextIndex = 0
	dec.state = 0
	// Decode buffers are sized generously for the one-byte length field.
	dec.decodeBufferBit = make([]int, 8*260)
	dec.decodeBuffer = make([]byte, 260)
	dec.crcTable = crc8.MakeTable(Crc8Poly)
	dec.tx = tx
	dec.packetHandler = packetHandler
	return dec
}
// doDecodeBuffer packs decodeBufferBit (one int per bit, LSB first within
// each byte) into decodeBuffer (one byte per 8 bits).
func (d *IOTProtocolDecoder) doDecodeBuffer() {
	for i := 0; i < 260; i++ {
		cur := 0
		// Assemble the byte from its most significant bit down so the
		// bit at offset j lands at position j (LSB first on the wire).
		for j := 7; j >= 0; j-- {
			cur <<= 1
			if d.decodeBufferBit[i*8+j] != 0 {
				cur |= 1
			}
		}
		d.decodeBuffer[i] = byte(cur)
	}
}
// step consumes one bit of receiver output. The bit is first sampled at 5x
// the baud rate; the decoder then runs a small state machine over the
// sampled stream: state 0 scans for the 0x55 start byte, state 1 waits for
// the full packet (as announced by the length byte) and, if the CRC checks
// out, hands the payload to the packet handler.
func (d *IOTProtocolDecoder) step(in int) {
	// Clamp the input to a clean 0/1 before sampling.
	if in != 0 {
		in = 1
	}
	d.sampler.step(in)
	// Pattern to match, -1 for Don't Care, 0 match 0, 1 match 1.
	// Each bit spans 5 samples; the edge samples are Don't Care so small
	// timing jitter does not break the match.
	pattern := []int{
		// 8 Bits of 0x55, the header
		-1, 1, 1, 1, -1,
		-1, 0, 0, 0, -1,
		-1, 1, 1, 1, -1,
		-1, 0, 0, 0, -1,
		-1, 1, 1, 1, -1,
		-1, 0, 0, 0, -1,
		-1, 1, 1, 1, -1,
		-1, 0, 0, 0, -1,
	}
	if d.state == 0 {
		// Scan for the start byte.
		// We only scan it when more than 5 bytes worth of samples is
		// available (5 samples x 8 bits x 5 bytes).
		for d.sampler.nextSampleIndex > d.nextIndex+5*8*5 {
			found := true
			for i, current := range pattern {
				if current == -1 {
					continue
				}
				if current == 0 && d.sampler.sampledData[i+d.nextIndex] == 0 {
					continue
				}
				if current == 1 && d.sampler.sampledData[i+d.nextIndex] == 1 {
					continue
				}
				found = false
				break
			}
			if found {
				// Decode the first 3 bytes (header, length, first
				// payload byte).
				// NOTE(review): the bound is i <= 8*3, which copies
				// one bit more than the 24 needed — confirm this is
				// intentional; only decodeBuffer[1] is consumed here.
				for i := 0; i <= 8*3; i++ {
					// Fetch the middle of each bit
					d.decodeBufferBit[i] = d.sampler.sampledData[d.nextIndex+2+i*5]
				}
				d.doDecodeBuffer()
				// Second byte is the length
				d.packetLength = int(d.decodeBuffer[1])
				d.state = 1
				break
			}
			d.nextIndex++
		}
	} else if d.state == 1 {
		// We are waiting for the rest of the packet's samples.
		if d.sampler.nextSampleIndex > d.nextIndex+5*8*d.packetLength {
			// Our packet has arrived; re-decode the whole frame from
			// the center sample of each bit.
			for i := 0; i <= 8*d.packetLength; i++ {
				// Fetch the middle of each bit
				d.decodeBufferBit[i] = d.sampler.sampledData[d.nextIndex+2+i*5]
			}
			d.doDecodeBuffer()
			// Check if the packet is correct: the CRC covers every
			// byte before the final CRC byte.
			csum := crc8.Checksum(d.decodeBuffer[0:d.packetLength-1], d.crcTable)
			if csum == uint8(d.decodeBuffer[d.packetLength-1]) {
				// The packet is valid; hand over the payload (frame
				// minus header, length and CRC).
				d.packetHandler(d, d.decodeBuffer[2:d.packetLength-1])
			}
			// Skip past the consumed frame and resume scanning.
			d.nextIndex = d.nextIndex + 5*8*d.packetLength
			d.state = 0
		}
	}
}
// NECDecoder is an NEC IR Protocol decoder.
// We assume the default state is low, and will record all transitions;
// decoding is attempted whenever a full frame's worth of pulses has
// accumulated.
type NECDecoder struct {
	sim *Simulator
	// The simulator
	lastState int
	// Last value of the receiver
	pulseWidths []float64
	// Pulse width of the recorded pulse, even entries are low pulse, odd entries are high pulse
	lastTransition float64
	// The time of the last transition
	nextPulse int
	// Index of the slot that we are going to write into in pulseWidths when the next transition comes
	nextDecode int
	// The next index that we are going to start decoding from
}
// NewNECDecoder will create a new NEC IR Protocol decoder bound to s.
// nextDecode starts at 1 so decoding always begins on a high pulse (odd
// entries of pulseWidths hold the high pulses).
func NewNECDecoder(s *Simulator) *NECDecoder {
	dec := new(NECDecoder)
	dec.sim = s
	dec.lastState = 0
	dec.pulseWidths = make([]float64, int(s.simulationTime*IOTBaudRate*2)+32)
	dec.lastTransition = 0.0
	dec.nextPulse = 0
	dec.nextDecode = 1
	return dec
}
// step consumes one receiver output sample. On every level transition it
// records how long the previous level lasted; once a full NEC frame worth of
// pulses (67 entries) is available it attempts a decode and flags the
// simulator when a button press is recognized.
func (d *NECDecoder) step(v int) {
	// Pulse tracking first.
	if v != d.lastState {
		// State is different, record the pulse width.
		d.pulseWidths[d.nextPulse] = d.sim.currentTime - d.lastTransition
		d.lastTransition = d.sim.currentTime
		d.nextPulse++
	}
	d.lastState = v
	// There is a total of 34 high pulses and 33 low pulses in each NEC
	// Remote pulse train. The first one is always a high one, so we start
	// decoding on the odd entries, and only when 67 pulses are available.
	if d.nextPulse-d.nextDecode >= 67 {
		if ok, _ := tryDecodeNEC(d.pulseWidths[d.nextDecode : d.nextDecode+67]); ok {
			d.sim.remoteButtonReceived = true
		}
		// Advance by 2 to stay aligned on a high pulse.
		d.nextDecode += 2
	}
}
// tryDecodeNEC attempts to decode a single NEC IR frame from the given pulse
// widths (in seconds), alternating high/low pulses starting with the 9ms
// leader mark. It returns (true, [address, command]) on success, and
// (false, zeroed bytes) if any pulse is out of tolerance or the redundancy
// check fails.
func tryDecodeNEC(pw []float64) (bool, []byte) {
	decoded := make([]byte, 2)
	// within reports whether val lies inside the multiplicative tolerance
	// band around target.
	within := func(val float64, target float64, tolerance float64) bool {
		return val <= target*tolerance && val >= target/tolerance
	}
	// 560us mark; a space of the same width encodes a 0 bit, while a
	// 1.69ms space encodes a 1 bit.
	isMark := func(val float64) bool { return within(val, 560*1e-6, 1.25) }
	isSpace0 := isMark
	isSpace1 := func(val float64) bool { return within(val, 1.69*1e-3, 1.25) }
	// Leader: a 9ms mark followed by a 4.5ms space.
	if !within(pw[0], 9.0*1e-3, 1.10) {
		return false, decoded
	}
	if !within(pw[1], 4.5*1e-3, 1.15) {
		return false, decoded
	}
	// Decode the 32 data bits, LSB first within each byte.
	var raw [4]byte
	for i := 0; i < 32; i++ {
		if !isMark(pw[2+i*2]) {
			return false, decoded
		}
		space := pw[2+i*2+1]
		var bit byte
		switch {
		case isSpace0(space):
			bit = 0
		case isSpace1(space):
			bit = 1
		default:
			return false, decoded
		}
		raw[i/8] |= bit << uint(i%8)
	}
	// Bytes 1 and 3 must be the bitwise complements of bytes 0 and 2.
	if raw[0] != raw[1]^0xFF {
		return false, decoded
	}
	if raw[2] != raw[3]^0xFF {
		return false, decoded
	}
	decoded[0] = raw[0] // address
	decoded[1] = raw[2] // command
	return true, decoded
}
// Simulator is the root simulator module. It owns the simulation clock and
// all the simulated parties: the hub, the IOT device, and the TV, each with
// their transmitters/receivers/decoders.
type Simulator struct {
	sampleRate float64
	// Sample rate of the simulator
	// Usually runs at 150kHz
	inputData []int
	// A series of 0 and 1 to denote the content of the input bitstream
	// Input bitstream is clocked at 1.785kHz
	currentStep int
	// The current progress of the simulator. (What time is it in the simulation?)
	currentTime float64
	// Current time in the simulation, in seconds
	maxStep int
	// Number of steps that we are going to run the simulation
	simulationTime float64
	// Total time that we are going to run the simulation, in seconds
	rand *rand.Rand
	// The random generator that generate the random state of this simulator
	tvRx *IRReceiver
	// IR Receiver for the TV
	tvDec *NECDecoder
	// NEC Decoder for the TV
	hubTx *IRTransmitter
	// IR Transmitter on Hub's side
	hubBSTx *BitStreamTx
	// Bitstream transmitter on Hub's side
	hubRx *IRReceiver
	// IR Receiver on the Hub
	hubDec *IOTProtocolDecoder
	// Protocol decoder on Hub's side
	iotTx *IRTransmitter
	// IR Transmitter on IOT Device side
	iotBSTx *BitStreamTx
	// Bitstream Transmitter on IOT Device side
	iotRx *IRReceiver
	// IR Receiver on the IOT Device
	iotDec *IOTProtocolDecoder
	// Protocol decoder on IOT Device side
	resultReply []byte
	// The resulting reply that we've received on the hub side
	remoteButtonReceived bool
	// If we detected a button press on the TV's NEC decoder
	// Debugging variables
	irL []float64 // IR Illuminance, one entry per simulation step
}
// NewSimulator will create a new simulator module that runs for simTime
// seconds at the default sample rate, wiring together the hub, the IOT
// device and the TV (transmitters, receivers and protocol decoders).
func NewSimulator(simTime float64) *Simulator {
	x := Simulator{
		rand:                 rand.New(rand.NewSource(time.Now().Unix())),
		currentStep:          0,
		sampleRate:           DefaultSampleRate,
		simulationTime:       simTime,
		maxStep:              int(math.Floor(simTime * DefaultSampleRate)),
		resultReply:          []byte{},
		remoteButtonReceived: false,
	}
	x.irL = make([]float64, x.maxStep+32) // Yeah, I'm lazy on preventing off by one.
	// The hub records any reply packet it hears (command byte has the
	// reply bit set).
	hubHandlePacket := func(d *IOTProtocolDecoder, b []byte) {
		if b[0]&CommandReply != 0 {
			x.resultReply = b
		}
	}
	x.hubBSTx = NewBitStreamTx(&x)
	x.hubTx = NewIRTransmitter(&x)
	x.hubRx = NewIRReceiver(&x)
	x.hubDec = NewIOTProtocolDecoder(&x, x.hubBSTx, hubHandlePacket)
	x.iotTx = NewIRTransmitter(&x)
	x.iotBSTx = NewBitStreamTx(&x)
	x.iotRx = NewIRReceiver(&x)
	x.iotDec = NewIOTProtocolDecoder(&x, x.iotBSTx, iotHandlePacket)
	x.tvRx = NewIRReceiver(&x)
	x.tvDec = NewNECDecoder(&x)
	return &x
}
// iotHandlePacket handles a decoded command packet on the IOT Device side
// and queues the appropriate reply on the decoder's transmitter. packet[0]
// is the command byte; the rest is the parameters to the command. Unknown
// commands are ignored.
func iotHandlePacket(d *IOTProtocolDecoder, packet []byte) {
	switch packet[0] {
	case CommandPing:
		// Take whatever we receive and send it back, with a per-byte XOR
		// scramble as a sanity check for the other side.
		reply := make([]byte, len(packet))
		copy(reply[1:], packet[1:])
		reply[0] = CommandReply | CommandPing
		for i := 0; i < len(packet)-1; i++ {
			// Some bit manipulation for sanity check
			reply[i+1] = reply[i+1] ^ byte((0x12+i)&0xFF)
		}
		d.tx.appendPacket(reply)
	case CommandGetTemp:
		// 27.2 degree C is expressed as 272 = 0x01 0x10 (Big Endian)
		d.tx.appendPacket([]byte{packet[0] | CommandReply, 0x01, 0x10})
	case CommandGetHumidity:
		// 72.1% humidity is expressed as 721 = 0x02 0xD1
		d.tx.appendPacket([]byte{packet[0] | CommandReply, 0x02, 0xD1})
	case CommandGetCO2:
		// 422ppm => 422 = 0x01 0xA6
		d.tx.appendPacket([]byte{packet[0] | CommandReply, 0x01, 0xA6})
	case CommandGetSmokeDetector:
		// No smoke detected = 0
		d.tx.appendPacket([]byte{packet[0] | CommandReply, 0x00})
	case CommandSetTime:
		// Payload: <Year 2 Byte> <Month 1 Byte> <Day 1 Byte> <Hour 1 Byte> <Minute 1 Byte> <Second 1 Byte>
		// Reply 0x01 when every field is in range, 0x00 otherwise.
		ok := len(packet) == 8 &&
			int(packet[3]) >= 1 && int(packet[3]) <= 12 &&
			int(packet[4]) >= 1 && int(packet[4]) <= 31 &&
			int(packet[5]) <= 24 &&
			int(packet[6]) <= 60 &&
			int(packet[7]) <= 60
		reply := []byte{packet[0] | CommandReply, 0x00}
		if ok {
			reply[1] = 0x01
		}
		d.tx.appendPacket(reply)
	case CommandSystemVersion:
		// Ver 3.14
		d.tx.appendPacket([]byte{packet[0] | CommandReply, 0x03, 0x0E})
	}
}
// step advances the simulation by one sample: it computes the light level in
// the room (background noise plus whichever IR LED is currently on), feeds
// that level to all three receivers (IOT device, hub, TV), and runs each
// attached decoder.
func (s *Simulator) step() {
	// Get current time
	cT := float64(s.currentStep) / s.sampleRate
	s.currentTime = cT
	irL := 5.0e-9 + s.rand.Float64()*(1.0e-9) // Amount of light in the room, plus some background noise
	// Hub's IR Transmission
	// Get the logic output at the hub's IR transmitter GPIO for this step
	hubIRGpio := s.hubBSTx.step()
	// Modulate the carrier and at 3.3V
	hubIRGpioV := s.hubTx.step(float64(hubIRGpio) * 3.3)
	// IR LED have a frequency response of > 1MHz, so we'll not simulate its frequency response
	irL = math.Max(irL, hubIRGpioV/3.3) // Add the light from the emitter
	// Device's IR Transmission
	// Get the logic output at the IOT device's IR transmitter GPIO for this step
	IotIRGpio := s.iotBSTx.step()
	// Modulate the carrier and at 3.3V
	IotIRGpioV := s.iotTx.step(float64(IotIRGpio) * 3.3)
	// IR LED have a frequency response of > 1MHz, so we'll not simulate its frequency response
	irL = math.Max(irL, IotIRGpioV/3.3) // Add the light from the emitter
	s.irL[s.currentStep] = irL // Record the illuminance for debugging
	// Everyone sees the same light: run all three receivers and decoders.
	iotRxV := s.iotRx.step(irL)
	s.iotDec.step(iotRxV)
	hubRxV := s.hubRx.step(irL)
	s.hubDec.step(hubRxV)
	tvRxV := s.tvRx.step(irL)
	s.tvDec.step(tvRxV)
	// Advance to the next sample
	s.currentStep++
}
func (s *Simulator) runSim() {
for i := 0; i < s.maxStep; i++ {
s.step()
}
} | 2019/quals/hardware-remotecontrol/app/simulator.go | 0.752286 | 0.584064 | simulator.go | starcoder |
package indicators
import (
"github.com/jaybutera/gotrade"
)
// A Time Series Forecast Indicator (Tsf).
// Tsf extrapolates a rolling linear regression one period forward: for each
// bar it reports intercept + slope*timePeriod from the underlying
// regression.
type Tsf struct {
	*LinRegWithoutStorage
	selectData gotrade.DOHLCVDataSelectionFunc
	// public variables
	Data []float64
	// Data holds the computed forecast values, one per processed bar.
}
// NewTsf creates a Time Series Forecast Indicator (Tsf) for online usage.
// It returns an error if selectData is nil.
func NewTsf(timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Tsf, err error) {
	if selectData == nil {
		return nil, ErrDOHLCVDataSelectFuncIsNil
	}
	ind := Tsf{selectData: selectData}
	// The forecast is the regression line extrapolated one step past the
	// window: intercept + slope * timePeriod.
	onNewValue := func(dataItem float64, slope float64, intercept float64, streamBarIndex int) {
		forecast := intercept + slope*float64(timePeriod)
		ind.UpdateMinMax(forecast, forecast)
		ind.Data = append(ind.Data, forecast)
	}
	ind.LinRegWithoutStorage, err = NewLinRegWithoutStorage(timePeriod, onNewValue)
	return &ind, err
}
// NewDefaultTsf creates a Time Series Forecast Indicator (Tsf) for online
// usage with the default time period of 10, selecting the close price.
func NewDefaultTsf() (indicator *Tsf, err error) {
	const defaultTimePeriod = 10
	return NewTsf(defaultTimePeriod, gotrade.UseClosePrice)
}
// NewTsfWithSrcLen creates a Time Series Forecast Indicator (Tsf) for
// offline usage, pre-allocating result storage sized for a source of the
// given length.
func NewTsfWithSrcLen(sourceLength uint, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Tsf, err error) {
	ind, err := NewTsf(timePeriod, selectData)
	if err != nil {
		return ind, err
	}
	// only initialise the storage if there is enough source data to require it.
	// Guard against unsigned underflow: if sourceLength does not exceed the
	// lookback period, the original subtraction would wrap around and
	// request an enormous allocation.
	lookback := uint(ind.GetLookbackPeriod())
	if sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
	return ind, err
}
// NewDefaultTsfWithSrcLen creates a Time Series Forecast Indicator (Tsf) for
// offline usage with default parameters, pre-allocating result storage.
func NewDefaultTsfWithSrcLen(sourceLength uint) (indicator *Tsf, err error) {
	ind, err := NewDefaultTsf()
	if err != nil {
		return ind, err
	}
	// only initialise the storage if there is enough source data to require it.
	// Guard against unsigned underflow: if sourceLength does not exceed the
	// lookback period, the original subtraction would wrap around and
	// request an enormous allocation.
	lookback := uint(ind.GetLookbackPeriod())
	if sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
	return ind, err
}
// NewTsfForStream creates a Time Series Forecast Indicator (Tsf) for online
// usage and subscribes it to the supplied price stream.
func NewTsfForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Tsf, err error) {
	indicator, err = NewTsf(timePeriod, selectData)
	priceStream.AddTickSubscription(indicator)
	return indicator, err
}
// NewDefaultTsfForStream creates a Time Series Forecast Indicator (Tsf) with
// default parameters for online usage and subscribes it to the supplied
// price stream.
func NewDefaultTsfForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Tsf, err error) {
	indicator, err = NewDefaultTsf()
	priceStream.AddTickSubscription(indicator)
	return indicator, err
}
// NewTsfForStreamWithSrcLen creates a Time Series Forecast Indicator (Tsf)
// for offline usage, pre-sized for sourceLength bars, and subscribes it to
// the supplied price stream.
func NewTsfForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Tsf, err error) {
	indicator, err = NewTsfWithSrcLen(sourceLength, timePeriod, selectData)
	priceStream.AddTickSubscription(indicator)
	return indicator, err
}
// NewDefaultTsfForStreamWithSrcLen creates a Time Series Forecast Indicator
// (Tsf) for offline usage with default parameters, pre-sized for
// sourceLength bars, and subscribes it to the supplied price stream.
func NewDefaultTsfForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Tsf, err error) {
	indicator, err = NewDefaultTsfWithSrcLen(sourceLength)
	priceStream.AddTickSubscription(indicator)
	return indicator, err
}
// ReceiveDOHLCVTick consumes a source data DOHLCV price tick
func (ind *Tsf) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {
var selectedData = ind.selectData(tickData)
ind.ReceiveTick(selectedData, streamBarIndex)
} | indicators/tsf.go | 0.717309 | 0.438845 | tsf.go | starcoder |
package util
import (
"math/rand"
"reflect"
"time"
)
// InSlice reports whether val is present in slice (compared with
// reflect.DeepEqual) and returns its index. If slice is nil, not a slice, or
// does not contain val, it returns (false, -1).
func InSlice(val interface{}, slice interface{}) (exist bool, index int) {
	exist = false
	index = -1
	if slice == nil || reflect.TypeOf(slice).Kind() != reflect.Slice {
		return
	}
	s := reflect.ValueOf(slice)
	for i := 0; i < s.Len(); i++ {
		if reflect.DeepEqual(val, s.Index(i).Interface()) {
			exist = true
			index = i
			return
		}
	}
	return
}
// SliceDiff returns the symmetric difference of two slices: every element of
// slice1 absent from slice2, followed by every element of slice2 absent from
// slice1. Non-slice arguments yield a nil result.
func SliceDiff(slice1, slice2 interface{}) (r []interface{}) {
	if reflect.TypeOf(slice1).Kind() != reflect.Slice || reflect.TypeOf(slice2).Kind() != reflect.Slice {
		return
	}
	// appendMissing adds to r every element of `from` not found in `other`.
	appendMissing := func(from, other interface{}) {
		v := reflect.ValueOf(from)
		for i := 0; i < v.Len(); i++ {
			item := v.Index(i).Interface()
			if found, _ := InSlice(item, other); !found {
				r = append(r, item)
			}
		}
	}
	appendMissing(slice1, slice2)
	appendMissing(slice2, slice1)
	return
}
// SliceUnique returns the distinct elements of slice, converted into a
// typed slice determined by the element kind (e.g. []int32 for
// int/int8/int16/int32 input, []int64 for int64, []string for string, ...).
// A non-slice argument yields nil.
//
// NOTE(review): deduplication compares the original element (e.g. an int)
// against the already-converted accumulator element (e.g. an int32) with
// reflect.DeepEqual, which is type-sensitive — confirm that duplicates are
// actually removed for kinds that get converted here (int, uint, float32...).
func SliceUnique(slice interface{}) (r interface{}) {
	if reflect.TypeOf(slice).Kind() != reflect.Slice {
		return
	}
	s := reflect.ValueOf(slice)
	for i := 0; i < s.Len(); i++ {
		f := s.Index(i)
		if f.IsValid() == false {
			continue
		}
		// Skip elements already collected.
		if exist, _ := InSlice(f.Interface(), r); exist {
			continue
		}
		// Lazily create the result slice with a type chosen from the
		// first element's kind, then append the converted value.
		switch f.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
			if r == nil {
				r = []int32{}
			}
			r = append(r.([]int32), int32(f.Int()))
		case reflect.Int64:
			if r == nil {
				r = []int64{}
			}
			r = append(r.([]int64), f.Int())
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
			if r == nil {
				r = []uint32{}
			}
			r = append(r.([]uint32), uint32(f.Uint()))
		case reflect.Uint64:
			if r == nil {
				r = []uint64{}
			}
			r = append(r.([]uint64), f.Uint())
		case reflect.String:
			if r == nil {
				r = []string{}
			}
			r = append(r.([]string), f.String())
		case reflect.Bool:
			if r == nil {
				r = []bool{}
			}
			r = append(r.([]bool), f.Bool())
		case reflect.Float32:
			if r == nil {
				r = []float32{}
			}
			r = append(r.([]float32), float32(f.Float()))
		case reflect.Float64:
			if r == nil {
				r = []float64{}
			}
			r = append(r.([]float64), f.Float())
		}
	}
	return
}
// SliceColumn extracts the named field from every element of a slice,
// returning a typed slice determined by the field's kind (e.g. []string for
// string fields). Elements without that field are skipped; a non-slice
// argument yields nil.
//
// NOTE(review): s.Index(i).Elem() assumes each element is a pointer (or
// interface) wrapping a struct — confirm callers only pass pointer slices,
// since a slice of plain structs would not dereference this way.
func SliceColumn(slice interface{}, col string) (r interface{}) {
	if reflect.TypeOf(slice).Kind() != reflect.Slice {
		return
	}
	s := reflect.ValueOf(slice)
	for i := 0; i < s.Len(); i++ {
		f := s.Index(i).Elem().FieldByName(col)
		if f.IsValid() == false {
			continue
		}
		// Lazily create the result slice with a type chosen from the
		// field's kind, then append the converted value.
		switch f.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
			if r == nil {
				r = []int32{}
			}
			r = append(r.([]int32), int32(f.Int()))
		case reflect.Int64:
			if r == nil {
				r = []int64{}
			}
			r = append(r.([]int64), f.Int())
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
			if r == nil {
				r = []uint32{}
			}
			r = append(r.([]uint32), uint32(f.Uint()))
		case reflect.Uint64:
			if r == nil {
				r = []uint64{}
			}
			r = append(r.([]uint64), f.Uint())
		case reflect.String:
			if r == nil {
				r = []string{}
			}
			r = append(r.([]string), f.String())
		case reflect.Bool:
			if r == nil {
				r = []bool{}
			}
			r = append(r.([]bool), f.Bool())
		case reflect.Float32:
			if r == nil {
				r = []float32{}
			}
			r = append(r.([]float32), float32(f.Float()))
		case reflect.Float64:
			if r == nil {
				r = []float64{}
			}
			r = append(r.([]float64), f.Float())
		}
	}
	return
}
// SliceTrim returns the elements of slice that do not appear in cutset, as a
// []interface{}. A non-slice argument yields a nil result.
func SliceTrim(slice interface{}, cutset ...interface{}) (r []interface{}) {
	if reflect.TypeOf(slice).Kind() != reflect.Slice {
		return
	}
	v := reflect.ValueOf(slice)
	for i := 0; i < v.Len(); i++ {
		item := v.Index(i).Interface()
		if found, _ := InSlice(item, cutset); !found {
			r = append(r, item)
		}
	}
	return
}
// SliceSumInt returns the sum of all elements of slice (0 for an empty or
// nil slice).
func SliceSumInt(slice []int) (r int) {
	for i := range slice {
		r += slice[i]
	}
	return
}
// SliceShuffle shuffles slice in place with a Fisher-Yates pass and returns
// it. A locally constructed generator is used instead of reseeding the
// global source on every call: rand.Seed is deprecated, and repeated
// reseeding from the clock degrades randomness for rapid successive calls.
func SliceShuffle(slice []int32) []int32 {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	length := len(slice)
	for i := 0; i < length; i++ {
		// Pick a random element from the not-yet-shuffled tail and swap
		// it into position i.
		index := rng.Intn(length - i)
		slice[i], slice[index+i] = slice[index+i], slice[i]
	}
	return slice
}
package vmath
import (
"unsafe"
)
// MakeFromM3 sets the quaternion to the rotation encoded by the 3x3 rotation
// matrix tfrm. This is a branch-reduced form of the classic
// matrix-to-quaternion conversion: the sign flips below fold the
// "largest diagonal element" cases into a single radicand/scale computation,
// and the component shuffles afterwards put the results into the right
// slots.
func (result *Quaternion) MakeFromM3(tfrm *Matrix3) {
	// Pull the nine matrix entries into locals (column-major access).
	xx := tfrm[t3col0+x]
	yx := tfrm[t3col0+y]
	zx := tfrm[t3col0+z]
	xy := tfrm[t3col1+x]
	yy := tfrm[t3col1+y]
	zy := tfrm[t3col1+z]
	xz := tfrm[t3col2+x]
	yz := tfrm[t3col2+y]
	zz := tfrm[t3col2+z]
	trace := ((xx + yy) + zz)
	negTrace := (trace < 0.0)
	ZgtX := zz > xx
	ZgtY := zz > yy
	YgtX := yy > xx
	// When the trace is negative, decide which quaternion component is
	// numerically largest; each flag below covers two of the three cases.
	largestXorY := (!ZgtX || !ZgtY) && negTrace
	largestYorZ := (YgtX || ZgtX) && negTrace
	largestZorX := (ZgtY || !YgtX) && negTrace
	if largestXorY {
		zz = -zz
		xy = -xy
	}
	if largestYorZ {
		xx = -xx
		yz = -yz
	}
	if largestZorX {
		yy = -yy
		zx = -zx
	}
	// After the sign flips, the same square-root formula applies to
	// every case.
	radicand := (((xx + yy) + zz) + 1.0)
	scale := (0.5 * (1.0 / sqrt(radicand)))
	tmpx := ((zy - yz) * scale)
	tmpy := ((xz - zx) * scale)
	tmpz := ((yx - xy) * scale)
	tmpw := (radicand * scale)
	qx := tmpx
	qy := tmpy
	qz := tmpz
	qw := tmpw
	// Swap components back into their proper slots for whichever case
	// was selected above.
	if largestXorY {
		qx = tmpw
		qy = tmpz
		qz = tmpy
		qw = tmpx
	}
	if largestYorZ {
		tmpx = qx
		tmpz = qz
		qx = qy
		qy = tmpx
		qz = qw
		qw = tmpz
	}
	result[x] = qx
	result[y] = qy
	result[z] = qz
	result[w] = qw
}
// MakeFromV3Scalar builds the quaternion whose vector part is xyz and whose
// scalar part is W.
func (result *Quaternion) MakeFromV3Scalar(xyz *Vector3, W float32) {
	result[x], result[y], result[z] = xyz[x], xyz[y], xyz[z]
	result[w] = W
}
// MakeFromV4 copies the four components of vec into the quaternion.
func (result *Quaternion) MakeFromV4(vec *Vector4) {
	result[x], result[y] = vec[x], vec[y]
	result[z], result[w] = vec[z], vec[w]
}
// MakeFromScalar sets every component of the quaternion to scalar.
func (result *Quaternion) MakeFromScalar(scalar float32) {
	result[x], result[y], result[z], result[w] = scalar, scalar, scalar, scalar
}
// MakeIdentity sets the quaternion to the multiplicative identity
// (0, 0, 0, 1).
func (result *Quaternion) MakeIdentity() {
	result[x], result[y], result[z] = 0.0, 0.0, 0.0
	result[w] = 1.0
}
// Copy copies every component of other into v.
func (v *Quaternion) Copy(other *Quaternion) {
	v[x], v[y], v[z], v[w] = other[x], other[y], other[z], other[w]
}
// Lerp stores in result the linear interpolation between quat0 (t=0) and
// quat1 (t=1): quat0 + t*(quat1 - quat0). The result is not renormalized.
func (result *Quaternion) Lerp(t float32, quat0, quat1 *Quaternion) {
	var tmpQ_0, tmpQ_1 Quaternion
	tmpQ_0.Sub(quat1, quat0)
	tmpQ_1.ScalarMul(&tmpQ_0, t)
	result.Add(quat0, &tmpQ_1)
}
// LerpTo linearly interpolates the receiver toward quatTo in place; a copy
// of the receiver is taken first so aliasing is safe.
func (result *Quaternion) LerpTo(t float32, quatTo *Quaternion) {
	tmp := *result
	result.Lerp(t, &tmp, quatTo)
}
// Slerp stores in result the spherical linear interpolation between the
// unit quaternions unitQuat0 (t=0) and unitQuat1 (t=1), travelling along
// the shorter arc. When result aliases unitQuat0 the work is delegated to
// SlerpSelf, which copies the receiver first.
func (result *Quaternion) Slerp(t float32, unitQuat0, unitQuat1 *Quaternion) {
	if unsafe.Pointer(result) == unsafe.Pointer(unitQuat0) {
		result.SlerpSelf(t, unitQuat1)
		return
	}
	var start, tmpQ_0, tmpQ_1 Quaternion
	var scale0, scale1 float32
	cosAngle := unitQuat0.Dot(unitQuat1)
	// Negate one endpoint when the dot product is negative so that
	// interpolation follows the shorter of the two possible arcs.
	if cosAngle < 0.0 {
		cosAngle = -cosAngle
		start.Neg(unitQuat0)
	} else {
		copy(start[:], unitQuat0[:])
	}
	// Use true slerp weights for well-separated inputs; for nearly
	// parallel ones (cosAngle >= g_SLERP_TOL) fall back to plain lerp to
	// avoid dividing by a vanishing sin(angle).
	if cosAngle < g_SLERP_TOL {
		angle := acos(cosAngle)
		recipSinAngle := (1.0 / sin(angle))
		scale0 = (sin(((1.0 - t) * angle)) * recipSinAngle)
		scale1 = (sin((t * angle)) * recipSinAngle)
	} else {
		scale0 = (1.0 - t)
		scale1 = t
	}
	tmpQ_0.ScalarMul(&start, scale0)
	tmpQ_1.ScalarMul(unitQuat1, scale1)
	result.Add(&tmpQ_0, &tmpQ_1)
}
// SlerpSelf spherically interpolates the receiver toward unitQuatTo in
// place; a copy of the receiver is taken first so aliasing is safe.
func (result *Quaternion) SlerpSelf(t float32, unitQuatTo *Quaternion) {
	tmp := *result
	result.Slerp(t, &tmp, unitQuatTo)
}
// Squad performs spherical quadrangle interpolation: it slerps along the
// outer pair (unitQuat0, unitQuat3) and the inner pair (unitQuat1,
// unitQuat2), then blends the two results with parameter 2t(1-t).
func (result *Quaternion) Squad(t float32, unitQuat0, unitQuat1, unitQuat2, unitQuat3 *Quaternion) {
	var tmp0, tmp1 Quaternion
	tmp0.Slerp(t, unitQuat0, unitQuat3)
	tmp1.Slerp(t, unitQuat1, unitQuat2)
	result.Slerp((2.0*t)*(1.0-t), &tmp0, &tmp1)
}
// SetXYZ replaces the vector (x, y, z) part of q, leaving w untouched.
func (q *Quaternion) SetXYZ(vec *Vector3) {
	q[x], q[y], q[z] = vec[x], vec[y], vec[z]
}
// Add stores the component-wise sum quat0 + quat1 in result.
func (result *Quaternion) Add(quat0, quat1 *Quaternion) {
	for i := range result {
		result[i] = quat0[i] + quat1[i]
	}
}
// AddToSelf adds quat component-wise into the receiver.
func (result *Quaternion) AddToSelf(quat *Quaternion) {
	result.Add(result, quat)
}
// Sub stores the component-wise difference quat0 - quat1 in result.
func (result *Quaternion) Sub(quat0, quat1 *Quaternion) {
	for i := range result {
		result[i] = quat0[i] - quat1[i]
	}
}
// SubFromSelf subtracts quat component-wise from the receiver.
func (result *Quaternion) SubFromSelf(quat *Quaternion) {
	result.Sub(result, quat)
}
// ScalarMul stores quat scaled component-wise by scalar in result.
func (result *Quaternion) ScalarMul(quat *Quaternion, scalar float32) {
	for i := range result {
		result[i] = quat[i] * scalar
	}
}
// ScalarMulSelf scales the receiver component-wise by scalar in place.
func (result *Quaternion) ScalarMulSelf(scalar float32) {
	result.ScalarMul(result, scalar)
}
// ScalarDiv stores quat divided component-wise by scalar in result. The
// division is performed per component (no reciprocal is precomputed).
func (result *Quaternion) ScalarDiv(quat *Quaternion, scalar float32) {
	for i := range result {
		result[i] = quat[i] / scalar
	}
}
// ScalarDivSelf divides the receiver component-wise by scalar in place.
func (result *Quaternion) ScalarDivSelf(scalar float32) {
	result.ScalarDiv(result, scalar)
}
// Neg stores the component-wise negation of quat in result.
func (result *Quaternion) Neg(quat *Quaternion) {
	for i := range result {
		result[i] = -quat[i]
	}
}
// NegSelf negates every component of the receiver in place.
func (result *Quaternion) NegSelf() {
	result.Neg(result)
}
// Dot returns the 4-component dot product of q and quat.
func (q *Quaternion) Dot(quat *Quaternion) float32 {
	var sum float32
	sum += q[x] * quat[x]
	sum += q[y] * quat[y]
	sum += q[z] * quat[z]
	sum += q[w] * quat[w]
	return sum
}
// Norm returns the squared length of the quaternion (the sum of the squares
// of its components).
func (q *Quaternion) Norm() float32 {
	var sum float32
	sum += q[x] * q[x]
	sum += q[y] * q[y]
	sum += q[z] * q[z]
	sum += q[w] * q[w]
	return sum
}
// Length returns the Euclidean length (magnitude) of the quaternion.
func (q *Quaternion) Length() float32 {
	return sqrt(q.Norm())
}
// Normalize stores quat scaled to unit length in result. The behavior for a
// zero-length input is undefined (division by zero).
func (result *Quaternion) Normalize(quat *Quaternion) {
	lenSqr := quat.Norm()
	lenInv := 1.0 / sqrt(lenSqr)
	for i := range result {
		result[i] = quat[i] * lenInv
	}
}
// NormalizeSelf scales the receiver to unit length in place.
func (result *Quaternion) NormalizeSelf() {
	result.Normalize(result)
}
// MakeRotationArc sets result to the quaternion rotating unit vector
// unitVec0 onto unitVec1; cosHalfAngleX2 is 2*cos(angle/2) obtained via the
// half-angle identity.
// NOTE(review): for antipodal inputs (dot == -1) the radicand is zero and
// the reciprocal divides by zero — confirm callers never pass opposite
// vectors.
func (result *Quaternion) MakeRotationArc(unitVec0, unitVec1 *Vector3) {
	var tmpV3_0, tmpV3_1 Vector3
	cosHalfAngleX2 := sqrt((2.0 * (1.0 + unitVec0.Dot(unitVec1))))
	recipCosHalfAngleX2 := (1.0 / cosHalfAngleX2)
	tmpV3_0.Cross(unitVec0, unitVec1)
	tmpV3_1.ScalarMul(&tmpV3_0, recipCosHalfAngleX2)
	result.MakeFromV3Scalar(&tmpV3_1, (cosHalfAngleX2 * 0.5))
}
// MakeRotationAxis sets result to the quaternion for a rotation of radians
// about the given unit axis: (sin(a/2)*axis, cos(a/2)).
func (result *Quaternion) MakeRotationAxis(radians float32, unitVec *Vector3) {
	var tmpV3_0 Vector3
	angle := radians * 0.5
	s := sin(angle)
	c := cos(angle)
	tmpV3_0.ScalarMul(unitVec, s)
	result.MakeFromV3Scalar(&tmpV3_0, c)
}
// MakeRotationX sets result to the quaternion for a rotation of radians
// about the X axis: (sin(a/2), 0, 0, cos(a/2)).
func (result *Quaternion) MakeRotationX(radians float32) {
	halfAngle := radians * 0.5
	result[x] = sin(halfAngle)
	result[y] = 0.0
	result[z] = 0.0
	result[w] = cos(halfAngle)
}
// MakeRotationY sets result to the quaternion for a rotation of radians
// about the Y axis: (0, sin(a/2), 0, cos(a/2)).
func (result *Quaternion) MakeRotationY(radians float32) {
	halfAngle := radians * 0.5
	result[x] = 0.0
	result[y] = sin(halfAngle)
	result[z] = 0.0
	result[w] = cos(halfAngle)
}
// MakeRotationZ sets result to the quaternion for a rotation of radians
// about the Z axis: (0, 0, sin(a/2), cos(a/2)).
func (result *Quaternion) MakeRotationZ(radians float32) {
	halfAngle := radians * 0.5
	result[x] = 0.0
	result[y] = 0.0
	result[z] = sin(halfAngle)
	result[w] = cos(halfAngle)
}
// Mul stores the Hamilton product quat0 * quat1 in result. Aliasing of
// result with either operand is detected and routed through MulSelf, which
// copies first so no operand is read after being overwritten.
func (result *Quaternion) Mul(quat0, quat1 *Quaternion) {
	if unsafe.Pointer(result) == unsafe.Pointer(quat0) {
		result.MulSelf(quat1)
		return
	}
	if unsafe.Pointer(result) == unsafe.Pointer(quat1) {
		result.MulSelf(quat0)
		return
	}
	result[x] = (quat0[w] * quat1[x]) + (quat0[x] * quat1[w]) + (quat0[y] * quat1[z]) - (quat0[z] * quat1[y])
	result[y] = (quat0[w] * quat1[y]) + (quat0[y] * quat1[w]) + (quat0[z] * quat1[x]) - (quat0[x] * quat1[z])
	result[z] = (quat0[w] * quat1[z]) + (quat0[z] * quat1[w]) + (quat0[x] * quat1[y]) - (quat0[y] * quat1[x])
	result[w] = (quat0[w] * quat1[w]) - (quat0[x] * quat1[x]) - (quat0[y] * quat1[y]) - (quat0[z] * quat1[z])
}
// MulSelf multiplies the receiver by quat in place, copying the receiver
// first so the computation never reads a partially written value.
// NOTE(review): the copy makes this tmp * quat, i.e. right-multiplication —
// confirm that ordering is what callers expect.
func (result *Quaternion) MulSelf(quat *Quaternion) {
	tmp := *result
	result.Mul(&tmp, quat)
}
// Rotate sets result to vec rotated by quat, i.e. the vector part of
// q*v*conj(q) with the intermediate products expanded; tmpW carries the
// dot product of quat's vector part with vec. Safe when result aliases vec,
// since vec is only read while the tmp values are built.
func (result *Vector3) Rotate(quat *Quaternion, vec *Vector3) {
	tmpX := (quat[w] * vec[x]) + (quat[y] * vec[z]) - (quat[z] * vec[y])
	tmpY := (quat[w] * vec[y]) + (quat[z] * vec[x]) - (quat[x] * vec[z])
	tmpZ := (quat[w] * vec[z]) + (quat[x] * vec[y]) - (quat[y] * vec[x])
	tmpW := (quat[x] * vec[x]) + (quat[y] * vec[y]) + (quat[z] * vec[z])
	result[x] = (tmpW * quat[x]) + (tmpX * quat[w]) - (tmpY * quat[z]) + (tmpZ * quat[y])
	result[y] = (tmpW * quat[y]) + (tmpY * quat[w]) - (tmpZ * quat[x]) + (tmpX * quat[z])
	result[z] = (tmpW * quat[z]) + (tmpZ * quat[w]) - (tmpX * quat[y]) + (tmpY * quat[x])
}
// RotateSelf rotates the vector in place by quat.
func (result *Vector3) RotateSelf(quat *Quaternion) {
	result.Rotate(quat, result)
}
// Conj sets result to the conjugate of quat: the vector part is negated,
// the scalar part kept. Safe when result aliases quat (element-wise).
func (result *Quaternion) Conj(quat *Quaternion) {
	result[w] = quat[w]
	result[z] = -quat[z]
	result[y] = -quat[y]
	result[x] = -quat[x]
}
// ConjSelf conjugates the quaternion in place.
func (result *Quaternion) ConjSelf() {
	result.Conj(result)
}
func (result *Quaternion) Select(quat0, quat1 *Quaternion, select1 int) {
if select1 != 0 {
result[x] = quat1[x]
result[y] = quat1[y]
result[z] = quat1[z]
result[w] = quat1[w]
} else {
result[x] = quat0[x]
result[y] = quat0[y]
result[z] = quat0[z]
result[w] = quat0[w]
}
} | quaternion.go | 0.516108 | 0.402744 | quaternion.go | starcoder |
package shapes
import (
"fmt"
"reflect"
"github.com/pkg/errors"
)
// solver.go implements the constraint solvers
// there are two kinds of constraints to solve: variable constraints and SubjectTo constraints.
// exprConstraint records the requirement that expression a unify with
// (be made equal to) expression b.
type exprConstraint struct {
	a, b Expr
}
// apply substitutes ss into both sides of the constraint and returns a new
// constraint; the receiver is not mutated.
func (c exprConstraint) apply(ss substitutions) substitutable {
	return exprConstraint{
		a: c.a.apply(ss).(Expr),
		b: c.b.apply(ss).(Expr),
	}
}
// freevars returns the free variables of both sides, delegating to exprtup.
func (c exprConstraint) freevars() varset { return exprtup(c).freevars() }
// Format implements fmt.Formatter, printing the constraint as {a = b}.
func (c exprConstraint) Format(f fmt.State, r rune) { fmt.Fprintf(f, "{%v = %v}", c.a, c.b) }
type constraints []exprConstraint
// apply substitutes ss into every constraint. Unlike exprConstraint.apply,
// this rewrites the slice elements in place and returns the same backing slice.
func (cs constraints) apply(ss substitutions) substitutable {
	if len(ss) == 0 || len(cs) == 0 {
		return cs
	}
	for i := range cs {
		cs[i] = cs[i].apply(ss).(exprConstraint)
	}
	return cs
}
// freevars returns the deduplicated set of free variables across all constraints.
func (cs constraints) freevars() varset {
	var collected varset
	for _, c := range cs {
		collected = append(collected, c.freevars()...)
	}
	return unique(collected)
}
// solve unifies each constraint in turn, threading the accumulated
// substitutions: the head constraint is unified, its substitutions are
// composed with those collected so far, the tail is rewritten under the new
// substitutions, and solve recurses on the tail.
func solve(cs constraints, subs substitutions) (newSubs substitutions, err error) {
	switch len(cs) {
	case 0:
		return subs, nil
	default:
		var ss substitutions
		c := cs[0]
		if ss, err = unify(c.a.(substitutableExpr), c.b.(substitutableExpr)); err != nil {
			return nil, err
		}
		newSubs = compose(ss, subs)
		cs2 := cs[1:].apply(newSubs).(constraints)
		return solve(cs2, newSubs)
	}
}
// unify computes a substitution set making a and b equal, via standard
// syntactic unification:
//   - a variable unifies with anything through bind (with an occurs check);
//   - structurally equal expressions unify with no substitutions;
//   - otherwise both expressions must decompose into the same number of
//     sub-expressions, which are unified pairwise.
//
// The original function ended with an unreachable panic("TODO") after the
// switch (every branch returns); that dead code has been removed.
func unify(a, b substitutableExpr) (ss substitutions, err error) {
	switch at := a.(type) {
	case Var:
		return bind(at, b)
	default:
		if eq(a, b) {
			return nil, nil
		}
		if v, ok := b.(Var); ok {
			return bind(v, a)
		}
		aExprs := a.subExprs()
		bExprs := b.subExprs()
		if len(aExprs) == 0 && len(bExprs) == 0 {
			return nil, errors.Errorf("Unification Fail. %v ~ %v cannot proceed", a, b)
		}
		if len(aExprs) != len(bExprs) {
			return nil, errors.Errorf("Unification Fail. %v ~ %v cannot proceed as they do not contain the same amount of sub-expressions. %v has %d subexpressions while %v has %d subexpressions", a, b, a, len(aExprs), b, len(bExprs))
		}
		return unifyMany(aExprs, bExprs)
	}
}
// unifyMany unifies corresponding pairs from as and bs (assumed equal
// length), applying the substitutions accumulated so far to each pair
// before unifying it, and composing the per-pair results.
func unifyMany(as, bs []substitutableExpr) (ss substitutions, err error) {
	for i, a := range as {
		b := bs[i]
		if len(ss) > 0 {
			a = a.apply(ss).(substitutableExpr)
			b = b.apply(ss).(substitutableExpr)
		}
		var s2 substitutions
		if s2, err = unify(a, b); err != nil {
			return nil, err
		}
		if ss == nil {
			ss = s2
		} else {
			ss = compose(ss, s2)
		}
	}
	return
}
// eq reports deep structural equality.
// tmp solution: reflect.DeepEqual stands in for a proper type-aware
// equality check.
func eq(a, b interface{}) bool {
	return reflect.DeepEqual(a, b)
}
// bind produces the single substitution {E/v}. It refuses to bind v to an
// expression that contains v (the occurs check), which would create a
// recursive, non-terminating substitution.
func bind(v Var, E substitutable) (substitutions, error) {
	if occurs(v, E) {
		return nil, errors.Errorf("Recursive unification")
	}
	return substitutions{{Sub: E.(Expr), For: v}}, nil
}
func occurs(v Var, in substitutable) bool {
vs := in.freevars()
return vs.Contains(v)
} | solver.go | 0.61555 | 0.481332 | solver.go | starcoder |
package kv
import (
"bytes"
"context"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// lookupValuesForIndices takes in a list of indices and looks up
// their corresponding values in the DB, returning a list of
// roots which can then be used for batch lookups of their corresponding
// objects from the DB. For example, if we are fetching
// attestations and we have an index `[]byte("5")` under the shard indices bucket,
// we might find roots `0x23` and `0x45` stored under that index. We can then
// do a batch read for attestations corresponding to those roots.
// Roots are stored concatenated under each index and are assumed to be
// exactly 32 bytes each.
func lookupValuesForIndices(ctx context.Context, indicesByBucket map[string][]byte, tx *bolt.Tx) [][][]byte {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.lookupValuesForIndices")
	defer span.End()
	values := make([][][]byte, 0, len(indicesByBucket))
	for k, v := range indicesByBucket {
		bkt := tx.Bucket([]byte(k))
		roots := bkt.Get(v)
		// Split the concatenated byte blob into 32-byte root slices.
		splitRoots := make([][]byte, 0, len(roots)/32)
		for i := 0; i < len(roots); i += 32 {
			splitRoots = append(splitRoots, roots[i:i+32])
		}
		values = append(values, splitRoots)
	}
	return values
}
// updateValueForIndices updates the value for each index by appending it to the previous
// values stored at said index. Typically, indices are roots of data that can then
// be used for reads or batch reads from the DB. A root already present under
// an index is not appended again; that bucket is skipped while the remaining
// buckets still get updated.
func updateValueForIndices(ctx context.Context, indicesByBucket map[string][]byte, root []byte, tx *bolt.Tx) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.updateValueForIndices")
	defer span.End()
	for k, idx := range indicesByBucket {
		bkt := tx.Bucket([]byte(k))
		valuesAtIndex := bkt.Get(idx)
		if valuesAtIndex == nil {
			if err := bkt.Put(idx, root); err != nil {
				return err
			}
			continue
		}
		// Do not save duplicates in the indices bucket. The original code
		// returned nil as soon as a duplicate was found, which silently
		// skipped updating every remaining bucket in the map; we only skip
		// the bucket that already contains the root.
		exists := false
		for i := 0; i < len(valuesAtIndex); i += 32 {
			if bytes.Equal(valuesAtIndex[i:i+32], root) {
				exists = true
				break
			}
		}
		if exists {
			continue
		}
		if err := bkt.Put(idx, append(valuesAtIndex, root...)); err != nil {
			return err
		}
	}
	return nil
}
// deleteValueForIndices clears a root stored at each index.
func deleteValueForIndices(ctx context.Context, indicesByBucket map[string][]byte, root []byte, tx *bolt.Tx) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteValueForIndices")
defer span.End()
for k, idx := range indicesByBucket {
bkt := tx.Bucket([]byte(k))
valuesAtIndex := bkt.Get(idx)
if valuesAtIndex != nil {
start := bytes.Index(valuesAtIndex, root)
// If the root was not found inside the values at index slice, we continue.
// Root must be correctly aligned to avoid matching to subsequences of adjacent values.
if start == -1 || start%len(root) != 0 {
continue
}
// We clear out the root from the values at index slice. For example,
// If we had [0x32, 0x33, 0x45] and we wanted to clear out 0x33, the code below
// updates the slice to [0x32, 0x45].
valuesStart := make([]byte, len(valuesAtIndex[:start]))
copy(valuesStart, valuesAtIndex[:start])
valuesEnd := make([]byte, len(valuesAtIndex[start+len(root):]))
copy(valuesEnd, valuesAtIndex[start+len(root):])
valuesAtIndex = append(valuesStart, valuesEnd...)
// If this removes the last value, delete the whole key/value entry.
if len(valuesAtIndex) == 0 {
if err := bkt.Delete(idx); err != nil {
return err
}
continue
}
if err := bkt.Put(idx, valuesAtIndex); err != nil {
return err
}
}
}
return nil
} | .docker/Prysm/prysm-spike/beacon-chain/db/kv/utils.go | 0.633977 | 0.538255 | utils.go | starcoder |
package lox
// expression parses the lowest-precedence grammar rule; assignment sits at
// the top of the expression grammar.
func (p *Parser) expression() Expr {
	return p.assignment()
}
// assignment parses:
//
//	assignment → IDENTIFIER "=" assignment | equality
//
// The left-hand side is parsed first as a general expression, then checked
// to be a valid assignment target (a variable reference).
func (p *Parser) assignment() Expr {
	expr := p.equality()
	if p.match(TokenTypeEqual) {
		equals := p.previous()
		value := p.assignment()
		// The assignment target is the already-parsed left-hand side `expr`,
		// not the freshly parsed `value`. The original type-asserted `value`,
		// which rejected valid targets like `a = 1` and, for `a = b`, built
		// an assignment to b's name instead of a's.
		if target, ok := expr.(*ExprVar); ok {
			return &ExprAssign{target.Name, value}
		}
		panic(&ParseError{equals, "Invalid assignment target"})
	}
	return expr
}
// equality → comparison ( ( "!=" | "==" ) comparison )* ; left-associative.
func (p *Parser) equality() Expr {
	left := p.comparison()
	for p.match(TokenTypeBangEqual, TokenTypeEqualEqual) {
		operator := p.previous()
		left = &ExprBinary{left, operator, p.comparison()}
	}
	return left
}
// comparison → term ( ( ">" | ">=" | "<" | "<=" ) term )* ; left-associative.
func (p *Parser) comparison() Expr {
	left := p.term()
	for p.match(TokenTypeGreater, TokenTypeGreaterEqual, TokenTypeLess, TokenTypeLessEqual) {
		operator := p.previous()
		left = &ExprBinary{left, operator, p.term()}
	}
	return left
}
// term → factor ( ( "-" | "+" ) factor )* ; left-associative.
func (p *Parser) term() Expr {
	left := p.factor()
	for p.match(TokenTypeMinus, TokenTypePlus) {
		operator := p.previous()
		left = &ExprBinary{left, operator, p.factor()}
	}
	return left
}
// factor → unary ( ( "/" | "*" ) unary )* ; left-associative.
func (p *Parser) factor() Expr {
	left := p.unary()
	for p.match(TokenTypeSlash, TokenTypeStar) {
		operator := p.previous()
		left = &ExprBinary{left, operator, p.unary()}
	}
	return left
}
// unary → ( "!" | "-" ) unary | primary
func (p *Parser) unary() Expr {
	if !p.match(TokenTypeBang, TokenTypeMinus) {
		return p.primary()
	}
	operator := p.previous()
	return &ExprUnary{operator, p.unary()}
}
// primary parses literals, variable references, and parenthesized
// expressions — the highest-precedence rule. It panics with a ParseError
// (recovered further up the call stack) when no valid token begins an
// expression.
func (p *Parser) primary() Expr {
	switch {
	case p.match(TokenTypeFalse):
		return &ExprLiteral{false}
	case p.match(TokenTypeTrue):
		return &ExprLiteral{true}
	case p.match(TokenTypeNil):
		return &ExprLiteral{nil}
	case p.match(TokenTypeNumber, TokenTypeString):
		return &ExprLiteral{p.previous().Literal}
	case p.match(TokenTypeIdentifier):
		return &ExprVar{p.previous()}
	case p.match(TokenTypeLeftParen):
		expr := p.expression()
		p.consume(TokenTypeRightParen, "Expect ')' after expression.")
		return &ExprGrouping{expr}
	default:
		panic(&ParseError{p.peek(), "Expect expression"})
	}
}
package labels
import (
"fmt"
"github.com/janelia-flyem/dvid/datatype/imageblk"
"github.com/janelia-flyem/dvid/dvid"
)
// MergeOp represents the merging of a set of labels into a target label.
type MergeOp struct {
	Target uint64 // label that survives the merge
	Merged Set // labels folded into Target
}
// MergeTuple represents a merge of labels. Its first element is the destination label
// and all later elements in the slice are labels to be merged. It's an easy JSON
// representation as a list of labels.
type MergeTuple []uint64
// Op converts a MergeTuple into a MergeOp.
// It errors when the tuple has fewer than two elements (a target plus at
// least one label to merge) or when any merged label is the background (0).
func (t MergeTuple) Op() (MergeOp, error) {
	var op MergeOp
	// len(t) <= 1 also rejects a non-nil empty tuple, which the original
	// `t == nil || len(t) == 1` check let through, panicking on t[0].
	if len(t) <= 1 {
		return op, fmt.Errorf("invalid merge tuple %v, need at least target and to-merge labels", t)
	}
	op.Target = t[0]
	op.Merged = make(Set, len(t)-1)
	for _, label := range t[1:] {
		if label == 0 {
			return op, fmt.Errorf("invalid merge tuple %v -- cannot contain background label 0", t)
		}
		op.Merged[label] = struct{}{}
	}
	return op, nil
}
// DeleteBlock encapsulates data necessary to delete blocks of labels.
type DeleteBlock imageblk.Block
// The Delta*Size types below are payloads for ChangeSizeEvent notifications.
// DeltaNewSize is a new label being introduced.
type DeltaNewSize struct {
	Label uint64
	Size uint64
}
// DeltaDeleteSize gives info to delete a label's size.
type DeltaDeleteSize struct {
	Label uint64
	OldSize uint64
	OldKnown bool // true if OldSize is valid, otherwise delete all size k/v for this label.
}
// DeltaModSize gives info to modify an existing label size without knowing the old size.
type DeltaModSize struct {
	Label uint64
	SizeChange int64 // Adds to old label size
}
// DeltaReplaceSize gives info to precisely remove an old label size and add the updated size.
type DeltaReplaceSize struct {
	Label uint64
	OldSize uint64
	NewSize uint64
}
// DeltaMerge describes the labels and blocks affected by a merge operation. It is sent
// during a MergeBlockEvent.
type DeltaMerge struct {
	MergeOp
	Blocks map[dvid.IZYXString]struct{} // set of blocks touched by the merge
}
// DeltaMergeStart is the data sent during a MergeStartEvent.
type DeltaMergeStart struct {
	MergeOp
}
// DeltaMergeEnd is the data sent during a MergeEndEvent.
type DeltaMergeEnd struct {
	MergeOp
}
// DeltaSplit describes the voxels modified during a split operation.
// The Split field may be null if this is a coarse split only defined by block indices.
type DeltaSplit struct {
	OldLabel uint64
	NewLabel uint64
	Split dvid.BlockRLEs // nil for coarse, block-level splits
	SortedBlocks dvid.IZYXSlice
}
// DeltaSplitStart is the data sent during a SplitStartEvent.
type DeltaSplitStart struct {
	OldLabel uint64
	NewLabel uint64
}
// DeltaSplitEnd is the data sent during a SplitEndEvent.
type DeltaSplitEnd struct {
	OldLabel uint64
	NewLabel uint64
}
// DeltaSparsevol describes a change to an existing label.
type DeltaSparsevol struct {
	Label uint64
	Mods dvid.BlockRLEs // run-length-encoded voxel modifications, keyed by block
}
// Label change event identifiers, used when notifying subscribers of
// mutations to labeled data. Block events are shared with imageblk.
const (
	IngestBlockEvent = imageblk.IngestBlockEvent
	MutateBlockEvent = imageblk.MutateBlockEvent
	DeleteBlockEvent = imageblk.DeleteBlockEvent
	SparsevolStartEvent = "SPARSEVOL_START"
	SparsevolModEvent = "SPARSEVOL_MOD"
	SparsevolEndEvent = "SPARSEVOL_END"
	ChangeSizeEvent = "LABEL_SIZE_CHANGE"
	MergeStartEvent = "MERGE_START"
	MergeBlockEvent = "MERGE_BLOCK"
	MergeEndEvent = "MERGE_END"
	SplitStartEvent = "SPLIT_START"
	SplitLabelEvent = "SPLIT_LABEL"
	SplitEndEvent = "SPLIT_END"
)
package rx
import (
"sync"
"sync/atomic"
)
//jig:template Merge<Foo>
//jig:needs Observable<Foo> Merge
// MergeFoo combines multiple Observables into one by merging their emissions.
// An error from any of the observables will terminate the merged observables.
func MergeFoo(observables ...ObservableFoo) ObservableFoo {
	if len(observables) == 0 {
		return EmptyFoo()
	}
	head, tail := observables[0], observables[1:]
	return head.MergeWith(tail...)
}
//jig:template Observable<Foo> MergeWith
// MergeWith combines multiple Observables into one by merging their emissions.
// An error from any of the observables will terminate the merged observables.
// The shared observer is serialized by a mutex; the merged stream completes
// only after all sources complete, and the first error marks the stream done
// so later emissions from other sources are dropped.
func (o ObservableFoo) MergeWith(other ...ObservableFoo) ObservableFoo {
	if len(other) == 0 {
		return o
	}
	observable := func(observe FooObserver, subscribeOn Scheduler, subscriber Subscriber) {
		var observers struct {
			sync.Mutex
			done bool
			len int
		}
		observer := func(next foo, err error, done bool) {
			observers.Lock()
			defer observers.Unlock()
			if !observers.done {
				switch {
				case !done:
					observe(next, nil, false)
				case err != nil:
					observers.done = true
					var zero foo
					observe(zero, err, true)
				default:
					// A source completed; complete downstream once all have.
					if observers.len--; observers.len == 0 {
						var zero foo
						observe(zero, nil, true)
					}
				}
			}
		}
		observers.len = 1 + len(other)
		o.AutoUnsubscribe()(observer, subscribeOn, subscriber)
		for _, o := range other {
			if subscriber.Subscribed() {
				o.AutoUnsubscribe()(observer, subscribeOn, subscriber)
			}
		}
	}
	return observable
}
//jig:template Observable<Foo> MergeMap<Bar>
// MergeMapBar transforms the items emitted by an ObservableFoo by applying a
// function to each item an returning an ObservableBar. The stream of ObservableBar
// items is then merged into a single stream of Bar items using the MergeAll operator.
func (o ObservableFoo) MergeMapBar(project func(foo) ObservableBar) ObservableBar {
	higherOrder := o.MapObservableBar(project)
	return higherOrder.MergeAll()
}
//jig:template Observable<Foo> MergeMapAs<Bar>
// MergeMapAsBar transforms the items emitted by an ObservableFoo by applying a
// function to each item an returning an ObservableBar. The stream of ObservableBar
// items is then merged into a single stream of Bar items using the MergeAll operator.
func (o ObservableFoo) MergeMapAsBar(project func(foo) ObservableBar) ObservableBar {
	mapped := o.MapObservableBar(project)
	return mapped.MergeAll()
}
//jig:template Observable<Foo> MergeMapTo<Bar>
// MergeMapToBar maps every entry emitted by the ObservableFoo into a single
// ObservableBar. The stream of ObservableBar items is then merged into a
// single stream of Bar items using the MergeAll operator.
func (o ObservableFoo) MergeMapToBar(inner ObservableBar) ObservableBar {
	ignoreAndReturnInner := func(foo) ObservableBar { return inner }
	return o.MapObservableBar(ignoreAndReturnInner).MergeAll()
}
//jig:template ObservableObservable<Foo> MergeAll
// MergeAll flattens a higher order observable by merging the observables it emits.
// The count of live sources is kept in an atomic; the merged stream
// completes only when the outer observable and every inner observable have
// completed, and the first error terminates the whole stream.
func (o ObservableObservableFoo) MergeAll() ObservableFoo {
	observable := func(observe FooObserver, subscribeOn Scheduler, subscriber Subscriber) {
		var observers struct {
			sync.Mutex
			done bool
			len int32
		}
		observer := func(next foo, err error, done bool) {
			observers.Lock()
			defer observers.Unlock()
			if !observers.done {
				switch {
				case !done:
					observe(next, nil, false)
				case err != nil:
					observers.done = true
					var zero foo
					observe(zero, err, true)
				default:
					// One source finished; complete when the count hits zero.
					if atomic.AddInt32(&observers.len, -1) == 0 {
						var zero foo
						observe(zero, nil, true)
					}
				}
			}
		}
		merger := func(next ObservableFoo, err error, done bool) {
			if !done {
				// A new inner observable arrived: count it and subscribe.
				atomic.AddInt32(&observers.len, 1)
				next.AutoUnsubscribe()(observer, subscribeOn, subscriber)
			} else {
				var zero foo
				observer(zero, err, true)
			}
		}
		// The outer observable itself counts as one live source.
		observers.len += 1
		o.AutoUnsubscribe()(merger, subscribeOn, subscriber)
	}
	return observable
}
//jig:template MergeDelayError<Foo>
//jig:needs Observable<Foo> MergeDelayError
// MergeDelayErrorFoo combines multiple Observables into one by merging their emissions.
// Any error will be deferred until all observables terminate.
func MergeDelayErrorFoo(observables ...ObservableFoo) ObservableFoo {
	if len(observables) == 0 {
		return EmptyFoo()
	}
	head, tail := observables[0], observables[1:]
	return head.MergeDelayErrorWith(tail...)
}
//jig:template Observable<Foo> MergeDelayErrorWith
// MergeDelayError combines multiple Observables into one by merging their emissions.
// Any error will be deferred until all observables terminate.
// NOTE(review): when several sources fail, only the last error received is
// reported (earlier errors are overwritten) — confirm this is intended.
func (o ObservableFoo) MergeDelayErrorWith(other ...ObservableFoo) ObservableFoo {
	if len(other) == 0 {
		return o
	}
	observable := func(observe FooObserver, subscribeOn Scheduler, subscriber Subscriber) {
		var observers struct {
			sync.Mutex
			len int
			err error
		}
		observer := func(next foo, err error, done bool) {
			observers.Lock()
			defer observers.Unlock()
			if !done {
				observe(next, nil, false)
			} else {
				// Remember the error but keep merging until every source
				// has terminated; only then emit the deferred error.
				if err != nil {
					observers.err = err
				}
				if observers.len--; observers.len == 0 {
					var zero foo
					observe(zero, observers.err, true)
				}
			}
		}
		observers.len = 1 + len(other)
		o.AutoUnsubscribe()(observer, subscribeOn, subscriber)
		for _, o := range other {
			if subscriber.Subscribed() {
				o.AutoUnsubscribe()(observer, subscribeOn, subscriber)
			}
		}
	}
	return observable
}
package mission
import (
"encoding/json"
"fmt"
"math/big"
"time"
"git.sr.ht/~kisom/proxima/physics"
"git.sr.ht/~kisom/proxima/rat"
)
const (
	// proximaLY is the distance to Proxima Centauri in light-years.
	proximaLY = 4.247
)
// Reference distances from the Sun, in astronomical units, used to pick
// display granularity during the flight.
var (
	Heliopause = physics.AstronomicalUnit(120.0)
	MarsDistance = physics.AstronomicalUnit(0.52)
	JupiterDistance = physics.AstronomicalUnit(4.20)
	SaturnDistance = physics.AstronomicalUnit(8.58)
	UranusDistance = physics.AstronomicalUnit(18.20)
	NeptuneDistance = physics.AstronomicalUnit(29.05)
	PlutoDistance = physics.AstronomicalUnit(38.48)
	TerminationShock = physics.AstronomicalUnit(90.0)
	fiftyAU = physics.AstronomicalUnit(50.0)
)
// conn is the package-level mission singleton, set by Initialize.
var conn *Mission
// Action identifies the current phase of the mission.
type Action uint8
// String returns the human-readable name of the phase.
func (a Action) String() string {
	switch a {
	case ActionAccelerate:
		return "accelerating"
	case ActionCoast:
		return "coasting"
	case ActionDecelerate:
		return "decelerating"
	case ActionExplore:
		return "exploring"
	default:
		return "mission error"
	}
}
// Mission phases, in chronological order; the zero value is invalid.
const (
	ActionAccelerate Action = iota + 1
	ActionCoast
	ActionDecelerate
	ActionExplore
)
// clockFormat is the layout used for all clock displays.
const clockFormat = "2006-01-02 15:04 MST"
var (
	// ProximaDistance is the distance to proxima in meters.
	ProximaDistance = physics.LightyearsToMeters(proximaLY)
	// VelocityEscape is the launch velocity in m/s (Earth escape velocity).
	VelocityEscape = rat.UInt64(11180)
	// VelocityCruise is the cruise velocity, 99.9% of c.
	VelocityCruise = physics.PercentCToVelocity(0.999)
	// VelocityExplore is the target exploration velocity in m/s.
	VelocityExplore = rat.Float(18975.1)
	// DecelerationTargetDistance is the distance at which deceleration must
	// begin; computed lazily by decelerationTargetDistance.
	DecelerationTargetDistance *big.Rat
)
// decelerationTargetDistance lazily computes DecelerationTargetDistance;
// calls after the first return immediately.
func decelerationTargetDistance() {
	if DecelerationTargetDistance != nil {
		return
	}
	// How long does it take to transition from cruising velocity to
	// exploration velocity?
	t := rat.Div(rat.Sub(VelocityExplore, VelocityCruise), physics.NegGee)
	// How much distance is covered during the deceleration, assuming we want
	// to end up about 5 AU from Proxima Centauri?
	dx := physics.AccelerationDistance(physics.AstronomicalUnit(5), VelocityCruise, physics.NegGee, t)
	// Compute the point at which we need to start decelerating.
	DecelerationTargetDistance = rat.Sub(ProximaDistance, dx)
}
// Mission tracks the ship's physical state, the paired mission clocks, and
// the current phase of the flight plan.
type Mission struct {
	state physics.State // position (m) and velocity (m/s)
	clock *physics.Clock // ship (observer) and Earth (relative) clocks
	action Action // current flight phase
}
// Stage returns the current mission phase.
func (m *Mission) Stage() Action {
	return m.action
}
// InFlight reports whether the ship is still traveling (not yet exploring).
func (m *Mission) InFlight() bool {
	return m.action != ActionExplore
}
// distanceFromProximaCentauri returns the remaining distance in meters.
func (m *Mission) distanceFromProximaCentauri() *big.Rat {
	return rat.Sub(ProximaDistance, m.state.X)
}
// String renders a multi-line status report (phase, both clocks, velocity,
// distance traveled and remaining) for terminal display.
func (m *Mission) String() string {
	remaining := m.distanceFromProximaCentauri()
	return fmt.Sprintf(`Phase: %s
Ship time: %s
Earth time: %s
Velocity: %s km/s (%0.3fc)
Traveled: %s
Remaining: %s
`,
		m.action,
		m.clock.Observer.Now().Format(clockFormat),
		m.clock.Relative.Now().Format(clockFormat),
		rat.Div(m.state.V, rat.K).FloatString(1),
		physics.VelocityToPercentC(m.state.V),
		physics.DistanceString(m.state.X),
		physics.DistanceString(remaining),
	)
}
// Lines renders the mission status as six display lines: phase, both
// clocks, velocity, distance traveled, and distance remaining.
func (m *Mission) Lines() []string {
	remaining := m.distanceFromProximaCentauri()
	return []string{
		"Phase: " + m.action.String(),
		fmt.Sprintf(" Ship time: %s", m.clock.Observer.Now().Format(clockFormat)),
		fmt.Sprintf("Earth time: %s", m.clock.Relative.Now().Format(clockFormat)),
		fmt.Sprintf(" Velocity: %s km/s (%0.3fc)",
			rat.Div(m.state.V, rat.K).FloatString(1),
			physics.VelocityToPercentC(m.state.V)),
		fmt.Sprintf(" Traveled: %s", physics.DistanceString(m.state.X)),
		fmt.Sprintf("Remaining: %s", physics.DistanceString(remaining)),
	}
}
// Initialize creates the mission at launch: at Earth (x=0), moving at
// escape velocity, in the acceleration phase. It also stores the mission
// in the package-level conn singleton and precomputes the deceleration point.
func Initialize() *Mission {
	decelerationTargetDistance()
	conn = &Mission{
		state: physics.State{
			X: rat.Zero,
			V: VelocityEscape,
		},
		clock: physics.Now(),
		action: ActionAccelerate,
	}
	return conn
}
// Plan advances the simulation by d, integrating the state according to the
// current phase and transitioning when a phase's exit condition is met:
// accelerate until cruise velocity, coast until the deceleration point,
// decelerate until exploration velocity. Both clocks are then updated with
// the new velocity (relativistic dilation handled by the clock).
func (m *Mission) Plan(d time.Duration) {
	seconds := d.Seconds()
	switch m.action {
	case ActionAccelerate:
		m.state = m.state.Accelerate(physics.Gee, seconds)
		if m.state.V.Cmp(VelocityCruise) >= 0 {
			m.action = ActionCoast
		}
	case ActionCoast:
		m.state = m.state.Coast(seconds)
		if m.state.X.Cmp(DecelerationTargetDistance) >= 0 {
			m.action = ActionDecelerate
		}
	case ActionDecelerate:
		m.state = m.state.Accelerate(physics.NegGee, seconds)
		if m.state.V.Cmp(VelocityExplore) <= 0 {
			m.action = ActionExplore
		}
	case ActionExplore:
		// nothing to do
	}
	m.clock.Update(seconds, m.state.V)
}
// DrawInterval returns how much simulated time each display frame should
// cover: fine steps near the interesting parts of the trip (inner solar
// system at launch, the approach to Proxima during deceleration), coarse
// one-hour steps everywhere else.
func (m *Mission) DrawInterval() time.Duration {
	switch m.action {
	case ActionAccelerate:
		if m.state.X.Cmp(JupiterDistance) < 0 {
			return time.Second
		}
		if m.state.X.Cmp(Heliopause) < 0 {
			return time.Minute
		}
	case ActionDecelerate:
		remaining := rat.Sub(ProximaDistance, m.state.X)
		if remaining.Cmp(fiftyAU) < 0 {
			return time.Minute
		}
		if remaining.Cmp(JupiterDistance) < 0 {
			return time.Second
		}
	}
	return time.Hour
}
// SyncClock syncs the clock to the system time.
func (m *Mission) SyncClock() {
	m.clock.Sync()
}
// MarshalJSON implements json.Marshaler, reporting the mission phase, raw
// state (meters and m/s as integer strings), and both clocks with their
// elapsed seconds since launch.
func (m *Mission) MarshalJSON() ([]byte, error) {
	v := map[string]interface{}{}
	v["action"] = m.action.String()
	v["state"] = map[string]string{
		"x": m.state.X.FloatString(0),
		"v": m.state.V.FloatString(0),
	}
	v["clock"] = map[string]interface{}{
		"launched":    m.clock.Launch.Format(clockFormat),
		"observer":    m.clock.Observer.Now().Format(clockFormat),
		"observer_et": m.clock.Observer.Now().Sub(m.clock.Launch).Seconds(),
		"relative":    m.clock.Relative.Now().Format(clockFormat),
		// The original computed relative_et from the Observer clock — a
		// copy-paste of observer_et; it must come from the Relative clock.
		"relative_et": m.clock.Relative.Now().Sub(m.clock.Launch).Seconds(),
	}
	return json.Marshal(v)
}
// Distance returns the mission's current distance.
// A copy is returned so callers cannot mutate internal state.
func (m *Mission) Distance() *big.Rat {
	x := rat.Rat()
	x.Set(m.state.X)
	return x
}
// Drift returns the accumulated difference between the two clocks.
func (m *Mission) Drift() time.Duration {
	return m.clock.Drift()
}
// Elapsed returns ship (observer) time elapsed since launch.
func (m *Mission) Elapsed() time.Duration {
	return m.clock.Observer.Since(m.clock.Launch)
}
package patgen
import (
"math/rand"
"github.com/emer/emergent/erand"
"github.com/emer/etable/etensor"
)
// PermutedBinary sets the given tensor so that nOn randomly chosen elements
// hold onVal and every other element holds offVal, using a single random
// permutation of the element indices. A zero-length tensor is left untouched.
func PermutedBinary(tsr etensor.Tensor, nOn int, onVal, offVal float64) {
	n := tsr.Len()
	if n == 0 {
		return
	}
	for posInPerm, elemIdx := range rand.Perm(n) {
		val := offVal
		if posInPerm < nOn {
			val = onVal
		}
		tsr.SetFloat1D(elemIdx, val)
	}
}
// PermutedBinaryRows treats the tensor as a column of rows as in a etable.Table
// and sets each row to contain nOn onVal values and the remainder are offVal values,
// using a permuted order of tensor elements (i.e., randomly shuffled or permuted).
// The same permutation slice is re-shuffled between rows (erand.PermuteInts)
// rather than re-allocated, so each row gets an independent pattern.
func PermutedBinaryRows(tsr etensor.Tensor, nOn int, onVal, offVal float64) {
	rows, cells := tsr.RowCellSize()
	if rows == 0 || cells == 0 {
		return
	}
	pord := rand.Perm(cells)
	for rw := 0; rw < rows; rw++ {
		// stidx is the flat offset of this row's first cell.
		stidx := rw * cells
		for i := 0; i < cells; i++ {
			if i < nOn {
				tsr.SetFloat1D(stidx+pord[i], onVal)
			} else {
				tsr.SetFloat1D(stidx+pord[i], offVal)
			}
		}
		erand.PermuteInts(pord)
	}
}
/*
bool taDataGen::PermutedBinary_MinDist(DataTable* data, const String& col_nm, int n_on,
float dist, taMath::DistMetric metric,
bool norm, float tol, int thr_no)
{
if(!data) return false;
if(col_nm.empty()) {
bool rval = true;
for(int pn = 0;pn<data->data.size;pn++) {
DataCol* da = data->data.FastEl(pn);
if(da->is_matrix && da->valType() == VT_FLOAT) {
if(!PermutedBinary_MinDist(data, da->name, n_on, dist, metric, norm, tol, thr_no))
rval = false;
}
}
return rval;
}
DataCol* da = GetFloatMatrixDataCol(data, col_nm);
if(!da) return false;
bool larger_further = taMath::dist_larger_further(metric);
int bogus_count = 0;
data->DataUpdate(true);
for(int i =0;i<da->rows();i++) {
float_Matrix* mat = (float_Matrix*)da->GetValAsMatrix(i);
taBase::Ref(mat);
int cnt = 100 + (10 * (i + 1)); // 100 plus 10 more for every new stim
bool ok = false;
float min_d;
do {
PermutedBinaryMat(mat, n_on, 1.0f, 0.0f, thr_no);
min_d = LastMinDist(da, i, metric, norm, tol);
cnt--;
if(larger_further)
ok = (min_d >= dist);
else
ok = (min_d <= dist);
} while(!ok && (cnt > 0));
taBase::unRefDone(mat);
if(cnt == 0) {
taMisc::Warning("*** PermutedBinary_MinDist row:", String(i), "dist of:", (String)min_d,
"under dist limit:", (String)dist);
bogus_count++;
}
if(bogus_count > 5) {
taMisc::Warning("PermutedBinary_MinDist Giving up after 5 stimuli under the limit, set limits lower");
data->DataUpdate(false);
return false;
}
}
data->DataUpdate(false);
return true;
}
*/ | patgen/permuted.go | 0.564819 | 0.485234 | permuted.go | starcoder |
package intervals
import (
"math"
)
// Default configuration used by NewIntervalsDefault.
const (
	defaultMinLow = 0
	defaultMaxHigh = math.MaxInt64
	defaultLowInclusive = true
	defaultHighInclusive = true
	defaultSelfAdjustMinLow = false
	defaultSelfAdjustMaxHigh = true
)
// Intervals is an interface to handle Interval structures discovering the existence of gaps or overlays
type Intervals interface {
	// Add creates an interval from the input parameters and calls AddInterval
	Add(low, high int, obj interface{}) error
	// AddInterval appends a new interval to the list. If the interval range (low, high) is invalid, it returns an error
	AddInterval(itvl *Interval) error
	// HasGaps returns true if gaps exist for the introduced intervals between MinLow and MaxHigh
	HasGaps() bool
	// GetIntervals returns the interval list
	GetIntervals() []*Interval
	// Gaps returns the interval gaps
	Gaps() []*Interval
	// Merge fuses overlapped intervals together
	Merge() []*Interval
	// HasOverlapped returns true if overlapping exists for the introduced intervals
	HasOverlapped() bool
	// Overlapped returns the overlapped intervals
	Overlapped() []*Interval
	// FindIntervalsForValue returns all the intervals which contain the passed value
	FindIntervalsForValue(value int) []*Interval
	// IsLowInclusive indicates if the Low part of the interval is included, e. g. (3,5) --> the 3 is included as part of the interval
	IsLowInclusive() bool
	// IsHighInclusive indicates if the High part of the interval is included, e. g. (3,5) --> the 5 is included as part of the interval
	IsHighInclusive() bool
	// GetMinLow returns the minimal Low, either the one configured in the constructor, or the self-adjusted calculated if SelfAdjustMinLow=true
	GetMinLow() int
	// GetMaxHigh returns the maximal High, either the one configured in the constructor, or the self-adjusted calculated if SelfAdjustMaxHigh=true
	GetMaxHigh() int
}
// intervals implements the Intervals interface, caching derived lists
// (gaps, overlaps, merges) alongside the raw interval list.
type intervals struct {
	Intervals []*Interval // raw intervals as added
	GapsList []*Interval // cached result of Gaps
	OverlappedList []*Interval // cached result of Overlapped
	MergeList []*Interval // cached result of Merge
	MinLow int
	MaxHigh int
	Sorted bool // true once Intervals has been sorted
	LowInclusive bool
	HighInclusive bool
	SelfAdjustMinLow bool // set the minLow to the minimal Low value passed in Add or AddInterval methods
	SelfAdjustMaxHigh bool // set the maxHigh to the maximal High value passed in Add or AddInterval methods
}
// String implements fmt.Stringer, delegating to the internal report.
func (itvls *intervals) String() string {
	return itvls.report()
}
// NewIntervalsDefault is a constructor that returns an instance of the
// Intervals interface configured with the package default values.
func NewIntervalsDefault() Intervals {
	return New(defaultMinLow, defaultMaxHigh, defaultLowInclusive, defaultHighInclusive, defaultSelfAdjustMinLow, defaultSelfAdjustMaxHigh)
}
// New is a constructor that returns an instance of the Intervals interface
// configured with explicit bounds, inclusivity flags, and self-adjust behavior.
func New(minLow, maxHigh int, lowInclusive, highInclusive, selfAdjustMinLow, selfAdjustMaxHigh bool) Intervals {
	itvls := &intervals{}
	itvls.MinLow = minLow
	itvls.MaxHigh = maxHigh
	itvls.Intervals = []*Interval{}
	itvls.Sorted = false
	itvls.LowInclusive = lowInclusive
	itvls.HighInclusive = highInclusive
	itvls.SelfAdjustMinLow = selfAdjustMinLow
	itvls.SelfAdjustMaxHigh = selfAdjustMaxHigh
	return itvls
}
// IsLowInclusive reports whether the low bound of each interval is included.
func (itvls *intervals) IsLowInclusive() bool {
	return itvls.LowInclusive
}
// IsHighInclusive reports whether the high bound of each interval is included.
func (itvls *intervals) IsHighInclusive() bool {
	return itvls.HighInclusive
}
// GetMinLow returns the configured (or self-adjusted) minimal low bound.
func (itvls *intervals) GetMinLow() int {
	return itvls.MinLow
}
// GetMaxHigh returns the configured (or self-adjusted) maximal high bound.
func (itvls *intervals) GetMaxHigh() int {
	return itvls.MaxHigh
}
package wparams
// ParamStorer is a type that stores safe and unsafe parameters. Keys should be unique across both SafeParams and
// UnsafeParams (that is, if a key occurs in one map, it should not occur in the other).
type ParamStorer interface {
	// SafeParams returns the parameters that are safe to log.
	SafeParams() map[string]interface{}
	// UnsafeParams returns the parameters that must not be logged.
	UnsafeParams() map[string]interface{}
}
// NewParamStorer returns a new ParamStorer that stores all of the params in the provided ParamStorer inputs.
// Storers are folded in order, safe params before unsafe params for each one,
// while keeping keys unique across the two maps: a later storer that supplies
// an already-seen key wins, including flipping the key between safe and
// unsafe. Nil storers are skipped.
func NewParamStorer(paramStorers ...ParamStorer) ParamStorer {
	safeAcc := map[string]interface{}{}
	unsafeAcc := map[string]interface{}{}
	for _, ps := range paramStorers {
		if ps == nil {
			continue
		}
		for key, val := range ps.SafeParams() {
			safeAcc[key] = val
			delete(unsafeAcc, key)
		}
		for key, val := range ps.UnsafeParams() {
			unsafeAcc[key] = val
			delete(safeAcc, key)
		}
	}
	return NewSafeAndUnsafeParamStorer(safeAcc, unsafeAcc)
}
// NewSafeParamStorer returns a new ParamStorer that stores the provided parameters as SafeParams.
func NewSafeParamStorer(safeParams map[string]interface{}) ParamStorer {
	return NewSafeAndUnsafeParamStorer(safeParams, nil)
}
// NewUnsafeParamStorer returns a new ParamStorer that stores the provided parameters as UnsafeParams.
func NewUnsafeParamStorer(unsafeParams map[string]interface{}) ParamStorer {
	return NewSafeAndUnsafeParamStorer(nil, unsafeParams)
}
// NewSafeAndUnsafeParamStorer returns a new ParamStorer that stores the provided safe parameters as SafeParams and the
// unsafe parameters as UnsafeParams. If the safeParams and unsafeParams have any keys in common, the key/value pairs in
// the unsafeParams will be used (the conflicting key/value pairs provided by safeParams will be ignored).
// Both input maps are copied, so the returned storer is isolated from later
// mutations by the caller.
func NewSafeAndUnsafeParamStorer(safeParams, unsafeParams map[string]interface{}) ParamStorer {
	storer := &mapParamStorer{
		safeParams: make(map[string]interface{}),
		unsafeParams: make(map[string]interface{}),
	}
	for k, v := range safeParams {
		storer.safeParams[k] = v
	}
	for k, v := range unsafeParams {
		storer.unsafeParams[k] = v
		// Unsafe wins on conflict: drop the safe copy of the key.
		delete(storer.safeParams, k)
	}
	return storer
}
// mapParamStorer is the map-backed ParamStorer implementation used by the
// constructors in this file. Keys are kept unique across the two maps by the
// constructors; the type itself does not enforce that invariant.
type mapParamStorer struct {
	safeParams   map[string]interface{}
	unsafeParams map[string]interface{}
}
// SafeParams returns the storer's safe parameter map.
// Note: the internal map is returned directly, not a copy.
func (m *mapParamStorer) SafeParams() map[string]interface{} {
	return m.safeParams
}
// UnsafeParams returns the storer's unsafe parameter map.
// Note: the internal map is returned directly, not a copy.
func (m *mapParamStorer) UnsafeParams() map[string]interface{} {
	return m.unsafeParams
}
package common
import (
"math"
"math/rand"
"gorgonia.org/tensor"
)
// AddVector adds vec to every row of dense in place: dense[i][j] += vec[j].
// dense is indexed as a 2-D tensor and vec as a 1-D tensor whose length is
// assumed to match dense's second dimension — TODO confirm callers guarantee
// this. Both tensors must hold float64 values (the type assertions panic
// otherwise). Errors from the tensor API are discarded.
func AddVector(dense, vec *tensor.Dense) {
	imax, _ := dense.Info().Shape().DimSize(0)
	jmax, _ := dense.Info().Shape().DimSize(1)
	for i := 0; i < imax; i++ {
		for j := 0; j < jmax; j++ {
			aij, _ := dense.At(i, j)
			vj, _ := vec.At(j)
			dense.SetAt(aij.(float64)+vj.(float64), i, j)
		}
	}
}
// Softmax returns the row-wise softmax of the 2-D tensor a as a new tensor;
// a itself is left unmodified (all writes go to the clone). The per-row
// maximum is subtracted inside the exponential so large inputs do not
// overflow. Errors from the tensor API are discarded.
func Softmax(a *tensor.Dense) *tensor.Dense {
	imax, _ := a.Info().Shape().DimSize(0)
	jmax, _ := a.Info().Shape().DimSize(1)
	ret := a.Clone().(*tensor.Dense)
	// Row maxima along axis 1: exp(x - max) is the numerically stable form.
	amax, _ := a.Max(1)
	for i := 0; i < imax; i++ {
		s := 0.0
		for j := 0; j < jmax; j++ {
			aij, _ := ret.At(i, j)
			mi, _ := amax.At(i)
			v := math.Exp(aij.(float64) - mi.(float64))
			s += v
			ret.SetAt(v, i, j)
		}
		// Normalize the row so its entries sum to 1.
		for j := 0; j < jmax; j++ {
			aij, _ := ret.At(i, j)
			ret.SetAt(aij.(float64)/s, i, j)
		}
	}
	return ret
}
// CrossEntropyError computes the mean cross-entropy loss over a batch.
// y holds predicted probabilities with shape (batch, classes). t holds the
// targets, either one-hot encoded with the same shape as y or as a
// (batch, 1) column of float64 class indices; the one-hot form is detected
// by comparing shapes and reduced to indices via argmax. delta guards
// against log(0). Errors from the tensor API are discarded.
func CrossEntropyError(y, t *tensor.Dense) float64 {
	delta := 1e-7
	yShape := y.Info().Shape()
	batchSize, _ := yShape.DimSize(0)
	cy, _ := yShape.DimSize(1)
	tShape := t.Info().Shape()
	rt, _ := tShape.DimSize(0)
	ct, _ := tShape.DimSize(1)
	// Same shape as y means one-hot targets: collapse each row to its
	// argmax index so the lookup below can use a single column.
	if batchSize == rt && cy == ct {
		labels := []float64{}
		argmaxs, _ := t.Argmax(1)
		for i := 0; i < batchSize; i++ {
			a, _ := argmaxs.At(i)
			f := float64(a.(int))
			labels = append(labels, f)
		}
		t = tensor.New(tensor.WithShape(batchSize, 1), tensor.WithBacking(labels))
	}
	// Sum log-probabilities of the true class for each sample.
	sum := 0.0
	for i := 0; i < batchSize; i++ {
		j, _ := t.At(i, 0)
		yij, _ := y.At(i, int(j.(float64)))
		sum += math.Log(yij.(float64) + delta)
	}
	return -sum / float64(batchSize)
}
// NumericalGradient approximates the gradient of f at x element-wise using
// the central difference (f(x+h) - f(x-h)) / 2h with h = 1e-4. x is
// perturbed in place one element at a time but restored before moving on,
// so it is unchanged on return. The result has the same shape as x.
func NumericalGradient(f func(*tensor.Dense) float64, x *tensor.Dense) *tensor.Dense {
	h := 1e-4
	grad := x.Clone().(*tensor.Dense)
	it := x.Iterator()
	for !it.Done() {
		i, _ := it.Next()
		tmpVal := x.Get(i).(float64)
		x.Set(i, tmpVal+h)
		fxh1 := f(x)
		x.Set(i, tmpVal-h)
		fxh2 := f(x)
		grad.Set(i, (fxh1-fxh2)/(2*h))
		// Restore the original value before perturbing the next element.
		x.Set(i, tmpVal)
	}
	return grad
}
// gaussian draws one sample from the standard normal distribution N(0, 1)
// using the Box-Muller transform:
//
//	z = sqrt(-2 ln U1) * cos(2 pi U2)
//
// See https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
//
// NOTE(review): the previous version computed sqrt(-1*ln(U1)), which yields
// a normal distribution with variance 1/2 instead of the standard variance 1.
func gaussian() float64 {
	u1 := rand.Float64()
	for u1 == 0 {
		// rand.Float64 returns values in [0, 1); redraw to avoid log(0) = -Inf.
		u1 = rand.Float64()
	}
	u2 := rand.Float64()
	return math.Sqrt(-2*math.Log(u1)) * math.Cos(2*math.Pi*u2)
}
// RandomDense returns a new tensor of the given shape whose elements are
// drawn from gaussian(). The commented-out alternatives are leftovers from
// debugging (uniform fill and constant fill).
func RandomDense(dims ...int) *tensor.Dense {
	// Total element count across all dimensions.
	total := 1
	for _, i := range dims {
		total *= i
	}
	data := []float64{}
	for i := 0; i < total; i++ {
		//data = append(data, float64(total)*rand.Float64())
		data = append(data, gaussian())
		//data = append(data, 1.0) // debug
	}
	return tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data))
}
// Byte2Float64 converts a byte slice to a float64 slice, widening each byte
// value (0-255) to its float64 equivalent. The result always has the same
// length as the input (empty but non-nil for empty input).
func Byte2Float64(bs []byte) []float64 {
	// Preallocate: the output length is known, so repeated append growth
	// (and its copies) is unnecessary.
	ret := make([]float64, len(bs))
	for i, b := range bs {
		ret[i] = float64(b)
	}
	return ret
}
// Byte2Float64Mat converts a matrix of bytes to a matrix of float64 values,
// preserving the (possibly ragged) row structure of the input.
func Byte2Float64Mat(data [][]byte) [][]float64 {
	// Preallocate both dimensions: lengths are known up front, avoiding
	// repeated append growth.
	ret := make([][]float64, len(data))
	for i, bs := range data {
		row := make([]float64, len(bs))
		for j, b := range bs {
			row[j] = float64(b)
		}
		ret[i] = row
	}
	return ret
}
package main
import "math"
// A camera represents a view into a world.
type Camera struct {
	// The width of the camera's view in pixels.
	Width int
	// The height of the camera's view in pixels.
	Height int
	// The angular width of what the camera sees specified in radians.
	FieldOfView float64
	// A matrix indicating how the *world* is oriented with respect to the
	// camera.
	Transform Matrix
	// Half the width of the view in world-space units.
	// Cached by computeCameraPixelSize (called from MakeCamera).
	halfWidth float64
	// Half the height of the view in world-space units.
	// Cached by computeCameraPixelSize (called from MakeCamera).
	halfHeight float64
	// The size of a pixel in world-space units.
	// Cached by computeCameraPixelSize (called from MakeCamera).
	pixelSize float64
}
// MakeCamera returns a Camera with the given pixel dimensions and field of
// view (in radians). The world transform starts as the identity matrix, and
// the cached world-space view measurements are computed up front.
func MakeCamera(width, height int, fov float64) Camera {
	c := Camera{
		Width:       width,
		Height:      height,
		FieldOfView: fov,
		Transform:   IdentityMatrix4,
	}
	c.computeCameraPixelSize()
	return c
}
// MakeRayForPixel returns the world-space ray that starts at the camera's
// origin and passes through the center of pixel (x, y). The canvas sits at
// z = -1 in camera space; the camera's Transform (world orientation) is
// inverted to map camera-space points back into world space.
func (c Camera) MakeRayForPixel(x, y int) Ray {
	// Compute offsets from the edge of the canvas to the pixel's center.
	offsetX := (float64(x) + 0.5) * c.pixelSize
	offsetY := (float64(y) + 0.5) * c.pixelSize
	// The untransformed coordinates of the pixel in world-space.
	worldX := c.halfWidth - offsetX
	worldY := c.halfHeight - offsetY
	// Transform the canvas point and origin, remembering that the canvas is at
	// z = -1.
	inverseCameraTransform := c.Transform.Inverted()
	pixel := inverseCameraTransform.TupleMultiply(MakePoint(worldX, worldY, -1))
	origin := inverseCameraTransform.TupleMultiply(MakePoint(0, 0, 0))
	direction := pixel.Subtract(origin).Normalized()
	return MakeRay(origin, direction)
}
// Compute the size of a pixel in world-space units for a camera with the given
// width and height. The camera assumes that the canvas is exactly one world-
// space unit away from the camera.
func (c *Camera) computeCameraPixelSize() {
// Compute the size of half the view in world-space units. This can
// represent either half the view's height or half the view's width
// depending on the aspect ratio of the camera. Think of
halfView := math.Tan(c.FieldOfView / 2)
aspectRatio := float64(c.Width) / float64(c.Height)
if aspectRatio >= 1 {
// Horizontal canvas
c.halfWidth = halfView
c.halfHeight = halfView / aspectRatio
} else {
// Vertical canvas
c.halfWidth = halfView * aspectRatio
c.halfHeight = halfView
}
c.pixelSize = c.halfWidth * 2 / float64(c.Width)
} | camera.go | 0.917108 | 0.744958 | camera.go | starcoder |
package rj32
import "fmt"
// reg returns the assembler name for register number r (0-15):
//
//	0     ra    (return address)
//	1-3   a0-a2 (arguments)
//	4-7   s0-s3 (saved)
//	8-13  t0-t5 (temporaries)
//	14    gp    (global pointer)
//	15    sp    (stack pointer)
func reg(r int) string {
	switch {
	case r == 0:
		return "ra"
	case r == 15:
		return "sp"
	case r == 14:
		return "gp"
	case r < 4:
		return fmt.Sprintf("a%d", r-1)
	case r < 8:
		// NOTE(review): this bound was r < 7, which sent register 7 to the
		// temporary branch and printed the impossible name "t-1". Register 7
		// is the fourth saved register, s3.
		return fmt.Sprintf("s%d", r-4)
	default:
		return fmt.Sprintf("t%d", r-8)
	}
}
// String returns the disassembled instruction as a string.
// Operands are rendered per instruction format: register pairs, signed
// immediates, or load/store address syntax "[base, offset]".
func (ir Inst) String() string {
	switch ir.Fmt() {
	case FmtRR:
		return fmt.Sprintf("%-5s %s, %s", ir.Op(), reg(ir.Rd()), reg(ir.Rs()))
	case FmtI11:
		// NOTE(review): both the 11- and 12-bit immediate formats sign-extend
		// from 13 bits here — confirm this matches the instruction encoder.
		return fmt.Sprintf("%-5s %d", ir.Op(), signExtend(ir.Imm(), 13))
	case FmtI12:
		return fmt.Sprintf("%-5s %d", ir.Op(), signExtend(ir.Imm(), 13))
	case FmtRI6, FmtRI8:
		return fmt.Sprintf("%-5s %s, %d", ir.Op(), reg(ir.Rd()), signExtend(ir.Imm(), 13))
	case FmtLS:
		// Loads print "rd, [rs, imm]"; stores print "[rs, imm], rd".
		if ir.Op() == Load || ir.Op() == Loadb {
			return fmt.Sprintf("%-5s %s, [%s, %d]", ir.Op(), reg(ir.Rd()), reg(ir.Rs()), ir.Imm())
		}
		return fmt.Sprintf("%-5s [%s, %d], %s", ir.Op(), reg(ir.Rs()), ir.Imm(), reg(ir.Rd()))
	default:
		panic("not impl")
	}
}
// PreTrace returns a pre-execution debug string: the values of the
// registers/immediates the instruction is about to consume, formatted per
// instruction format.
func (ir Inst) PreTrace(cpu *CPU) string {
	switch ir.Fmt() {
	case FmtRR:
		return fmt.Sprintf("%s:%d %s:%d", reg(ir.Rd()), cpu.Reg[ir.Rd()], reg(ir.Rs()), cpu.Reg[ir.Rs()])
	case FmtI11:
		return fmt.Sprintf("pc:%04x rsval:%d", cpu.PC, cpu.rsval(ir))
	case FmtI12:
		return fmt.Sprintf("rsval:%d", cpu.rsval(ir))
	case FmtRI6, FmtRI8:
		return fmt.Sprintf("%s:%d rsval:%d", reg(ir.Rd()), cpu.Reg[ir.Rd()], cpu.rsval(ir))
	case FmtLS:
		return fmt.Sprintf("%s:%d off:%d", reg(ir.Rs()), cpu.Reg[ir.Rs()], cpu.off(ir.Imm()))
	default:
		panic("not impl")
	}
}
// PostTrace returns a post-execution debug string: the destination the
// instruction wrote (register, pc, imm, skip flag, or memory), formatted
// per instruction format.
func (ir Inst) PostTrace(cpu *CPU) string {
	switch ir.Fmt() {
	case FmtRR, FmtRI6, FmtRI8:
		// Conditional (If*) ops update the skip flag instead of a register.
		if ir.Op() >= IfEq {
			return fmt.Sprintf(" skip <- %v", cpu.Skip)
		}
		return fmt.Sprintf(" r%d <- %d", ir.Rd(), cpu.Reg[ir.Rd()])
	case FmtI11:
		return fmt.Sprintf(" pc <- %04x", cpu.PC)
	case FmtI12:
		return fmt.Sprintf(" imm <- %d", cpu.Imm)
	case FmtLS:
		if ir.Op() == Load || ir.Op() == Loadb {
			return fmt.Sprintf(" r%d <- %d", ir.Rd(), cpu.Reg[ir.Rd()])
		}
		return fmt.Sprintf(" mem <- %d", cpu.rsval(ir))
	default:
		panic("not impl")
	}
}
package mathlib
import (
"errors"
"math"
)
// PI returns the constant value of pi truncated to 15 significant digits,
// matching the value returned by Excel's PI() worksheet function (this is
// deliberately not math.Pi, which carries one more digit).
func PI() float64 {
	return 3.14159265358979
}
// SqrtPI returns the square root of (number * pi), mirroring Excel's SQRTPI.
// A negative argument yields a #NUM! error and NaN yields #VALUE!.
func SqrtPI(number float64) (float64, error) {
	// Validate Number - Common Errors
	if number < 0 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is < 0")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return math.Sqrt(number * PI()), nil
}
// Degree converts radians into degrees. NaN input yields a #VALUE! error.
func Degree(radians float64) (float64, error) {
	if math.IsNaN(radians) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return radians * (180 / PI()), nil
}
// Radians converts degrees to radians. NaN input yields a #VALUE! error.
func Radians(degree float64) (float64, error) {
	if math.IsNaN(degree) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return degree * (PI() / 180), nil
}
// Cos returns the cosine of the supplied angle, given in radians.
// A NaN argument yields a #VALUE! error, mirroring Excel's COS.
func Cos(number float64) (float64, error) {
	if !math.IsNaN(number) {
		return math.Cos(number), nil
	}
	return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
}
// Acos returns the arccosine (inverse cosine), in radians, of number.
// Arguments outside [-1, 1] yield a #NUM! error; NaN yields #VALUE!.
func Acos(number float64) (float64, error) {
	// Validate Number - Common Errors
	if number < -1 || number > 1 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is outside of the range -1 to +1")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return math.Acos(number), nil
}
// CosH returns the hyperbolic cosine of number. NaN yields a #VALUE! error.
func CosH(number float64) (float64, error) {
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return math.Cosh(number), nil
}
// AcosH returns the inverse hyperbolic cosine of number.
// Arguments below 1 yield a #NUM! error; NaN yields #VALUE!.
func AcosH(number float64) (float64, error) {
	if number < 1 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is less than 1")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return math.Acosh(number), nil
}
// Sec returns the secant (1/cos) of an angle given in radians.
// Arguments outside ±2^27 yield a #NUM! error (Excel's domain limit);
// NaN yields #VALUE!.
func Sec(number float64) (float64, error) {
	if number < -134217728 || number > 134217728 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is less than -2^27 or is greater than 2^27")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return (1 / math.Cos(number)), nil
}
// SecH returns the hyperbolic secant (1/cosh) of an angle.
// Arguments outside ±2^27 yield a #NUM! error; NaN yields #VALUE!.
func SecH(number float64) (float64, error) {
	if number < -134217728 || number > 134217728 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is less than -2^27 or is greater than 2^27")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return (1 / math.Cosh(number)), nil
}
// Sin returns the sine of the given angle, supplied in radians.
// A NaN argument yields a #VALUE! error, mirroring Excel's SIN.
func Sin(number float64) (float64, error) {
	if !math.IsNaN(number) {
		return math.Sin(number), nil
	}
	return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
}
// Asin returns the arcsine, in radians, of number. The argument must lie
// in [-1, 1]; values outside that range produce a #NUM! error and NaN
// produces a #VALUE! error, mirroring Excel's ASIN.
func Asin(number float64) (float64, error) {
	switch {
	case number < -1 || number > 1:
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is outside of the range -1 to +1")
	case math.IsNaN(number):
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	default:
		return math.Asin(number), nil
	}
}
// SinH returns the hyperbolic sine of number. NaN yields a #VALUE! error.
func SinH(number float64) (float64, error) {
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	// Sinh returns the hyperbolic sine of x.
	return math.Sinh(number), nil
}
// AsinH returns the inverse hyperbolic sine, so ASINH(SINH(z)) = z.
// NaN yields a #VALUE! error.
func AsinH(number float64) (float64, error) {
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return math.Asinh(number), nil
}
// Csc returns the cosecant (1/sin) of an angle given in radians.
// Zero yields a #DIV/0! error, arguments outside ±2^27 yield #NUM!, and
// NaN yields #VALUE!.
func Csc(number float64) (float64, error) {
	if number == 0 {
		return 0.0, errors.New("#DIV/0! - Occurred because the supplied number argument is equal to zero")
	}
	if number < -134217728 || number > 134217728 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is less than -2^27 or is greater than 2^27")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return 1 / math.Sin(number), nil
}
// CscH returns the hyperbolic cosecant (1/sinh) of an angle.
// Errors mirror Csc: #DIV/0! for zero, #NUM! outside ±2^27, #VALUE! for NaN.
func CscH(number float64) (float64, error) {
	if number == 0 {
		return 0.0, errors.New("#DIV/0! - Occurred because the supplied number argument is equal to zero")
	}
	if number < -134217728 || number > 134217728 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is less than -2^27 or is greater than 2^27")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return 1 / math.Sinh(number), nil
}
// Tan calculates the tangent of a given angle, supplied in radians.
// NaN yields a #VALUE! error.
func Tan(number float64) (float64, error) {
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return math.Tan(number), nil
}
// Atan returns the arctangent, in radians, of a given number.
// NaN yields a #VALUE! error.
func Atan(number float64) (float64, error) {
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return math.Atan(number), nil
}
// Atan2 returns the arctangent, in radians in the range (-pi, pi], of the
// angle from the positive x-axis to the point (x, y), mirroring Excel's
// ATAN2(x_num, y_num) worksheet function. NaN in either argument yields a
// #VALUE! error; the origin (0, 0) yields #DIV/0!.
func Atan2(x, y float64) (float64, error) {
	if math.IsNaN(x) || math.IsNaN(y) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	if x == 0 && y == 0 {
		return 0.0, errors.New("#DIV/0! - Occurred because the supplied x_num and y_num arguments are both equal to 0")
	}
	// math.Atan2 takes (y, x). The previous version passed (x, y), which
	// swapped the coordinates and returned the angle of the mirrored point
	// (e.g. Atan2(0, 1) came back 0 instead of pi/2).
	return math.Atan2(y, x), nil
}
// TanH returns the hyperbolic tangent of a given number, computed as
// sinh/cosh so that the NaN error handling of SinH/CosH is reused.
func TanH(number float64) (float64, error) {
	sinh, err := SinH(number)
	if err != nil {
		return 0.0, err
	}
	cosh, err := CosH(number)
	if err != nil {
		return 0.0, err
	}
	return sinh / cosh, nil
}
// AtanH calculates the inverse hyperbolic tangent of a supplied number.
// Arguments at or beyond ±1 yield a #NUM! error; NaN yields #VALUE!.
func AtanH(number float64) (float64, error) {
	// Validate Number - Common Errors
	if number <= -1 || number >= 1 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is ≤ -1 or ≥ 1")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return math.Atanh(number), nil
}
// Cot returns the cotangent (1/tan) of the given angle in radians.
// Errors mirror Excel's COT: zero input gives #DIV/0!, inputs outside
// ±2^27 give #NUM!, and NaN gives #VALUE!.
func Cot(number float64) (float64, error) {
	switch {
	case number == 0:
		return 0.0, errors.New("#DIV/0! - Occurred because the supplied number argument is equal to zero")
	case number < -134217728 || number > 134217728:
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is less than -2^27 or is greater than 2^27")
	case math.IsNaN(number):
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	return 1 / math.Tan(number), nil
}
// CotH returns the hyperbolic cotangent (1/tanh) of an angle.
// Zero yields a #DIV/0! error, arguments outside ±2^27 yield #NUM!, and
// NaN yields #VALUE!.
func CotH(number float64) (float64, error) {
	if number == 0 {
		return 0.0, errors.New("#DIV/0! - Occurred because the supplied number argument is equal to zero")
	}
	if number < -134217728 || number > 134217728 {
		return 0.0, errors.New("#NUM! - Occurred because the supplied number argument is less than -2^27 or is greater than 2^27")
	}
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	tanh, err := TanH(number)
	if err != nil {
		return 0.0, err
	}
	return (1 / tanh), nil
}
// Acot returns the arccotangent (inverse cotangent), in radians, of a
// number, computed as atan(1/number). NaN yields a #VALUE! error.
// For number == 0 the division yields +Inf and Atan(+Inf) = pi/2, which
// matches Excel's ACOT(0).
func Acot(number float64) (float64, error) {
	if math.IsNaN(number) {
		return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
	}
	number = 1 / number
	v, err := Atan(number)
	if err != nil {
		return 0.0, err
	}
	return v, nil
}
//AcotH returns the hyperbolic arccotangent of a number
func AcotH(number float64) (float64, error) {
// Validate Number - Common Errors
if number >= -1 && number <= 1 {
return 0.0, errors.New("#NUM! - Occurred because supplied number argument is between -1 and +1 (inclusive)")
}
if math.IsNaN(number) {
return 0.0, errors.New("#VALUE! - Occurred because the supplied number argument is non-numeric")
}
v, err := LN((number + 1) / (number - 1))
if err != nil {
return 0.0, err
}
return (0.5 * v), nil
} | math/trigonometry.go | 0.910356 | 0.555194 | trigonometry.go | starcoder |
package detour
import (
"unsafe"
)
// DtNodeFlags is a bit set describing a node's state within a search.
type DtNodeFlags uint8
const (
	DT_NODE_OPEN            DtNodeFlags = 0x01
	DT_NODE_CLOSED          DtNodeFlags = 0x02
	DT_NODE_PARENT_DETACHED DtNodeFlags = 0x04 // parent of the node is not adjacent. Found using raycast.
)
// DtNodeIndex is a 1-based index into a DtNodePool's node array.
type DtNodeIndex uint16
// DT_NULL_IDX is the sentinel "no node" index (all bits set).
const DT_NULL_IDX DtNodeIndex = ^DtNodeIndex(0)
// Bit widths used when packing parent index, state, and flags.
const DT_NODE_PARENT_BITS uint32 = 24
const DT_NODE_STATE_BITS uint32 = 2
const DT_NODE_FLAGS_BITS uint32 = 3
// DtNode is a single search node tracked by DtNodePool / DtNodeQueue.
type DtNode struct {
	Pos   [3]float32  ///< Position of the node.
	Cost  float32     ///< Cost from previous node to current node.
	Total float32     ///< Cost up to the node.
	Pidx  uint32      ///< Index to parent node.
	State uint8       ///< extra state information. A polyRef can have multiple nodes with different extra info. see DT_MAX_STATES_PER_NODE
	Flags DtNodeFlags ///< Node flags. A combination of dtNodeFlags.
	Id    DtPolyRef   ///< Polygon ref the node corresponds to.
}
const DT_MAX_STATES_PER_NODE int = 1 << DT_NODE_STATE_BITS // number of extra states per node. See dtNode::state
// sizeofNode caches unsafe.Sizeof(DtNode{}) for the pointer arithmetic in
// DtNodePool.GetNodeIdx.
var sizeofNode = uint32(unsafe.Sizeof(DtNode{}))
// DtNodePool owns the backing storage of search nodes plus a hash table
// (m_first/m_next chains) used to look nodes up — presumably by polygon
// ref; the lookup code is outside this view.
type DtNodePool struct {
	m_nodes     []DtNode
	m_first     []DtNodeIndex
	m_next      []DtNodeIndex
	m_maxNodes  uint32
	m_hashSize  uint32
	m_nodeCount uint32
	// base is the address of the start of the node storage, used by
	// GetNodeIdx to turn a *DtNode back into an index.
	base uintptr
}
// GetNodeIdx converts a node pointer into its 1-based pool index (0 means
// nil). It assumes node points into this pool's m_nodes backing array and
// that base was recorded from the same array — TODO confirm base is kept in
// sync if m_nodes is ever reallocated.
func (this *DtNodePool) GetNodeIdx(node *DtNode) uint32 {
	if node == nil {
		return 0
	}
	current := uintptr(unsafe.Pointer(node))
	return (uint32)(current-this.base)/sizeofNode + 1
}
// GetNodeAtIdx returns the node at the 1-based index idx, or nil for 0.
func (this *DtNodePool) GetNodeAtIdx(idx uint32) *DtNode {
	if idx == 0 {
		return nil
	}
	return &this.m_nodes[idx-1]
}
// GetMemUsed reports an estimate of the pool's memory footprint in bytes.
// NOTE(review): unsafe.Sizeof(&this.m_nodes[0]) is the size of a *pointer*,
// not of a DtNode (likewise for the other two terms), so this under-reports
// the actual usage — the C original sums sizeof(dtNode) etc.; confirm intent.
func (this *DtNodePool) GetMemUsed() uint32 {
	return uint32(unsafe.Sizeof(*this)) +
		uint32(unsafe.Sizeof(&this.m_nodes[0]))*this.m_maxNodes +
		uint32(unsafe.Sizeof(&this.m_next[0]))*this.m_maxNodes +
		uint32(unsafe.Sizeof(&this.m_first[0]))*this.m_hashSize
}
// Simple accessors for the pool's configuration and hash-chain storage.
func (this *DtNodePool) GetMaxNodes() uint32 { return this.m_maxNodes }
func (this *DtNodePool) GetHashSize() uint32 { return this.m_hashSize }
func (this *DtNodePool) GetFirst(bucket int) DtNodeIndex { return this.m_first[bucket] }
func (this *DtNodePool) GetNext(i int) DtNodeIndex { return this.m_next[i] }
func (this *DtNodePool) GetNodeCount() uint32 { return this.m_nodeCount }
// DtAllocNodePool creates and initializes a pool (mirrors the C API's
// alloc function; the real setup happens in constructor, not shown here).
func DtAllocNodePool(maxNodes, hashSize uint32) *DtNodePool {
	pool := &DtNodePool{}
	pool.constructor(maxNodes, hashSize)
	return pool
}
// DtFreeNodePool releases a pool; nil is tolerated.
func DtFreeNodePool(pool *DtNodePool) {
	if pool == nil {
		return
	}
	pool.destructor()
}
// DtNodeQueue is a binary min-heap of node pointers (the open list of the
// search). Heap maintenance is done by trickleDown/bubbleUp (not shown).
type DtNodeQueue struct {
	m_heap     []*DtNode
	m_capacity int
	m_size     int
}
// Clear empties the queue without releasing its storage.
func (this *DtNodeQueue) Clear() { this.m_size = 0 }
// Top returns the minimum element without removing it. Calling Top or Pop
// on an empty queue panics (index out of range).
func (this *DtNodeQueue) Top() *DtNode { return this.m_heap[0] }
// Pop removes and returns the minimum element, re-heapifying by sifting
// the last element down from the root.
func (this *DtNodeQueue) Pop() *DtNode {
	result := this.m_heap[0]
	this.m_size--
	this.trickleDown(0, this.m_heap[this.m_size])
	return result
}
// Push inserts node, sifting it up from the new last slot.
func (this *DtNodeQueue) Push(node *DtNode) {
	this.m_size++
	this.bubbleUp(this.m_size-1, node)
}
// Modify restores the heap property for node after its key changed.
// NOTE(review): only bubbleUp is attempted, so this handles key decreases
// only — consistent with A*, where open-list costs only improve.
func (this *DtNodeQueue) Modify(node *DtNode) {
	for i := 0; i < this.m_size; i++ {
		if this.m_heap[i] == node {
			this.bubbleUp(i, node)
			return
		}
	}
}
// Empty reports whether the queue holds no elements.
func (this *DtNodeQueue) Empty() bool { return this.m_size == 0 }
// GetMemUsed reports an estimate of the queue's memory footprint in bytes.
func (this *DtNodeQueue) GetMemUsed() uint32 {
	return uint32(unsafe.Sizeof(*this)) +
		uint32(unsafe.Sizeof(&this.m_heap[0]))*uint32(this.m_capacity+1)
}
// GetCapacity returns the queue's configured capacity.
func (this *DtNodeQueue) GetCapacity() int { return this.m_capacity }
// DtAllocNodeQueue creates a queue sized for n elements (setup is in
// constructor, not shown here).
func DtAllocNodeQueue(n int) *DtNodeQueue {
	queue := &DtNodeQueue{}
	queue.constructor(n)
	return queue
}
// DtFreeNodeQueue releases a queue; nil is tolerated.
func DtFreeNodeQueue(queue *DtNodeQueue) {
	if queue == nil {
		return
	}
	queue.destructor()
}
package generator
import (
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
"io/ioutil"
)
// Inventory acts as a collection of categorized Tokens which can be queried for both randomized
// and parameterized selection.
type Inventory struct {
	// dictionary maps a token category to all tokens in that category.
	dictionary  map[string][]Token
	// selectRange maps a category to the sum of its tokens' Rarity weights,
	// kept in sync by Add and used by Pick for weighted selection.
	selectRange map[string]float64
}
// CreateInventory creates a new, empty Inventory.
func CreateInventory() *Inventory {
	i := Inventory{
		dictionary:  make(map[string][]Token),
		selectRange: make(map[string]float64),
	}
	return &i
}
// AddToken creates and adds a new Token to this Inventory. The created token is returned to support
// chaining and to make it interchangeable with Add.
func (i *Inventory) AddToken(id string, content string, rarity float64, tags map[string]string) *Token {
	x := BuildToken(id, content, rarity, tags)
	return i.Add(x)
}
// Add adds an existing Token to this Inventory. The added token is returned, to help support chaining
// and make it interchangeable with AddToken.
// The token's Rarity is accumulated into the category's total selection
// weight. Note the returned pointer refers to Add's local copy of t, not to
// the element stored in the dictionary slice.
func (i *Inventory) Add(t Token) *Token {
	i.dictionary[t.Category] = append(i.dictionary[t.Category], t)
	i.selectRange[t.Category] += t.Rarity
	return &t
}
// getTokens retrieves tokens that match the supplied Selector, along with
// the sum of their Rarity weights. Unknown categories return an empty
// slice and zero weight. For a simple selector (category only) the cached
// category slice and precomputed weight are returned without filtering.
func (i *Inventory) getTokens(selector *Selector) ([]Token, float64) {
	idList, idFound := i.dictionary[selector.Category]
	if !idFound {
		return []Token{}, 0.0
	}
	var taggedList []Token
	selectRange := 0.0
	if selector.IsSimple() {
		return idList, i.selectRange[selector.Category]
	}
	// Tag-constrained selector: filter and re-total the weights.
	for _, x := range idList {
		if selector.MatchesToken(&x) {
			taggedList = append(taggedList, x)
			selectRange += x.Rarity
		}
	}
	return taggedList, selectRange
}
// Pick selects a random Token from the inventory which matches the given Selector. If no matching
// Tokens are found, then nil is returned.
// offset is the caller-supplied random value — presumably in [0, 1); verify
// against callers — which is scaled by the total matching weight, so each
// token is chosen with probability proportional to its Rarity.
func (i *Inventory) Pick(selector *Selector, offset float64) *Token {
	taggedList, selectRange := i.getTokens(selector)
	// Pick the first value that exceeds the offset value
	selectValue := offset * selectRange
	var lastToken *Token
	// Walk the cumulative rarity until it passes selectValue; the token
	// that crosses the threshold (or the final one) is returned.
	for len(taggedList) > 0 && selectValue >= 0 {
		lastToken = &taggedList[0]
		taggedList = taggedList[1:]
		selectValue -= lastToken.Rarity
	}
	return lastToken
}
// Load adds Tokens to the Inventory from a YAML file containing an array of Token definitions.
// Tokens are normalized and only valid ones are added; invalid entries are
// silently skipped. Read or parse failures are returned wrapped.
// NOTE(review): ioutil.ReadFile is deprecated since Go 1.16 — os.ReadFile
// is the drop-in replacement when the module's Go version allows it.
func (i *Inventory) Load(path string) error {
	// Read the file
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return errors.Wrap(err, "Failed to read inventory file.")
	}
	// Parse the YAML tokens
	var tokens []Token
	err = yaml.Unmarshal(data, &tokens)
	if err != nil {
		return errors.Wrap(err, "Failed to parse yaml file")
	}
	// Add all the tokens
	for _, t := range tokens {
		t.Normalize()
		if t.IsValid() {
			i.Add(t)
		}
	}
	return nil
}
package set
// SetMapToParamFunctions is the text/template source for the generated
// MapTo/FlatMapTo/GroupBy methods that transform a {{.TName}}Set into
// collections of a second type parameter. The template branches on whether
// the target type parameter is a basic type (slice results) or not (set
// results), and emits GroupBy only when the target type is comparable.
const SetMapToParamFunctions = `

// Set:MapTo[{{.TypeParameter}}]

{{if .TypeParameter.IsBasic}}
// MapTo{{.TypeParameter.LongName}} transforms {{.TName}}Set to []{{.TypeParameter.Name}}.
func (set {{.TName}}Set) MapTo{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) []{{.TypeParameter.Name}} {
	result := make([]{{.TypeParameter}}, 0, len(set))
	for v := range set {
		u := fn(v)
		result = append(result, {{.Addr}}u)
	}
	return result
}

// FlatMapTo{{.TypeParameter.LongName}} transforms {{.TName}}Set to []{{.TypeParameter}}, by repeatedly
// calling the supplied function and concatenating the results as a single flat slice.
func (set {{.TName}}Set) FlatMapTo{{.TypeParameter.LongName}}(fn func({{.PName}}) []{{.TypeParameter}}) []{{.TypeParameter}} {
	result := make([]{{.TypeParameter}}, 0, len(set))
	for v := range set {
		u := fn(v)
		if len(u) > 0 {
			result = append(result, u...)
		}
	}
	return result
}
{{else}}
// MapTo{{.TypeParameter.LongName}} transforms {{.TName}}Set to {{.TypeParameter}}Set.
func (set {{.TName}}Set) MapTo{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) {{.TypeParameter.Name}}Collection {
	result := make(map[{{.TypeParameter}}]struct{})
	for v := range set {
		u := fn(v)
		result[u] = struct{}{}
	}
	return {{.TypeParameter.Name}}Set(result)
}

// FlatMapTo{{.TypeParameter.LongName}} transforms {{.TName}}Set to {{.TypeParameter.Name}}Set, by
// calling the supplied function on each of the enclosed set elements, and returning a new set.
func (set {{.TName}}Set) FlatMapTo{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter.Name}}Collection) {{.TypeParameter.Name}}Collection {
	result := make(map[{{.TypeParameter}}]struct{})
	for a := range set {
		b := fn(a)
		b.Foreach(func (c {{.TypeParameter}}) {
			result[c] = struct{}{}
		})
	}
	return {{.TypeParameter.Name}}Set(result)
}

{{if .TypeParameter.Comparable}}
// GroupBy{{.TypeParameter.LongName}} groups elements into a map keyed by {{.TypeParameter}}.
// This method requires {{.TypeParameter.Name}} be comparable.
func (set {{.TName}}Set) GroupBy{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) map[{{.TypeParameter}}]{{.TName}}Set {
	result := make(map[{{.TypeParameter}}]{{.TName}}Set)
	for v := range set {
		key := fn(v)
		group, exists := result[key]
		if !exists {
			group = New{{.TName}}Set()
		}
		group[v] = struct{}{}
		result[key] = group
	}
	return result
}
{{end}}
{{end}}
`
package envoy
import (
"fmt"
"net"
"time"
"github.com/openservicemesh/osm/pkg/certificate"
)
// Proxy is a representation of an Envoy proxy connected to the xDS server.
// This should at some point have a 1:1 match to an Endpoint (which is a member of a meshed service).
type Proxy struct {
	certificate.CommonName
	net.Addr
	announcements chan interface{}
	// The time this Proxy connected to the OSM control plane
	connectedAt time.Time
	// Per-xDS-type version bookkeeping: last version sent to the proxy and
	// last version it acknowledged, keyed by resource TypeURI.
	lastSentVersion    map[TypeURI]uint64
	lastAppliedVersion map[TypeURI]uint64
	// Last xDS nonce sent per TypeURI (see SetNewNonce/GetLastSentNonce).
	lastNonce map[TypeURI]string
	// Records metadata around the Kubernetes Pod on which this Envoy Proxy is installed.
	// This could be nil if the Envoy is not operating in a Kubernetes cluster (VM for example)
	// NOTE: This field may be not be set at the time Proxy struct is initialized. This would
	// eventually be set when the metadata arrives via the xDS protocol.
	podMetadata *PodMetadata
}
// PodMetadata is a struct holding information on the Pod on which a given Envoy proxy is installed
// This struct is initialized *eventually*, when the metadata arrives via xDS.
type PodMetadata struct {
	UID            string
	Namespace      string
	IP             string
	ServiceAccount string
	Cluster        string
	EnvoyNodeID    string
}
// HasPodMetadata answers the question - has the Pod metadata been recorded for the given Envoy proxy
func (p *Proxy) HasPodMetadata() bool {
	return p.podMetadata != nil
}
// SetMetadata records the Kubernetes Pod metadata for this proxy once it
// arrives via xDS. The Cluster field is intentionally left empty (TODO).
func (p *Proxy) SetMetadata(podUID, podNamespace, podIP, podServiceAccountName, envoyNodeID string) {
	p.podMetadata = &PodMetadata{
		UID:            podUID,
		Namespace:      podNamespace,
		IP:             podIP,
		ServiceAccount: podServiceAccountName,
		Cluster:        "", // TODO
		EnvoyNodeID:    envoyNodeID,
	}
}
// SetLastAppliedVersion records the version of the given Envoy proxy that was last acknowledged.
// NOTE(review): the version/nonce maps below are read and written without
// synchronization, and the receiver set mixes value and pointer receivers —
// confirm each Proxy is only accessed from a single goroutine.
func (p *Proxy) SetLastAppliedVersion(typeURI TypeURI, version uint64) {
	p.lastAppliedVersion[typeURI] = version
}
// GetLastAppliedVersion returns the last version successfully applied to the given Envoy proxy.
func (p Proxy) GetLastAppliedVersion(typeURI TypeURI) uint64 {
	return p.lastAppliedVersion[typeURI]
}
// GetLastSentVersion returns the last sent version.
func (p Proxy) GetLastSentVersion(typeURI TypeURI) uint64 {
	return p.lastSentVersion[typeURI]
}
// IncrementLastSentVersion increments last sent version.
func (p *Proxy) IncrementLastSentVersion(typeURI TypeURI) uint64 {
	p.lastSentVersion[typeURI]++
	return p.GetLastSentVersion(typeURI)
}
// SetLastSentVersion records the version of the given config last sent to the proxy.
func (p *Proxy) SetLastSentVersion(typeURI TypeURI, ver uint64) {
	p.lastSentVersion[typeURI] = ver
}
// GetLastSentNonce returns last sent nonce.
// A miss also stores an empty-string entry for the TypeURI (this getter
// mutates the map, hence the pointer receiver).
func (p *Proxy) GetLastSentNonce(typeURI TypeURI) string {
	nonce, ok := p.lastNonce[typeURI]
	if !ok {
		p.lastNonce[typeURI] = ""
		return ""
	}
	return nonce
}
// SetNewNonce sets and returns a new nonce.
// The nonce is the current wall-clock time in nanoseconds, formatted as a
// decimal string.
func (p *Proxy) SetNewNonce(typeURI TypeURI) string {
	p.lastNonce[typeURI] = fmt.Sprintf("%d", time.Now().UnixNano())
	return p.lastNonce[typeURI]
}
// String returns the CommonName of the proxy.
func (p Proxy) String() string {
	return string(p.GetCommonName())
}
// GetCommonName returns the Subject Common Name from the mTLS certificate of the Envoy proxy connected to xDS.
func (p Proxy) GetCommonName() certificate.CommonName {
	return p.CommonName
}
// GetConnectedAt returns the timestamp of when the given proxy connected to the control plane.
func (p Proxy) GetConnectedAt() time.Time {
	return p.connectedAt
}
// GetIP returns the IP address of the Envoy proxy connected to xDS.
func (p Proxy) GetIP() net.Addr {
	return p.Addr
}
// GetAnnouncementsChannel returns the announcement channel for the given Envoy proxy.
// The channel is unbuffered (see NewProxy), so sends block until received.
func (p Proxy) GetAnnouncementsChannel() chan interface{} {
	return p.announcements
}
// NewProxy creates a new instance of an Envoy proxy connected to the xDS servers.
// connectedAt is stamped with the current time, the announcements channel
// is unbuffered, and all version/nonce maps start empty.
func NewProxy(cn certificate.CommonName, ip net.Addr) *Proxy {
	return &Proxy{
		CommonName: cn,
		Addr:       ip,

		connectedAt:   time.Now(),
		announcements: make(chan interface{}),

		lastNonce:          make(map[TypeURI]string),
		lastSentVersion:    make(map[TypeURI]uint64),
		lastAppliedVersion: make(map[TypeURI]uint64),
	}
}
package types
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/noah-blockchain/noah-go-node/hexutil"
"math/big"
"math/rand"
"reflect"
)
const (
	// Fixed byte lengths of the core wire types below.
	HashLength       = 32
	AddressLength    = 20
	CoinSymbolLength = 10
)
var (
	// reflect.Type handles used by hexutil's fixed-length JSON unmarshaling.
	hashT    = reflect.TypeOf(Hash{})
	addressT = reflect.TypeOf(Address{})
)
// ReplaceAtIndex returns a copy of in with the byte at position i replaced
// by r. This operates on bytes, not runes, so i must be a valid byte index
// and multi-byte UTF-8 characters receive no special handling.
func ReplaceAtIndex(in string, r byte, i int) string {
	buf := []byte(in)
	buf[i] = r
	return string(buf)
}
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
type Hash [HashLength]byte
// BytesToHash converts b into a Hash, right-aligning and cropping from the
// left as needed (see SetBytes).
func BytesToHash(b []byte) Hash {
	var h Hash
	h.SetBytes(b)
	return h
}
// StringToHash interprets s as raw bytes (not hex) and converts it to a Hash.
func StringToHash(s string) Hash { return BytesToHash([]byte(s)) }
// BigToHash converts the big-endian bytes of b to a Hash.
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
// HexToHash parses a hex string into a Hash. The "Mh" argument is a prefix
// passed to FromHex — presumably this chain's hash prefix; FromHex is
// defined elsewhere, so verify its prefix handling there.
func HexToHash(s string) Hash { return BytesToHash(FromHex(s, "Mh")) }
// Get the string representation of the underlying hash
func (h Hash) Str() string { return string(h[:]) }
// Bytes returns the hash as a byte slice backed by the array copy.
func (h Hash) Bytes() []byte { return h[:] }
// Big returns the hash interpreted as a big-endian unsigned integer.
func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
// Hex returns the 0x-prefixed hex encoding of the hash.
func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (h Hash) TerminalString() string {
	return fmt.Sprintf("%x…%x", h[:3], h[29:])
}
// String implements the stringer interface and is used also by the logger when
// doing full logging into a file.
func (h Hash) String() string {
	return h.Hex()
}
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
// without going through the stringer interface used for logging.
func (h Hash) Format(s fmt.State, c rune) {
	fmt.Fprintf(s, "%"+string(c), h[:])
}
// UnmarshalText parses a hash in hex syntax.
func (h *Hash) UnmarshalText(input []byte) error {
	return hexutil.UnmarshalFixedText("Hash", input, h[:])
}
// UnmarshalJSON parses a hash in hex syntax.
func (h *Hash) UnmarshalJSON(input []byte) error {
	return hexutil.UnmarshalFixedJSON(hashT, input, h[:])
}
// MarshalText returns the hex representation of h.
func (h Hash) MarshalText() ([]byte, error) {
	return hexutil.Bytes(h[:]).MarshalText()
}
// Sets the hash to the value of b. If b is larger than len(h), 'b' will be cropped (from the left).
// Shorter inputs are right-aligned, leaving the leading bytes zero.
func (h *Hash) SetBytes(b []byte) {
	if len(b) > len(h) {
		b = b[len(b)-HashLength:]
	}
	copy(h[HashLength-len(b):], b)
}
// Set string `s` to h. If s is larger than len(h) s will be cropped (from left) to fit.
func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) }
// Sets h to other
func (h *Hash) Set(other Hash) {
	for i, v := range other {
		h[i] = v
	}
}
// Generate implements testing/quick.Generator.
func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
m := rand.Intn(len(h))
for i := len(h) - 1; i > m; i-- {
h[i] = byte(rand.Uint32())
}
return reflect.ValueOf(h)
}
func EmptyHash(h Hash) bool {
return h == Hash{}
}
// UnprefixedHash allows marshaling a Hash without 0x prefix.
type UnprefixedHash Hash
// UnmarshalText decodes the hash from hex. The 0x prefix is optional.
func (h *UnprefixedHash) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedUnprefixedText("UnprefixedHash", input, h[:])
}
// MarshalText encodes the hash as hex.
func (h UnprefixedHash) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(h[:])), nil
}
/////////// Coin
type CoinSymbol [CoinSymbolLength]byte
func (c CoinSymbol) String() string { return string(bytes.Trim(c[:], "\x00")) }
func (c CoinSymbol) Bytes() []byte { return c[:] }
// MarshalJSON encodes the coin symbol as a quoted JSON string, with the
// trailing NUL padding stripped (see CoinSymbol.String).
func (c CoinSymbol) MarshalJSON() ([]byte, error) {
	return []byte("\"" + c.String() + "\""), nil
}
func (c *CoinSymbol) UnmarshalJSON(input []byte) error {
*c = StrToCoinSymbol(string(input[1 : len(input)-1]))
return nil
}
func (c CoinSymbol) Compare(c2 CoinSymbol) int {
return bytes.Compare(c.Bytes(), c2.Bytes())
}
func (c CoinSymbol) IsBaseCoin() bool {
return c.Compare(GetBaseCoin()) == 0
}
func StrToCoinSymbol(s string) CoinSymbol {
var symbol CoinSymbol
copy(symbol[:], []byte(s))
return symbol
}
/////////// Address
// Address represents the 20 byte address of an Ethereum account.
type Address [AddressLength]byte
func BytesToAddress(b []byte) Address {
var a Address
a.SetBytes(b)
return a
}
func StringToAddress(s string) Address { return BytesToAddress([]byte(s)) }
func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
func HexToAddress(s string) Address { return BytesToAddress(FromHex(s, "NOAHx")) }
// IsHexAddress verifies whether a string can represent a valid hex-encoded
// Noah address or not. The optional "NOAHx" prefix is stripped before checking.
func IsHexAddress(s string) bool {
	if hasHexPrefix(s, "NOAHx") {
		s = s[5:]
	}
	// A 20-byte address encodes to 2 hex characters per byte (40 total).
	// The previous 5*AddressLength (100) could never match a real address,
	// so this function always returned false.
	return len(s) == 2*AddressLength && isHex(s)
}
// Get the string representation of the underlying address
func (a Address) Str() string { return string(a[:]) }
func (a Address) Bytes() []byte { return a[:] }
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
func (a Address) Hex() string {
return "NOAHx" + hex.EncodeToString(a[:])
}
// String implements the stringer interface and is used also by the logger.
func (a Address) String() string {
return a.Hex()
}
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
// without going through the stringer interface used for logging.
func (a Address) Format(s fmt.State, c rune) {
_, _ = fmt.Fprintf(s, "%"+string(c), a[:])
}
// SetBytes sets the address to the value of b. If b is longer than
// AddressLength, b is cropped from the left — only the last AddressLength
// bytes are kept. It does not panic (the original comment was wrong).
// If b is shorter, it is right-aligned and the leading bytes stay zero.
func (a *Address) SetBytes(b []byte) {
	if len(b) > len(a) {
		b = b[len(b)-AddressLength:]
	}
	copy(a[AddressLength-len(b):], b)
}
// SetString sets a to the raw bytes of string s (s is NOT parsed as hex).
// If s is longer than AddressLength it is cropped from the left via
// SetBytes; it does not panic (the original comment was wrong).
func (a *Address) SetString(s string) { a.SetBytes([]byte(s)) }
// Sets a to other
func (a *Address) Set(other Address) {
for i, v := range other {
a[i] = v
}
}
// MarshalText returns the hex representation of a.
func (a Address) MarshalText() ([]byte, error) {
return hexutil.Bytes(a[:]).MarshalText()
}
// UnmarshalText parses a hash in hex syntax.
func (a *Address) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Address", input, a[:])
}
func (a *Address) Unmarshal(input []byte) error {
copy(a[:], input)
return nil
}
func (a Address) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", a.String())), nil
}
// UnmarshalJSON parses a hash in hex syntax.
func (a *Address) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
}
func (a *Address) Compare(a2 Address) int {
return bytes.Compare(a.Bytes(), a2.Bytes())
}
// UnprefixedHash allows marshaling an Address without 0x prefix.
type UnprefixedAddress Address
// UnmarshalText decodes the address from hex. The 0x prefix is optional.
func (a *UnprefixedAddress) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedUnprefixedText("UnprefixedAddress", input, a[:])
}
// MarshalText encodes the address as hex.
func (a UnprefixedAddress) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(a[:])), nil
}
type Pubkey []byte
func (p Pubkey) String() string {
return fmt.Sprintf("Np%x", []byte(p))
}
func (p Pubkey) MarshalText() ([]byte, error) {
return []byte(p.String()), nil
}
func (p Pubkey) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", p.String())), nil
}
// UnmarshalJSON parses a public key from its JSON form: a quoted string
// "Np<hex>" as produced by Pubkey.String / MarshalJSON.
func (p *Pubkey) UnmarshalJSON(input []byte) error {
	// Guard the slicing below: we need at least the two surrounding quotes
	// plus the two-character "Np" prefix, otherwise input[3:len(input)-1]
	// would panic on malformed JSON.
	if len(input) < 4 || input[0] != '"' || input[len(input)-1] != '"' {
		return fmt.Errorf("invalid pubkey encoding: %q", input)
	}
	b, err := hex.DecodeString(string(input[3 : len(input)-1]))
	if err != nil {
		return err
	}
	*p = Pubkey(b)
	return nil
}
func (p Pubkey) Compare(p2 Pubkey) int {
return bytes.Compare(p, p2)
} | core/types/types.go | 0.775605 | 0.404096 | types.go | starcoder |
package scene
import "math"
// This is a copy of fogleman/fauxgl's matrix.go (4x4 transform matrix utilities).
type Matrix struct {
X00, X01, X02, X03 float64
X10, X11, X12, X13 float64
X20, X21, X22, X23 float64
X30, X31, X32, X33 float64
}
func Identity() Matrix {
return Matrix{
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1}
}
func Translate(v Vector) Matrix {
return Matrix{
1, 0, 0, v.X,
0, 1, 0, v.Y,
0, 0, 1, v.Z,
0, 0, 0, 1}
}
func Scale(v Vector) Matrix {
return Matrix{
v.X, 0, 0, 0,
0, v.Y, 0, 0,
0, 0, v.Z, 0,
0, 0, 0, 1}
}
func Rotate(v Vector, a float64) Matrix {
v = v.Normalize()
s := math.Sin(a)
c := math.Cos(a)
m := 1 - c
return Matrix{
m*v.X*v.X + c, m*v.X*v.Y + v.Z*s, m*v.Z*v.X - v.Y*s, 0,
m*v.X*v.Y - v.Z*s, m*v.Y*v.Y + c, m*v.Y*v.Z + v.X*s, 0,
m*v.Z*v.X + v.Y*s, m*v.Y*v.Z - v.X*s, m*v.Z*v.Z + c, 0,
0, 0, 0, 1}
}
func RotateTo(a, b Vector) Matrix {
dot := b.Dot(a)
if dot == 1 {
return Identity()
} else if dot == -1 {
return Rotate(a.Perpendicular(), math.Pi)
} else {
angle := math.Acos(dot)
v := b.Cross(a).Normalize()
return Rotate(v, angle)
}
}
func Orient(position, size, up Vector, rotation float64) Matrix {
m := Rotate(Vector{0, 0, 1}, rotation)
m = m.Scale(size)
m = m.RotateTo(Vector{0, 0, 1}, up)
m = m.Translate(position)
return m
}
func Frustum(l, r, b, t, n, f float64) Matrix {
t1 := 2 * n
t2 := r - l
t3 := t - b
t4 := f - n
return Matrix{
t1 / t2, 0, (r + l) / t2, 0,
0, t1 / t3, (t + b) / t3, 0,
0, 0, (-f - n) / t4, (-t1 * f) / t4,
0, 0, -1, 0}
}
func Orthographic(l, r, b, t, n, f float64) Matrix {
return Matrix{
2 / (r - l), 0, 0, -(r + l) / (r - l),
0, 2 / (t - b), 0, -(t + b) / (t - b),
0, 0, -2 / (f - n), -(f + n) / (f - n),
0, 0, 0, 1}
}
func Perspective(fovy, aspect, near, far float64) Matrix {
ymax := near * math.Tan(fovy*math.Pi/360)
xmax := ymax * aspect
return Frustum(-xmax, xmax, -ymax, ymax, near, far)
}
func LookAt(eye, center, up Vector) Matrix {
z := eye.Sub(center).Normalize()
x := up.Cross(z).Normalize()
y := z.Cross(x)
return Matrix{
x.X, x.Y, x.Z, -x.Dot(eye),
y.X, y.Y, y.Z, -y.Dot(eye),
z.X, z.Y, z.Z, -z.Dot(eye),
0, 0, 0, 1,
}
}
func LookAtDirection(forward, up Vector) Matrix {
z := forward.Normalize()
x := up.Cross(z).Normalize()
y := z.Cross(x)
return Matrix{
x.X, x.Y, x.Z, 0,
y.X, y.Y, y.Z, 0,
z.X, z.Y, z.Z, 0,
0, 0, 0, 1,
}
}
func Screen(w, h int) Matrix {
w2 := float64(w) / 2
h2 := float64(h) / 2
return Matrix{
w2, 0, 0, w2,
0, -h2, 0, h2,
0, 0, 0.5, 0.5,
0, 0, 0, 1,
}
}
func Viewport(x, y, w, h float64) Matrix {
l := x
b := y
r := x + w
t := y + h
return Matrix{
(r - l) / 2, 0, 0, (r + l) / 2,
0, (t - b) / 2, 0, (t + b) / 2,
0, 0, 0.5, 0.5,
0, 0, 0, 1,
}
}
func (m Matrix) Translate(v Vector) Matrix {
return Translate(v).Mul(m)
}
func (m Matrix) Scale(v Vector) Matrix {
return Scale(v).Mul(m)
}
func (m Matrix) Rotate(v Vector, a float64) Matrix {
return Rotate(v, a).Mul(m)
}
func (m Matrix) RotateTo(a, b Vector) Matrix {
return RotateTo(a, b).Mul(m)
}
func (m Matrix) Frustum(l, r, b, t, n, f float64) Matrix {
return Frustum(l, r, b, t, n, f).Mul(m)
}
func (m Matrix) Orthographic(l, r, b, t, n, f float64) Matrix {
return Orthographic(l, r, b, t, n, f).Mul(m)
}
func (m Matrix) Perspective(fovy, aspect, near, far float64) Matrix {
return Perspective(fovy, aspect, near, far).Mul(m)
}
func (m Matrix) LookAt(eye, center, up Vector) Matrix {
return LookAt(eye, center, up).Mul(m)
}
func (m Matrix) Viewport(x, y, w, h float64) Matrix {
return Viewport(x, y, w, h).Mul(m)
}
func (a Matrix) MulScalar(b float64) Matrix {
return Matrix{
a.X00 * b, a.X01 * b, a.X02 * b, a.X03 * b,
a.X10 * b, a.X11 * b, a.X12 * b, a.X13 * b,
a.X20 * b, a.X21 * b, a.X22 * b, a.X23 * b,
a.X30 * b, a.X31 * b, a.X32 * b, a.X33 * b,
}
}
func (a Matrix) Mul(b Matrix) Matrix {
m := Matrix{}
m.X00 = a.X00*b.X00 + a.X01*b.X10 + a.X02*b.X20 + a.X03*b.X30
m.X10 = a.X10*b.X00 + a.X11*b.X10 + a.X12*b.X20 + a.X13*b.X30
m.X20 = a.X20*b.X00 + a.X21*b.X10 + a.X22*b.X20 + a.X23*b.X30
m.X30 = a.X30*b.X00 + a.X31*b.X10 + a.X32*b.X20 + a.X33*b.X30
m.X01 = a.X00*b.X01 + a.X01*b.X11 + a.X02*b.X21 + a.X03*b.X31
m.X11 = a.X10*b.X01 + a.X11*b.X11 + a.X12*b.X21 + a.X13*b.X31
m.X21 = a.X20*b.X01 + a.X21*b.X11 + a.X22*b.X21 + a.X23*b.X31
m.X31 = a.X30*b.X01 + a.X31*b.X11 + a.X32*b.X21 + a.X33*b.X31
m.X02 = a.X00*b.X02 + a.X01*b.X12 + a.X02*b.X22 + a.X03*b.X32
m.X12 = a.X10*b.X02 + a.X11*b.X12 + a.X12*b.X22 + a.X13*b.X32
m.X22 = a.X20*b.X02 + a.X21*b.X12 + a.X22*b.X22 + a.X23*b.X32
m.X32 = a.X30*b.X02 + a.X31*b.X12 + a.X32*b.X22 + a.X33*b.X32
m.X03 = a.X00*b.X03 + a.X01*b.X13 + a.X02*b.X23 + a.X03*b.X33
m.X13 = a.X10*b.X03 + a.X11*b.X13 + a.X12*b.X23 + a.X13*b.X33
m.X23 = a.X20*b.X03 + a.X21*b.X13 + a.X22*b.X23 + a.X23*b.X33
m.X33 = a.X30*b.X03 + a.X31*b.X13 + a.X32*b.X23 + a.X33*b.X33
return m
}
func (a Matrix) MulPosition(b Vector) Vector {
x := a.X00*b.X + a.X01*b.Y + a.X02*b.Z + a.X03
y := a.X10*b.X + a.X11*b.Y + a.X12*b.Z + a.X13
z := a.X20*b.X + a.X21*b.Y + a.X22*b.Z + a.X23
return Vector{x, y, z}
}
func (a Matrix) MulPositionW(b Vector) VectorW {
x := a.X00*b.X + a.X01*b.Y + a.X02*b.Z + a.X03
y := a.X10*b.X + a.X11*b.Y + a.X12*b.Z + a.X13
z := a.X20*b.X + a.X21*b.Y + a.X22*b.Z + a.X23
w := a.X30*b.X + a.X31*b.Y + a.X32*b.Z + a.X33
return VectorW{x, y, z, w}
}
func (a Matrix) MulDirection(b Vector) Vector {
x := a.X00*b.X + a.X01*b.Y + a.X02*b.Z
y := a.X10*b.X + a.X11*b.Y + a.X12*b.Z
z := a.X20*b.X + a.X21*b.Y + a.X22*b.Z
return Vector{x, y, z}.Normalize()
}
/*
func (a Matrix) MulBox(box Box) Box {
// http://dev.theomader.com/transform-bounding-boxes/
r := Vector{a.X00, a.X10, a.X20}
u := Vector{a.X01, a.X11, a.X21}
b := Vector{a.X02, a.X12, a.X22}
t := Vector{a.X03, a.X13, a.X23}
xa := r.MulScalar(box.Min.X)
xb := r.MulScalar(box.Max.X)
ya := u.MulScalar(box.Min.Y)
yb := u.MulScalar(box.Max.Y)
za := b.MulScalar(box.Min.Z)
zb := b.MulScalar(box.Max.Z)
xa, xb = xa.Min(xb), xa.Max(xb)
ya, yb = ya.Min(yb), ya.Max(yb)
za, zb = za.Min(zb), za.Max(zb)
min := xa.Add(ya).Add(za).Add(t)
max := xb.Add(yb).Add(zb).Add(t)
return Box{min, max}
}
*/
func (a Matrix) Transpose() Matrix {
return Matrix{
a.X00, a.X10, a.X20, a.X30,
a.X01, a.X11, a.X21, a.X31,
a.X02, a.X12, a.X22, a.X32,
a.X03, a.X13, a.X23, a.X33}
}
func (a Matrix) Determinant() float64 {
return (a.X00*a.X11*a.X22*a.X33 - a.X00*a.X11*a.X23*a.X32 +
a.X00*a.X12*a.X23*a.X31 - a.X00*a.X12*a.X21*a.X33 +
a.X00*a.X13*a.X21*a.X32 - a.X00*a.X13*a.X22*a.X31 -
a.X01*a.X12*a.X23*a.X30 + a.X01*a.X12*a.X20*a.X33 -
a.X01*a.X13*a.X20*a.X32 + a.X01*a.X13*a.X22*a.X30 -
a.X01*a.X10*a.X22*a.X33 + a.X01*a.X10*a.X23*a.X32 +
a.X02*a.X13*a.X20*a.X31 - a.X02*a.X13*a.X21*a.X30 +
a.X02*a.X10*a.X21*a.X33 - a.X02*a.X10*a.X23*a.X31 +
a.X02*a.X11*a.X23*a.X30 - a.X02*a.X11*a.X20*a.X33 -
a.X03*a.X10*a.X21*a.X32 + a.X03*a.X10*a.X22*a.X31 -
a.X03*a.X11*a.X22*a.X30 + a.X03*a.X11*a.X20*a.X32 -
a.X03*a.X12*a.X20*a.X31 + a.X03*a.X12*a.X21*a.X30)
}
// Inverse returns the inverse of a, computed as the adjugate (cofactor)
// matrix with each entry divided by the determinant.
// NOTE(review): there is no singularity check — if a.Determinant() is 0
// every entry divides by zero and the result is full of ±Inf/NaN. Callers
// must ensure a is invertible.
func (a Matrix) Inverse() Matrix {
	m := Matrix{}
	// Determinant is computed once and shared by all 16 cofactor terms.
	d := a.Determinant()
	m.X00 = (a.X12*a.X23*a.X31 - a.X13*a.X22*a.X31 + a.X13*a.X21*a.X32 - a.X11*a.X23*a.X32 - a.X12*a.X21*a.X33 + a.X11*a.X22*a.X33) / d
	m.X01 = (a.X03*a.X22*a.X31 - a.X02*a.X23*a.X31 - a.X03*a.X21*a.X32 + a.X01*a.X23*a.X32 + a.X02*a.X21*a.X33 - a.X01*a.X22*a.X33) / d
	m.X02 = (a.X02*a.X13*a.X31 - a.X03*a.X12*a.X31 + a.X03*a.X11*a.X32 - a.X01*a.X13*a.X32 - a.X02*a.X11*a.X33 + a.X01*a.X12*a.X33) / d
	m.X03 = (a.X03*a.X12*a.X21 - a.X02*a.X13*a.X21 - a.X03*a.X11*a.X22 + a.X01*a.X13*a.X22 + a.X02*a.X11*a.X23 - a.X01*a.X12*a.X23) / d
	m.X10 = (a.X13*a.X22*a.X30 - a.X12*a.X23*a.X30 - a.X13*a.X20*a.X32 + a.X10*a.X23*a.X32 + a.X12*a.X20*a.X33 - a.X10*a.X22*a.X33) / d
	m.X11 = (a.X02*a.X23*a.X30 - a.X03*a.X22*a.X30 + a.X03*a.X20*a.X32 - a.X00*a.X23*a.X32 - a.X02*a.X20*a.X33 + a.X00*a.X22*a.X33) / d
	m.X12 = (a.X03*a.X12*a.X30 - a.X02*a.X13*a.X30 - a.X03*a.X10*a.X32 + a.X00*a.X13*a.X32 + a.X02*a.X10*a.X33 - a.X00*a.X12*a.X33) / d
	m.X13 = (a.X02*a.X13*a.X20 - a.X03*a.X12*a.X20 + a.X03*a.X10*a.X22 - a.X00*a.X13*a.X22 - a.X02*a.X10*a.X23 + a.X00*a.X12*a.X23) / d
	m.X20 = (a.X11*a.X23*a.X30 - a.X13*a.X21*a.X30 + a.X13*a.X20*a.X31 - a.X10*a.X23*a.X31 - a.X11*a.X20*a.X33 + a.X10*a.X21*a.X33) / d
	m.X21 = (a.X03*a.X21*a.X30 - a.X01*a.X23*a.X30 - a.X03*a.X20*a.X31 + a.X00*a.X23*a.X31 + a.X01*a.X20*a.X33 - a.X00*a.X21*a.X33) / d
	m.X22 = (a.X01*a.X13*a.X30 - a.X03*a.X11*a.X30 + a.X03*a.X10*a.X31 - a.X00*a.X13*a.X31 - a.X01*a.X10*a.X33 + a.X00*a.X11*a.X33) / d
	m.X23 = (a.X03*a.X11*a.X20 - a.X01*a.X13*a.X20 - a.X03*a.X10*a.X21 + a.X00*a.X13*a.X21 + a.X01*a.X10*a.X23 - a.X00*a.X11*a.X23) / d
	m.X30 = (a.X12*a.X21*a.X30 - a.X11*a.X22*a.X30 - a.X12*a.X20*a.X31 + a.X10*a.X22*a.X31 + a.X11*a.X20*a.X32 - a.X10*a.X21*a.X32) / d
	m.X31 = (a.X01*a.X22*a.X30 - a.X02*a.X21*a.X30 + a.X02*a.X20*a.X31 - a.X00*a.X22*a.X31 - a.X01*a.X20*a.X32 + a.X00*a.X21*a.X32) / d
	m.X32 = (a.X02*a.X11*a.X30 - a.X01*a.X12*a.X30 - a.X02*a.X10*a.X31 + a.X00*a.X12*a.X31 + a.X01*a.X10*a.X32 - a.X00*a.X11*a.X32) / d
	m.X33 = (a.X01*a.X12*a.X20 - a.X02*a.X11*a.X20 + a.X02*a.X10*a.X21 - a.X00*a.X12*a.X21 - a.X01*a.X10*a.X22 + a.X00*a.X11*a.X22) / d
	return m
}
// This file implements the compressed encoding of source
// positions using a lookup table.
package src
// XPos is a more compact representation of Pos.
type XPos struct {
index int32
lico
}
// NoXPos is a valid unknown position.
var NoXPos XPos
// IsKnown reports whether the position p is known.
// XPos.IsKnown() matches Pos.IsKnown() for corresponding
// positions.
func (p XPos) IsKnown() bool {
return p.index != 0 || p.Line() != 0
}
// Before reports whether the position p comes before q in the source.
// For positions with different bases, ordering is by base index.
func (p XPos) Before(q XPos) bool {
	if p.index != q.index {
		return p.index < q.index
	}
	// Same base: fall back to the compact line/column ordering.
	return p.lico < q.lico
}
// SameFile reports whether p and q are positions in the same file.
func (p XPos) SameFile(q XPos) bool {
return p.index == q.index
}
// After reports whether the position p comes after q in the source.
// For positions with different bases, ordering is by base index.
func (p XPos) After(q XPos) bool {
	if p.index != q.index {
		return p.index > q.index
	}
	// Same base: fall back to the compact line/column ordering.
	return p.lico > q.lico
}
// WithNotStmt returns the same location to be marked with DWARF is_stmt=0
func (p XPos) WithNotStmt() XPos {
p.lico = p.lico.withNotStmt()
return p
}
// WithDefaultStmt returns the same location with undetermined is_stmt
func (p XPos) WithDefaultStmt() XPos {
p.lico = p.lico.withDefaultStmt()
return p
}
// WithIsStmt returns the same location to be marked with DWARF is_stmt=1
func (p XPos) WithIsStmt() XPos {
p.lico = p.lico.withIsStmt()
return p
}
// WithBogusLine returns a bogus line that won't match any recorded for the source code.
// Its use is to disrupt the statements within an infinite loop so that the debugger
// will not itself loop infinitely waiting for the line number to change.
// gdb chooses not to display the bogus line; delve shows it with a complaint, but the
// alternative behavior is to hang.
func (p XPos) WithBogusLine() XPos {
p.lico = makeBogusLico()
return p
}
// WithXlogue returns the same location but marked with DWARF function prologue/epilogue
func (p XPos) WithXlogue(x PosXlogue) XPos {
p.lico = p.lico.withXlogue(x)
return p
}
func (p XPos) LineNumber() string {
if !p.IsKnown() {
return "?"
}
return p.lico.lineNumber()
}
func (p XPos) LineNumberHTML() string {
if !p.IsKnown() {
return "?"
}
return p.lico.lineNumberHTML()
}
// AtColumn1 returns the same location but shifted to column 1.
func (p XPos) AtColumn1() XPos {
p.lico = p.lico.atColumn1()
return p
}
// A PosTable tracks Pos -> XPos conversions and vice versa.
// Its zero value is a ready-to-use PosTable.
type PosTable struct {
baseList []*PosBase
indexMap map[*PosBase]int
}
// XPos returns the corresponding XPos for the given pos,
// adding pos to t if necessary.
func (t *PosTable) XPos(pos Pos) XPos {
	if t.indexMap == nil {
		// Lazy init: reserve index 0 for the nil base so that NoPos
		// always maps to index 0.
		t.baseList = append(t.baseList, nil)
		t.indexMap = map[*PosBase]int{nil: 0}
	}
	i, ok := t.indexMap[pos.base]
	if !ok {
		// First time we see this base: assign it the next free index.
		i = len(t.baseList)
		t.indexMap[pos.base] = i
		t.baseList = append(t.baseList, pos.base)
	}
	return XPos{int32(i), pos.lico}
}
// Pos returns the corresponding Pos for the given p.
// If p cannot be translated via t, the function panics.
func (t *PosTable) Pos(p XPos) Pos {
var base *PosBase
if p.index != 0 {
base = t.baseList[p.index]
}
return Pos{base, p.lico}
} | src/cmd/internal/src/xpos.go | 0.803521 | 0.597138 | xpos.go | starcoder |
package main
import "sort"
/*****************************************************************************************************
*
* You are given a series of video clips from a sporting event that lasted T seconds. These video
* clips can be overlapping with each other and have varied lengths.
*
* Each video clip clips[i] is an interval: it starts at time clips[i][0] and ends at time
* clips[i][1]. We can cut these clips into segments freely: for example, a clip [0, 7] can be cut
* into segments [0, 1] + [1, 3] + [3, 7].
*
* Return the minimum number of clips needed so that we can cut the clips into segments that cover the
* entire sporting event ([0, T]). If the task is impossible, return -1.
*
* Example 1:
*
* Input: clips = [[0,2],[4,6],[8,10],[1,9],[1,5],[5,9]], T = 10
* Output: 3
* Explanation:
* We take the clips [0,2], [8,10], [1,9]; a total of 3 clips.
* Then, we can reconstruct the sporting event as follows:
* We cut [1,9] into segments [1,2] + [2,8] + [8,9].
* Now we have segments [0,2] + [2,8] + [8,10] which cover the sporting event [0, 10].
*
* Example 2:
*
* Input: clips = [[0,1],[1,2]], T = 5
* Output: -1
* Explanation:
* We can't cover [0,5] with only [0,1] and [1,2].
*
* Example 3:
*
* Input: clips =
* [[0,1],[6,8],[0,2],[5,6],[0,4],[0,3],[6,7],[1,3],[4,7],[1,4],[2,5],[2,6],[3,4],[4,5],[5,7],[6,9]],
* T = 9
* Output: 3
* Explanation:
* We can take clips [0,4], [4,7], and [6,9].
*
* Example 4:
*
* Input: clips = [[0,4],[2,8]], T = 5
* Output: 2
* Explanation:
* Notice you can have extra video after the event ends.
*
* Constraints:
*
* 1 <= clips.length <= 100
* 0 <= clips[i][0] <= clips[i][1] <= 100
* 0 <= T <= 100
******************************************************************************************************/
// videoStitching returns the minimum number of clips needed so that the
// clips, cut into segments, cover the whole event [0, T]; -1 if impossible.
//
// Dynamic programming: dp[i] is the minimum number of clips covering [0, i].
// Transition: for every clip [s, e] with s <= i <= e, dp[i] = min(dp[i], dp[s]+1).
// Base case: dp[0] = 0 — a zero-length event needs no clips.
// Time O(T*len(clips)), memory O(T).
func videoStitching(clips [][]int, T int) int {
	// Sentinel larger than any feasible answer (constraints cap the
	// answer at len(clips) <= 100).
	const unreachable = 110
	dp := make([]int, T+1)
	for i := 1; i <= T; i++ {
		dp[i] = unreachable
	}
	for i := 1; i <= T; i++ {
		for _, clip := range clips {
			if clip[0] <= i && i <= clip[1] && dp[clip[0]]+1 < dp[i] {
				dp[i] = dp[clip[0]] + 1
			}
		}
	}
	if dp[T] == unreachable {
		return -1
	}
	return dp[T]
}
// videoStitchingV2 solves the same problem greedily.
//
// The previous version (marked "todo Err") miscounted: it compared each clip
// only with its immediate predecessor, so overlapping chains were merged
// incorrectly (e.g. example 1 was not handled). The standard greedy below
// works in rounds: with coverage up to curEnd, scan every clip starting at or
// before curEnd and extend as far right as possible; each round costs one clip.
// Time O(n log n) for the sort plus O(n) for the scan, memory O(1).
func videoStitchingV2(clips [][]int, T int) int {
	if T == 0 {
		return 0 // nothing to cover
	}
	sort.Slice(clips, func(i, j int) bool {
		return clips[i][0] < clips[j][0]
	})
	ans, curEnd, i := 0, 0, 0
	for curEnd < T {
		// Among all clips that can extend the current coverage
		// (start <= curEnd), pick the farthest-reaching endpoint.
		nextEnd := curEnd
		for i < len(clips) && clips[i][0] <= curEnd {
			if clips[i][1] > nextEnd {
				nextEnd = clips[i][1]
			}
			i++
		}
		if nextEnd == curEnd {
			// No clip reaches past curEnd: there is a gap.
			return -1
		}
		ans++
		curEnd = nextEnd
	}
	return ans
}
package main
import (
"flag"
"fmt"
"math"
"os"
"sync"
"time"
"github.com/ChristopherRabotin/gokalman"
"github.com/ChristopherRabotin/smd"
"github.com/gonum/matrix/mat64"
)
const (
ekfTrigger = -15 // Number of measurements prior to switching to EKF mode.
ekfDisableTime = 1200 // Seconds between measurements to switch back to CKF. Set as negative to ignore.
sncEnabled = false // Set to false to disable SNC.
sncDisableTime = 1200 // Number of seconds between measurements to skip using SNC noise.
sncRIC = true // Set to true if the noise should be considered defined in PQW frame.
timeBasedPlot = false // Set to true to plot time, or false to plot on measurements.
smoothing = false // Set to true to smooth the CKF.
)
var (
σQExponent float64
wg sync.WaitGroup
)
func init() {
flag.Float64Var(&σQExponent, "sigmaExp", 6, "exponent for the Q sigma (default is 6, so sigma=1e-6).")
}
func main() {
flag.Parse()
// Define the times
startDT := time.Now()
endDT := startDT.Add(time.Duration(24) * time.Hour)
// Define the orbits
leo := smd.NewOrbitFromOE(7000, 0.001, 30, 80, 40, 0, smd.Earth)
// Define the stations
σρ := math.Pow(1e-3, 2) // m , but all measurements in km.
σρDot := math.Pow(1e-3, 2) // m/s , but all measurements in km/s.
st1 := NewStation("st1", 0, -35.398333, 148.981944, σρ, σρDot)
st2 := NewStation("st2", 0, 40.427222, 355.749444, σρ, σρDot)
st3 := NewStation("st3", 0, 35.247164, 243.205, σρ, σρDot)
stations := []Station{st1, st2, st3}
// Vector of measurements
measurements := []Measurement{}
// Define the special export functions
export := smd.ExportConfig{Filename: "LEO", Cosmo: false, AsCSV: true, Timestamp: false}
export.CSVAppendHdr = func() string {
hdr := "secondsSinceEpoch,"
for _, st := range stations {
hdr += fmt.Sprintf("%sRange,%sRangeRate,%sNoisyRange,%sNoisyRangeRate,", st.name, st.name, st.name, st.name)
}
return hdr[:len(hdr)-1] // Remove trailing comma
}
export.CSVAppend = func(state smd.State) string {
Δt := state.DT.Sub(startDT).Seconds()
str := fmt.Sprintf("%f,", Δt)
θgst := Δt * smd.EarthRotationRate
// Compute visibility for each station.
for _, st := range stations {
_, measurement := st.PerformMeasurement(θgst, state)
if measurement.Visible {
measurements = append(measurements, measurement)
str += measurement.CSV()
} else {
str += ",,,,"
}
}
return str[:len(str)-1] // Remove trailing comma
}
// Generate the perturbed orbit
scName := "LEO"
smd.NewPreciseMission(smd.NewEmptySC(scName, 0), leo, startDT, endDT, smd.Perturbations{Jn: 3}, 2*time.Second, false, export).Propagate()
// Take care of the measurements:
fmt.Printf("\n[INFO] Generated %d measurements\n", len(measurements))
// Let's mark those as the truth so we can plot that.
stateTruth := make([]*mat64.Vector, len(measurements))
truthMeas := make([]*mat64.Vector, len(measurements))
residuals := make([]*mat64.Vector, len(measurements))
for measNo, measurement := range measurements {
orbit := make([]float64, 6)
R, V := measurement.State.Orbit.RV()
for i := 0; i < 3; i++ {
orbit[i] = R[i]
orbit[i+3] = V[i]
}
stateTruth[measNo] = mat64.NewVector(6, orbit)
truthMeas[measNo] = measurement.StateVector()
}
truth := gokalman.NewBatchGroundTruth(stateTruth, truthMeas)
// Perturbations in the estimate
estPerts := smd.Perturbations{Jn: 2}
// Initialize the KF noise
σQx := math.Pow(10, -2*σQExponent)
var σQy, σQz float64
if !sncRIC {
σQy = σQx
σQz = σQx
}
noiseQ := mat64.NewSymDense(3, []float64{σQx, 0, 0, 0, σQy, 0, 0, 0, σQz})
noiseR := mat64.NewSymDense(2, []float64{σρ, 0, 0, σρDot})
noiseKF := gokalman.NewNoiseless(noiseQ, noiseR)
// Take care of measurements.
estHistory := make([]*gokalman.HybridKFEstimate, len(measurements))
stateHistory := make([]*mat64.Vector, len(measurements)) // Stores the histories of the orbit estimate (to post compute the truth)
estChan := make(chan (gokalman.Estimate), 1)
go processEst("hybridkf", estChan)
prevXHat := mat64.NewVector(6, nil)
prevP := mat64.NewSymDense(6, nil)
var covarDistance float64 = 50
var covarVelocity float64 = 1
for i := 0; i < 3; i++ {
prevP.SetSym(i, i, covarDistance)
prevP.SetSym(i+3, i+3, covarVelocity)
}
visibilityErrors := 0
var orbitEstimate *smd.OrbitEstimate
if smoothing {
fmt.Println("[INFO] Smoothing enabled")
}
if ekfTrigger < 0 {
fmt.Println("[WARNING] EKF disabled")
} else {
if smoothing {
fmt.Println("[ERROR] Enabling smooth has NO effect because EKF is enabled")
}
if ekfTrigger < 10 {
fmt.Println("[WARNING] EKF may be turned on too early")
} else {
fmt.Printf("[INFO] EKF will turn on after %d measurements\n", ekfTrigger)
}
}
var kf *gokalman.HybridKF
var prevStationName = ""
var prevDT time.Time
var ckfMeasNo = 0
for measNo, measurement := range measurements {
if !measurement.Visible {
panic("why is there a non visible measurement?!")
}
ΔtDuration := measurement.State.DT.Sub(prevDT)
Δt := ΔtDuration.Seconds() // Everything is in seconds.
if measNo == 0 {
prevDT = measurement.State.DT
orbitEstimate = smd.NewOrbitEstimate("estimator", measurement.State.Orbit, estPerts, measurement.State.DT, 10*time.Second)
var err error
kf, _, err = gokalman.NewHybridKF(prevXHat, prevP, noiseKF, 2)
if err != nil {
panic(fmt.Errorf("%s", err))
}
}
if !kf.EKFEnabled() && ckfMeasNo == ekfTrigger {
// Switch KF to EKF mode
kf.EnableEKF()
fmt.Printf("[INFO] #%04d EKF now enabled\n", measNo)
} else if kf.EKFEnabled() && ekfDisableTime > 0 && Δt > ekfDisableTime {
// Switch KF back to CKF mode
kf.DisableEKF()
ckfMeasNo = 0
fmt.Printf("[INFO] #%04d EKF now disabled (Δt=%s)\n", measNo, ΔtDuration)
}
if timeBasedPlot {
// Propagate and predict for each time step until next measurement.
for prevDT.Before(measurement.State.DT) {
nextDT := prevDT.Add(10 * time.Second)
orbitEstimate.PropagateUntil(nextDT) // This leads to Φ(ti+1, ti)
// Only do a prediction.
kf.Prepare(orbitEstimate.Φ, nil)
est, perr := kf.Predict()
if perr != nil {
panic(fmt.Errorf("[error] (#%04d)\n%s", measNo, perr))
}
stateEst := mat64.NewVector(6, nil)
R, V := orbitEstimate.State().Orbit.RV()
for i := 0; i < 3; i++ {
stateEst.SetVec(i, R[i])
stateEst.SetVec(i+3, V[i])
}
fmt.Printf("%s\n\n", est)
//fmt.Printf("%+v\n", mat64.Formatted(stateEst.T()))
estChan <- truth.ErrorWithOffset(measNo, est, stateEst)
prevDT = nextDT
}
continue
}
// Propagate the reference trajectory until the next measurement time.
orbitEstimate.PropagateUntil(measurement.State.DT) // This leads to Φ(ti+1, ti)
if measurement.Station.name != prevStationName {
fmt.Printf("[INFO] #%04d %s in visibility of %s (T+%s)\n", measNo, scName, measurement.Station.name, measurement.State.DT.Sub(startDT))
prevStationName = measurement.Station.name
}
// Compute "real" measurement
vis, computedObservation := measurement.Station.PerformMeasurement(measurement.θgst, orbitEstimate.State())
if !vis {
fmt.Printf("[WARNING] station %s should see the SC but does not\n", measurement.Station.name)
visibilityErrors++
}
Htilde := computedObservation.HTilde()
kf.Prepare(orbitEstimate.Φ, Htilde)
if sncEnabled {
if Δt < sncDisableTime {
if sncRIC {
// Build the RIC DCM
rUnit := unit(orbitEstimate.Orbit.R())
cUnit := unit(orbitEstimate.Orbit.H())
iUnit := unit(cross(rUnit, cUnit))
dcmVals := make([]float64, 9)
for i := 0; i < 3; i++ {
dcmVals[i] = rUnit[i]
dcmVals[i+3] = cUnit[i]
dcmVals[i+6] = iUnit[i]
}
// Update the Q matrix in the PQW
dcm := mat64.NewDense(3, 3, dcmVals)
var QECI, QECI0 mat64.Dense
QECI0.Mul(noiseQ, dcm.T())
QECI.Mul(dcm, &QECI0)
QECISym, err := gokalman.AsSymDense(&QECI)
if err != nil {
fmt.Printf("[error] QECI is not symmertric!")
panic(err)
}
kf.SetNoise(gokalman.NewNoiseless(QECISym, noiseR))
}
// Only enable SNC for small time differences between measurements.
Γtop := gokalman.ScaledDenseIdentity(3, math.Pow(Δt, 2)/2)
Γbot := gokalman.ScaledDenseIdentity(3, Δt)
Γ := mat64.NewDense(6, 3, nil)
Γ.Stack(Γtop, Γbot)
kf.PreparePNT(Γ)
}
}
estI, err := kf.Update(measurement.StateVector(), computedObservation.StateVector())
if err != nil {
panic(fmt.Errorf("[error] %s", err))
}
est := estI.(*gokalman.HybridKFEstimate)
prevXHat = est.State()
prevP = est.Covariance().(*mat64.SymDense)
stateEst := mat64.NewVector(6, nil)
R, V := orbitEstimate.State().Orbit.RV()
for i := 0; i < 3; i++ {
stateEst.SetVec(i, R[i])
stateEst.SetVec(i+3, V[i])
}
stateEst.AddVec(stateEst, est.State())
// Compute residual
residual := mat64.NewVector(2, nil)
residual.MulVec(Htilde, est.State())
residual.AddScaledVec(residual, -1, est.ObservationDev())
residual.ScaleVec(-1, residual)
residuals[measNo] = residual
if smoothing {
// Save to history in order to perform smoothing.
estHistory[measNo] = est
stateHistory[measNo] = stateEst
} else {
// Stream to CSV file
estChan <- truth.ErrorWithOffset(measNo, est, stateEst)
}
prevDT = measurement.State.DT
// If in EKF, update the reference trajectory.
if kf.EKFEnabled() {
// Update the state from the error.
state := est.State()
R, V := orbitEstimate.Orbit.RV()
for i := 0; i < 3; i++ {
R[i] += state.At(i, 0)
V[i] += state.At(i+3, 0)
}
orbitEstimate = smd.NewOrbitEstimate("estimator", *smd.NewOrbitFromRV(R, V, smd.Earth), estPerts, measurement.State.DT, 10*time.Second)
}
ckfMeasNo++
}
if smoothing {
fmt.Println("[INFO] Smoothing started")
// Perform the smoothing. First, play back all the estimates backward, and then replay the smoothed estimates forward to compute the difference.
if err := kf.SmoothAll(estHistory); err != nil {
panic(err)
}
// Replay forward
for estNo, estimate := range estHistory {
estChan <- truth.ErrorWithOffset(estNo, estimate, stateHistory[estNo])
}
fmt.Println("[INFO] Smoothing completed")
}
close(estChan)
wg.Wait()
severity := "INFO"
if visibilityErrors > 0 {
severity = "WARNING"
}
fmt.Printf("[%s] %d visibility errors\n", severity, visibilityErrors)
// Write the residuals to a CSV file
fname := "hkf"
f, err := os.Create(fmt.Sprintf("./%s-residuals.csv", fname))
if err != nil {
panic(err)
}
defer f.Close()
f.WriteString("rho,rhoDot\n")
for _, residual := range residuals {
csv := fmt.Sprintf("%f,%f\n", residual.At(0, 0), residual.At(1, 0))
if _, err := f.WriteString(csv); err != nil {
panic(err)
}
}
}
// processEst drains estChan, writing every estimate to <fn>.csv and
// accumulating the running position/velocity RMS, which is printed once
// the channel is closed.
// NOTE(review): wg.Add(1) executes inside this goroutine, so main's
// wg.Wait() can run before the Add does — a WaitGroup race. The Add
// belongs in the caller, before `go processEst(...)`; fixing it requires
// touching main, so it is only flagged here.
func processEst(fn string, estChan chan (gokalman.Estimate)) {
	wg.Add(1)
	// We also compute the RMS here.
	numMeasurements := 0
	rmsPosition := 0.0
	rmsVelocity := 0.0
	// NOTE(review): the exporter constructor error is silently discarded;
	// a nil ce would panic on Write below.
	ce, _ := gokalman.NewCustomCSVExporter([]string{"x", "y", "z", "xDot", "yDot", "zDot"}, ".", fn+".csv", 3)
	for {
		est, more := <-estChan
		if !more {
			// Channel closed by the producer: flush and signal completion.
			ce.Close()
			wg.Done()
			break
		}
		numMeasurements++
		// Accumulate squared state deviations: components 0-2 are position,
		// 3-5 are velocity.
		for i := 0; i < 3; i++ {
			rmsPosition += math.Pow(est.State().At(i, 0), 2)
			rmsVelocity += math.Pow(est.State().At(i+3, 0), 2)
		}
		ce.Write(est)
	}
	// Compute RMS. (Divides by zero — yielding NaN — if no estimates arrived.)
	rmsPosition /= float64(numMeasurements)
	rmsVelocity /= float64(numMeasurements)
	rmsPosition = math.Sqrt(rmsPosition)
	rmsVelocity = math.Sqrt(rmsVelocity)
	fmt.Printf("=== RMS ===\nPosition = %f\tVelocity = %f\n", rmsPosition, rmsVelocity)
}
package misc
// LinkCutTree represents a Link-Cut tree: a forest of rooted trees that
// supports link, cut, root, depth and LCA queries in amortized O(log n).
type LinkCutTree struct {
	// nodes holds one splay node per element, indexed by element id.
	nodes []*linkCutNode
}
// NewLinkCutTree instantiates a new Link-Cut tree of size isolated
// single-node trees, identified by ids 0..size-1.
func NewLinkCutTree(size int) *LinkCutTree {
	t := &LinkCutTree{
		nodes: make([]*linkCutNode, size),
	}
	for i := range t.nodes {
		// Allocate each node before writing to it: make([]*linkCutNode, n)
		// only creates nil pointers, and the previous implementation
		// dereferenced them (t.nodes[i].id = i), panicking immediately.
		t.nodes[i] = &linkCutNode{id: i}
		t.nodes[i].update()
	}
	return t
}
// Link will link two nodes i and j, attaching i's tree below j.
func (t *LinkCutTree) Link(i, j int) {
	t.nodes[i].link(t.nodes[j])
}
// Cut will cut i node from others: it removes the edge between node i and
// its ancestors, making i the root of its own tree.
func (t *LinkCutTree) Cut(i int) {
	t.nodes[i].cut()
}
// IsConnected returns true whether nodes i and j are connected, i.e. they
// currently belong to the same tree of the forest.
func (t *LinkCutTree) IsConnected(i, j int) bool {
	// Two nodes are connected exactly when they share a root. The previous
	// implementation compared the ids of the two nodes themselves, which is
	// only true for i == j and never detects an actual connection.
	return t.nodes[i].root() == t.nodes[j].root()
}
// Root returns a root of a node i: the id of the root of the tree that
// currently contains i.
func (t *LinkCutTree) Root(i int) int {
	return t.nodes[i].root().id
}
// Depth returns a depth of node i: the number of edges on the path from the
// root of i's tree down to i (a root has depth 0).
func (t *LinkCutTree) Depth(i int) int {
	return t.nodes[i].depth()
}
// LCA returns least common ancestor of nodes i and j, which must belong to
// the same tree.
func (t *LinkCutTree) LCA(i, j int) int {
	return t.nodes[i].lca(t.nodes[j]).id
}
// linkCutNode is a node of the auxiliary splay trees that store the
// preferred paths of the Link-Cut tree.
type linkCutNode struct {
	id int // index of this node in LinkCutTree.nodes
	size int // number of nodes in this node's splay subtree
	parent *linkCutNode // parent inside the splay tree; nil at a splay root
	pathParent *linkCutNode // link to the parent preferred path; set only at a splay root
	left, right *linkCutNode // splay-tree children (left = shallower path nodes)
}
// newNode allocates a fresh, zero-valued splay node.
func newNode() *linkCutNode {
	return &linkCutNode{}
}
// update recomputes x's splay-subtree size from its children.
func (x *linkCutNode) update() {
	size := 1
	if l := x.left; l != nil {
		size += l.size
	}
	if r := x.right; r != nil {
		size += r.size
	}
	x.size = size
}
// rotateRight rotates x up over its parent y (x must be y's left child).
// Only the demoted node y is re-sized here; the promoted node x is updated
// later by splay. The preferred-path link is moved to the new subtree root.
func (x *linkCutNode) rotateRight() {
	y := x.parent
	z := y.parent
	// y adopts x's right subtree as its new left child.
	y.left = x.right
	if y.left != nil {
		y.left.parent = y
	}
	// x becomes y's parent.
	x.right = y
	y.parent = x
	x.parent = z
	if x.parent != nil {
		// Re-attach x in the slot where y used to hang under z.
		if y == z.left {
			z.left = x
		} else {
			z.right = x
		}
	}
	// The path-parent pointer always lives on the splay root; move it to x.
	x.pathParent = y.pathParent
	y.pathParent = nil
	y.update()
}
// rotateLeft rotates x up over its parent y (x must be y's right child).
// Mirror image of rotateRight: only the demoted node y is re-sized here,
// and the preferred-path link is moved to the new subtree root x.
func (x *linkCutNode) rotateLeft() {
	y := x.parent
	z := y.parent
	// y adopts x's left subtree as its new right child.
	y.right = x.left
	if y.right != nil {
		y.right.parent = y
	}
	// x becomes y's parent.
	x.left = y
	y.parent = x
	x.parent = z
	if x.parent != nil {
		// Re-attach x in the slot where y used to hang under z.
		if y == z.left {
			z.left = x
		} else {
			z.right = x
		}
	}
	// The path-parent pointer always lives on the splay root; move it to x.
	x.pathParent = y.pathParent
	y.pathParent = nil
	y.update()
}
// splay rotates x to the root of its auxiliary splay tree using the
// standard zig / zig-zig / zig-zag cases, then refreshes x's size (each
// rotation keeps the demoted node's size current).
func (x *linkCutNode) splay() {
	for x.parent != nil {
		y := x.parent
		if y.parent == nil {
			// Zig: y is the splay root; a single rotation finishes.
			if x == y.left {
				x.rotateRight()
			} else {
				x.rotateLeft()
			}
		} else {
			z := y.parent
			if y == z.left {
				if x == y.left {
					// Zig-zig (left-left): rotate y above z, then x above y.
					y.rotateRight()
					x.rotateRight()
				} else {
					// Zig-zag (left-right).
					x.rotateLeft()
					x.rotateRight()
				}
			} else {
				if x == y.right {
					// Zig-zig (right-right): rotate y above z, then x above y.
					y.rotateLeft()
					x.rotateLeft()
				} else {
					// Zig-zag (right-left).
					x.rotateRight()
					x.rotateLeft()
				}
			}
		}
	}
	x.update()
}
// access makes the path from the root of x's represented tree down to x the
// preferred path, leaving x at the root of its splay tree with no right
// (deeper) subtree. It returns the last node whose preferred child was
// switched, which lca relies on.
func (x *linkCutNode) access() *linkCutNode {
	x.splay()
	// Detach x's deeper path suffix into its own preferred path.
	if x.right != nil {
		x.right.pathParent = x
		x.right.parent = nil
		x.right = nil
		x.update()
	}
	last := x
	// Walk up the path-parent links, splicing x's path onto each path above
	// it until x's path starts at the represented root.
	for x.pathParent != nil {
		y := x.pathParent
		last = y
		y.splay()
		// y's old deeper suffix becomes a separate preferred path.
		if y.right != nil {
			y.right.pathParent = y
			y.right.parent = nil
		}
		// Attach x's path below y and keep splaying x upward.
		y.right = x
		x.parent = y
		x.pathParent = nil
		y.update()
		x.splay()
	}
	return last
}
// root returns the root of the represented tree containing x.
func (x *linkCutNode) root() *linkCutNode {
	// After access, the root-to-x path forms one splay tree whose leftmost
	// (shallowest) node is the root.
	x.access()
	for x.left != nil {
		x = x.left
	}
	// Splay the root to keep the amortized bounds.
	x.splay()
	return x
}
// cut removes the edge between x and its parent in the represented tree,
// making x the root of its own tree. Cutting a node that is already a root
// is a no-op.
func (x *linkCutNode) cut() {
	x.access()
	// After access, x's ancestors form its left splay subtree. If there is
	// none, x is already a root; the previous implementation dereferenced
	// the nil left child here and panicked.
	if x.left == nil {
		return
	}
	x.left.parent = nil
	x.left = nil
	x.update()
}
// link attaches x below y, making y the parent of x in the represented
// forest. x is expected to be the root of its own tree (after access it
// must have no left subtree); linking a non-root would discard its
// ancestors.
func (x *linkCutNode) link(y *linkCutNode) {
	x.access()
	y.access()
	// Placing y as x's left (shallower) neighbor makes y an ancestor of x.
	x.left = y
	y.parent = x
	x.update()
}
// lca returns the least common ancestor of x and y. Both nodes must belong
// to the same tree.
func (x *linkCutNode) lca(y *linkCutNode) *linkCutNode {
	x.access()
	// Accessing y right after x returns the node of the last preferred-path
	// switch, which is the deepest common ancestor of x and y.
	return y.access()
}
// depth returns the number of edges between x and the root of its tree.
func (x *linkCutNode) depth() int {
	// After access, x's splay tree holds exactly the root-to-x path, so its
	// size is depth+1.
	x.access()
	return x.size - 1
}
package inboundmiddleware
import (
"context"
"go.uber.org/yarpc/api/middleware"
"go.uber.org/yarpc/api/transport"
)
// UnaryChain combines a series of `UnaryInbound`s into a single `InboundMiddleware`.
func UnaryChain(mw ...middleware.UnaryInbound) middleware.UnaryInbound {
	// Drop nil entries and splice nested chains into one flat list.
	flattened := make([]middleware.UnaryInbound, 0, len(mw))
	for _, m := range mw {
		switch v := m.(type) {
		case nil:
			// Skip absent middleware.
		case unaryChain:
			flattened = append(flattened, v...)
		default:
			flattened = append(flattened, m)
		}
	}
	if len(flattened) == 0 {
		return middleware.NopUnaryInbound
	}
	if len(flattened) == 1 {
		return flattened[0]
	}
	return unaryChain(flattened)
}
// unaryChain implements middleware.UnaryInbound over an ordered list of
// inbound middleware, applied head first.
type unaryChain []middleware.UnaryInbound
// Handle threads the request through every middleware in the chain before
// invoking the final handler h.
func (c unaryChain) Handle(ctx context.Context, req *transport.Request, resw transport.ResponseWriter, h transport.UnaryHandler) error {
	exec := unaryChainExec{Chain: c, Final: h}
	return exec.Handle(ctx, req, resw)
}
// unaryChainExec adapts a series of `UnaryInbound`s into a UnaryHandler.
// It is scoped to a single request to the `Handler` and is not thread-safe.
type unaryChainExec struct {
	Chain []middleware.UnaryInbound // middleware still to run, head first
	Final transport.UnaryHandler // handler invoked once the chain is exhausted
}
// Handle runs the next middleware in the chain, or the final handler once
// the chain is exhausted. The receiver is a value, so advancing Chain only
// affects this call's copy.
func (x unaryChainExec) Handle(ctx context.Context, req *transport.Request, resw transport.ResponseWriter) error {
	if len(x.Chain) == 0 {
		return x.Final.Handle(ctx, req, resw)
	}
	head, tail := x.Chain[0], x.Chain[1:]
	x.Chain = tail
	return head.Handle(ctx, req, resw, x)
}
// OnewayChain combines a series of `OnewayInbound`s into a single `InboundMiddleware`.
func OnewayChain(mw ...middleware.OnewayInbound) middleware.OnewayInbound {
	// Drop nil entries and splice nested chains into one flat list.
	flattened := make([]middleware.OnewayInbound, 0, len(mw))
	for _, m := range mw {
		switch v := m.(type) {
		case nil:
			// Skip absent middleware.
		case onewayChain:
			flattened = append(flattened, v...)
		default:
			flattened = append(flattened, m)
		}
	}
	if len(flattened) == 0 {
		return middleware.NopOnewayInbound
	}
	if len(flattened) == 1 {
		return flattened[0]
	}
	return onewayChain(flattened)
}
// onewayChain implements middleware.OnewayInbound over an ordered list of
// inbound middleware, applied head first.
type onewayChain []middleware.OnewayInbound
// HandleOneway threads the request through every middleware in the chain
// before invoking the final handler h.
func (c onewayChain) HandleOneway(ctx context.Context, req *transport.Request, h transport.OnewayHandler) error {
	exec := onewayChainExec{Chain: c, Final: h}
	return exec.HandleOneway(ctx, req)
}
// onewayChainExec adapts a series of `OnewayInbound`s into a OnewayHandler.
// It is scoped to a single request to the `Handler` and is not thread-safe.
type onewayChainExec struct {
	Chain []middleware.OnewayInbound // middleware still to run, head first
	Final transport.OnewayHandler // handler invoked once the chain is exhausted
}
// HandleOneway runs the next middleware in the chain, or the final handler
// once the chain is exhausted. The receiver is a value, so advancing Chain
// only affects this call's copy.
func (x onewayChainExec) HandleOneway(ctx context.Context, req *transport.Request) error {
	if len(x.Chain) == 0 {
		return x.Final.HandleOneway(ctx, req)
	}
	head, tail := x.Chain[0], x.Chain[1:]
	x.Chain = tail
	return head.HandleOneway(ctx, req, x)
}
// StreamChain combines a series of `StreamInbound`s into a single `InboundMiddleware`.
func StreamChain(mw ...middleware.StreamInbound) middleware.StreamInbound {
	// Drop nil entries and splice nested chains into one flat list.
	flattened := make([]middleware.StreamInbound, 0, len(mw))
	for _, m := range mw {
		switch v := m.(type) {
		case nil:
			// Skip absent middleware.
		case streamChain:
			flattened = append(flattened, v...)
		default:
			flattened = append(flattened, m)
		}
	}
	if len(flattened) == 0 {
		return middleware.NopStreamInbound
	}
	if len(flattened) == 1 {
		return flattened[0]
	}
	return streamChain(flattened)
}
// streamChain implements middleware.StreamInbound over an ordered list of
// inbound middleware, applied head first.
type streamChain []middleware.StreamInbound
// HandleStream threads the stream through every middleware in the chain
// before invoking the final handler h.
func (c streamChain) HandleStream(s *transport.ServerStream, h transport.StreamHandler) error {
	exec := streamChainExec{Chain: c, Final: h}
	return exec.HandleStream(s)
}
// streamChainExec adapts a series of `StreamInbound`s into a StreamHandler.
// It is scoped to a single request to the `Handler` and is not thread-safe.
type streamChainExec struct {
	Chain []middleware.StreamInbound // middleware still to run, head first
	Final transport.StreamHandler // handler invoked once the chain is exhausted
}
func (x streamChainExec) HandleStream(s *transport.ServerStream) error {
if len(x.Chain) == 0 {
return x.Final.HandleStream(s)
}
next := x.Chain[0]
x.Chain = x.Chain[1:]
return next.HandleStream(s, x)
} | internal/inboundmiddleware/chain.go | 0.765506 | 0.471102 | chain.go | starcoder |
package graphics
import (
"errors"
"image"
"image/draw"
"math"
"github.com/Lealen/graphics-go/graphics/interp"
)
// I is the identity Affine transform matrix.
// Applying I leaves coordinates unchanged.
var I = Affine{
	1, 0, 0,
	0, 1, 0,
	0, 0, 1,
}
// Affine is a 3x3 2D affine transform matrix.
// M(i,j) is Affine[i*3+j].
//
// Transform applies the matrix to destination pixel coordinates to find the
// source point to sample, so builders like Scale and Translate store the
// inverse of the visual transform.
type Affine [9]float64
// Mul returns the multiplication of two affine transform matrices.
func (a Affine) Mul(b Affine) Affine {
	var m Affine
	for i := 0; i < 3; i++ {
		for j := 0; j < 3; j++ {
			// Row i of a times column j of b, accumulated in k order so the
			// floating-point result matches the fully unrolled form.
			for k := 0; k < 3; k++ {
				m[i*3+j] += a[i*3+k] * b[k*3+j]
			}
		}
	}
	return m
}
// transformRGBA is the fast path of Transform for *image.RGBA images: it
// writes channel bytes directly into dst.Pix instead of going through the
// generic Set/Interp interfaces.
func (a Affine) transformRGBA(dst *image.RGBA, src *image.RGBA, i interp.RGBA) error {
	srcb := src.Bounds()
	b := dst.Bounds()
	for y := b.Min.Y; y < b.Max.Y; y++ {
		for x := b.Min.X; x < b.Max.X; x++ {
			// Map the destination pixel back into source coordinates.
			sx, sy := a.pt(x, y)
			if inBounds(srcb, sx, sy) {
				c := i.RGBA(src, sx, sy)
				// Byte offset of (x, y) in the 4-bytes-per-pixel buffer.
				off := (y-dst.Rect.Min.Y)*dst.Stride + (x-dst.Rect.Min.X)*4
				dst.Pix[off+0] = c.R
				dst.Pix[off+1] = c.G
				dst.Pix[off+2] = c.B
				dst.Pix[off+3] = c.A
			}
		}
	}
	return nil
}
// Transform applies the affine transform to src and produces dst.
// For every destination pixel, the matrix maps its center back into source
// coordinates and samples src there via the interpolator.
func (a Affine) Transform(dst draw.Image, src image.Image, i interp.Interp) error {
	if dst == nil {
		return errors.New("graphics: dst is nil")
	}
	if src == nil {
		return errors.New("graphics: src is nil")
	}
	// Fast path: operate directly on RGBA pixel buffers when possible.
	if dstRGBA, ok := dst.(*image.RGBA); ok {
		if srcRGBA, ok := src.(*image.RGBA); ok {
			if interpRGBA, ok := i.(interp.RGBA); ok {
				return a.transformRGBA(dstRGBA, srcRGBA, interpRGBA)
			}
		}
	}
	srcBounds := src.Bounds()
	dstBounds := dst.Bounds()
	for y := dstBounds.Min.Y; y < dstBounds.Max.Y; y++ {
		for x := dstBounds.Min.X; x < dstBounds.Max.X; x++ {
			sx, sy := a.pt(x, y)
			if inBounds(srcBounds, sx, sy) {
				dst.Set(x, y, i.Interp(src, sx, sy))
			}
		}
	}
	return nil
}
func inBounds(b image.Rectangle, x, y float64) bool {
if x < float64(b.Min.X) || x >= float64(b.Max.X) {
return false
}
if y < float64(b.Min.Y) || y >= float64(b.Max.Y) {
return false
}
return true
}
// pt maps the integer pixel (x0, y0) through the matrix, sampling at the
// pixel center (+0.5 on each axis).
func (a Affine) pt(x0, y0 int) (x1, y1 float64) {
	cx := float64(x0) + 0.5
	cy := float64(y0) + 0.5
	return cx*a[0] + cy*a[1] + a[2], cx*a[3] + cy*a[4] + a[5]
}
// TransformCenter applies the affine transform to src and produces dst.
// Equivalent to
//	a.CenterFit(dst, src).Transform(dst, src, i).
func (a Affine) TransformCenter(dst draw.Image, src image.Image, i interp.Interp) error {
	switch {
	case dst == nil:
		return errors.New("graphics: dst is nil")
	case src == nil:
		return errors.New("graphics: src is nil")
	}
	centered := a.CenterFit(dst.Bounds(), src.Bounds())
	return centered.Transform(dst, src, i)
}
// Scale produces a scaling transform of factors x and y.
// The reciprocals are stored because the matrix maps destination
// coordinates back to source coordinates (see Transform).
func (a Affine) Scale(x, y float64) Affine {
	return a.Mul(Affine{
		1 / x, 0, 0,
		0, 1 / y, 0,
		0, 0, 1,
	})
}
// Rotate produces a clockwise rotation transform of angle, in radians.
func (a Affine) Rotate(angle float64) Affine {
	s, c := math.Sincos(angle)
	// This is the inverse rotation matrix, since the transform maps
	// destination coordinates back to source coordinates (see Transform).
	return a.Mul(Affine{
		+c, +s, +0,
		-s, +c, +0,
		+0, +0, +1,
	})
}
// Shear produces a shear transform by the slopes x and y.
func (a Affine) Shear(x, y float64) Affine {
	// d is the determinant of the forward shear matrix [[1 x][y 1]];
	// dividing by it produces the inverse mapping applied to destination
	// coordinates (see Transform).
	d := 1 - x*y
	return a.Mul(Affine{
		+1 / d, -x / d, 0,
		-y / d, +1 / d, 0,
		0, 0, 1,
	})
}
// Translate produces a translation transform with pixel distances x and y.
// The offsets are negated because the matrix maps destination coordinates
// back to source coordinates (see Transform).
func (a Affine) Translate(x, y float64) Affine {
	return a.Mul(Affine{
		1, 0, -x,
		0, 1, -y,
		0, 0, +1,
	})
}
// Center produces the affine transform, centered around the provided point:
// coordinates are shifted so (x, y) becomes the origin, a is applied, and
// the shift is undone.
func (a Affine) Center(x, y float64) Affine {
	return I.Translate(-x, -y).Mul(a).Translate(x, y)
}
// CenterFit produces the affine transform, centered around the rectangles.
// It is equivalent to
// I.Translate(-<center of src>).Mul(a).Translate(<center of dst>)
func (a Affine) CenterFit(dst, src image.Rectangle) Affine {
dx := float64(dst.Min.X) + float64(dst.Dx())/2
dy := float64(dst.Min.Y) + float64(dst.Dy())/2
sx := float64(src.Min.X) + float64(src.Dx())/2
sy := float64(src.Min.Y) + float64(src.Dy())/2
return I.Translate(-sx, -sy).Mul(a).Translate(dx, dy)
} | graphics/affine.go | 0.776496 | 0.566978 | affine.go | starcoder |
package test
import (
	"fmt"
	"testing"

	"github.com/muecoin/multiwallet/model"
)
func ValidateTransaction(tx, expectedTx model.Transaction, t *testing.T) {
if tx.Txid != expectedTx.Txid {
t.Error("Returned invalid transaction")
}
if tx.Version != expectedTx.Version {
t.Error("Returned invalid transaction")
}
if tx.Locktime != expectedTx.Locktime {
t.Error("Returned invalid transaction")
}
if tx.Time != expectedTx.Time {
t.Error("Returned invalid transaction")
}
if tx.BlockHash != expectedTx.BlockHash {
t.Error("Returned invalid transaction")
}
if tx.BlockHeight != expectedTx.BlockHeight {
t.Error("Returned invalid transaction")
}
if tx.Confirmations != expectedTx.Confirmations {
t.Error("Returned invalid transaction")
}
if len(tx.Inputs) != 1 {
t.Error("Returned incorrect number of inputs")
return
}
if tx.Inputs[0].Txid != expectedTx.Inputs[0].Txid {
t.Error("Returned invalid transaction")
}
if tx.Inputs[0].Value != 0.04294455 {
t.Error("Returned invalid transaction")
}
if tx.Inputs[0].Satoshis != expectedTx.Inputs[0].Satoshis {
t.Error("Returned invalid transaction")
}
if tx.Inputs[0].Addr != expectedTx.Inputs[0].Addr {
t.Error("Returned invalid transaction")
}
if tx.Inputs[0].Sequence != expectedTx.Inputs[0].Sequence {
t.Error("Returned invalid transaction")
}
if tx.Inputs[0].Vout != expectedTx.Inputs[0].Vout {
t.Error("Returned invalid transaction")
}
if tx.Inputs[0].ScriptSig.Hex != expectedTx.Inputs[0].ScriptSig.Hex {
t.Error("Returned invalid transaction")
}
if len(tx.Outputs) != 2 {
t.Error("Returned incorrect number of outputs")
return
}
if tx.Outputs[0].Value != 0.01398175 {
t.Error("Returned invalid transaction")
}
if tx.Outputs[0].ScriptPubKey.Hex != expectedTx.Outputs[0].ScriptPubKey.Hex {
t.Error("Returned invalid transaction")
}
if tx.Outputs[0].ScriptPubKey.Type != expectedTx.Outputs[0].ScriptPubKey.Type {
t.Error("Returned invalid transaction")
}
if tx.Outputs[0].ScriptPubKey.Addresses[0] != expectedTx.Outputs[0].ScriptPubKey.Addresses[0] {
t.Error("Returned invalid transaction")
}
if tx.Outputs[1].Value != 0.02717080 {
t.Error("Returned invalid transaction")
}
if tx.Outputs[1].ScriptPubKey.Hex != expectedTx.Outputs[1].ScriptPubKey.Hex {
t.Error("Returned invalid transaction")
}
if tx.Outputs[1].ScriptPubKey.Type != expectedTx.Outputs[1].ScriptPubKey.Type {
t.Error("Returned invalid transaction")
}
if tx.Outputs[1].ScriptPubKey.Addresses[0] != expectedTx.Outputs[1].ScriptPubKey.Addresses[0] {
t.Error("Returned invalid transaction")
}
} | test/helper.go | 0.570331 | 0.439266 | helper.go | starcoder |
package utils
// NextIndex returns the index of the first element greater than
// currentNumber, falling back to the last index when no element qualifies
// (which is -1 for an empty slice).
func NextIndex(numbers []int, currentNumber int) int {
	for i, n := range numbers {
		if n > currentNumber {
			return i
		}
	}
	return len(numbers) - 1
}
// PrevIndex scans from the end of numbers and returns the index of the last
// element smaller than currentNumber, or 0 when there is none.
func PrevIndex(numbers []int, currentNumber int) int {
	for i := len(numbers) - 1; i >= 0; i-- {
		if numbers[i] < currentNumber {
			return i
		}
	}
	return 0
}
// NextIntInCycle returns the element that follows current in sl, wrapping
// to the first element after the last; if current is absent, the first
// element is returned. Panics on an empty slice.
func NextIntInCycle(sl []int, current int) int {
	for i, v := range sl {
		if v != current {
			continue
		}
		if i+1 < len(sl) {
			return sl[i+1]
		}
		return sl[0]
	}
	return sl[0]
}
// PrevIntInCycle returns the element that precedes current in sl, wrapping
// to the last element before the first; if current is absent, the last
// element is returned. Panics on an empty slice.
func PrevIntInCycle(sl []int, current int) int {
	for i, v := range sl {
		if v != current {
			continue
		}
		if i == 0 {
			return sl[len(sl)-1]
		}
		return sl[i-1]
	}
	return sl[len(sl)-1]
}
// StringArraysOverlap reports whether the two slices share at least one
// common element.
func StringArraysOverlap(strArrA []string, strArrB []string) bool {
	for _, a := range strArrA {
		for _, b := range strArrB {
			if a == b {
				return true
			}
		}
	}
	return false
}
// Limit returns at most limit elements of values. The result is a view of
// the original backing array, not a copy.
func Limit(values []string, limit int) []string {
	if len(values) <= limit {
		return values
	}
	return values[:limit]
}
// LimitStr truncates value to at most limit runes (not bytes), keeping
// multi-byte UTF-8 characters intact.
func LimitStr(value string, limit int) string {
	count := 0
	for byteIdx := range value {
		if count >= limit {
			return value[:byteIdx]
		}
		count++
	}
	return value
}
// MuiltiGroupBy groups the items of slice into a map, like a regular
// GroupBy, except that each item can be grouped under multiple keys: the
// callback returns a slice of keys instead of just one key.
// (Name kept as-is, typo included, for backward compatibility.)
func MuiltiGroupBy[T any, K comparable](slice []T, f func(T) []K) map[K][]T {
	result := map[K][]T{}
	for _, item := range slice {
		for _, key := range f(item) {
			// append on a missing key starts from nil, so no separate
			// "first insert" branch (and no double map lookup) is needed.
			result[key] = append(result[key], item)
		}
	}
	return result
}
package wineregdiff
import (
"encoding/hex"
"fmt"
"regexp"
"strconv"
"strings"
)
// DataType identifies the Windows registry type of a value.
type DataType int
// Registry value type ids, matching the REG_* constants in winnt.h:
const (
	// https://github.com/wine-mirror/wine/blob/e909986e6ea5ecd49b2b847f321ad89b2ae4f6f1/include/winnt.h#L5571
	DataTypeRegNone DataType = 0
	DataTypeRegSZ DataType = 1
	DataTypeRegExpandSZ DataType = 2
	DataTypeRegBinary DataType = 3
	DataTypeRegDWord DataType = 4
	DataTypeRegDwordBigEndian DataType = 5
	DataTypeRegLink DataType = 6
	DataTypeRegMultiSZ DataType = 7
)
// String returns the registry type name for t. Types without an explicit
// case (REG_NONE, REG_DWORD_BIG_ENDIAN, REG_LINK and unknown values) are
// all rendered as "REG_NONE", preserving the original behavior.
func (t DataType) String() string {
	switch t {
	case DataTypeRegSZ:
		return "REG_SZ"
	case DataTypeRegExpandSZ:
		return "REG_EXPAND_SZ"
	case DataTypeRegBinary:
		return "REG_BINARY"
	case DataTypeRegDWord:
		return "REG_DWORD"
	case DataTypeRegMultiSZ:
		return "REG_MULTI_SZ"
	}
	return "REG_NONE"
}
// Data is a typed registry value: it knows its registry type, its .reg-file
// textual form (String) and a command-oriented form (CommandString).
type Data interface {
	fmt.Stringer
	DataType() DataType
	CommandString() string
}
// Patterns for the tagged value syntaxes of .reg files:
// str(N):... for typed strings and hex(N):aa,bb,... for raw data.
var (
	stringTagPattern = regexp.MustCompile(`^str\(([0-9a-fA-F]+)\):(.+)`)
	unknownDataTagPattern = regexp.MustCompile(`^hex\(([0-9a-fA-F]+)\):(.+)`)
)
// StringData is a REG_SZ registry value.
type StringData string
// DataType reports REG_SZ.
func (d StringData) DataType() DataType {
	return DataTypeRegSZ
}
// String returns the raw string value.
func (d StringData) String() string {
	return string(d)
}
// CommandString returns the escaped form of the value.
func (d StringData) CommandString() string {
	return escapeString(string(d))
}
// ExpandStringData is a REG_EXPAND_SZ registry value (a string containing
// unexpanded environment-variable references).
type ExpandStringData string
// DataType reports REG_EXPAND_SZ.
func (d ExpandStringData) DataType() DataType {
	return DataTypeRegExpandSZ
}
// String returns the raw string value.
func (d ExpandStringData) String() string {
	return string(d)
}
// CommandString returns the escaped form of the value.
func (d ExpandStringData) CommandString() string {
	return escapeString(string(d))
}
// MultiStringData is a REG_MULTI_SZ registry value: an ordered list of
// strings.
type MultiStringData []string
// DataType reports REG_MULTI_SZ.
func (d MultiStringData) DataType() DataType {
	return DataTypeRegMultiSZ
}
// String joins the strings with a literal `\0` separator, matching the
// format ParseData splits on.
func (d MultiStringData) String() string {
	return strings.Join(d, `\0`)
}
// CommandString returns the escaped form of the joined value.
func (d MultiStringData) CommandString() string {
	return escapeString(d.String())
}
// DwordData is a REG_DWORD registry value.
type DwordData uint32
// DataType reports REG_DWORD.
func (d DwordData) DataType() DataType {
	return DataTypeRegDWord
}
// String renders the .reg form, e.g. "dword:0000002a".
func (d DwordData) String() string {
	return fmt.Sprintf("dword:%08x", uint32(d))
}
// CommandString renders the value in decimal.
func (d DwordData) CommandString() string {
	return fmt.Sprintf("%d", uint32(d))
}
// BinaryData is a REG_BINARY registry value.
type BinaryData []byte
// DataType reports REG_BINARY.
func (d BinaryData) DataType() DataType {
	return DataTypeRegBinary
}
// String renders the .reg form, e.g. "hex:0a,ff,00".
func (d BinaryData) String() string {
	return fmt.Sprintf("hex:%s", asHex(d))
}
// CommandString renders the bytes as one unseparated hex string.
func (d BinaryData) CommandString() string {
	return hex.EncodeToString(d)
}
// UnknownData holds the raw bytes of a registry value whose type has no
// dedicated representation here.
// REG_NONE, REG_EXPAND_SZ, REG_MULTI_SZ, ...
// https://github.com/wine-mirror/wine/blob/60a3e0106246cb91d598a815d4fadf2791011142/programs/reg/export.c#L200-L204
type UnknownData struct {
	dataType DataType
	Data []byte
}
// DataType reports the stored registry type.
func (d *UnknownData) DataType() DataType {
	return d.dataType
}
// String renders the .reg form, e.g. "hex(7):61,00".
func (d *UnknownData) String() string {
	return fmt.Sprintf("hex(%x):%s", int(d.DataType()), asHex(d.Data))
}
// CommandString renders the bytes as one unseparated hex string.
func (d *UnknownData) CommandString() string {
	return hex.EncodeToString(d.Data)
}
// ParseData parses the data portion of a .reg file line into a typed Data
// value, following the syntax accepted by wine's reg import:
// https://github.com/wine-mirror/wine/blob/60a3e0106246cb91d598a815d4fadf2791011142/programs/reg/import.c#L249
//
// Recognized forms: "quoted string" (REG_SZ), dword:XXXXXXXX, hex:aa,bb,...,
// str(N):... for REG_EXPAND_SZ / REG_MULTI_SZ, and hex(N):... for any other
// type.
func ParseData(s string) (Data, error) {
	// A leading double quote marks a plain REG_SZ string.
	if strings.HasPrefix(s, `"`) {
		return StringData(parseQuotedString(s)), nil
	}
	// NOTE(review): this lowercases the whole remainder, including the
	// payload of str(N):"..." values below — confirm that losing the
	// original case of such string data is intended.
	s = strings.ToLower(s)
	if strings.HasPrefix(s, "dword:") {
		d, err := strconv.ParseUint(strings.TrimPrefix(s, "dword:"), 16, 32)
		if err != nil {
			return nil, fmt.Errorf("failed to parse as REG_DWORD('%s'): %+v", s, err)
		}
		return DwordData(d), nil
	}
	if strings.HasPrefix(s, "hex:") {
		data, err := parseHex(strings.TrimPrefix(s, "hex:"))
		if err != nil {
			return nil, err
		}
		return BinaryData(data), nil
	}
	// str(N):... carries the registry type id N in hex.
	matches := stringTagPattern.FindStringSubmatch(s)
	if len(matches) > 2 {
		dt, err := strconv.ParseUint(matches[1], 16, 32)
		if err != nil {
			return nil, fmt.Errorf("failed to parse unknown type as hex('%s'): %+v", s, err)
		}
		dataType := DataType(dt)
		switch dataType {
		case DataTypeRegExpandSZ:
			data := parseQuotedString(matches[2])
			return ExpandStringData(data), nil
		case DataTypeRegMultiSZ:
			// REG_MULTI_SZ entries are separated by a literal `\0`.
			data := strings.Split(parseQuotedString(matches[2]), `\0`)
			return MultiStringData(data), nil
		}
	}
	// hex(N):... is the catch-all: raw bytes tagged with type id N.
	matches = unknownDataTagPattern.FindStringSubmatch(s)
	if len(matches) > 2 {
		dataType, err := strconv.ParseUint(matches[1], 16, 32)
		if err != nil {
			return nil, fmt.Errorf("failed to parse unknown type as hex('%s'): %+v", s, err)
		}
		data, err := parseHex(matches[2])
		if err != nil {
			return nil, err
		}
		return &UnknownData{Data: data, dataType: DataType(dataType)}, nil
	}
	return nil, fmt.Errorf("unknown input: '%s'", s)
}
// parseHex decodes a comma-separated list of hex bytes (e.g. "0a,ff") into
// a byte slice.
func parseHex(s string) ([]byte, error) {
	parts := strings.Split(s, ",")
	data := make([]byte, 0, len(parts))
	for _, p := range parts {
		v, err := strconv.ParseUint(p, 16, 8)
		if err != nil {
			return nil, fmt.Errorf("failed to parse as binary('%s'): %+v", s, err)
		}
		data = append(data, byte(v))
	}
	return data, nil
}
// asHex renders data as comma-separated two-digit hex bytes, the format
// parseHex accepts.
func asHex(data []byte) string {
	parts := make([]string, len(data))
	for i, b := range data {
		parts[i] = fmt.Sprintf("%02x", b)
	}
	return strings.Join(parts, ",")
}
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// AssignmentFilterSupportedProperty represents the information about the property which is supported in crafting the rule of AssignmentFilter.
// All accessors are nil-receiver safe.
type AssignmentFilterSupportedProperty struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // The data type of the property.
    dataType *string
    // Indicates whether the property is a collection type or not.
    isCollection *bool
    // Name of the property.
    name *string
    // Regex string to do validation on the property value.
    propertyRegexConstraint *string
    // List of all supported operators on this property.
    supportedOperators []AssignmentFilterOperator
    // List of all supported values for this propery, empty if everything is supported.
    supportedValues []string
}
// NewAssignmentFilterSupportedProperty instantiates a new assignmentFilterSupportedProperty and sets the default values.
func NewAssignmentFilterSupportedProperty()(*AssignmentFilterSupportedProperty) {
    m := &AssignmentFilterSupportedProperty{}
    m.SetAdditionalData(map[string]interface{}{})
    return m
}
// CreateAssignmentFilterSupportedPropertyFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// There are no subtypes to discriminate between, so the base model is always returned.
func CreateAssignmentFilterSupportedPropertyFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewAssignmentFilterSupportedProperty(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *AssignmentFilterSupportedProperty) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetDataType gets the dataType property value. The data type of the property.
func (m *AssignmentFilterSupportedProperty) GetDataType()(*string) {
    if m == nil {
        return nil
    }
    return m.dataType
}
// GetFieldDeserializers the deserialization information for the current model:
// a map from JSON field name to a function that reads that field from a
// parse node into the receiver.
func (m *AssignmentFilterSupportedProperty) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    fields := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    fields["dataType"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        v, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if v != nil {
            m.SetDataType(v)
        }
        return nil
    }
    fields["isCollection"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        v, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if v != nil {
            m.SetIsCollection(v)
        }
        return nil
    }
    fields["name"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        v, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if v != nil {
            m.SetName(v)
        }
        return nil
    }
    fields["propertyRegexConstraint"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        v, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if v != nil {
            m.SetPropertyRegexConstraint(v)
        }
        return nil
    }
    fields["supportedOperators"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        raw, err := n.GetCollectionOfEnumValues(ParseAssignmentFilterOperator)
        if err != nil {
            return err
        }
        if raw != nil {
            // Unbox the []interface{} of enum pointers into a typed slice.
            ops := make([]AssignmentFilterOperator, len(raw))
            for idx, item := range raw {
                ops[idx] = *(item.(*AssignmentFilterOperator))
            }
            m.SetSupportedOperators(ops)
        }
        return nil
    }
    fields["supportedValues"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        raw, err := n.GetCollectionOfPrimitiveValues("string")
        if err != nil {
            return err
        }
        if raw != nil {
            // Unbox the []interface{} of string pointers into a []string.
            vals := make([]string, len(raw))
            for idx, item := range raw {
                vals[idx] = *(item.(*string))
            }
            m.SetSupportedValues(vals)
        }
        return nil
    }
    return fields
}
// GetIsCollection gets the isCollection property value. Indicates whether the property is a collection type or not.
func (m *AssignmentFilterSupportedProperty) GetIsCollection()(*bool) {
    if m == nil {
        return nil
    }
    return m.isCollection
}
// GetName gets the name property value. Name of the property.
func (m *AssignmentFilterSupportedProperty) GetName()(*string) {
    if m == nil {
        return nil
    }
    return m.name
}
// GetPropertyRegexConstraint gets the propertyRegexConstraint property value. Regex string to do validation on the property value.
func (m *AssignmentFilterSupportedProperty) GetPropertyRegexConstraint()(*string) {
    if m == nil {
        return nil
    }
    return m.propertyRegexConstraint
}
// GetSupportedOperators gets the supportedOperators property value. List of all supported operators on this property.
func (m *AssignmentFilterSupportedProperty) GetSupportedOperators()([]AssignmentFilterOperator) {
    if m == nil {
        return nil
    }
    return m.supportedOperators
}
// GetSupportedValues gets the supportedValues property value. List of all supported values for this propery, empty if everything is supported.
func (m *AssignmentFilterSupportedProperty) GetSupportedValues()([]string) {
    if m == nil {
        return nil
    }
    return m.supportedValues
}
// Serialize serializes information the current object: scalar fields are
// always written, collections only when non-nil, and the additional-data
// bag is written last.
func (m *AssignmentFilterSupportedProperty) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    if err := writer.WriteStringValue("dataType", m.GetDataType()); err != nil {
        return err
    }
    if err := writer.WriteBoolValue("isCollection", m.GetIsCollection()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("name", m.GetName()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("propertyRegexConstraint", m.GetPropertyRegexConstraint()); err != nil {
        return err
    }
    if ops := m.GetSupportedOperators(); ops != nil {
        if err := writer.WriteCollectionOfStringValues("supportedOperators", SerializeAssignmentFilterOperator(ops)); err != nil {
            return err
        }
    }
    if vals := m.GetSupportedValues(); vals != nil {
        if err := writer.WriteCollectionOfStringValues("supportedValues", vals); err != nil {
            return err
        }
    }
    if err := writer.WriteAdditionalData(m.GetAdditionalData()); err != nil {
        return err
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *AssignmentFilterSupportedProperty) SetAdditionalData(value map[string]interface{})() {
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetDataType sets the dataType property value. The data type of the property.
func (m *AssignmentFilterSupportedProperty) SetDataType(value *string)() {
    if m == nil {
        return
    }
    m.dataType = value
}
// SetIsCollection sets the isCollection property value. Indicates whether the property is a collection type or not.
func (m *AssignmentFilterSupportedProperty) SetIsCollection(value *bool)() {
    if m == nil {
        return
    }
    m.isCollection = value
}
// SetName sets the name property value. Name of the property.
func (m *AssignmentFilterSupportedProperty) SetName(value *string)() {
    if m == nil {
        return
    }
    m.name = value
}
// SetPropertyRegexConstraint sets the propertyRegexConstraint property value. Regex string to do validation on the property value.
func (m *AssignmentFilterSupportedProperty) SetPropertyRegexConstraint(value *string)() {
    if m == nil {
        return
    }
    m.propertyRegexConstraint = value
}
// SetSupportedOperators sets the supportedOperators property value. List of all supported operators on this property.
func (m *AssignmentFilterSupportedProperty) SetSupportedOperators(value []AssignmentFilterOperator)() {
    if m == nil {
        return
    }
    m.supportedOperators = value
}
// SetSupportedValues sets the supportedValues property value. List of all supported values for this propery, empty if everything is supported.
func (m *AssignmentFilterSupportedProperty) SetSupportedValues(value []string)() {
if m != nil {
m.supportedValues = value
}
} | models/assignment_filter_supported_property.go | 0.705075 | 0.501404 | assignment_filter_supported_property.go | starcoder |
package pgmodel
import (
"fmt"
"github.com/prometheus/prometheus/prompb"
)
const (
	// metricNameLabelName is the Prometheus label that carries the metric name.
	metricNameLabelName = "__name__"
)
var (
	// errNoMetricName is returned when a series has no __name__ label.
	errNoMetricName = fmt.Errorf("metric name missing")
)
// SeriesID represents a globally unique id for the series. This should be equivalent
// to the PostgreSQL type in the series table (currently BIGINT).
type SeriesID int64
// inserter is responsible for inserting label, series and data into the storage.
type inserter interface {
	InsertNewData(newSeries []seriesWithCallback, rows map[string][]*samplesInfo) (uint64, error)
	Close()
}
// seriesWithCallback pairs a not-yet-inserted series with a callback that is
// invoked once the database has assigned the series an id.
type seriesWithCallback struct {
	Series Labels
	Callback func(l Labels, id SeriesID) error
}
// Cache provides a caching mechanism for labels and series.
type Cache interface {
	GetSeries(lset Labels) (SeriesID, error)
	SetSeries(lset Labels, id SeriesID) error
}
// samplesInfo groups the samples of one series with that series' id.
type samplesInfo struct {
	seriesID SeriesID // zero until the series id is known
	samples []prompb.Sample
}
// DBIngestor ingest the TimeSeries data into Timescale database.
type DBIngestor struct {
	cache Cache // series id cache
	db inserter // storage backend
}
// Ingest transforms and ingests the timeseries data into Timescale database.
// It returns the number of rows inserted and errors if that count does not
// match the number of samples parsed.
func (i *DBIngestor) Ingest(tts []prompb.TimeSeries) (uint64, error) {
	newSeries, data, totalRows, err := i.parseData(tts)
	if err != nil {
		return 0, err
	}
	rowsInserted, err := i.db.InsertNewData(newSeries, data)
	if err != nil {
		return rowsInserted, err
	}
	if int(rowsInserted) != totalRows {
		return rowsInserted, fmt.Errorf("Failed to insert all the data! Expected: %d, Got: %d", totalRows, rowsInserted)
	}
	return rowsInserted, nil
}
// parseData splits the incoming time series into (a) series not yet known
// to the cache, each paired with a callback that records its id once
// assigned, and (b) samples grouped by metric name. It also returns the
// total number of samples seen.
func (i *DBIngestor) parseData(tts []prompb.TimeSeries) ([]seriesWithCallback, map[string][]*samplesInfo, int, error) {
	var seriesToInsert []seriesWithCallback
	dataSamples := make(map[string][]*samplesInfo)
	rows := 0
	for _, t := range tts {
		// Series without samples contribute nothing.
		if len(t.Samples) == 0 {
			continue
		}
		seriesLabels, metricName, err := labelProtosToLabels(t.Labels)
		if err != nil {
			return nil, nil, rows, err
		}
		if metricName == "" {
			return nil, nil, rows, errNoMetricName
		}
		var seriesID SeriesID
		newSeries := false
		seriesID, err = i.cache.GetSeries(seriesLabels)
		if err != nil {
			if err != ErrEntryNotFound {
				return nil, nil, rows, err
			}
			// A cache miss only means the series is new, not a failure.
			newSeries = true
		}
		sample := samplesInfo{
			seriesID,
			t.Samples,
		}
		rows += len(t.Samples)
		if newSeries {
			// The callback closes over this iteration's sample (which is
			// also what dataSamples points to), so the real series id can be
			// patched in and cached once the database assigns it.
			seriesToInsert = append(seriesToInsert, seriesWithCallback{
				Series: seriesLabels,
				Callback: func(l Labels, id SeriesID) error {
					sample.seriesID = id
					return i.cache.SetSeries(l, id)
				},
			})
		}
		dataSamples[metricName] = append(dataSamples[metricName], &sample)
	}
	return seriesToInsert, dataSamples, rows, nil
}
// Close closes the ingestor
// by releasing the underlying database inserter.
func (i *DBIngestor) Close() {
	i.db.Close()
}
package stdutil
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
)
// AnyToString - convert any variable to string.
//
// Conversion rules:
//   - nil, nil typed pointers, and unsupported types yield ""
//   - signed/unsigned integers are rendered in base 10
//   - floats are rendered with fmt's %f verb
//   - bools become "true"/"false"
//   - time.Time is rendered as RFC3339 wrapped in single quotes
//   - pointers are dereferenced and converted like their element value
//
// The original implementation dereferenced pointer arguments without a
// nil check, so a typed nil pointer (which does not compare equal to the
// untyped nil interface) caused a panic; it now yields "".
func AnyToString(value interface{}) string {
	if value == nil {
		return ""
	}
	switch t := value.(type) {
	case string:
		return t
	case int:
		return strconv.FormatInt(int64(t), 10)
	case int8:
		return strconv.FormatInt(int64(t), 10)
	case int16:
		return strconv.FormatInt(int64(t), 10)
	case int32:
		return strconv.FormatInt(int64(t), 10)
	case int64:
		return strconv.FormatInt(t, 10)
	case uint:
		return strconv.FormatUint(uint64(t), 10)
	case uint8:
		return strconv.FormatUint(uint64(t), 10)
	case uint16:
		return strconv.FormatUint(uint64(t), 10)
	case uint32:
		return strconv.FormatUint(uint64(t), 10)
	case uint64:
		return strconv.FormatUint(t, 10)
	case float32:
		return fmt.Sprintf("%f", t)
	case float64:
		return fmt.Sprintf("%f", t)
	case bool:
		if t {
			return "true"
		}
		return "false"
	case time.Time:
		return "'" + t.Format(time.RFC3339) + "'"
	case *string:
		if t == nil {
			return ""
		}
		return *t
	case *int:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *int8:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *int16:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *int32:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *int64:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *uint:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *uint8:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *uint16:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *uint32:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *uint64:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *float32:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *float64:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *bool:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	case *time.Time:
		if t == nil {
			return ""
		}
		return AnyToString(*t)
	}
	return ""
}
// Itos is a shortcut to AnyToString
// ("interface to string"); see AnyToString for the conversion rules.
func Itos(value interface{}) string {
	return AnyToString(value)
}
// IntToInterfaceArray wraps a single int in a one-element interface slice.
func IntToInterfaceArray(values int) []interface{} {
	return []interface{}{values}
}
// IsNumeric reports whether s parses as a 64-bit floating point number.
func IsNumeric(s string) bool {
	if _, err := strconv.ParseFloat(s, 64); err != nil {
		return false
	}
	return true
}
// NameValuesToInterfaceArray collects the Value of every pair into an
// interface slice, preserving pair order.
func NameValuesToInterfaceArray(values NameValues) []interface{} {
	args := make([]interface{}, 0, len(values.Pair))
	for _, pair := range values.Pair {
		args = append(args, pair.Value)
	}
	return args
}
// NameValuesToValidationExpressionArray converts each name/value pair into
// a ValidationExpression using the equality operator, preserving order.
func NameValuesToValidationExpressionArray(values NameValues) []ValidationExpression {
	args := make([]ValidationExpression, len(values.Pair))
	for i, pair := range values.Pair {
		args[i].Name = AnyToString(pair.Name)
		args[i].Value = AnyToString(pair.Value)
		args[i].Operator = `=`
	}
	return args
}
// InterpolateString - Interpolate string with the name value pairs.
//
// Every placeholder of the form ${name} in base is replaced with the value
// of the pair whose lowercased name matches; placeholders with no matching
// pair are replaced with "0". Returns the interpolated string and the
// replacement values in placeholder order.
func InterpolateString(base string, keyValues NameValues) (string, []interface{}) {
	retstr := base
	hasmatch := false
	pattern := `\$\{(\w*)\}` //search for ${*}
	re := regexp.MustCompile(pattern)
	matches := re.FindAllString(base, -1)
	retif := make([]interface{}, len(matches))
	for i, match := range matches {
		hasmatch = false
		for _, vs := range keyValues.Pair {
			// NOTE(review): the pair name is lowercased but the placeholder
			// text is not, so only all-lowercase placeholders can match.
			n := strings.ToLower(vs.Name)
			v := vs.Value
			if match == `${`+n+`}` {
				retstr = strings.Replace(retstr, match, AnyToString(v), -1)
				retif[i] = v
				hasmatch = true
				break
			}
		}
		/* The matches needs to have a default value */
		if !hasmatch {
			retstr = strings.Replace(retstr, match, "0", -1)
			retif[i] = "0" //a string 0 would cater to both string and number columns
		}
	}
	return retstr, retif
}
// emailRegexp is compiled once at package initialization; the previous
// implementation recompiled the pattern on every ValidateEmail call.
var emailRegexp = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")

// ValidateEmail - validate an e-mail address
func ValidateEmail(email string) bool {
	return emailRegexp.MatchString(email)
}
// SortByKeyArray - reorder keys and values based on a keyOrder array sequence.
//
// Pairs whose names do not appear (case-insensitively) in keyOrder are
// dropped; an empty keyOrder yields an empty result.
func SortByKeyArray(values *NameValues, keyOrder *[]string) NameValues {
	ret := NameValues{}
	ret.Pair = make([]NameValue, 0)
	//If keyorder was specified, the order of keys will be sorted according to the specifications
	ko := *keyOrder
	for _, key := range ko {
		for _, v := range values.Pair {
			// EqualFold avoids allocating two lowercased copies per
			// comparison (the original used ToLower == ToLower).
			if strings.EqualFold(key, v.Name) {
				ret.Pair = append(ret.Pair, v)
				break
			}
		}
	}
	return ret
}
// StripEndingForwardSlash trims surrounding whitespace, normalizes every
// backslash to a forward slash, and removes a single trailing slash.
func StripEndingForwardSlash(value string) string {
	normalized := strings.ReplaceAll(strings.TrimSpace(value), `\`, `/`)
	return strings.TrimSuffix(normalized, `/`)
}
// StripTrailing truncates value to at most length characters (bytes),
// returning it unchanged when it is already short enough.
func StripTrailing(value string, length int) string {
	if len(value) <= length {
		return value
	}
	return value[:length]
}
// StripLeading drops the first offset characters (bytes) from value,
// returning it unchanged when it is not longer than offset.
func StripLeading(value string, offset int) string {
	if len(value) <= offset {
		return value
	}
	return value[offset:]
}
// NewString returns a pointer to a freshly allocated string holding initial.
func NewString(initial string) (init *string) {
	v := initial
	return &v
}
// NewByte returns a pointer to a freshly allocated byte holding initial.
func NewByte(initial byte) (init *byte) {
	v := initial
	return &v
}
// NewInt returns a pointer to a freshly allocated int holding initial.
func NewInt(initial int) (init *int) {
	v := initial
	return &v
}
// NewInt32 returns a pointer to a freshly allocated int32 holding initial.
func NewInt32(initial int32) (init *int32) {
	v := initial
	return &v
}
// NewInt64 returns a pointer to a freshly allocated int64 holding initial.
func NewInt64(initial int64) (init *int64) {
	v := initial
	return &v
}
// NewBool returns a pointer to a freshly allocated bool holding initial.
func NewBool(initial bool) (init *bool) {
	v := initial
	return &v
}
// NewFloat32 returns a pointer to a freshly allocated float32 holding initial.
func NewFloat32(initial float32) (init *float32) {
	v := initial
	return &v
}
// NewFloat64 returns a pointer to a freshly allocated float64 holding initial.
func NewFloat64(initial float64) (init *float64) {
	v := initial
	return &v
}
// NewTime initializes a time.Time pointer with an initial value
func NewTime(initial *time.Time) (init *time.Time) {
init = new(time.Time)
if initial != nil {
init = initial
}
return
} | stdutil.go | 0.677367 | 0.453141 | stdutil.go | starcoder |
package validation
const (
	// JSONSchemaTransformDeclarations is the JSON Schema (draft-07)
	// document used to validate the "transform_declarations" section of an
	// omniparser schema. The "custom_parse" transform kind is retained
	// only for backward compatibility and is marked deprecated in favor of
	// "custom_func" throughout.
	JSONSchemaTransformDeclarations =
		`
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "$id": "github.com/jf-tech/omniparser:transform_declarations",
  "title": "omniparser schema: transform_declarations",
  "type": "object",
  "properties": {
    "transform_declarations": {
      "type": "object",
      "properties": {
        "FINAL_OUTPUT": {
          "oneOf": [
            { "$ref": "#/definitions/const" },
            { "$ref": "#/definitions/external" },
            { "$ref": "#/definitions/field" },
            { "$ref": "#/definitions/object" },
            { "$ref": "#/definitions/custom_func" },
            { "$ref": "#/definitions/custom_parse", "$comment": "Deprecated. Use custom_func." },
            { "$ref": "#/definitions/array" },
            { "$ref": "#/definitions/template" }
          ]
        }
      },
      "patternProperties": {
        "^[_a-zA-Z0-9]+$": {
          "oneOf": [
            { "$ref": "#/definitions/const" },
            { "$ref": "#/definitions/external" },
            { "$ref": "#/definitions/field" },
            { "$ref": "#/definitions/object" },
            { "$ref": "#/definitions/custom_func" },
            { "$ref": "#/definitions/custom_parse", "$comment": "Deprecated. Use custom_func." },
            { "$ref": "#/definitions/array" },
            { "$ref": "#/definitions/template" }
          ]
        }
      },
      "required": [ "FINAL_OUTPUT" ],
      "additionalProperties": false
    }
  },
  "required": [ "transform_declarations" ],
  "definitions": {
    "value_comment": { "type": "string" },
    "value_no_trim": { "type": "boolean" },
    "value_ignore_error": { "type": "boolean" },
    "value_keep_empty_or_null": { "type": "boolean" },
    "value_name": {
      "type": "string",
      "minLength": 1,
      "$comment": "name can not be empty string"
    },
    "value_const": {
      "type": "string",
      "$comment": "const can be empty string"
    },
    "value_external": {
      "type": "string",
      "minLength": 1,
      "$comment": "external can not be empty string"
    },
    "value_xpath": {
      "type": "string",
      "minLength": 1,
      "$comment": "xpath can not be empty string"
    },
    "value_xpath_dynamic": {
      "type": "object",
      "items": {
        "oneOf": [
          { "$ref": "#/definitions/const" },
          { "$ref": "#/definitions/external" },
          { "$ref": "#/definitions/field" },
          { "$ref": "#/definitions/custom_func" },
          { "$ref": "#/definitions/custom_parse", "$comment": "Deprecated. Use custom_func." },
          { "$ref": "#/definitions/template" }
        ]
      }
    },
    "value_template": {
      "type": "string",
      "minLength": 1,
      "$comment": "template can not be empty string"
    },
    "value_object": {
      "type": "object",
      "patternProperties": {
        "^.+$": {
          "oneOf": [
            { "$ref": "#/definitions/const" },
            { "$ref": "#/definitions/external" },
            { "$ref": "#/definitions/field" },
            { "$ref": "#/definitions/object" },
            { "$ref": "#/definitions/custom_func" },
            { "$ref": "#/definitions/custom_parse", "$comment": "Deprecated. Use custom_func." },
            { "$ref": "#/definitions/array" },
            { "$ref": "#/definitions/template" }
          ],
          "$comment": "object's field can be any kind of transform"
        }
      },
      "additionalProperties": false
    },
    "value_custom_func": {
      "type": "object",
      "properties": {
        "name": { "$ref": "#/definitions/value_name" },
        "args": {
          "type": "array",
          "items": {
            "oneOf": [
              { "$ref": "#/definitions/const" },
              { "$ref": "#/definitions/external" },
              { "$ref": "#/definitions/field" },
              { "$ref": "#/definitions/custom_func" },
              { "$ref": "#/definitions/custom_parse", "$comment": "Deprecated. Use custom_func." },
              { "$ref": "#/definitions/array" },
              { "$ref": "#/definitions/template" }
            ]
          },
          "$comment": "args length can be 0"
        },
        "ignore_error": { "$ref": "#/definitions/value_ignore_error" }
      },
      "required": [ "name" ],
      "additionalProperties": false
    },
    "value_custom_parse": {
      "type": "string",
      "minLength": 1,
      "$comment": "custom_parse can not be empty string. Deprecated."
    },
    "value_type": {
      "type": "string",
      "enum": [
        "boolean",
        "float",
        "int",
        "string"
      ]
    },
    "const": {
      "type": "object",
      "properties": {
        "const": { "$ref": "#/definitions/value_const" },
        "type": { "$ref": "#/definitions/value_type" },
        "no_trim": { "$ref": "#/definitions/value_no_trim" },
        "keep_empty_or_null": { "$ref": "#/definitions/value_keep_empty_or_null" },
        "_comment": { "$ref": "#/definitions/value_comment" }
      },
      "required": [ "const" ],
      "additionalProperties": false
    },
    "external": {
      "type": "object",
      "properties": {
        "external": { "$ref": "#/definitions/value_external" },
        "type": { "$ref": "#/definitions/value_type" },
        "no_trim": { "$ref": "#/definitions/value_no_trim" },
        "keep_empty_or_null": { "$ref": "#/definitions/value_keep_empty_or_null" },
        "_comment": { "$ref": "#/definitions/value_comment" }
      },
      "required": [ "external" ],
      "additionalProperties": false
    },
    "field": {
      "type": "object",
      "properties": {
        "xpath": { "$ref": "#/definitions/value_xpath" },
        "xpath_dynamic": { "$ref": "#/definitions/value_xpath_dynamic" },
        "type": { "$ref": "#/definitions/value_type" },
        "no_trim": { "$ref": "#/definitions/value_no_trim" },
        "keep_empty_or_null": { "$ref": "#/definitions/value_keep_empty_or_null" },
        "_comment": { "$ref": "#/definitions/value_comment" }
      },
      "additionalProperties": false
    },
    "object": {
      "type": "object",
      "properties": {
        "xpath": { "$ref": "#/definitions/value_xpath" },
        "xpath_dynamic": { "$ref": "#/definitions/value_xpath_dynamic" },
        "object": { "$ref": "#/definitions/value_object" },
        "keep_empty_or_null": { "$ref": "#/definitions/value_keep_empty_or_null" },
        "_comment": { "$ref": "#/definitions/value_comment" }
      },
      "required": [ "object" ],
      "additionalProperties": false
    },
    "array": {
      "type": "object",
      "properties": {
        "array": {
          "type": "array",
          "items": {
            "oneOf": [
              { "$ref": "#/definitions/const" },
              { "$ref": "#/definitions/external" },
              { "$ref": "#/definitions/field" },
              { "$ref": "#/definitions/object" },
              { "$ref": "#/definitions/custom_func" },
              { "$ref": "#/definitions/custom_parse", "$comment": "Deprecated. Use custom_func." },
              { "$ref": "#/definitions/template" }
            ],
            "$comment": "array's element can be any kind of transform, except array. might support in the future, but not now"
          }
        },
        "keep_empty_or_null": { "$ref": "#/definitions/value_keep_empty_or_null" },
        "_comment": { "$ref": "#/definitions/value_comment" }
      },
      "required": [ "array" ],
      "additionalProperties": false
    },
    "template": {
      "type": "object",
      "properties": {
        "xpath": { "$ref": "#/definitions/value_xpath" },
        "xpath_dynamic": { "$ref": "#/definitions/value_xpath_dynamic" },
        "template": { "$ref": "#/definitions/value_template" },
        "_comment": { "$ref": "#/definitions/value_comment" }
      },
      "required": [ "template" ],
      "additionalProperties": false
    },
    "custom_func": {
      "type": "object",
      "properties": {
        "xpath": { "$ref": "#/definitions/value_xpath" },
        "xpath_dynamic": { "$ref": "#/definitions/value_xpath_dynamic" },
        "custom_func": { "$ref": "#/definitions/value_custom_func" },
        "type": { "$ref": "#/definitions/value_type" },
        "no_trim": { "$ref": "#/definitions/value_no_trim" },
        "keep_empty_or_null": { "$ref": "#/definitions/value_keep_empty_or_null" },
        "_comment": { "$ref": "#/definitions/value_comment" }
      },
      "required": [ "custom_func" ],
      "additionalProperties": false
    },
    "custom_parse": {
      "type": "object",
      "properties": {
        "xpath": { "$ref": "#/definitions/value_xpath" },
        "xpath_dynamic": { "$ref": "#/definitions/value_xpath_dynamic" },
        "custom_parse": { "$ref": "#/definitions/value_custom_parse" },
        "type": { "$ref": "#/definitions/value_type" },
        "no_trim": { "$ref": "#/definitions/value_no_trim" },
        "keep_empty_or_null": { "$ref": "#/definitions/value_keep_empty_or_null" },
        "_comment": { "$ref": "#/definitions/value_comment" }
      },
      "required": [ "custom_parse" ],
      "additionalProperties": false,
      "$comment": "Deprecated. Use custom_func."
    }
  }
}
`
)
package tables
import (
"fmt"
"github.com/sudachen/go-ml/fu"
"github.com/sudachen/go-ml/fu/lazy"
"reflect"
)
func equalf(c interface{}) func(v reflect.Value) bool {
vc := reflect.ValueOf(c)
switch vc.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
vv := vc.Int()
return func(v reflect.Value) bool {
switch v.Kind() {
case reflect.Float64, reflect.Float32:
return int64(v.Float()) == vv
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return int64(v.Uint()) == vv
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == vv
}
return false
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
vv := vc.Uint()
return func(v reflect.Value) bool {
switch v.Kind() {
case reflect.Float64, reflect.Float32:
return uint64(v.Float()) == vv
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return uint64(v.Uint()) == vv
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Uint() == vv
}
return false
}
case reflect.String:
vv := vc.String()
return func(v reflect.Value) bool {
switch v.Kind() {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return fmt.Sprintf("%d", v.Uint()) == vv
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return fmt.Sprintf("%d", v.Int()) == vv
case reflect.String:
return vv == v.String()
}
return false
}
default:
return func(v reflect.Value) bool {
return reflect.DeepEqual(v, vc)
}
}
}
// lessf builds a predicate that reports whether a reflected value is
// strictly less than the constant c. Non-numeric, non-string constants
// fall back to fu.Less.
//
// c may be supplied plain or pre-wrapped in a reflect.Value (unwrapped
// here; the IfXx callers used to pass the wrapper, defeating the Kind
// dispatch below).
func lessf(c interface{}) func(v reflect.Value) bool {
	vc, ok := c.(reflect.Value)
	if !ok {
		vc = reflect.ValueOf(c)
	}
	switch vc.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		vv := vc.Int()
		return func(v reflect.Value) bool {
			switch v.Kind() {
			case reflect.Float64, reflect.Float32:
				return int64(v.Float()) < vv
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				return int64(v.Uint()) < vv
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				return v.Int() < vv
			}
			return false
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		vv := vc.Uint()
		return func(v reflect.Value) bool {
			switch v.Kind() {
			case reflect.Float64, reflect.Float32:
				return uint64(v.Float()) < vv
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				return v.Uint() < vv
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				// The original called v.Uint() on a signed-integer kind
				// (panic). A negative value is below any unsigned constant.
				return v.Int() < 0 || uint64(v.Int()) < vv
			}
			return false
		}
	case reflect.String:
		vv := vc.String()
		return func(v reflect.Value) bool {
			if v.Kind() == reflect.String {
				// The original tested vv < v.String(), i.e. c < v, which
				// inverted the comparison relative to the numeric cases.
				return v.String() < vv
			}
			return false
		}
	default:
		return func(v reflect.Value) bool {
			return fu.Less(v, vc)
		}
	}
}
// greatf builds a predicate that reports whether a reflected value is
// strictly greater than the constant c. Non-numeric, non-string constants
// fall back to fu.Less with the operands reversed.
//
// c may be supplied plain or pre-wrapped in a reflect.Value (unwrapped
// here; the IfXx callers used to pass the wrapper, defeating the Kind
// dispatch below).
func greatf(c interface{}) func(v reflect.Value) bool {
	vc, ok := c.(reflect.Value)
	if !ok {
		vc = reflect.ValueOf(c)
	}
	switch vc.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		vv := vc.Int()
		return func(v reflect.Value) bool {
			switch v.Kind() {
			case reflect.Float64, reflect.Float32:
				return int64(v.Float()) > vv
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				return int64(v.Uint()) > vv
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				return v.Int() > vv
			}
			return false
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		vv := vc.Uint()
		return func(v reflect.Value) bool {
			switch v.Kind() {
			case reflect.Float64, reflect.Float32:
				return uint64(v.Float()) > vv
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				return v.Uint() > vv
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				// The original called v.Uint() on a signed-integer kind
				// (panic). A negative value never exceeds an unsigned constant.
				return v.Int() >= 0 && uint64(v.Int()) > vv
			}
			return false
		}
	case reflect.String:
		vv := vc.String()
		return func(v reflect.Value) bool {
			// The original guard was v.Kind() > reflect.String — an
			// obvious typo for equality that matched Struct and
			// UnsafePointer kinds instead of strings.
			if v.Kind() == reflect.String {
				return vv < v.String()
			}
			return false
		}
	default:
		return func(v reflect.Value) bool {
			return fu.Less(vc, v)
		}
	}
}
func (zf Lazy) IfEq(c string, v interface{}) Lazy {
vf := reflect.ValueOf(v)
eq := equalf(vf)
return func() lazy.Stream {
z := zf()
nx := fu.AtomicSingleIndex{}
return func(index uint64) (v reflect.Value, err error) {
if v, err = z(index); err != nil || v.Kind() == reflect.Bool {
return
}
lr := v.Interface().(fu.Struct)
j, ok := nx.Get()
if !ok {
j, _ = nx.Set(lr.Pos(c))
}
if eq(lr.ValueAt(j)) {
return
}
return fu.True, nil
}
}
}
func (zf Lazy) IfNe(c string, v interface{}) Lazy {
vf := reflect.ValueOf(v)
eq := equalf(vf)
return func() lazy.Stream {
z := zf()
nx := fu.AtomicSingleIndex{}
return func(index uint64) (v reflect.Value, err error) {
if v, err = z(index); err != nil || v.Kind() == reflect.Bool {
return
}
lr := v.Interface().(fu.Struct)
j, ok := nx.Get()
if !ok {
j, _ = nx.Set(lr.Pos(c))
}
if !eq(lr.ValueAt(j)) {
return
}
return fu.True, nil
}
}
}
func (zf Lazy) IfLt(c string, v interface{}) Lazy {
vf := reflect.ValueOf(v)
lt := lessf(vf)
return func() lazy.Stream {
z := zf()
nx := fu.AtomicSingleIndex{}
return func(index uint64) (v reflect.Value, err error) {
if v, err = z(index); err != nil || v.Kind() == reflect.Bool {
return
}
lr := v.Interface().(fu.Struct)
j, ok := nx.Get()
if !ok {
j, _ = nx.Set(lr.Pos(c))
}
if lt(lr.ValueAt(j)) {
return
}
return fu.True, nil
}
}
}
func (zf Lazy) IfGt(c string, v interface{}) Lazy {
vf := reflect.ValueOf(v)
gt := greatf(vf)
return func() lazy.Stream {
z := zf()
nx := fu.AtomicSingleIndex{}
return func(index uint64) (v reflect.Value, err error) {
if v, err = z(index); err != nil || v.Kind() == reflect.Bool {
return
}
lr := v.Interface().(fu.Struct)
j, ok := nx.Get()
if !ok {
j, _ = nx.Set(lr.Pos(c))
}
if gt(lr.ValueAt(j)) {
return
}
return fu.True, nil
}
}
} | tables/ifxx.go | 0.565779 | 0.457682 | ifxx.go | starcoder |
package scenarios
import (
. "github.com/onsi/ginkgo"
)
// Pending (PDescribe/PIt) dataplane scenarios: connectivity checks that an
// existing namespace-selector network policy takes effect on a cluster
// that joins the kubefed federation, regardless of whether the pod is
// created before or after registration.
var _ = PDescribe("[Dataplane] Adding existing network policy to newly added cluster", func() {
	PContext("Registering cluster 3 with kubefed and then creating a pod", func() {
		PIt("Should implement existing namespace selector based network policy in newly added cluster", func() {
			By("creating listener pod 1 with label 1 in cluster 1 in namespace 1")
			By("creating listener pod 4 with label 1 in cluster 1 in namespace 1")
			By("creating connecting pod 2 with label 2 in cluster 2 in namespace 2")
			By("creating namespace selector based network policy on cluster 1")
			By("testing connectivity between pods")
			By("Registering cluster 3 to kubefed")
			By("creating connecting pod 3 with label 3 in cluster 3 in namespace 2")
			By("testing connectivity between pod 1 and 2")
			By("testing connectivity between pod 3 and 4")
			By("Unregistering cluster 3")
		})
	})
	PContext("[Dataplane] Creating a pod in cluster 3 and then registering it with kubefed", func() {
		PIt("Should implement existing namespace selector based network policy in newly added cluster", func() {
			By("creating listener pod 1 with label 1 in cluster 1 in namespace 1")
			By("creating listener pod 4 with label 1 in cluster 1 in namespace 1")
			By("creating connecting pod 2 with label 2 in cluster 2 in namespace 2")
			By("creating namespace selector based network policy on cluster 1")
			By("testing connectivity between pod 1 and 2")
			By("creating connecting pod 3 with label 3 in cluster 3 in namespace 2")
			By("testing non connectivity between pod 3 and 4")
			By("Registering cluster 3 to kubefed")
			By("testing connectivity between pod 3 and 4")
			By("Unregistering cluster 3")
		})
	})
})

// Pending control-plane scenarios: the same add-cluster flows, verified by
// watching the generated NetworkPolicy objects (pod IPs appearing in
// ipBlocks) instead of live traffic.
var _ = PDescribe("[Ctlplane] Adding existing network policy to newly added cluster", func() {
	PContext("Registering cluster 3 with kubefed and then creating a pod", func() {
		PIt("Should implement existing namespace selector based network policy in newly added cluster", func() {
			By("creating listener pod 1 with label 1 in cluster 1 in namespace 1")
			By("creating listener pod 2 with label 2 in cluster 2 in namespace 2")
			By("creating network policy with namespace selector on cluster 1")
			By("Waiting for a NetworkPolicy to appear in cluster1 containing pod 2 IP in ipBlocks")
			By("Registering cluster3 to kubefed")
			By("creating listener pod 3 with label 3 in cluster 3 in namespace 2")
			By("Waiting for a NetworkPolicy to appear in cluster 1 containing pod 3 IP in ipBlocks")
			By("Unregistering cluster3")
		})
	})
	PContext("[Ctlplane] Creating a pod in cluster 3 and then registering it with kubefed", func() {
		PIt("Should implement existing namespace selector based network policy in newly added cluster", func() {
			By("creating listener pod 1 with label 1 in cluster 1 in namespace 1")
			By("creating listener pod 2 with label 2 in cluster 2 in namespace 2")
			By("creating network policy with namespace selector on cluster 1")
			By("Waiting for a NetworkPolicy to appear in cluster1 containing pod 2 IP in ipBlocks")
			By("creating listener pod 3 with label 3 in cluster 3 in namespace 2")
			By("Registering cluster3 to kubefed")
			By("Waiting for a NetworkPolicy to appear in cluster 1 containing pod 3 IP in ipBlocks")
			By("Unregistering cluster3")
		})
	})
})
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"time"
)
// Flags holds values parsed from the command line. Per the configuration
// reference below, ConfigFile, DbPath, MinPeriod and MaxPeriod override
// the corresponding configuration-file settings.
type Flags struct {
	ConfigFile    string        // path to the JSON configuration file ('-c'/'--conf')
	DbPath        string        // location of the noise-domains database
	ReuseDatabase bool          // presumably: reuse an existing database instead of rebuilding it — confirm at call site
	MinPeriod     time.Duration // minimum interval between noise queries
	MaxPeriod     time.Duration // maximum interval between noise queries
}
/*
Config contains the configuration information used by the application for customizing its behavior.
The configuration file defaults to a JSON-encoded file named "dns-noise.json" in the current working directory.
It may be overwritten by supplying an alternative filepath using the '-c' or '--conf' command-line option.
e.g. dns-noise -c /usr/local/etc/dns-noise.conf
The configuration must be expressed as strict JSON, so unfortunately comments in the configuration file are not
supported. JSON has an especially unforgiving syntax structure, so careful attention to the brackets, braces, and commas
is necessary. An example configuration file is included which may be edited/revised as desired.
Here is an annotated reference for the configuration file format:
{
The "nameservers" block is *optional* and if omitted the system defaults will be used.
It contains a list of nameservers that will be queried with the noise DNS requests.
The nameservers will be queried in the order written with the primary used for all initial queries
and any additional nameservers used only on failover.
* Each nameserver entry *must* contain an "ip" element with an IP address in either IPv4 or IPv6 format.
* A nameserver entry *may* contain a "port" element with the connection port specified.
The default port (53) will be used if no port is specified.
* A nameserver entry *may* contain a "zone" element *only* with an IPv6 address. The default is to leave the zone unspecified.
"nameservers":[
{ "ip": "127.0.0.1", "port": 53 },
{ "ip": "::1", zone: "eth0", "port": 53 }
],
The "sources" block is *required* and must have at least one entry defining the source and interpretation rules.
A source provides a list of domains that will be randomly selected for querying the DNS servers in order to generate noise.
Each source describes the URL, how to interpret the data, and the refresh policy. All data files must be in CSV form,
although the application can independently unzip the file if necessary.
* Each source entry *must* contain a "url" element specifying the URL for the domains data.
* A source *may* contain a "column" element indicating which column in the data file contains the list of domains.
If unspecified, the default value is 0 which will specify the first column.
* A source *may* contain a "label" element to uniquely identify the dataset associated with the source.
If unspecified, the entire dataset for all sources will be purged when a refresh is triggered.
* A source *may* contain a "refresh" element specifying the interval for the domains data to be reloaded from the URL.
If unspecified, the default behavior will be to never refresh. The interval must be parsable by Go's time.ParseDuration().
"sources": [
{ "url": "http://example.com/domains/domainlist.csv.zip", "column": 1, "label": "source1", "refresh": "24h" }
],
The "noise" block is *optional* and if omitted the system defaults will be used.
It contains a set of attributes that define how the application behaves.
* The "minPeriod" element specifies the minimum interval permitted for queries. The default value is 100ms.
A command-line argument specifying the minPeriod will overwrite the default or configuration value.
The period must be parsable by Go's time.ParseDuration() and be less than that of the maxPeriod.
* The "maxPeriod" element specifies the maximum interval permitted for queries. The default value is 15s.
A command-line argument specifying the maxPeriod will overwrite the default or configuration value.
The period must be parsable by Go's time.ParseDuration() and be greater than that of minPeriod.
* The "dbPath" element specifies the path to locate the database containing the list of domains.
The default location is in the system's tempory directory with the filename of "dns-noise.db".
The location must have permissions for file creation and write access.
A command-line argument specifying the path will overwrite the default or configuration value.
* The "ipv4" element is a boolean flag indicating whether DNS request for the IPv4 address should be utilized.
This is a request for the "A" record from the DNS zone and is not dependent on using an IPv4 or IPv6 network.
The default value is true.
* The "ipv6" element is a boolean flag indicating whether DNS request for the IPv6 address should be utilized.
This is a request for the "AAAA" record from the DNS zone and is not dependent on using an IPv4 or IPv6 network.
The default value is false.
"noise": {
"minPeriod": "100ms",
"maxPeriod": "15s",
"dbPath": "/tmp/dns-noise.db",
"ipv4": true,
"ipv6": true
},
The "pihole" block is *optional* and if omitted the application will not utilize pihole activity for determining noise thresholds.
If the pihole block is incomplete or incorrectly configured, the pihole will not be utilized. If the pihole is not
used to determine the rate of DNS queries, then random values between the minPeriod and maxPeriod will be used. The pihole
authtoken value can be found in the "/etc/pihole/setupVars.conf" file as the value for the "WEBPASSWORD" option. The
token should be treated with appropriate security precautions and restrict access.
* The "host" element *must* specify the hostname or IP address of the pihole server. The pihole must be listening on that interface,
so check the pihole settings especially if running the noise generator on the same host as the pihole.
If the host is not specified, pihole activity will not be enabled.
* The "authToken" element *must* contain the encrypted web password for accessing the pihole's admin API. Please note that the queries
to the pihole are sent *unencrypted* and the token value is accessible to traffic sniffers as the pihole does not support https.
Do *not* use if there is even a remote chance of untrusted actors on the network.
* The "activityPeriod" element *may* specify the time interval used to calculate the running average for the pihole query activity.
The default is use a 5 minute window for examining query activity. The interval must be parsable by Go's time.ParseDuration().
* The "refresh" element *may* specify the frequency the pihole will be queried to calculate the moving average.
The default refresh frequency is 1 minute. The frequency must be parsable by Go's time.ParseDuration().
* The "filter" element *may* specify a hostname that is used to exclude activity from the moving average.
This may be desired in order to exclude the queries originating from the DNS noise host in order to just report on the "live" traffic.
* The "noisePercentage" element *may* be specified and must be in the range of 1-100 for the pihole functionality to be enabled.
This element allows the noise generator to dynamically adjust its traffic levels to the stated percentage of "live" traffic.
The default value is 10. Do not include a percentage sign (%) with the value.
"pihole": {
"host": "pihole.example.com",
"authToken": "<PASSWORD>",
"activityPeriod": "5m",
"refresh": "1m",
"filter": "noise.example.com",
"noisePercentage": 10
}
The "metrics" block is *optional* and if omitted the application will not emit any metrics for scraping.
If the metrics block is incorrectly formatted, it may result in a panic upon service launch or difficulty in scraping.
The metrics are exported on the designated port and path in standard prometheus text format. They can be manually
inspected by pointing your browser to the appropriate URL. (e.g. "http://noise.example.com:6001/metrics")
* The "enabled" element *may* be specified with a boolean (true/false) value. The default value is false.
* The "port" element *may* be specified. The default value is 6001. Care should be made when selecting a port
to pick a port that is not already in use on that host or in a restricted range.
* The "path" element *may* be specified. The default value is "/metrics" as that is the conventional path for Prometheus
log scraping. Access to the path from external networks should be restricted as part of good security practices.
"metrics": {
"enabled": false,
"port": 6001,
"path": "/metrics"
}
}
*/
// Config mirrors the JSON configuration file; see the annotated reference
// in the comment above for the semantics of each section.
type Config struct {
	NameServers []NameServer `json:"nameservers"`
	Noise       Noise        `json:"noise"`
	Sources     []Source     `json:"sources"`
	Pihole      Pihole       `json:"pihole"`
	Metrics     Metrics      `json:"metrics"`
}
// NameServer identifies a single upstream DNS resolver by IP address,
// zone and port.
type NameServer struct {
	Ip   string `json:"ip"`
	Zone string `json:"zone"`
	Port int    `json:"port"`
}

// UnmarshalJSON decodes a NameServer from JSON, seeding the struct with its
// default values first (port 53) so that any field present in the JSON blob
// overrides the default.
func (ns *NameServer) UnmarshalJSON(data []byte) error {
	ns.Port = 53
	// The local alias drops the method set, so the json.Unmarshal call
	// below cannot recurse back into this method.
	type plain NameServer
	return json.Unmarshal(data, (*plain)(ns))
}
// Noise configures the noise-query generator: where its domain database
// lives, how often queries are issued, and which address families to use.
//
// BUG FIX: the IPv4/IPv6 tags were malformed (`json:ipv4"` — missing the
// opening quote), which `go vet` flags and which made encoding/json ignore
// the tag entirely. The corrected tags make the lowercase keys explicit.
type Noise struct {
	DbPath    string   `json:"dbPath"`
	MinPeriod Duration `json:"minPeriod"`
	MaxPeriod Duration `json:"maxPeriod"`
	IPv4      bool     `json:"ipv4"`
	IPv6      bool     `json:"ipv6"`
}

// UnmarshalJSON decodes a Noise block from JSON, seeding the struct with its
// defaults first (IPv4 on, a temp-dir database path, 100ms/15s periods) so
// that any values present in the JSON blob override them. IPv6 defaults to
// false, the zero value.
func (n *Noise) UnmarshalJSON(data []byte) error {
	n.IPv4 = true
	n.DbPath = filepath.Join(os.TempDir(), "dns-noise.db")
	n.MinPeriod, _ = parseDuration("100ms")
	n.MaxPeriod, _ = parseDuration("15s")
	// The local alias drops the method set to avoid infinite recursion.
	type Alias Noise
	return json.Unmarshal(data, (*Alias)(n))
}
// Source describes a remote list of domains used to generate noise queries:
// where to fetch it, which CSV column to read, and how often to refresh it.
type Source struct {
Label string `json:"label"`
Url string `json:"url"`
Column int `json:"column"`
Refresh Duration `json:"refresh"`
Timestamp time.Time // runtime state (last fetch time); not part of the JSON config
}
// Pihole holds the connection and tuning settings for the optional Pi-hole
// integration. Enabled, Timestamp and SleepPeriod are runtime state rather
// than configuration and therefore carry no JSON tags.
type Pihole struct {
	Host            string   `json:"host"`
	AuthToken       string   `json:"authToken"`
	ActivityPeriod  Duration `json:"activityPeriod"`
	Refresh         Duration `json:"refresh"`
	Filter          string   `json:"filter"`
	NoisePercentage int      `json:"noisePercentage"`
	Enabled         bool
	Timestamp       time.Time
	SleepPeriod     time.Duration
}

// UnmarshalJSON decodes a Pihole block from JSON, seeding the struct with
// its documented defaults first (noisePercentage 10, activityPeriod 5m,
// refresh 1m) so that any values present in the JSON blob override them.
func (p *Pihole) UnmarshalJSON(data []byte) error {
	p.NoisePercentage = 10
	p.ActivityPeriod, _ = parseDuration("5m")
	p.Refresh, _ = parseDuration("1m")
	// The local alias drops the method set, preventing this method from
	// recursing into itself via json.Unmarshal.
	type plain Pihole
	return json.Unmarshal(data, (*plain)(p))
}
// Metrics configures the optional Prometheus metrics endpoint.
type Metrics struct {
	Enabled bool   `json:"enabled"`
	Path    string `json:"path"`
	Port    int    `json:"port"`
}

// UnmarshalJSON decodes a Metrics block from JSON, seeding the struct with
// its documented defaults first (port 6001, path "/metrics") so that any
// values present in the JSON blob override them. Enabled defaults to false,
// which is already the zero value.
//
// BUG FIX: the default path was "metrics" (no leading slash), contradicting
// both the documented default and the conventional Prometheus scrape path.
func (m *Metrics) UnmarshalJSON(data []byte) error {
	m.Port = 6001
	m.Path = "/metrics"
	// The local alias drops the method set to avoid infinite recursion.
	type Alias Metrics
	return json.Unmarshal(data, (*Alias)(m))
}
// loadFlags registers the supported command-line flags, parses os.Args, and
// returns a Flags struct containing either the passed-in values or defaults.
// Note: the standard flag package reports an error and exits on unrecognized
// flags; they are not silently ignored.
func loadFlags() *Flags {
f := new(Flags)
// Set default interval values. These mirror the Noise defaults
// ("15000ms" is the same duration as "15s").
f.MinPeriod, _ = time.ParseDuration("100ms")
f.MaxPeriod, _ = time.ParseDuration("15000ms")
// Duplicate references are permitted for providing long ("--conf") and short ("-c") versions of a command line arg
flag.BoolVar(&f.ReuseDatabase, "reusedb", false, "Reuse existing noise database")
flag.BoolVar(&f.ReuseDatabase, "r", false, "Reuse existing noise database (shorthand)")
flag.StringVar(&f.ConfigFile, "conf", "dns-noise.json", "Path to configuration file")
flag.StringVar(&f.ConfigFile, "c", "dns-noise.json", "Path to configuration file (shorthand)")
flag.StringVar(&f.DbPath, "database", "/tmp/dns-noise.db", "Path to noise database file")
flag.StringVar(&f.DbPath, "d", "/tmp/dns-noise.db", "Path to noise database file (shorthand)")
flag.DurationVar(&f.MinPeriod, "min", f.MinPeriod, "Minimum time period for issuing noise queries")
flag.DurationVar(&f.MaxPeriod, "max", f.MaxPeriod, "Maximum time period for issuing noise queries")
// process the flags passed in on the CLI
flag.Parse()
return f
}
// isFlagPassed reports whether the named flag was explicitly set on the
// command line. It only consults flags that flag.Parse has already
// processed, since flag.Visit walks set flags only.
func isFlagPassed(flagName string) bool {
	passed := false
	flag.Visit(func(f *flag.Flag) {
		passed = passed || f.Name == flagName
	})
	return passed
}
// loadConfig reads and parses the named JSON file for the configuration
// values. Command-line flags overwrite the values (if any) found in the
// configuration. On success the processed configuration is returned; any
// error is fatal (the process exits via log.Fatal).
func loadConfig(flags *Flags) *Config {
jsonFile, err := os.Open(flags.ConfigFile)
if err != nil {
log.Fatal(err.Error())
}
defer jsonFile.Close()
// NOTE(review): the ReadAll error is silently discarded; a truncated
// read would surface as a JSON parse failure below.
byteValue, _ := ioutil.ReadAll(jsonFile)
c := new(Config)
err = json.Unmarshal(byteValue, c)
if err != nil {
log.Fatal(err.Error())
}
// checks to see if the necessary elements for Pihole access are present
c.Pihole.Enabled = piholeEnabled(&c.Pihole)
// overwrite config vars that were set explicitly with a command-line flag
if isFlagPassed("min") {
c.Noise.MinPeriod = Duration(flags.MinPeriod)
}
if isFlagPassed("max") {
c.Noise.MaxPeriod = Duration(flags.MaxPeriod)
}
if isFlagPassed("database") || isFlagPassed("d") {
c.Noise.DbPath = flags.DbPath
}
// An inverted interval can never be satisfied, so it is a fatal error.
if c.Noise.MinPeriod > c.Noise.MaxPeriod {
log.Fatal("Min period exceeds max period")
}
return c
}
// The Duration type enables the JSON module to process strings as
// time.Durations. While time.Duration is available as a native type for CLI
// flags, it is not for the JSON parser. Note that in Go you cannot define
// new methods on a non-local type, so this wrapper is the best alternative
// to hacking directly in the standard Go time module.
type Duration time.Duration

// Duration returns the native time.Duration value of the wrapper. This
// helper makes it slightly less tedious to continually typecast a Duration
// into a time.Duration.
func (d Duration) Duration() time.Duration {
	return time.Duration(d)
}

// parseDuration parses a string using the underlying time.ParseDuration and
// wraps the result; on failure it returns a zero Duration and the error.
func parseDuration(s string) (Duration, error) {
	td, err := time.ParseDuration(s)
	if err != nil {
		return Duration(0), err
	}
	return Duration(td), nil
}

// MarshalJSON encodes the Duration as its string form (e.g. "1m30s").
func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(time.Duration(d).String())
}

// UnmarshalJSON decodes either a JSON number (interpreted as nanoseconds)
// or a JSON string in time.ParseDuration syntax into the Duration. Any
// other JSON value yields an error.
func (d *Duration) UnmarshalJSON(b []byte) error {
	var v interface{}
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	switch value := v.(type) {
	case float64:
		// JSON numbers arrive as float64; treat them as nanoseconds.
		*d = Duration(time.Duration(value))
		return nil
	case string:
		tmp, err := time.ParseDuration(value)
		if err != nil {
			return err
		}
		*d = Duration(tmp)
		return nil
	default:
		// FIX: error string lowercased per Go convention (staticcheck ST1005).
		return fmt.Errorf("invalid duration specification: '%v'", value)
	}
}
package pagination
import (
"math"
)
// Paginator manages pagination of a data set: given a page size, a page
// number and a total item count it derives the query offset and the index
// of the last possible page.
type Paginator struct {
perPage int // The number of items per page.
page int // The current page index (1-based).
offset int // The current offset to pass to the query.
total int // The total number of items.
lastPage int // The number of the last possible page.
}
// calculateOffset recomputes the offset field from the current page and
// per-page values. It fails with ErrCalculateOffset when either value is
// still zero, since the offset would be meaningless.
func (p *Paginator) calculateOffset() error {
	if p.perPage == 0 || p.page == 0 {
		return ErrCalculateOffset
	}
	p.offset = (p.page - 1) * p.perPage
	return nil
}

// calculateLastPage recomputes the lastPage field from the current values,
// returning an error when that is impossible.
func (p *Paginator) calculateLastPage() error {
	// An empty data set is not an error -- it would make the library hard
	// to use -- so return early, which also avoids a divide-by-zero.
	if p.total == 0 {
		return nil
	}
	// A per-page value of zero, on the other hand, is nonsensical.
	if p.perPage == 0 {
		return ErrCalculateLastPage
	}
	pages := float64(p.total) / float64(p.perPage)
	p.lastPage = int(math.Ceil(pages))
	return nil
}
// GetPerPage returns the number of items per page.
func (p *Paginator) GetPerPage() int { return p.perPage }

// SetPerPage defines how many items per page the paginator will return and
// recomputes the derived fields, returning any error from that.
func (p *Paginator) SetPerPage(perPage int) error {
	p.perPage = perPage
	return p.recalc()
}

// GetPage returns the current page index.
func (p *Paginator) GetPage() int { return p.page }

// SetPage sets the current page and recomputes the derived fields,
// returning any error from that.
func (p *Paginator) SetPage(page int) error {
	p.page = page
	return p.recalc()
}

// GetOffset returns the current offset of the paginator.
func (p *Paginator) GetOffset() int { return p.offset }

// GetTotal returns the total number of items in the paginator.
func (p *Paginator) GetTotal() int { return p.total }

// SetTotal sets the total number of items and recomputes the derived
// fields, returning any error from that.
func (p *Paginator) SetTotal(total int) error {
	p.total = total
	return p.recalc()
}

// GetLastPage returns the last possible page number.
func (p *Paginator) GetLastPage() int { return p.lastPage }

// recalc refreshes the derived offset and lastPage fields, returning the
// first error encountered.
func (p *Paginator) recalc() error {
	if err := p.calculateOffset(); err != nil {
		return err
	}
	return p.calculateLastPage()
}
// PrepareResponse returns a prepared pagination response.
func (p *Paginator) PrepareResponse() *Response {
return newResponse(p.total, p.perPage, p.page, p.lastPage)
}
// NewPaginator returns a new Paginator instance with the provided
// parameters set and returns an error if it fails.
func NewPaginator(perPage, page, total int) (paginator *Paginator, err error) {
// Create the paginator.
paginator = &Paginator{
perPage: perPage,
page: page,
total: total,
}
if err = paginator.calculateOffset(); err != nil {
return nil, err
}
if err = paginator.calculateLastPage(); err != nil {
return nil, err
}
return
} | pagination/paginator.go | 0.742795 | 0.409929 | paginator.go | starcoder |
package main
import "fmt"
// Item is a single stealable good with a weight (kilos) and a value.
type Item struct {
Name string
Weight int
Value int
}
// Knapsack problem:
// You are a thief with a knapsack that can carry 4 kilos of goods.
// In the store you are about to rob, there are 4 items with different values and weights (see items below).
// Question: What items should you steal to get the maximum amount of value? The items should fit your knapsack, of course.
var (
// The candidate items, one per grid row (see getItemForRow).
stereo *Item = &Item{
Name: "stereo",
Weight: 4,
Value: 3000,
}
laptop *Item = &Item{
Name: "laptop",
Weight: 3,
Value: 2000,
}
guitar *Item = &Item{
Name: "guitar",
Weight: 1,
Value: 1500,
}
iphone *Item = &Item{
Name: "iphone",
Weight: 1,
Value: 2000,
}
)
// main solves the 4-item / 4-kilo knapsack problem with the classic dynamic
// programming grid and prints every cell, flagging the best one.
func main() {
// Creates a matrix with 4 rows (items above)
// and 4 columns (knapsack size)
cells := make([][][]*Item, 4)
for i, _ := range cells {
cells[i] = make([][]*Item, 4)
}
// We want a grid like this
// 1 2 3 4 <- all possible weights for the knapsack
// guitar x x x x <- each row has a specific item
// stereo x x x x
// laptop x x x x
// iphone x x x x
for row, _ := range cells {
item := getItemForRow(row)
for col, _ := range cells[row] {
knapsackWeight := col + 1
// In the first row, we add our current item only if the current sub knapsack can carry its weight --
// there is no previous row to combine with yet.
if row == 0 {
if knapsackWeight >= item.Weight {
cells[row][col] = append(cells[row][col], item)
}
} else {
// From the second row onwards, things get hairy!
if knapsackWeight >= item.Weight {
// Gets the sum of the Items' value in the cell above
previousMaxValue := sumItemsValue(cells[row-1][col])
// Do we still have space in our sub knapsack if we steal the current Item?
remainingSpace := knapsackWeight - item.Weight
// If we don't...
if remainingSpace <= 0 {
// Let's check the Item's value.
// If it is less than the cell right above us, let's keep that cell's items,
// since so far that is the maximum value we can fit in our sub knapsack
if item.Value < previousMaxValue {
cells[row][col] = append(cells[row][col], cells[row-1][col]...)
} else {
// If not, let's steal our current item instead
cells[row][col] = append(cells[row][col], item)
}
} else {
// BUT, if even after stealing the current Item we have space left, we can steal more!
// First, we need to figure out how much space we have left in our sub knapsack
spaceLeftInKnapsack := col - item.Weight
// Second, we need to figure out the maximum value possible for that space.
// Luckily we already calculated that in our grid in the previous row
valueForSpaceLeft := sumItemsValue(cells[row-1][spaceLeftInKnapsack])
// If we steal the current Item + fill all space left, we get the value below
totalValue := item.Value + valueForSpaceLeft
// Is this value bigger than the cell above us?
if previousMaxValue <= totalValue {
// If so, we add our current Item + the Items from the space left
cells[row][col] = append(cells[row][col], item)
cells[row][col] = append(cells[row][col], cells[row-1][spaceLeftInKnapsack]...)
} else {
// Otherwise we add the items from the cell above us
cells[row][col] = append(cells[row][col], cells[row-1][col]...)
}
}
} else {
// Oh, if the current Item is heavier than our sub knapsack can support,
// just add the ones from the cell above
cells[row][col] = append(cells[row][col], cells[row-1][col]...)
}
}
}
}
// I'm looping over the cells again, _twice_, to get the value and print the grid.
// I could have kept track of this information in the algorithm above, but I decided
// to make it more clear to read ✨
// Getting the biggest value!
maxRow, maxCol, maxValue := getMaxiumValues(cells)
// Print grid
for row, _ := range cells {
for col, items := range cells[row] {
var s string
for _, item := range items {
s += item.Name + " "
}
if maxRow == row && maxCol == col {
fmt.Printf("Cell(%d, %d) -> %s => THIS IS THE ANSWER! The value is: %d\n", row, col, s, maxValue)
} else {
fmt.Printf("Cell(%d, %d) -> %s\n", row, col, s)
}
}
}
}
// getMaxiumValues scans the DP grid and returns the cell holding the most
// valuable item combination, along with that value.
//
// BUG FIX: the original unconditionally reset maxRow/maxCol/maxValue to the
// current cell on every iteration, so the inner comparison could never
// trigger and the function simply returned the last cell visited. That
// happened to be correct for this particular grid (the bottom-right cell is
// the optimum), but the running maximum is now tracked properly. Ties are
// resolved in favor of the later cell, which preserves the original's
// observable result for this program.
func getMaxiumValues(cells [][][]*Item) (maxRow, maxCol, maxValue int) {
	for row := range cells {
		for col := range cells[row] {
			if v := sumItemsValue(cells[row][col]); v >= maxValue {
				maxRow, maxCol, maxValue = row, col, v
			}
		}
	}
	return maxRow, maxCol, maxValue
}
// getItemForRow maps a grid row index to its Item: 0=guitar, 1=stereo,
// 2=laptop, 3=iphone. Out-of-range rows return nil.
func getItemForRow(row int) (item *Item) {
switch row {
case 0:
item = guitar
case 1:
item = stereo
case 2:
item = laptop
case 3:
item = iphone
}
return item
}
// sumItemsValue return the sum of a slice of Items
func sumItemsValue(items []*Item) (sum int) {
for _, item := range items {
sum += item.Value
}
return sum
} | ch9/knapsack.go | 0.634996 | 0.446374 | knapsack.go | starcoder |
package conf
// IntVar registers an int option under the given name with both the
// environment-variable and flag backends; the value is stored through p.
func (c *Configurator) IntVar(p *int, name string, value int, usage string) {
	c.env().IntVar(p, name, value, usage)
	c.flag().IntVar(p, name, value, usage)
}

// Int registers an int option with both backends and returns a pointer to
// the variable that stores its value.
func (c *Configurator) Int(name string, value int, usage string) *int {
	var v int
	c.IntVar(&v, name, value, usage)
	return &v
}

// IntVarE registers an int environment variable only; the value is stored
// through p.
func (c *Configurator) IntVarE(p *int, name string, value int, usage string) {
	c.env().IntVar(p, name, value, usage)
}

// IntE registers an int environment variable and returns a pointer to the
// variable that stores its value.
func (c *Configurator) IntE(name string, value int, usage string) *int {
	var v int
	c.IntVarE(&v, name, value, usage)
	return &v
}

// IntVarF registers an int flag only; the value is stored through p.
func (c *Configurator) IntVarF(p *int, name string, value int, usage string) {
	c.flag().IntVar(p, name, value, usage)
}

// IntF registers an int flag and returns a pointer to the variable that
// stores its value.
func (c *Configurator) IntF(name string, value int, usage string) *int {
	var v int
	c.IntVarF(&v, name, value, usage)
	return &v
}
// IntVar defines an int flag and environment variable with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag and/or environment variable.
func IntVar(p *int, name string, value int, usage string) {
Global.IntVar(p, name, value, usage)
}
// Int defines an int flag and environment variable with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag and/or environment variable.
func Int(name string, value int, usage string) *int {
return Global.Int(name, value, usage)
}
// IntVarE defines an int environment variable with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the environment variable.
func IntVarE(p *int, name string, value int, usage string) {
Global.IntVarE(p, name, value, usage)
}
// IntE defines an int environment variable with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the environment variable.
func IntE(name string, value int, usage string) *int {
return Global.IntE(name, value, usage)
}
// IntVarF defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func IntVarF(p *int, name string, value int, usage string) {
Global.IntVarF(p, name, value, usage)
}
// IntF defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
func IntF(name string, value int, usage string) *int {
return Global.IntF(name, value, usage)
} | value_int.go | 0.772788 | 0.621713 | value_int.go | starcoder |
package vile
// Intern - internalize the name into the global symbol table
func Intern(name string) *Object {
sym, ok := symtab[name]
if !ok {
sym = new(Object)
sym.text = name
if IsValidKeywordName(name) {
sym.Type = KeywordType
} else if IsValidTypeName(name) {
sym.Type = TypeType
} else if IsValidSymbolName(name) {
sym.Type = SymbolType
} else {
panic("invalid symbol/type/keyword name passed to intern: " + name)
}
symtab[name] = sym
}
return sym
}
// IsValidSymbolName reports whether name can be interned as a plain symbol.
// Any non-empty string qualifies.
func IsValidSymbolName(name string) bool {
	return name != ""
}

// IsValidTypeName reports whether s has the form "<name>": angle brackets
// surrounding at least one character.
func IsValidTypeName(s string) bool {
	n := len(s)
	return n > 2 && s[0] == '<' && s[n-1] == '>'
}

// IsValidKeywordName reports whether s has the form "name:": a trailing
// colon after at least one character.
func IsValidKeywordName(s string) bool {
	return len(s) > 1 && s[len(s)-1] == ':'
}
// ToKeyword coerces obj to a keyword object: a keyword passes through
// unchanged, a type "<name>" and a symbol "name" both intern "name:", and a
// string interns either itself (when already keyword-shaped) or itself plus
// a trailing colon (when it is a valid symbol name). Anything else yields
// an argument error.
func ToKeyword(obj *Object) (*Object, error) {
switch obj.Type {
case KeywordType:
return obj, nil
case TypeType:
// Strip the angle brackets, then append the keyword colon.
return Intern(obj.text[1:len(obj.text)-1] + ":"), nil
case SymbolType:
return Intern(obj.text + ":"), nil
case StringType:
if IsValidKeywordName(obj.text) {
return Intern(obj.text), nil
} else if IsValidSymbolName(obj.text) {
return Intern(obj.text + ":"), nil
}
}
return nil, Error(ArgumentErrorKey, "to-keyword expected a <keyword>, <type>, <symbol>, or <string>, got a ", obj.Type)
}
// typeNameString strips the surrounding angle brackets from a type name,
// e.g. "<point>" -> "point". The input is assumed to be a valid type name.
func typeNameString(s string) string {
	n := len(s)
	return s[1 : n-1]
}
// TypeName converts a <type> object to the symbol naming it
// ("<point>" -> point). A non-type argument yields an argument error.
func TypeName(t *Object) (*Object, error) {
if !IsType(t) {
return nil, Error(ArgumentErrorKey, "type-name expected a <type>, got a ", t.Type)
}
return Intern(typeNameString(t.text)), nil
}
// KeywordName converts a <keyword> object to the symbol of the same name
// ("key:" -> key). A non-keyword argument yields an argument error.
func KeywordName(t *Object) (*Object, error) {
if !IsKeyword(t) {
return nil, Error(ArgumentErrorKey, "keyword-name expected a <keyword>, got a ", t.Type)
}
return unkeyworded(t)
}
// keywordNameString strips the trailing colon from a keyword name,
// e.g. "key:" -> "key". The input is assumed to be a valid keyword name.
func keywordNameString(s string) string {
	n := len(s)
	return s[:n-1]
}
// unkeywordedString returns k's text with the trailing colon stripped when
// k is a keyword; any other object's text is returned untouched.
func unkeywordedString(k *Object) string {
if IsKeyword(k) {
return keywordNameString(k.text)
}
return k.text
}
// unkeyworded converts a keyword to the symbol of the same name; symbols
// pass through unchanged and anything else is an argument error.
func unkeyworded(obj *Object) (*Object, error) {
if IsSymbol(obj) {
return obj, nil
}
if IsKeyword(obj) {
return Intern(keywordNameString(obj.text)), nil
}
return nil, Error(ArgumentErrorKey, "Expected <keyword> or <symbol>, got ", obj.Type)
}
// ToSymbol coerces obj to a symbol object: a keyword "name:" and a type
// "<name>" both intern "name", a symbol passes through unchanged, and a
// valid non-empty string is interned directly. Anything else yields an
// argument error.
func ToSymbol(obj *Object) (*Object, error) {
switch obj.Type {
case KeywordType:
return Intern(keywordNameString(obj.text)), nil
case TypeType:
return Intern(typeNameString(obj.text)), nil
case SymbolType:
return obj, nil
case StringType:
if IsValidSymbolName(obj.text) {
return Intern(obj.text), nil
}
}
return nil, Error(ArgumentErrorKey, "to-symbol expected a <keyword>, <type>, <symbol>, or <string>, got a ", obj.Type)
}
// symtab is the global symbol table; symbols for the basic types defined in
// this file are precached by initSymbolTable.
var symtab = initSymbolTable()
// initSymbolTable builds the initial symbol table and bootstraps the three
// core type objects (<type>, <keyword>, <symbol>), which are declared in
// data.go. TypeType is self-typed: its Type field points back at itself.
func initSymbolTable() map[string]*Object {
syms := make(map[string]*Object, 0)
TypeType = &Object{text: "<type>"} // TypeType was defined in data.go
TypeType.Type = TypeType // mutate to bootstrap the type of all types
syms[TypeType.text] = TypeType
KeywordType = &Object{Type: TypeType, text: "<keyword>"} // KeywordType was defined in data.go
syms[KeywordType.text] = KeywordType
SymbolType = &Object{Type: TypeType, text: "<symbol>"} // SymbolType was defined in data.go
syms[SymbolType.text] = SymbolType
return syms
}
// Symbols returns every interned object (symbols, keywords and types) in no
// particular order, since map iteration order is random.
func Symbols() []*Object {
syms := make([]*Object, 0, len(symtab))
for _, sym := range symtab {
syms = append(syms, sym)
}
return syms
}
// Symbol interns the symbol whose name is the concatenation of the given
// parts; each part must be a string or symbol object and at least one part
// is required. Invalid parts yield an argument error.
func Symbol(names []*Object) (*Object, error) {
size := len(names)
if size < 1 {
return nil, Error(ArgumentErrorKey, "symbol expected at least 1 argument, got none")
}
name := ""
for i := 0; i < size; i++ {
o := names[i]
s := ""
switch o.Type {
case StringType, SymbolType:
s = o.text
default:
return nil, Error(ArgumentErrorKey, "symbol name component invalid: ", o)
}
name += s
}
return Intern(name), nil
}
package rootfs
import (
"path/filepath"
"sort"
"strings"
)
// tree stores directory paths (whiteout entries) as a trie of path
// components rooted at ".". It is semi-optimized for reads and
// non-optimized for writes; see Merge and HasPrefix for the trade-offs.
type tree struct {
	name     string
	children []*tree
	end      bool
}

// newTree builds a tree containing each of the given paths.
func newTree(paths ...string) *tree {
	root := &tree{name: ".", children: []*tree{}}
	for _, p := range paths {
		root.Add(p)
	}
	return root
}

// Add inserts a cleaned path into the tree, one component per level.
func (t *tree) Add(path string) {
	t.add(strings.Split(filepath.Clean(path), "/"))
}

// HasPrefix reports whether some path stored in the tree is a prefix of the
// given path. Each node's children are scanned linearly; with the expected
// one or two children per node this beats the bookkeeping of binary search.
func (t *tree) HasPrefix(path string) bool {
	return t.hasprefix(strings.Split(filepath.Clean(path), "/"))
}

// Merge adds every path stored in t2 into t. It walks the full branch for
// every other branch, so it is not optimized for speed.
func (t *tree) Merge(t2 *tree) {
	t.merge(t2, []string{})
}

// String renders the tree as a sorted, colon-separated list of its stored
// paths, or "<empty>" when nothing has been added.
func (t *tree) String() string {
	if len(t.children) == 0 {
		return "<empty>"
	}
	acc := &stringer{[]string{}}
	acc.stringify(t, []string{})
	sort.Strings(acc.res)
	return strings.Join(acc.res, ":")
}

// add walks and extends the trie along the given path components, marking
// the final node as a path end.
func (t *tree) add(nodes []string) {
	if len(nodes) == 0 {
		t.end = true
		return
	}
	head, rest := nodes[0], nodes[1:]
	for _, child := range t.children {
		if child.name == head {
			child.add(rest)
			return
		}
	}
	child := &tree{name: head}
	t.children = append(t.children, child)
	child.add(rest)
}

// hasprefix reports whether some stored path is a prefix of nodes.
func (t *tree) hasprefix(nodes []string) bool {
	if len(nodes) == 0 {
		return t.end
	}
	if t.end {
		return true
	}
	for _, child := range t.children {
		if child.name == nodes[0] {
			return child.hasprefix(nodes[1:])
		}
	}
	return false
}

// stringer accumulates rendered paths during a String walk.
type stringer struct {
	res []string
}

// stringify walks the tree, appending a "/"-joined path to s.res for every
// node marked as a path end.
func (s *stringer) stringify(t *tree, acc []string) {
	if t.name == "" {
		return
	}
	acc = append(acc, t.name)
	if t.end {
		s.res = append(s.res, strings.Join(acc, "/"))
	}
	for _, child := range t.children {
		s.stringify(child, acc)
	}
}

// merge replays every terminal path found under t2 into t; acc carries the
// components walked so far (acc[0] is t2's root name and is skipped).
func (t *tree) merge(t2 *tree, acc []string) {
	if t2.end {
		t.add(append(acc[1:], t2.name))
	}
	acc = append(acc, t2.name)
	for _, child := range t2.children {
		t.merge(child, acc)
	}
}
package vm
// Halt clears the running flag; the machine shuts down after the operation
// currently being executed completes.
func (machine *Machine) Halt() {
machine.keepRunning = false
}
// PerformPush pushes a value onto the machine stack. With FLAG_I the first
// argument is pushed as an immediate; with FLAG_R it names a register whose
// contents are pushed. Any other flag pushes zero.
func (machine *Machine) PerformPush() error {
	var value uint16
	if machine.flag == FLAG_R {
		loaded, err := machine.Load(machine.args[0])
		if err != nil {
			return err
		}
		value = loaded
	} else if machine.flag == FLAG_I {
		value = machine.args[0]
	}
	return machine.push(value)
}
// PerformPop pops the top stack value into the register named by the first
// argument.
func (machine *Machine) PerformPop() error {
	value, err := machine.pop()
	if err != nil {
		return err
	}
	return machine.Store(machine.args[0], value)
}
// PerformCall pushes the current code pointer onto the stack and jumps to
// the call target. With FLAG_I the target is the immediate first argument;
// with FLAG_R it is read from the named register.
func (machine *Machine) PerformCall() error {
var value uint16
var err error
switch machine.flag {
case FLAG_I:
value = machine.args[0]
case FLAG_R:
value, err = machine.Load(machine.args[0])
if err != nil {
return err
}
}
// Save the return address (the current code pointer) on the stack...
current, err := machine.Load(CODE_POINTER)
if err != nil {
return err
}
err = machine.push(current)
if err != nil {
return err
}
// ...then transfer control to the target.
err = machine.Store(CODE_POINTER, value)
if err != nil {
return err
}
return nil
}
// PerformReturn pops a return address from the stack and resumes execution
// there by storing it into the code pointer.
func (machine *Machine) PerformReturn() error {
	addr, err := machine.pop()
	if err != nil {
		return err
	}
	return machine.Store(CODE_POINTER, addr)
}
// PerformSimpleArithmetic applies the unary arithmetic function carry to
// the register named by the first argument and writes the result back. The
// zero flag is set when the 16-bit result is zero; the carry flag is set
// when the full-width result did not fit into 16 bits (truncation).
func (machine *Machine) PerformSimpleArithmetic(carry func(int) int) error {
var value1, result, zeroFlag, carryFlag uint16
value1, err := machine.Load(machine.args[0])
if err != nil {
return err
}
// Compute at full int width so overflow past 16 bits can be detected.
carryResult := carry(int(value1))
result = uint16(carryResult)
zeroFlag = 0
if result == 0 {
zeroFlag = 1
}
err = machine.Store(ZERO_FLAG, zeroFlag)
if err != nil {
return err
}
// Carry: the truncated 16-bit result differs from the full result.
carryFlag = 0
if int(result) != carryResult {
carryFlag = 1
}
err = machine.Store(CARRY_FLAG, carryFlag)
if err != nil {
return err
}
err = machine.Store(machine.args[0], result)
if err != nil {
return err
}
return nil
}
// PerformSimpleLogic applies the unary logic function base to the register
// named by the first argument and writes the result back. The zero flag is
// set when the result is zero; the carry flag is always cleared, since
// logic operations cannot overflow.
func (machine *Machine) PerformSimpleLogic(base func(uint16) uint16) error {
var value1, zeroFlag, result uint16
value1, err := machine.Load(machine.args[0])
if err != nil {
return err
}
result = base(value1)
zeroFlag = 0
if result == 0 {
zeroFlag = 1
}
err = machine.Store(ZERO_FLAG, zeroFlag)
if err != nil {
return err
}
err = machine.Store(CARRY_FLAG, 0)
if err != nil {
return err
}
err = machine.Store(machine.args[0], result)
if err != nil {
return err
}
return nil
}
// PerformLogic applies the binary logic function base to the register named
// by the first argument and a second operand, storing the result in the
// first register. The second operand is the immediate second argument
// unless the flag is FLAG_RR, in which case it is loaded from the named
// register. The zero flag reflects the result; the carry flag is always
// cleared.
func (machine *Machine) PerformLogic(base func(uint16, uint16) uint16) error {
var value1, value2, zeroFlag, result uint16
value1, err := machine.Load(machine.args[0])
if err != nil {
return err
}
// Default to the immediate operand; override with a register load for
// register-register mode.
if value2 = machine.args[1]; machine.flag == FLAG_RR {
value2, err = machine.Load(machine.args[1])
if err != nil {
return err
}
}
result = base(value1, value2)
zeroFlag = 0
if result == 0 {
zeroFlag = 1
}
err = machine.Store(ZERO_FLAG, zeroFlag)
if err != nil {
return err
}
err = machine.Store(CARRY_FLAG, 0)
if err != nil {
return err
}
err = machine.Store(machine.args[0], result)
if err != nil {
return err
}
return nil
}
// PerformArithmetic applies the binary arithmetic function carry to the
// register named by the first argument and a second operand, storing the
// result in the first register. The second operand is the immediate second
// argument unless the flag is FLAG_RR, in which case it is loaded from the
// named register. The zero flag is set when the 16-bit result is zero; the
// carry flag is set when the full-width result did not fit into 16 bits.
func (machine *Machine) PerformArithmetic(carry func(int, int) int) error {
var value1, value2, result, zeroFlag, carryFlag uint16
value1, err := machine.Load(machine.args[0])
if err != nil {
return err
}
// Default to the immediate operand; override with a register load for
// register-register mode.
if value2 = machine.args[1]; machine.flag == FLAG_RR {
value2, err = machine.Load(machine.args[1])
if err != nil {
return err
}
}
// Compute at full int width so overflow past 16 bits can be detected.
carryResult := carry(int(value1), int(value2))
result = uint16(carryResult)
zeroFlag = 0
if result == 0 {
zeroFlag = 1
}
err = machine.Store(ZERO_FLAG, zeroFlag)
if err != nil {
return err
}
carryFlag = 0
if int(result) != carryResult {
carryFlag = 1
}
err = machine.Store(CARRY_FLAG, carryFlag)
if err != nil {
return err
}
err = machine.Store(machine.args[0], result)
if err != nil {
return err
}
return nil
}
// PerformJump sets the code pointer to the jump target, which is either an
// immediate (FLAG_I) or read from a register (FLAG_R). When jumpAlways is
// false the jump is conditional and only happens if the zero flag is 1.
func (machine *Machine) PerformJump(jumpAlways bool) error {
var value uint16
var err error
switch machine.flag {
case FLAG_I:
value = machine.args[0]
case FLAG_R:
value, err = machine.Load(machine.args[0])
if err != nil {
return err
}
}
zeroFlag, err := machine.Load(ZERO_FLAG)
if err != nil {
return err
}
if jumpAlways || zeroFlag == 1 {
err = machine.Store(CODE_POINTER, value)
if err != nil {
return err
}
}
return nil
}
// PerformMove executes a copy operation on registers, values and addresses.
func (machine *Machine) PerformMove() error {
var value, target uint16
var err error
switch machine.flag {
case FLAG_RA:
value, err = machine.Load(machine.args[0])
if err != nil {
return err
}
target, err = machine.Load(machine.args[1])
if err != nil {
return err
}
case FLAG_RR:
value, err = machine.Load(machine.args[0])
if err != nil {
return err
}
target = machine.args[1]
if err != nil {
return err
}
case FLAG_AA:
pointer, err := machine.Load(machine.args[0])
if err != nil {
return err
}
value, err = machine.Load(pointer)
if err != nil {
return err
}
target, err = machine.Load(machine.args[1])
if err != nil {
return err
}
case FLAG_AR:
pointer, err := machine.Load(machine.args[0])
if err != nil {
return err
}
value, err = machine.Load(pointer)
if err != nil {
return err
}
target = machine.args[1]
case FLAG_IA:
value = machine.args[0]
target, err = machine.Load(machine.args[1])
if err != nil {
return err
}
case FLAG_IR:
value = machine.args[0]
target = machine.args[1]
}
err = machine.Store(target, value)
if err != nil {
return err
}
return nil
} | vm/operations.go | 0.564579 | 0.431584 | operations.go | starcoder |
package direction
import (
"fmt"
"github.com/go-gl/mathgl/mgl32"
)
// Type is a direction in the minecraft world.
type Type uint
// Possible direction values.
const (
Up Type = iota
Down
North
South
West
East
Invalid
)
// Values is all valid directions.
var Values = []Type{
Up,
Down,
North,
South,
West,
East,
}
// FromString returns the direction that matches the passed
// string ("up", "down", "north", "south", "west", "east");
// any other string yields Invalid.
func FromString(str string) Type {
	byName := map[string]Type{
		"up":    Up,
		"down":  Down,
		"north": North,
		"south": South,
		"west":  West,
		"east":  East,
	}
	if d, ok := byName[str]; ok {
		return d
	}
	return Invalid
}
// Offset returns the x, y and z unit offset this direction points in.
// Invalid (or any out-of-range value) yields 0, 0, 0.
func (d Type) Offset() (x, y, z int) {
	switch d {
	case West:
		x = -1
	case East:
		x = 1
	case Down:
		y = -1
	case Up:
		y = 1
	case North:
		z = -1
	case South:
		z = 1
	}
	return x, y, z
}
// AsVec returns the direction's unit offset as a float vector.
func (d Type) AsVec() mgl32.Vec3 {
	dx, dy, dz := d.Offset()
	return mgl32.Vec3{float32(dx), float32(dy), float32(dz)}
}
// Opposite returns the direction directly opposite to this
// direction, or Invalid for anything that has no opposite.
func (d Type) Opposite() Type {
	opposites := map[Type]Type{
		Up:    Down,
		Down:  Up,
		East:  West,
		West:  East,
		North: South,
		South: North,
	}
	if o, ok := opposites[d]; ok {
		return o
	}
	return Invalid
}
// Clockwise returns the direction one clockwise rotation (about the
// vertical axis) from this direction. Up and Down map to themselves;
// anything else yields Invalid.
func (d Type) Clockwise() Type {
	rotated := map[Type]Type{
		Up:    Up,
		Down:  Down,
		East:  South,
		South: West,
		West:  North,
		North: East,
	}
	if r, ok := rotated[d]; ok {
		return r
	}
	return Invalid
}
// CounterClockwise returns the direction one counter-clockwise rotation
// (about the vertical axis) from this direction. Up and Down map to
// themselves; anything else yields Invalid.
func (d Type) CounterClockwise() Type {
	rotated := map[Type]Type{
		Up:    Up,
		Down:  Down,
		East:  North,
		North: West,
		West:  South,
		South: East,
	}
	if r, ok := rotated[d]; ok {
		return r
	}
	return Invalid
}
// String returns a string representation of the direction.
func (d Type) String() string {
switch d {
case Up:
return "up"
case Down:
return "down"
case North:
return "north"
case South:
return "south"
case West:
return "west"
case East:
return "east"
case Invalid:
return "invalid"
}
return fmt.Sprintf("direction.Type(%d)", d)
} | type/direction/direction.go | 0.759136 | 0.437763 | direction.go | starcoder |
package periodic
import (
"fmt"
"reflect"
"sort"
"time"
)
const (
	// HoursInDay is the number of hours in a single day
	HoursInDay = 24
	// DaysInWeek is the number of days in a week
	DaysInWeek = 7
)
// Period defines a block of time bounded by a start and end.
// Methods on Period treat a zero-valued Start or End as "unbounded"
// on that side.
type Period struct {
	Start time.Time `json:"start"`
	End time.Time `json:"end"`
}
// RecurringPeriod defines an interface for converting periods that represent abstract points in time
// into concrete periods
type RecurringPeriod interface {
	AtDate(date time.Time) Period
	FromTime(t time.Time) *Period
	Contains(period Period) bool
	ContainsTime(t time.Time) bool
	DayApplicable(t time.Time) bool
	Intersects(period Period) bool
}
// ApplicableDays is a structure for storing what days of week something is valid for.
// This is particularly important when schedules are applicable (i.e. hours of operation &
// inventory rules)
// NOTE: NewApplicableDaysMonStart fills these fields by positional index via
// reflection, so the Monday-first field order must not change.
type ApplicableDays struct {
	Monday bool
	Tuesday bool
	Wednesday bool
	Thursday bool
	Friday bool
	Saturday bool
	Sunday bool
}
// NewPeriod constructs a time period spanning from start to end.
func NewPeriod(start, end time.Time) Period {
	var p Period
	p.Start = start
	p.End = end
	return p
}
// Intersects reports whether the two periods overlap. A zero End is
// treated as an unbounded period.
func (p Period) Intersects(other Period) bool {
	switch {
	case p.End.IsZero() && !other.End.IsZero():
		// p is unbounded: it overlaps anything ending after p starts.
		return p.Start.Before(other.End)
	case !p.End.IsZero() && other.End.IsZero():
		// other is unbounded: symmetric case.
		return other.Start.Before(p.End)
	default:
		// Overlap iff max(starts) < min(ends).
		return MaxTime(p.Start, other.Start).Before(MinTime(p.End, other.End))
	}
}
// Contains reports whether other is fully contained within p, treating
// both bounds as inclusive (i.e. [p.Start, p.End]). Zero bounds on p are
// unbounded on that side.
func (p Period) Contains(other Period) bool {
	startsNoLater := !p.Start.After(other.Start)
	endsNoEarlier := !p.End.Before(other.End)
	switch {
	case p.Start.IsZero() && p.End.IsZero():
		return true
	case p.Start.IsZero():
		return endsNoEarlier
	case p.End.IsZero():
		return startsNoLater
	default:
		return startsNoLater && endsNoEarlier
	}
}
// ContainsAny reports whether other's start or end falls inside p.
func (p Period) ContainsAny(other Period) bool {
	switch {
	case p.Start.IsZero():
		// Unbounded start: anything beginning before our end is contained.
		return p.End.After(other.Start)
	case p.End.IsZero():
		// Unbounded end: anything beginning at or after our start is contained.
		return !p.Start.After(other.Start)
	}
	containsStart := !p.Start.After(other.Start) && p.End.After(other.Start)
	containsEnd := p.Start.Before(other.End) && p.End.After(other.End)
	return containsStart || containsEnd
}
// Less reports whether the period's duration is strictly shorter than d.
func (p Period) Less(d time.Duration) bool {
	span := p.End.Sub(p.Start)
	return span < d
}
// ContainsTime reports whether t falls inside the period. The start is
// always inclusive; the end is inclusive only when endInclusive is set.
// Zero bounds are unbounded on that side.
func (p Period) ContainsTime(t time.Time, endInclusive bool) bool {
	afterStart := !p.Start.After(t)
	beforeEnd := p.End.After(t)
	if endInclusive {
		beforeEnd = !p.End.Before(t)
	}
	switch {
	case p.Start.IsZero() && p.End.IsZero():
		return true
	case p.End.IsZero():
		return afterStart
	case p.Start.IsZero():
		return beforeEnd
	}
	return afterStart && beforeEnd
}
// Equals reports whether the two periods describe the same timespan.
// Instants are compared with time.Time.Equal, so periods located in
// different timezones but covering the same instants are equal.
func (p Period) Equals(other Period) bool {
	sameStart := p.Start.Equal(other.Start)
	sameEnd := p.End.Equal(other.End)
	return sameStart && sameEnd
}
// IsZero reports whether the period spans no time at all, i.e. its
// start and end are the same instant.
func (p Period) IsZero() bool {
	return p.Start.Equal(p.End)
}
// Difference returns the set difference (p - other): the segments of p
// that are NOT covered by other. Possible results:
//   - disjoint periods: a slice containing p;
//   - partial overlap: the subset of p outside other;
//   - p envelops other: two segments, before and after other;
//   - other envelops p: an empty slice.
func (p Period) Difference(other Period) []Period {
	segments := make([]Period, 0)
	if p.Start.Before(other.Start) {
		// Leading segment of p before other begins.
		end := other.Start
		if !p.End.IsZero() {
			end = MinTime(p.End, other.Start)
		}
		segments = append(segments, NewPeriod(p.Start, end))
	}
	if !other.End.IsZero() && (p.End.IsZero() || p.End.After(other.End)) {
		// Trailing segment of p after other ends.
		segments = append(segments, NewPeriod(MaxTime(p.Start, other.End), p.End))
	}
	return segments
}
// MaxTime returns the maximum of the provided times
func MaxTime(times ...time.Time) time.Time {
if len(times) == 0 {
return time.Time{}
}
maxTime := times[0]
for _, t := range times[1:] {
if t.After(maxTime) {
maxTime = t
}
}
return maxTime
}
// MinTime returns the minimum of the provided times
func MinTime(times ...time.Time) time.Time {
if len(times) == 0 {
return time.Time{}
}
minTime := times[0]
for _, t := range times[1:] {
if t.Before(minTime) {
minTime = t
}
}
return minTime
}
// MonStartToSunStart normalizes Monday Start Day of Week (Mon=0, Sun=6) to Sunday Start of Week (Sun=0, Sat=6)
func MonStartToSunStart(dow int) (time.Weekday, error) {
switch dow {
case 0:
return time.Monday, nil
case 1:
return time.Tuesday, nil
case 2:
return time.Wednesday, nil
case 3:
return time.Thursday, nil
case 4:
return time.Friday, nil
case 5:
return time.Saturday, nil
case 6:
return time.Sunday, nil
}
return time.Sunday, fmt.Errorf("unknown day of week")
}
// DayApplicable reports whether the given weekday is in the set of
// applicable days. Unknown weekday values yield false.
func (ad ApplicableDays) DayApplicable(d time.Weekday) bool {
	byWeekday := map[time.Weekday]bool{
		time.Sunday:    ad.Sunday,
		time.Monday:    ad.Monday,
		time.Tuesday:   ad.Tuesday,
		time.Wednesday: ad.Wednesday,
		time.Thursday:  ad.Thursday,
		time.Friday:    ad.Friday,
		time.Saturday:  ad.Saturday,
	}
	return byWeekday[d]
}
// TimeApplicable reports whether t, viewed in the given timezone, falls
// on an applicable day of the week.
func (ad ApplicableDays) TimeApplicable(t time.Time, location *time.Location) bool {
	return ad.DayApplicable(t.In(location).Weekday())
}
// AnyApplicable reports whether at least one weekday is applicable.
func (ad ApplicableDays) AnyApplicable() bool {
	for _, applicable := range []bool{
		ad.Sunday, ad.Monday, ad.Tuesday, ad.Wednesday,
		ad.Thursday, ad.Friday, ad.Saturday,
	} {
		if applicable {
			return true
		}
	}
	return false
}
// NewApplicableDaysMonStart builds an ApplicableDays covering the
// continuous Monday-start range [startDay, endDay] (Mon=0 .. Sun=6).
// When startDay > endDay the range wraps around the end of the week.
// NOTE: this fills the struct positionally via reflection, so it depends
// on ApplicableDays declaring its fields in Monday-first order.
func NewApplicableDaysMonStart(startDay int, endDay int) ApplicableDays {
	var days ApplicableDays
	fields := reflect.ValueOf(&days).Elem()
	for i := 0; i < 7; i++ {
		applicable := startDay <= i && i <= endDay
		if startDay > endDay {
			// Wrapped range, e.g. Fri..Tue.
			applicable = startDay <= i || i <= endDay
		}
		fields.Field(i).SetBool(applicable)
	}
	return days
}
// MergePeriods sorts the given periods by start time (in place) and
// returns a new slice in which all intersecting periods are coalesced.
func MergePeriods(periods []Period) []Period {
	sort.Slice(periods, func(i, j int) bool {
		return periods[i].Start.Before(periods[j].Start)
	})
	out := make([]Period, 0, len(periods))
	for _, p := range periods {
		n := len(out)
		if n == 0 || out[n-1].End.Before(p.Start) {
			// Disjoint from (or first relative to) the last merged period.
			out = append(out, p)
			continue
		}
		// Overlapping: extend the last merged period as needed.
		out[n-1].End = MaxTime(out[n-1].End, p.End)
	}
	return out
}
// AddDSTAwareDuration will add the given duration to the given time, adjusting for timezone offset changes due to DST and return
// the resulting time. As an example, adding 24 hours to 2019-11-02 15:00:00 -0500 CST will result in 2019-11-02 15:00:00 -0600 CST,
// whereas the time library Add method would result in 2019-11-03 14:00:00 -0600 CST because of the timezone offset change.
func AddDSTAwareDuration(t time.Time, d time.Duration) time.Time {
result := t.Add(d)
_, tOffset := t.Zone()
_, resultOffset := result.Zone()
return result.Add(time.Duration(tOffset-resultOffset) * time.Second)
} | periodic.go | 0.846578 | 0.624165 | periodic.go | starcoder |
package trivium
// Trivium represents the 288-bit state of the Trivium cipher.
// The 288 cells are packed into five uint64 words; cells are indexed
// from the most-significant end of each word (see the shNN constants),
// so the low-order tail of the last word is unused padding.
type Trivium struct {
	state [5]uint64
}
const (
	// KeyLength bytes in the key and IV, 10 bytes = 80 bits
	KeyLength = 10
	lgWordSize = 6 // using uint64 = 2^6 as the backing array
	// the indices in the array for the given cells that are tapped for processing
	// (iN = (N-1) >> lgWordSize: which uint64 word holds 1-based cell N)
	i66 = 65 >> lgWordSize
	i93 = 92 >> lgWordSize
	i162 = 161 >> lgWordSize
	i177 = 176 >> lgWordSize
	i243 = 242 >> lgWordSize
	i288 = 287 >> lgWordSize
	i91 = 90 >> lgWordSize
	i92 = 91 >> lgWordSize
	i171 = 170 >> lgWordSize
	i175 = 174 >> lgWordSize
	i176 = 175 >> lgWordSize
	i264 = 263 >> lgWordSize
	i286 = 285 >> lgWordSize
	i287 = 286 >> lgWordSize
	i69 = 68 >> lgWordSize
	i94 = 93 >> lgWordSize
	i178 = 177 >> lgWordSize
	// the position within the word, shift within the word starting from the left
	// (shN = 63 - ((N-1) mod 64): right-shift that brings cell N to bit 0)
	wordSize = 1 << lgWordSize
	mask = wordSize - 1
	sh66 = mask - (65 & mask)
	sh93 = mask - (92 & mask)
	sh162 = mask - (161 & mask)
	sh177 = mask - (176 & mask)
	sh243 = mask - (242 & mask)
	sh288 = mask - (287 & mask)
	sh91 = mask - (90 & mask)
	sh92 = mask - (91 & mask)
	sh171 = mask - (170 & mask)
	sh175 = mask - (174 & mask)
	sh176 = mask - (175 & mask)
	sh264 = mask - (263 & mask)
	sh286 = mask - (285 & mask)
	sh287 = mask - (286 & mask)
	sh69 = mask - (68 & mask)
	sh94 = mask - (93 & mask)
	sh178 = mask - (177 & mask)
)
// NewTrivium returns a Trivium cipher initialized with key and initialization value (IV).
// Both the key and IV are 80-bits (10 bytes). The initialization processes the cipher for
// 4*288 cycles to "warm-up" and attempt to eliminate any usable dependency on key and IV.
func NewTrivium(key, iv [KeyLength]byte) *Trivium {
	var state [5]uint64
	// Pack the bit-reversed key bytes into the first 80 cells (words 0-1).
	state[0] |= (uint64(reverseByte(key[0])) << 56) | (uint64(reverseByte(key[1])) << 48) | (uint64(reverseByte(key[2])) << 40) | (uint64(reverseByte(key[3])) << 32)
	state[0] |= (uint64(reverseByte(key[4])) << 24) | (uint64(reverseByte(key[5])) << 16) | (uint64(reverseByte(key[6])) << 8) | uint64(reverseByte(key[7]))
	state[1] |= (uint64(reverseByte(key[8])) << 56) | (uint64(reverseByte(key[9])) << 48)
	// Pack the bit-reversed IV bytes starting mid-word (words 1-2); iv[4]
	// straddles the word boundary, hence the split >>5 / <<59 shifts.
	state[1] |= (uint64(reverseByte(iv[4])) >> 5) | (uint64(reverseByte(iv[3])) << 3) | (uint64(reverseByte(iv[2])) << 11) | (uint64(reverseByte(iv[1])) << 19) | (uint64(reverseByte(iv[0])) << 27)
	state[2] |= (uint64(reverseByte(iv[7])) << 35) | (uint64(reverseByte(iv[6])) << 43) | (uint64(reverseByte(iv[5])) << 51) | (uint64(reverseByte(iv[4])) << 59)
	state[2] |= (uint64(reverseByte(iv[9])) << 19) | (uint64(reverseByte(iv[8])) << 27)
	// state[3] is initialized with all zeros
	// Set the final three cells (0b111) as required by the Trivium spec.
	state[4] |= uint64(7) << 32
	trivium := Trivium{state: state}
	// Warm-up: clock the cipher 4 full state-lengths, discarding output.
	for i := 0; i < 4*288; i++ {
		trivium.NextBit()
	}
	return &trivium
}
// NextBit gets the next single bit from the Trivium key stream
// (returned in the low bit of the result).
func (t *Trivium) NextBit() uint64 {
	return t.NextBits(1)
}
// NextBits gets the next 1 to 63 bits from the Trivium stream, packed
// into the low n bits of the result. It reads the six output taps,
// computes n bits of key stream, then clocks the whole 288-bit register
// by n positions and re-inserts the three feedback words.
func (t *Trivium) NextBits(n uint) uint64 {
	var bitmask uint64 = (1 << n) - 1
	// get the taps; each tap may straddle a word boundary, hence the
	// two-word reconstruction.
	s66 := (t.state[i66] >> sh66) | (t.state[i66-1] << (wordSize - sh66))
	s93 := (t.state[i93] >> sh93) | (t.state[i93-1] << (wordSize - sh93))
	s162 := (t.state[i162] >> sh162) | (t.state[i162-1] << (wordSize - sh162))
	s177 := (t.state[i177] >> sh177) | (t.state[i177-1] << (wordSize - sh177))
	s243 := (t.state[i243] >> sh243) | (t.state[i243-1] << (wordSize - sh243))
	s288 := (t.state[i288] >> sh288) | (t.state[i288-1] << (wordSize - sh288))
	t1 := s66 ^ s93
	t2 := s162 ^ s177
	t3 := s243 ^ s288
	// store the output
	z := (t1 ^ t2 ^ t3) & bitmask
	// process the taps: fold in the AND taps and the feed-forward cells
	// to form the three feedback words.
	s91 := (t.state[i91] >> sh91) | (t.state[i91-1] << (wordSize - sh91))
	s92 := (t.state[i92] >> sh92) | (t.state[i92-1] << (wordSize - sh92))
	s171 := (t.state[i171] >> sh171) | (t.state[i171-1] << (wordSize - sh171))
	s175 := (t.state[i175] >> sh175) | (t.state[i175-1] << (wordSize - sh175))
	s176 := (t.state[i176] >> sh176) | (t.state[i176-1] << (wordSize - sh176))
	s264 := (t.state[i264] >> sh264) | (t.state[i264-1] << (wordSize - sh264))
	s286 := (t.state[i286] >> sh286) | (t.state[i286-1] << (wordSize - sh286))
	s287 := (t.state[i287] >> sh287) | (t.state[i287-1] << (wordSize - sh287))
	s69 := (t.state[i69] >> sh69) | (t.state[i69-1] << (wordSize - sh69))
	t1 ^= ((s91 & s92) ^ s171)
	t2 ^= ((s175 & s176) ^ s264)
	t3 ^= ((s286 & s287) ^ s69)
	t1 &= bitmask
	t2 &= bitmask
	t3 &= bitmask
	// rotate the state by n bit positions; t3 feeds back into register A.
	t.state[4] = (t.state[4] >> n) | (t.state[3] << (wordSize - n))
	t.state[3] = (t.state[3] >> n) | (t.state[2] << (wordSize - n))
	t.state[2] = (t.state[2] >> n) | (t.state[1] << (wordSize - n))
	t.state[1] = (t.state[1] >> n) | (t.state[0] << (wordSize - n))
	t.state[0] = (t.state[0] >> n) | (t3 << (wordSize - n))
	// update the final values: after the rotation, cells 94 and 178 have
	// moved by n, so recompute their word index and shift.
	n94 := 92 + n
	n178 := 176 + n
	ni94 := n94 >> lgWordSize
	nsh94 := mask - (n94 & mask)
	ni178 := n178 >> lgWordSize
	nsh178 := mask - (n178 & mask)
	t.state[ni94] = t.state[ni94] &^ (bitmask << nsh94)
	t.state[ni94] |= t1 << nsh94
	// need to handle overlap across word boundaries
	t.state[i94] = t.state[i94] &^ (bitmask >> (wordSize - nsh94))
	t.state[i94] |= t1 >> (wordSize - nsh94)
	t.state[ni178] = t.state[ni178] &^ (bitmask << nsh178)
	t.state[ni178] |= t2 << nsh178
	// need to handle overlap across word boundaries
	t.state[i178] = t.state[i178] &^ (bitmask >> (wordSize - nsh178))
	t.state[i178] |= t2 >> (wordSize - nsh178)
	return z
}
}
// NextByte returns the next byte of key stream with the MSB as the last
// bit produced; the first byte produced holds stream bits [76543210].
func (t *Trivium) NextByte() byte {
	const bitsPerByte = 8
	return byte(t.NextBits(bitsPerByte))
}
// NextBytes returns the next 1 to 8 bytes of key stream with the MSB as
// the last bit produced; byte i holds stream bits [8i+7 .. 8i].
func (t *Trivium) NextBytes(n uint) []byte {
	word := t.NextBits(n << 3)
	out := make([]byte, n)
	for i := range out {
		out[i] = byte(word >> (uint(i) << 3))
	}
	return out
}
// reverseByte returns b with its bit order reversed
// (bit 0 swaps with bit 7, bit 1 with bit 6, and so on).
func reverseByte(b byte) byte {
	var reversed byte
	for bit := 0; bit < 8; bit++ {
		reversed = (reversed << 1) | (b & 1)
		b >>= 1
	}
	return reversed
}
package log
//index defines our index file, which comprises a persisted file and a memory- mapped file.
//The size tells us the size of the index and where to write the next entry appended to the index.
import (
"io"
"os"
"github.com/tysontate/gommap"
)
var (
	// offWidth is the number of bytes used to store a record's offset (uint32).
	offWidth uint64 = 4
	// posWidth is the number of bytes used to store a record's position (uint64).
	posWidth uint64 = 8
	//entWidth to jump straight to the position of an entry
	//given its offset since the position in the file is offset * entWidth
	entWidth = offWidth + posWidth
)
// newIndex creates an index for the given file. It records the file's
// current size so the amount of index data in use can be tracked, grows
// the file to the configured maximum index size (the mmap length is
// fixed at map time), memory-maps it read/write, and returns the index.
//
// Change from original: uses f.Stat()/f.Truncate() on the open handle
// instead of os.Stat(f.Name())/os.Truncate(f.Name()), avoiding a racy
// second path lookup if the file is renamed or replaced concurrently.
func newIndex(f *os.File, c Config) (*index, error) {
	idx := &index{file: f}
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	idx.size = uint64(fi.Size())
	// Pre-allocate to the maximum size before mapping; the mapping cannot
	// grow afterwards.
	if err = f.Truncate(int64(c.Segment.MaxIndexByte)); err != nil {
		return nil, err
	}
	if idx.nmap, err = gommap.Map(
		f.Fd(),
		gommap.PROT_READ|gommap.PROT_WRITE,
		gommap.MAP_SHARED,
	); err != nil {
		return nil, err
	}
	return idx, nil
}
// Close makes sure the memory-mapped region has synced its data to the
// persisted file and that the file has flushed its contents to stable
// storage, then truncates the file to the amount of data actually in it
// (presumably so the last entry can be located on restart) and closes it.
//
// Change from original: Sync now uses MS_SYNC. MS_ASYNC merely schedules
// the write-back, so the documented "makes sure ... has synced" contract
// was not actually met before the truncate; MS_SYNC blocks until the
// dirty pages are written.
func (i *index) Close() error {
	if err := i.nmap.Sync(gommap.MS_SYNC); err != nil {
		return err
	}
	if err := i.file.Sync(); err != nil {
		return err
	}
	if err := i.file.Truncate(int64(i.size)); err != nil {
		return err
	}
	return i.file.Close()
}
// Read takes in an offset and returns the associated record's offset and
// position in the store. The given offset is relative to the segment's
// base offset; 0 is always the offset of the index's first entry, 1 is
// the second entry, and so on. Passing -1 selects the last entry.
// io.EOF is returned when the index is empty or the requested entry lies
// past the written region.
func (i *index) Read(in int64) (out uint32, pos uint64, err error) {
	if i.size == 0 {
		return 0, 0, io.EOF
	}
	if in == -1 {
		// -1 means "last entry": size/entWidth entries exist, minus one
		// for the zero-based index.
		out = uint32((i.size / entWidth) - uint64(1))
	} else {
		out = uint32(in)
	}
	pos = uint64(out) * entWidth
	if i.size < pos+entWidth {
		return 0, 0, io.EOF
	}
	// Each entry is laid out as [offset: offWidth bytes][position: posWidth bytes].
	out = enc.Uint32(i.nmap[pos : pos+offWidth])
	pos = enc.Uint64(i.nmap[pos+offWidth : pos+entWidth])
	return out, pos, nil
}
package grid
// Range represents a grid with the first point being inclusive and the second
// point exclusive. The two corners may run in either direction on each axis;
// the methods below handle both orientations.
type Range [2]Pt
// Contains reports whether pt lies inside the Range, with the first
// corner inclusive and the second exclusive on each axis. The bounds may
// run in either direction; a Range whose corners share a coordinate on
// an axis contains nothing.
func (r Range) Contains(pt Pt) bool {
	inAxis := func(from, to, v int) bool {
		switch {
		case from < to:
			// Ascending axis: [from, to).
			return v >= from && v < to
		case to < from:
			// Descending axis: (to, from].
			return v <= from && v > to
		}
		// Degenerate axis contains nothing.
		return false
	}
	return inAxis(r[0].X, r[1].X, pt.X) && inAxis(r[0].Y, r[1].Y, pt.Y)
}
// Size returns a Pt holding the width and height of the Range.
func (r Range) Size() Pt {
	diff := r[1].Subtract(r[0])
	return diff.Abs()
}
// Iter fulfills IteratorFactory, returning a Scanner over the Range
// wrapped as a base Iterator.
func (r Range) Iter() Iterator {
	scanner := NewScanner(r)
	return BaseIteratorWrapper{scanner}
}
// Start creates an iterator over the range and starts it.
func (r Range) Start() (i Iterator, done bool) {
	it := r.Iter()
	return it.Start()
}
// Min returns the point with the lowest X and Y values contained in the
// range, accounting for either axis running in reverse.
func (r Range) Min() Pt {
	// Default to the exclusive corner plus one (covers descending and
	// degenerate axes); overwrite with the inclusive corner when ascending.
	pt := Pt{X: r[1].X + 1, Y: r[1].Y + 1}
	if r[0].X < r[1].X {
		pt.X = r[0].X
	}
	if r[0].Y < r[1].Y {
		pt.Y = r[0].Y
	}
	return pt
}
// Max returns the point with the largest X and Y values contained in the
// range, accounting for either axis running in reverse.
func (r Range) Max() Pt {
	// Default to the exclusive corner minus one (covers ascending and
	// degenerate axes); overwrite with the inclusive corner when descending.
	pt := Pt{X: r[1].X - 1, Y: r[1].Y - 1}
	if r[0].X > r[1].X {
		pt.X = r[0].X
	}
	if r[0].Y > r[1].Y {
		pt.Y = r[0].Y
	}
	return pt
}
// Scale the range so that r[0] returns t0=0, t1=0 and r[1] returns t0=1, t1=1.
// NOTE(review): when an axis extent is -1, 0 or 1, the multiplier stays 0 and
// the offset is set to 1, so every point maps to 1 on that axis — confirm this
// degenerate-axis behavior is intended.
func (r Range) Scale() Scale {
	var s Scale
	d := r[1].Subtract(r[0])
	if d.X > 1 {
		// Ascending axis: divide by extent-1 so the last included column maps to 1.
		s.X = 1.0 / float64(d.X-1)
		s.DX = -float64(r[0].X) * s.X
	} else if d.X < -1 {
		// Descending axis: negative multiplier mirrors the mapping.
		s.X = 1.0 / float64(d.X+1)
		s.DX = -float64(r[0].X) * s.X
	} else {
		s.DX = 1
	}
	if d.Y > 1 {
		s.Y = 1.0 / float64(d.Y-1)
		s.DY = -float64(r[0].Y) * s.Y
	} else if d.Y < -1 {
		s.Y = 1.0 / float64(d.Y+1)
		s.DY = -float64(r[0].Y) * s.Y
	} else {
		s.DY = 1
	}
	return s
}
package iso20022
// Account between an investor(s) and a fund manager or a fund. The account can contain holdings in any investment fund or investment fund class managed (or distributed) by the fund manager, within the same fund family.
// This is a generated-style mapping of the ISO 20022 InvestmentAccount58
// message component; field tags drive the XML (de)serialization.
type InvestmentAccount58 struct {
	// Unique and unambiguous identification for the account between the account owner and the account servicer.
	AccountIdentification *Max35Text `xml:"AcctId"`
	// Name of the account. It provides an additional means of identification, and is designated by the account servicer in agreement with the account owner.
	AccountName *Max35Text `xml:"AcctNm,omitempty"`
	// Supplementary registration information applying to a specific block of units for dealing and reporting purposes. The supplementary registration information may be used when all the units are registered, for example, to a funds supermarket, but holdings for each investor have to reconciled individually.
	AccountDesignation *Max35Text `xml:"AcctDsgnt,omitempty"`
	// Party that legally owns the account.
	OwnerIdentification []*PartyIdentification113 `xml:"OwnrId,omitempty"`
	// Party that manages the account on behalf of the account owner, that is manages the registration and booking of entries on the account, calculates balances on the account and provides information about the account.
	AccountServicer *PartyIdentification113 `xml:"AcctSvcr,omitempty"`
	// Counterparties eligibility as defined by article 24 of the EU MiFID Directive applicable to transactions executed by investment firms for eligible counterparties.
	OrderOriginatorEligibility *OrderOriginatorEligibility1Code `xml:"OrdrOrgtrElgblty,omitempty"`
	// Sub-account of the master or omnibus account.
	SubAccountDetails *SubAccount6 `xml:"SubAcctDtls,omitempty"`
}
// SetAccountIdentification records value as the account identification.
func (i *InvestmentAccount58) SetAccountIdentification(value string) {
	id := Max35Text(value)
	i.AccountIdentification = &id
}
// SetAccountName records value as the account name.
func (i *InvestmentAccount58) SetAccountName(value string) {
	name := Max35Text(value)
	i.AccountName = &name
}
// SetAccountDesignation records value as the account designation.
func (i *InvestmentAccount58) SetAccountDesignation(value string) {
	designation := Max35Text(value)
	i.AccountDesignation = &designation
}
// AddOwnerIdentification appends a new, empty owner identification and
// returns it so the caller can populate it.
func (i *InvestmentAccount58) AddOwnerIdentification() *PartyIdentification113 {
	owner := &PartyIdentification113{}
	i.OwnerIdentification = append(i.OwnerIdentification, owner)
	return owner
}
// AddAccountServicer installs a new, empty account servicer and returns
// it so the caller can populate it.
func (i *InvestmentAccount58) AddAccountServicer() *PartyIdentification113 {
	i.AccountServicer = &PartyIdentification113{}
	return i.AccountServicer
}
// SetOrderOriginatorEligibility records value as the order originator
// eligibility code.
func (i *InvestmentAccount58) SetOrderOriginatorEligibility(value string) {
	code := OrderOriginatorEligibility1Code(value)
	i.OrderOriginatorEligibility = &code
}
func (i *InvestmentAccount58) AddSubAccountDetails() *SubAccount6 {
i.SubAccountDetails = new(SubAccount6)
return i.SubAccountDetails
} | InvestmentAccount58.go | 0.706596 | 0.433622 | InvestmentAccount58.go | starcoder |
package gfmatrix
import (
"fmt"
)
// Matrix represents a GF(2^8)-matrix, stored as a slice of rows.
type Matrix []Row
// Mul right-multiplies the matrix by a column vector f, panicking when
// f's size does not match the matrix's column count.
func (e Matrix) Mul(f Row) Row {
	rows, cols := e.Size()
	if cols != f.Size() {
		panic("Can't multiply by row that is wrong size!")
	}
	product := NewRow(rows)
	for i, row := range e {
		product[i] = row.DotProduct(f)
	}
	return product
}
// Add returns the entry-wise sum of two matrices from GF(2^8)^nxm.
// The matrices are expected to have the same dimensions.
//
// Change from original: `for i, _ := range` replaced with the idiomatic
// `for i := range` (flagged by gofmt -s / staticcheck).
func (e Matrix) Add(f Matrix) Matrix {
	n, _ := e.Size()
	out := make([]Row, n)
	for i := range out {
		out[i] = e[i].Add(f[i])
	}
	return out
}
// Compose returns the matrix product e*f, panicking when the inner
// dimensions do not agree.
func (e Matrix) Compose(f Matrix) Matrix {
	n, m := e.Size()
	p, q := f.Size()
	if m != p {
		panic("Can't multiply matrices of the wrong size!")
	}
	// Transpose f so each output entry is a dot product of two rows.
	fT := f.Transpose()
	out := GenerateEmpty(n, q)
	for i := range e {
		for j := range fT {
			out[i][j] = e[i].DotProduct(fT[j])
		}
	}
	return out
}
// Transpose returns a deep copy of the matrix with rows and columns
// exchanged.
func (e Matrix) Transpose() Matrix {
	rows, cols := e.Size()
	out := GenerateEmpty(cols, rows)
	for i := range e {
		for j := range e[i] {
			out[j][i] = e[i][j].Dup()
		}
	}
	return out
}
// Invert computes the multiplicative inverse of the matrix. The boolean
// result is false when the matrix is singular (Gauss-Jordan elimination
// left free variables).
func (e Matrix) Invert() (Matrix, bool) {
	inv, _, frees := e.gaussJordan()
	invertible := len(frees) == 0
	return inv, invertible
}
// FindPivot scans downward from row for a row with a non-zero entry in
// column col, returning its index or -1 when none exists.
func (e Matrix) FindPivot(row, col int) int {
	rows, _ := e.Size()
	for i := row; i < rows; i++ {
		if e[i][col].IsZero() {
			continue
		}
		return i
	}
	return -1
}
// Dup returns a deep copy of the matrix (every element duplicated).
func (e Matrix) Dup() Matrix {
	rows, cols := e.Size()
	out := GenerateEmpty(rows, cols)
	for i := range e {
		for j := range e[i] {
			out[i][j] = e[i][j].Dup()
		}
	}
	return out
}
// IsBinary reports whether every entry of the matrix is zero or one.
func (e Matrix) IsBinary() bool {
	for _, row := range e {
		for _, elem := range row {
			if elem.IsZero() || elem.IsOne() {
				continue
			}
			return false
		}
	}
	return true
}
// Equals reports whether the two matrices have the same rows.
func (e Matrix) Equals(f Matrix) bool {
	n, _ := e.Size()
	if m, _ := f.Size(); m != n {
		return false
	}
	for i := 0; i < n; i++ {
		if !e[i].Equals(f[i]) {
			return false
		}
	}
	return true
}
// Size returns the dimensions of the matrix in (rows, columns) order.
// An empty matrix is (0, 0).
//
// Change from original: dropped the `else` after a terminating return
// (Go idiom: keep the happy path unindented).
func (e Matrix) Size() (int, int) {
	if len(e) == 0 {
		return 0, 0
	}
	return len(e), e[0].Size()
}
// String renders the matrix as newline-terminated rows.
func (e Matrix) String() string {
	rendered := make([]rune, 0)
	for _, row := range e {
		rendered = append(rendered, []rune(row.String())...)
		rendered = append(rendered, '\n')
	}
	return string(rendered)
}
// OctaveString converts the matrix into a string that can be imported
// into Octave, by concatenating each row's Octave form.
func (e Matrix) OctaveString() string {
	out := ""
	for _, row := range e {
		out += row.OctaveString()
	}
	return out
}
func (e Matrix) GoString() string {
out := []rune("gfmatrix.Matrix{\n")
for _, row := range e {
out = append(out, []rune("\tgfmatrix.Row{")...)
for _, elem := range row[:len(row)-1] {
out = append(out, []rune(fmt.Sprintf("0x%2.2x, ", elem))...)
}
out = append(out, []rune(fmt.Sprintf("0x%2.2x},\n", row[len(row)-1]))...)
}
out = append(out, '}')
return string(out)
} | gfmatrix/gfmatrix.go | 0.842798 | 0.459197 | gfmatrix.go | starcoder |
package ts
import (
"fmt"
"time"
)
// TimeSpan is a period of time with a beginning and an end.
// With both flags left at their zero values the span is half-open:
// inclusive start, exclusive end — [Start, End).
type TimeSpan struct {
	Start time.Time
	End time.Time
	// I use the convoluted "NotStartInclusive" so that the zero value for TimeSpan's
	// bounds is a sensible default, and callers can just use ts.TimeSpan{Start: foo, End: bar}
	NotStartInclusive bool
	EndInclusive bool
}
// String returns the TimeSpan formatted with time.Time's default layout
// "2006-01-02 15:04:05.999999999 -0700 MST".
func (ts1 TimeSpan) String() string {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	return ts1.Format(layout)
}
// Format returns the TimeSpan formatted according to the provided
// layout, delimited by [ ] for inclusive and ( ) for exclusive bounds.
func (ts1 TimeSpan) Format(layout string) string {
	opening, closing := "[", ")"
	if ts1.NotStartInclusive {
		opening = "("
	}
	if ts1.EndInclusive {
		closing = "]"
	}
	return fmt.Sprintf("%s%s, %s%s",
		opening, ts1.Start.Format(layout), ts1.End.Format(layout), closing)
}
// Copy produces a timespan identical to the input. TimeSpan is a plain
// value type, so the value receiver is already an independent copy and
// can be returned directly.
func (ts1 TimeSpan) Copy() TimeSpan {
	return ts1
}
// Union returns the smallest TimeSpan that contains both input TimeSpans.
// The earlier start and later end win; when boundaries coincide, the
// union is inclusive at that boundary if either input is.
func (ts1 TimeSpan) Union(ts2 TimeSpan) (ts3 TimeSpan) {
	switch {
	case ts1.Start.Before(ts2.Start):
		ts3.Start = ts1.Start
		ts3.NotStartInclusive = ts1.NotStartInclusive
	case ts2.Start.Before(ts1.Start):
		ts3.Start = ts2.Start
		ts3.NotStartInclusive = ts2.NotStartInclusive
	default:
		// Equal starts: exclusive only when both inputs exclude the start.
		ts3.Start = ts1.Start
		ts3.NotStartInclusive = ts1.NotStartInclusive && ts2.NotStartInclusive
	}
	switch {
	case ts1.End.After(ts2.End):
		ts3.End = ts1.End
		ts3.EndInclusive = ts1.EndInclusive
	case ts2.End.After(ts1.End):
		ts3.End = ts2.End
		ts3.EndInclusive = ts2.EndInclusive
	default:
		// Equal ends: inclusive when either input includes the end.
		ts3.End = ts1.End
		ts3.EndInclusive = ts1.EndInclusive || ts2.EndInclusive
	}
	return
}
// Intersect returns the TimeSpan common to both inputs: the later start
// and earlier end win. When boundaries coincide, the intersection is
// inclusive at that boundary only if both inputs are.
func (ts1 TimeSpan) Intersect(ts2 TimeSpan) (ts3 TimeSpan) {
	switch {
	case ts1.Start.Before(ts2.Start):
		ts3.Start = ts2.Start
		ts3.NotStartInclusive = ts2.NotStartInclusive
	case ts2.Start.Before(ts1.Start):
		ts3.Start = ts1.Start
		ts3.NotStartInclusive = ts1.NotStartInclusive
	default:
		// Equal starts: exclusive when either input excludes the start.
		ts3.Start = ts1.Start
		ts3.NotStartInclusive = ts1.NotStartInclusive || ts2.NotStartInclusive
	}
	switch {
	case ts1.End.After(ts2.End):
		ts3.End = ts2.End
		ts3.EndInclusive = ts2.EndInclusive
	case ts2.End.After(ts1.End):
		ts3.End = ts1.End
		ts3.EndInclusive = ts1.EndInclusive
	default:
		// Equal ends: inclusive only when both inputs include the end.
		ts3.End = ts1.End
		ts3.EndInclusive = ts1.EndInclusive && ts2.EndInclusive
	}
	return
}
// Diff returns the TimeSpans that are in the first but not in the second, and
// that are in the second but not the first.
// NOTE(review): the disjointness test below uses strict Before/After and
// ignores the inclusivity flags, so spans that merely touch at a boundary are
// treated as overlapping — confirm that is intended. Also note that when one
// span fully envelops the other, one of the returned spans has Start after
// End (an inverted span) rather than being empty.
func (ts1 TimeSpan) Diff(ts2 TimeSpan) (TimeSpan, TimeSpan) {
	if ts1.End.Before(ts2.Start) || ts1.Start.After(ts2.End) {
		// Fully disjoint: each input is its own difference.
		return ts1.Copy(), ts2.Copy()
	}
	var ts3, ts4 TimeSpan
	if ts1.Start.Before(ts2.Start) {
		// ts3 = part of ts1 before ts2 begins; its end takes the opposite
		// inclusivity of ts2's start so the two pieces partition cleanly.
		ts3.Start = ts1.Start
		ts3.NotStartInclusive = ts1.NotStartInclusive
		ts3.End = ts2.Start
		ts3.EndInclusive = ts2.NotStartInclusive
		// ts4 = part of ts2 after ts1 ends.
		ts4.Start = ts1.End
		ts4.NotStartInclusive = ts1.EndInclusive
		ts4.End = ts2.End
		ts4.EndInclusive = ts2.EndInclusive
		return ts3, ts4
	}
	// Symmetric case: ts2 starts first (or the starts coincide).
	ts4.Start = ts2.Start
	ts4.NotStartInclusive = ts2.NotStartInclusive
	ts4.End = ts1.Start
	ts4.EndInclusive = ts1.NotStartInclusive
	ts3.Start = ts2.End
	ts3.NotStartInclusive = ts2.EndInclusive
	ts3.End = ts1.End
	ts3.EndInclusive = ts1.EndInclusive
	return ts3, ts4
}
package tmo
import (
"image"
"image/color"
"math"
"github.com/mdouchement/hdr"
"github.com/mdouchement/hdr/filter"
"github.com/mdouchement/hdr/xmath"
"github.com/mdouchement/hdr/parallel"
)
// A CustomReinhard05 is a custom Reinhard05 TMO implementation.
// It looks like a JPEG photo taken with a smartphone.
// It provides a quick render with less RAM consumption than Reinhard05.
type CustomReinhard05 struct {
	HDRImage hdr.Image
	// Brightness is included in [-50, 50] with 1 increment step.
	// NOTE: the constructor stores this value scaled by 10.
	Brightness float64
	// Chromatic is included in [0, 1] with 0.01 increment step.
	Chromatic float64
	// Light is included in [0, 1] with 0.01 increment step.
	// NOTE: the constructor stores this value scaled by 10.
	Light float64
	// f is the exposure factor derived from Brightness in Perform
	// (f = exp(-Brightness)).
	f float64
}
// NewDefaultCustomReinhard05 instanciates a new CustomReinhard05 TMO with
// default parameters (neutral brightness and chromatic, light of 0.1).
func NewDefaultCustomReinhard05(m hdr.Image) *CustomReinhard05 {
	const (
		defaultBrightness = 0
		defaultChromatic  = 0
		defaultLight      = 0.1
	)
	return NewCustomReinhard05(m, defaultBrightness, defaultChromatic, defaultLight)
}
// NewCustomReinhard05 instanciates a new CustomReinhard05 TMO. The brightness,
// chromatic and light inputs are clamped to their documented ranges before
// being stored (brightness and light are additionally scaled by 10).
func NewCustomReinhard05(m hdr.Image, brightness, chromatic, light float64) *CustomReinhard05 {
	t := &CustomReinhard05{HDRImage: m}
	t.Brightness = xmath.ClampF64(-50, 50, brightness) * 10
	t.Chromatic = xmath.ClampF64(0, 1, chromatic)
	t.Light = xmath.ClampF64(0, 1, light) * 10
	return t
}
// Perform runs the TMO mapping and returns the tone-mapped LDR image.
func (t *CustomReinhard05) Perform() image.Image {
	// Derive the exposure factor from the brightness setting; tonemap reads it
	// via sampling, so it must be set first.
	t.f = math.Exp(-t.Brightness)
	out := image.NewRGBA64(t.HDRImage.Bounds())
	lo, hi := t.tonemap()
	t.normalize(out, lo, hi)
	return out
}
// tonemap scans a sub-sampled view of the HDR image in parallel tiles and
// returns the smallest and largest photoreceptor responses observed across the
// R, G and B channels. These bounds are later used by normalize.
func (t *CustomReinhard05) tonemap() (minSample, maxSample float64) {
	// Only a fraction (0.6) of the pixels is visited, trading accuracy for
	// speed and RAM.
	qsImg := filter.NewQuickSampling(t.HDRImage, 0.6)
	minSample = math.Inf(1)
	maxSample = math.Inf(-1)
	minCh := make(chan float64)
	maxCh := make(chan float64)
	completed := parallel.TilesR(qsImg.Bounds(), func(x1, y1, x2, y2 int) {
		// Per-tile extrema; published on minCh/maxCh when the tile is done.
		min := 1.0
		max := 0.0
		for y := y1; y < y2; y++ {
			for x := x1; x < x2; x++ {
				pixel := qsImg.HDRAt(x, y)
				r, g, b, _ := pixel.HDRRGBA()
				_, lum, _, _ := pixel.HDRXYZA()
				var sample float64
				// Zero luminance would make the photoreceptor model
				// meaningless, so such pixels are skipped.
				if lum != 0.0 {
					sample = t.sampling(r, lum)
					min = math.Min(min, sample)
					max = math.Max(max, sample)
					sample = t.sampling(g, lum)
					min = math.Min(min, sample)
					max = math.Max(max, sample)
					sample = t.sampling(b, lum)
					min = math.Min(min, sample)
					max = math.Max(max, sample)
				}
			}
		}
		minCh <- min
		maxCh <- max
	})
	// Reduce the per-tile extrema until all tiles signal completion on
	// `completed`.
	for {
		select {
		case <-completed:
			return
		case sample := <-minCh:
			minSample = math.Min(minSample, sample)
		case sample := <-maxCh:
			maxSample = math.Max(maxSample, sample)
		}
	}
}
// sampling applies the photoreceptor model to one channel value, given the
// pixel's luminance. A zero sample is returned unchanged.
func (t *CustomReinhard05) sampling(sample, lum float64) float64 {
	if sample == 0.0 {
		return sample
	}
	// Local light adaptation
	local := t.Chromatic*sample + (1-t.Chromatic)*lum
	// Interpolated light adaptation
	adapted := t.Light * local
	// Photoreceptor equation
	return sample / (sample + math.Pow(t.f*adapted, adapted))
}
// normalize maps every HDR pixel into the 16-bit LDR output image, rescaling
// each channel by the [minSample, maxSample] range found during tonemap.
// Tiles are processed in parallel; the call blocks until all tiles are done.
func (t *CustomReinhard05) normalize(img *image.RGBA64, minSample, maxSample float64) {
	done := parallel.TilesR(t.HDRImage.Bounds(), func(x1, y1, x2, y2 int) {
		for row := y1; row < y2; row++ {
			for col := x1; col < x2; col++ {
				r, g, b, _ := t.HDRImage.HDRAt(col, row).HDRRGBA()
				img.SetRGBA64(col, row, color.RGBA64{
					R: t.nrmz(r, minSample, maxSample),
					G: t.nrmz(g, minSample, maxSample),
					B: t.nrmz(b, minSample, maxSample),
					A: RangeMax,
				})
			}
		}
	})
	<-done
}
// normalize one channel
func (t *CustomReinhard05) nrmz(channel, minSample, maxSample float64) uint16 {
// Normalize intensities
channel = (channel - minSample) / (maxSample - minSample)
// Gamma correction
if channel > RangeMin {
channel = math.Pow(channel, 1/reinhardGamma)
}
// Inverse pixel mapping
channel = LinearInversePixelMapping(channel, LumPixFloor, LumSize)
// Clamp to solid black and solid white
channel = LDRClamp(channel)
return uint16(channel)
} | tmo/custom_reinhard05.go | 0.747063 | 0.425784 | custom_reinhard05.go | starcoder |
package main
import (
	"bytes"
	"fmt"
	"os"
	"strings"

	"github.com/olekukonko/tablewriter"
	"github.com/pkg/errors"
	"github.com/redhat-developer/odo/pkg/odo/cli"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)
/*
This "script" generates markdown that can be interpreted by the Slate (https://github.com/lord/slate) format.
Use this script to generate the documentation needed.
*/
// Uses portions of the help / cmd outputter in cobra13 as part of a CLI reference guide and outputs each command
// as a markdown section: "## name", usage in backticks, an sh-fenced example
// block, and the command's long description.
func referenceCommandFormatter(command *cobra.Command) string {
	return fmt.Sprintf(`## %s
%s
> Example using %s
%s
%s
`,
		command.Name(),
		"`"+command.Use+"`",
		command.Name(),
		"```sh\n"+command.Example+"\n```",
		command.Long)
}
// This prints out the CLI reference
// as a full markdown page: a table listing every subcommand, the indented CLI
// structure tree, and one detailed section per subcommand.
// NOTE(review): the level parameter is never used in the body.
func referencePrinter(command *cobra.Command, level int) string {
	// List each command
	var commandListTable [][]string
	for _, subcommand := range command.Commands() {
		// Each row links to the command's own markdown anchor.
		name := fmt.Sprintf("[%s](#%s)", subcommand.Name(), subcommand.Name())
		commandListTable = append(commandListTable, []string{name, subcommand.Short})
	}
	// Create a "table" for listing each command
	tableOutput := new(bytes.Buffer)
	table := tablewriter.NewWriter(tableOutput)
	table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
	table.SetCenterSeparator("|")
	table.SetHeader([]string{"Name", "Description"})
	table.SetColWidth(10000)
	table.AppendBulk(commandListTable)
	table.Render() // Send output to writer
	// Create documentation for each command
	var commandOutput string
	for _, subcommand := range command.Commands() {
		commandOutput = commandOutput + referenceCommandFormatter(subcommand)
	}
	// The main markdown "template" for everything
	return fmt.Sprintf(`# Overview of the Odo (OpenShift Do) CLI Structure
> Example application
%s
%s
# Syntax
#### List of Commands
%s
#### CLI Structure
%s
%s
`,
		"```sh\n"+command.Example+"\n```",
		command.Long,
		tableOutput.String(),
		"```sh\n"+fmt.Sprint(commandPrinter(command, 0))+"\n```",
		commandOutput)
}
// getFlags collects all flags of the set as "--name" strings.
func getFlags(flags *pflag.FlagSet) []string {
	var collected []string
	flags.VisitAll(func(f *pflag.Flag) {
		collected = append(collected, "--"+f.Name)
	})
	return collected
}
// flattenFlags joins the given flag strings into a single space-terminated
// string (e.g. ["--a", "--b"] -> "--a --b "); an empty list yields "".
// Uses strings.Builder instead of repeated string concatenation.
func flattenFlags(flags []string) string {
	var b strings.Builder
	for _, flag := range flags {
		b.WriteString(flag)
		b.WriteByte(' ')
	}
	return b.String()
}
// commandPrinter renders the command tree as indented lines of
// "name flags: short description", recursing one level deeper (one extra
// space of indentation) per subcommand. Uses strings.Builder instead of
// repeated string concatenation in a recursive function.
func commandPrinter(command *cobra.Command, level int) string {
	var b strings.Builder
	// add indentation
	b.WriteString(strings.Repeat(" ", level))
	b.WriteString(command.Name())
	b.WriteString(" ")
	b.WriteString(flattenFlags(getFlags(command.NonInheritedFlags())))
	b.WriteString(": ")
	b.WriteString(command.Short)
	b.WriteString("\n")
	for _, subcommand := range command.Commands() {
		b.WriteString(commandPrinter(subcommand, level+1))
	}
	return b.String()
}
// Generates and returns a markdown-formatted CLI reference page for Odo.
// The "reference" argument prints the full markdown page, "structure" prints
// only the indented command tree, and anything else prints usage.
func main() {
	var clidoc = &cobra.Command{
		Use:   "cli-doc",
		Short: "Generate CLI reference for Odo",
		Example: ` # Generate a markdown-formatted CLI reference page for Odo
cli-doc reference > docs/cli-reference.md
# Generate the CLI structure
cli-doc structure`,
		Args:      cobra.OnlyValidArgs,
		ValidArgs: []string{"help", "reference", "structure"},
		Run: func(command *cobra.Command, args []string) {
			if len(args) == 0 {
				fmt.Print(command.Usage())
			} else {
				switch args[0] {
				case "reference":
					fmt.Print(referencePrinter(cli.NewCmdOdo(cli.OdoRecommendedName, cli.OdoRecommendedName), 0))
				case "structure":
					fmt.Print(commandPrinter(cli.NewCmdOdo(cli.OdoRecommendedName, cli.OdoRecommendedName), 0))
				default:
					fmt.Print(command.Usage())
				}
			}
		},
	}
	err := clidoc.Execute()
	if err != nil {
		// Print the root cause rather than the whole wrap chain.
		fmt.Println(errors.Cause(err))
		os.Exit(1)
	}
}
package parser
import (
"sync"
"github.com/z7zmey/php-parser/node"
"github.com/z7zmey/php-parser/position"
"github.com/z7zmey/php-parser/scanner"
)
// PositionBuilder provide functions to constuct positions.
// Positions maps AST nodes to their recorded source positions; PositionPool
// recycles position.Position values to reduce allocations.
type PositionBuilder struct {
	Positions    *Positions
	PositionPool *sync.Pool
}
// startPos is a (line, offset) pair marking where something begins;
// {-1, -1} means "unknown".
type startPos struct {
	startLine int
	startPos  int
}

// endPos is a (line, offset) pair marking where something ends;
// {-1, -1} means "unknown".
type endPos struct {
	endLine int
	endPos  int
}
// getListStartPos returns the start position of the first node in l, or
// {-1, -1} when l is nil or empty.
func (b *PositionBuilder) getListStartPos(l []node.Node) startPos {
	// len(nil) == 0, so one length check covers both the nil and empty cases.
	if len(l) == 0 {
		return startPos{-1, -1}
	}
	return b.getNodeStartPos(l[0])
}
// getNodeStartPos looks up n's recorded position and returns its start
// line/offset, or {-1, -1} when n is nil or has no recorded position.
func (b *PositionBuilder) getNodeStartPos(n node.Node) startPos {
	if n == nil {
		return startPos{-1, -1}
	}
	p := (*b.Positions)[n]
	if p == nil {
		return startPos{-1, -1}
	}
	return startPos{p.StartLine, p.StartPos}
}
// getListEndPos returns the end position of the last node in l, or
// {-1, -1} when l is nil or empty.
func (b *PositionBuilder) getListEndPos(l []node.Node) endPos {
	// len(nil) == 0, so one length check covers both the nil and empty cases.
	if len(l) == 0 {
		return endPos{-1, -1}
	}
	return b.getNodeEndPos(l[len(l)-1])
}
// getNodeEndPos looks up n's recorded position and returns its end
// line/offset, or {-1, -1} when n is nil or has no recorded position.
func (b *PositionBuilder) getNodeEndPos(n node.Node) endPos {
	if n == nil {
		return endPos{-1, -1}
	}
	p := (*b.Positions)[n]
	if p == nil {
		return endPos{-1, -1}
	}
	return endPos{p.EndLine, p.EndPos}
}
// NewNodeListPosition returns a new pooled Position spanning from the first to
// the last node of list.
func (b *PositionBuilder) NewNodeListPosition(list []node.Node) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Compute each endpoint once instead of twice (once per field).
	start := b.getListStartPos(list)
	end := b.getListEndPos(list)
	pos.StartLine = start.startLine
	pos.StartPos = start.startPos
	pos.EndLine = end.endLine
	pos.EndPos = end.endPos
	return pos
}
// NewNodePosition returns a new pooled Position covering the single node n.
func (b *PositionBuilder) NewNodePosition(n node.Node) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Look up the node's position once instead of twice per endpoint.
	start := b.getNodeStartPos(n)
	end := b.getNodeEndPos(n)
	pos.StartLine = start.startLine
	pos.StartPos = start.startPos
	pos.EndLine = end.endLine
	pos.EndPos = end.endPos
	return pos
}
// NewTokenPosition returns a new pooled Position covering the single token t.
func (b *PositionBuilder) NewTokenPosition(t *scanner.Token) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	tp := t.Position
	pos.StartLine = tp.StartLine
	pos.StartPos = tp.StartPos
	pos.EndLine = tp.EndLine
	pos.EndPos = tp.EndPos
	return pos
}
// NewTokensPosition returns a new pooled Position spanning from startToken to
// endToken.
func (b *PositionBuilder) NewTokensPosition(startToken *scanner.Token, endToken *scanner.Token) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	sp := startToken.Position
	ep := endToken.Position
	pos.StartLine = sp.StartLine
	pos.StartPos = sp.StartPos
	pos.EndLine = ep.EndLine
	pos.EndPos = ep.EndPos
	return pos
}
// NewTokenNodePosition returns a new pooled Position spanning from token t to
// the end of node n.
func (b *PositionBuilder) NewTokenNodePosition(t *scanner.Token, n node.Node) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Look up the node's end position once instead of twice.
	end := b.getNodeEndPos(n)
	pos.StartLine = t.Position.StartLine
	pos.StartPos = t.Position.StartPos
	pos.EndLine = end.endLine
	pos.EndPos = end.endPos
	return pos
}
// NewNodeTokenPosition returns a new pooled Position spanning from node n to
// the end of token t.
func (b *PositionBuilder) NewNodeTokenPosition(n node.Node, t *scanner.Token) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Look up the node's start position once instead of twice.
	start := b.getNodeStartPos(n)
	pos.StartLine = start.startLine
	pos.StartPos = start.startPos
	pos.EndLine = t.Position.EndLine
	pos.EndPos = t.Position.EndPos
	return pos
}
// NewNodesPosition returns a new pooled Position spanning from startNode to
// endNode.
func (b *PositionBuilder) NewNodesPosition(startNode node.Node, endNode node.Node) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Look up each node's position once instead of twice per endpoint.
	start := b.getNodeStartPos(startNode)
	end := b.getNodeEndPos(endNode)
	pos.StartLine = start.startLine
	pos.StartPos = start.startPos
	pos.EndLine = end.endLine
	pos.EndPos = end.endPos
	return pos
}
// NewNodeListTokenPosition returns a new pooled Position spanning from the
// first node of list to the end of token t.
func (b *PositionBuilder) NewNodeListTokenPosition(list []node.Node, t *scanner.Token) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Compute the list's start position once instead of twice.
	start := b.getListStartPos(list)
	pos.StartLine = start.startLine
	pos.StartPos = start.startPos
	pos.EndLine = t.Position.EndLine
	pos.EndPos = t.Position.EndPos
	return pos
}
// NewTokenNodeListPosition returns a new pooled Position spanning from token t
// to the last node of list.
func (b *PositionBuilder) NewTokenNodeListPosition(t *scanner.Token, list []node.Node) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Compute the list's end position once instead of twice.
	end := b.getListEndPos(list)
	pos.StartLine = t.Position.StartLine
	pos.StartPos = t.Position.StartPos
	pos.EndLine = end.endLine
	pos.EndPos = end.endPos
	return pos
}
// NewNodeNodeListPosition returns a new pooled Position spanning from node n
// to the last node of list.
func (b *PositionBuilder) NewNodeNodeListPosition(n node.Node, list []node.Node) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Compute each endpoint once instead of twice per field.
	start := b.getNodeStartPos(n)
	end := b.getListEndPos(list)
	pos.StartLine = start.startLine
	pos.StartPos = start.startPos
	pos.EndLine = end.endLine
	pos.EndPos = end.endPos
	return pos
}
// NewNodeListNodePosition returns a new pooled Position spanning from the
// first node of list to node n.
func (b *PositionBuilder) NewNodeListNodePosition(list []node.Node, n node.Node) *position.Position {
	pos := b.PositionPool.Get().(*position.Position)
	// Compute each endpoint once instead of twice per field.
	start := b.getListStartPos(list)
	end := b.getNodeEndPos(n)
	pos.StartLine = start.startLine
	pos.StartPos = start.startPos
	pos.EndLine = end.endLine
	pos.EndPos = end.endPos
	return pos
}
// NewOptionalListTokensPosition returns new Position
func (b *PositionBuilder) NewOptionalListTokensPosition(list []node.Node, t *scanner.Token, endToken *scanner.Token) *position.Position {
pos := b.PositionPool.Get().(*position.Position)
if list == nil {
pos.StartLine = t.Position.StartLine
pos.EndLine = endToken.Position.EndLine
pos.StartPos = t.Position.StartPos
pos.EndPos = endToken.Position.EndPos
return pos
}
pos.StartLine = b.getListStartPos(list).startLine
pos.EndLine = endToken.Position.EndLine
pos.StartPos = b.getListStartPos(list).startPos
pos.EndPos = endToken.Position.EndPos
return pos
} | stage2/vendor/github.com/z7zmey/php-parser/parser/position_builder.go | 0.582254 | 0.439627 | position_builder.go | starcoder |
package day11
import "fmt"
type (
	// CountNeighbours counts the occupied seats around position (x, y) of a
	// seat map, according to some visibility rule.
	CountNeighbours func(*SeatMap, int, int) int
)

// SeatMap holds the seat grid ('L' empty seat, '#' occupied, '.' floor) and
// the current number of occupied seats.
type SeatMap struct {
	seats    [][]rune
	occupied int
}

// ParseSeatStrings parses the input list of seat strings into a seat map.
// All rows must have the same length, and at least one row must be given
// (previously an empty input caused an index-out-of-range panic).
func ParseSeatStrings(seatStrings []string) (*SeatMap, error) {
	if len(seatStrings) == 0 {
		return nil, fmt.Errorf("no seat rows provided")
	}
	seatMap := &SeatMap{
		seats: make([][]rune, len(seatStrings)),
	}
	rowLength := len(seatStrings[0])
	for i, seatString := range seatStrings {
		if len(seatString) != rowLength {
			return nil, fmt.Errorf("Row %v different length to first row", i)
		}
		seatMap.seats[i] = make([]rune, rowLength)
		for j, char := range seatString {
			seatMap.seats[i][j] = char
		}
	}
	return seatMap, nil
}
// EquilibriumOccupation repeatedly steps the given seat map with the supplied
// CountNeighbours function and neighbour limit until no seat changes state,
// then returns the number of occupied seats at that fixed point.
func EquilibriumOccupation(seatMap *SeatMap, count CountNeighbours, neighbourLimit int) int {
	current, settled := seatMap, false
	for !settled {
		current, settled = stepSeatMap(current, count, neighbourLimit)
	}
	return current.occupied
}
// Does a single step on a seatMap and returns whether any changes were made.
// Rules: an empty seat ('L') with zero occupied neighbours becomes occupied;
// an occupied seat ('#') with at least neighbourLimit occupied neighbours
// becomes empty; floor ('.') never changes. The new map is built from the old
// one, so all seats change simultaneously. The boolean result is true when
// the map did NOT change (i.e. equilibrium was reached).
func stepSeatMap(seatMap *SeatMap, count CountNeighbours, neighbourLimit int) (*SeatMap, bool) {
	newMap := &SeatMap{
		seats:    make([][]rune, len(seatMap.seats)),
		occupied: 0,
	}
	changed := false
	for i := 0; i < len(seatMap.seats); i++ {
		newMap.seats[i] = make([]rune, len(seatMap.seats[i]))
		for j := 0; j < len(seatMap.seats[i]); j++ {
			// Floor never changes and needs no neighbour count.
			if seatMap.seats[i][j] == '.' {
				newMap.seats[i][j] = '.'
				continue
			}
			occupiedNeighbours := count(seatMap, i, j)
			if seatMap.seats[i][j] == 'L' {
				if occupiedNeighbours == 0 {
					newMap.seats[i][j] = '#'
					changed = true
					newMap.occupied++
				} else {
					newMap.seats[i][j] = 'L'
				}
			} else if seatMap.seats[i][j] == '#' {
				if occupiedNeighbours < neighbourLimit {
					newMap.seats[i][j] = '#'
					newMap.occupied++
				} else {
					newMap.seats[i][j] = 'L'
					changed = true
				}
			}
		}
	}
	return newMap, !changed
}
// OccupiedNeighbours counts the occupied seats directly adjacent (including
// diagonals) to the seat at (x, y). Implements CountNeighbours.
func OccupiedNeighbours(seatMap *SeatMap, x, y int) int {
	count := 0
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				continue
			}
			i, j := x+dx, y+dy
			if i < 0 || i >= len(seatMap.seats) {
				continue
			}
			if j < 0 || j >= len(seatMap.seats[i]) {
				continue
			}
			if seatMap.seats[i][j] == '#' {
				count++
			}
		}
	}
	return count
}
// OccupiedVisibleNeighbours counts the occupied seats visible from (x, y) in
// each of the eight directions. Implements CountNeighbours.
func OccupiedVisibleNeighbours(seatMap *SeatMap, x, y int) int {
	count := 0
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				continue
			}
			if nextSeatOccupied(seatMap, x, y, dx, dy) {
				count++
			}
		}
	}
	return count
}
// Returns the number of occupied visible seats from the given seat in the direction
// specified by the step values. An empty seat blocks the view of occupied seats
func nextSeatOccupied(seatMap *SeatMap, x, y, stepX, stepY int) bool {
i := x + stepX
j := y + stepY
for i >= 0 && i < len(seatMap.seats) && j >= 0 && j < len(seatMap.seats[i]) {
if seatMap.seats[i][j] == 'L' {
return false
} else if seatMap.seats[i][j] == '#' {
return true
}
i += stepX
j += stepY
}
return false
} | day11/day11.go | 0.723895 | 0.432003 | day11.go | starcoder |
package heisenberg
import (
"fmt"
"math"
"math/cmplx"
"math/rand"
"sort"
)
// Qubit is a qubit
type Qubit uint64
// GateType is a type of gate
type GateType int
const (
// GateTypeControlledNot controlled not gate
GateTypeControlledNot GateType = iota
// GateTypeI multiply by identity
GateTypeI
// GateTypeH multiply by Hadamard gate
GateTypeH
// GateTypeX multiply by Pauli X matrix
GateTypeX
// GateTypeY multiply by Pauli Y matrix
GateTypeY
// GateTypeZ multiply by Pauli Z matrix
GateTypeZ
// GateTypeS multiply by phase matrix
GateTypeS
// GateTypeT multiply by T matrix
GateTypeT
// GateTypeU multiply by U matrix
GateTypeU
// GateTypeRX rotate X gate
GateTypeRX
// GateTypeRY rotate Y gate
GateTypeRY
// GateTypeRZ rotate Z gate
GateTypeRZ
)
// Gate is a gate
type Gate struct {
GateType
Qubits []Qubit
Target Qubit
Theta, Phi, Lambda float64
}
// Genome is a quantum circuit
type Genome struct {
Gates []Gate
Fitness float64
Width int
Probabilities [][2][]float64
}
// Copy copies a genome
func (g *Genome) Copy() Genome {
cp := Genome{}
cp.Gates = make([]Gate, len(g.Gates))
for i := range g.Gates {
cp.Gates[i].GateType = g.Gates[i].GateType
cp.Gates[i].Qubits = make([]Qubit, len(g.Gates[i].Qubits))
copy(cp.Gates[i].Qubits, g.Gates[i].Qubits)
cp.Gates[i].Target = g.Gates[i].Target
cp.Gates[i].Theta = g.Gates[i].Theta
cp.Gates[i].Phi = g.Gates[i].Phi
cp.Gates[i].Lambda = g.Gates[i].Lambda
}
cp.Width = g.Width
cp.Probabilities = g.Probabilities
return cp
}
// Execute the gates
func (g *Genome) Execute() {
fitness := 0.0
for _, probability := range g.Probabilities {
machine, qubits := MachineSparse64{}, []Qubit{}
i := 0
for i < len(probability[0]) {
if probability[0][i] == 0 {
qubits = append(qubits, machine.Zero())
} else {
qubits = append(qubits, machine.One())
}
i++
}
for i < g.Width {
qubits = append(qubits, machine.Zero())
i++
}
for _, gate := range g.Gates {
switch gate.GateType {
case GateTypeControlledNot:
machine.ControlledNot(gate.Qubits, gate.Target)
case GateTypeI:
machine.I(gate.Qubits...)
case GateTypeH:
machine.H(gate.Qubits...)
case GateTypeX:
machine.X(gate.Qubits...)
case GateTypeY:
machine.Y(gate.Qubits...)
case GateTypeZ:
machine.Z(gate.Qubits...)
case GateTypeS:
machine.S(gate.Qubits...)
case GateTypeT:
machine.T(gate.Qubits...)
case GateTypeU:
machine.U(gate.Theta, gate.Phi, gate.Lambda, gate.Qubits...)
case GateTypeRX:
machine.RX(gate.Theta, gate.Qubits...)
case GateTypeRY:
machine.RY(gate.Theta, gate.Qubits...)
case GateTypeRZ:
machine.RZ(gate.Theta, gate.Qubits...)
}
}
max, state := 0.0, 0
for i := 0; i < len(machine.Vector64); i++ {
abs := cmplx.Abs(complex128(machine.Vector64[i]))
if abs > max {
max, state = abs, i
}
}
for i := 0; i < len(probability[1]); i++ {
x := probability[1][i] - float64((state>>(g.Width-1-i))&1)
fitness += x * x
}
}
g.Fitness = fitness
}
// Optimize is an implementation of genetic optimize
func Optimize(width, depth int, probabilities [][2][]float64) {
rand.Seed(1)
qubit := func(qubits []Qubit) Qubit {
qubit := Qubit(0)
for {
qubit = Qubit(rand.Intn(width))
contains := false
for _, value := range qubits {
if value == qubit {
contains = true
}
}
if !contains {
break
}
}
return qubit
}
qubits := func() []Qubit {
qubits := make([]Qubit, 0, 8)
q := rand.Intn(3)
for k := 0; k < q; k++ {
qubits = append(qubits, qubit(qubits))
}
return qubits
}
gate := func() Gate {
gate := Gate{}
n := rand.Intn(17)
if n < 5 {
gate.GateType = GateTypeControlledNot
gate.Qubits = qubits()
gate.Target = qubit([]Qubit{})
} else if n < 7 {
gate.GateType = GateTypeI
gate.Qubits = qubits()
} else if n < 8 {
gate.GateType = GateTypeH
gate.Qubits = qubits()
} else if n < 9 {
gate.GateType = GateTypeX
gate.Qubits = qubits()
} else if n < 10 {
gate.GateType = GateTypeY
gate.Qubits = qubits()
} else if n < 11 {
gate.GateType = GateTypeZ
gate.Qubits = qubits()
} else if n < 12 {
gate.GateType = GateTypeS
gate.Qubits = qubits()
} else if n < 13 {
gate.GateType = GateTypeT
gate.Qubits = qubits()
} else if n < 14 {
gate.GateType = GateTypeU
gate.Qubits = qubits()
gate.Theta = 4 * math.Pi * rand.Float64()
gate.Lambda = rand.Float64()
gate.Phi = rand.Float64()
} else if n < 15 {
gate.GateType = GateTypeRX
gate.Qubits = qubits()
gate.Theta = 4 * math.Pi * rand.Float64()
} else if n < 16 {
gate.GateType = GateTypeRY
gate.Qubits = qubits()
gate.Theta = 4 * math.Pi * rand.Float64()
} else if n < 17 {
gate.GateType = GateTypeRZ
gate.Qubits = qubits()
gate.Theta = 4 * math.Pi * rand.Float64()
}
return gate
}
genomes := make([]Genome, 100)
for i := 0; i < 100; i++ {
gates := make([]Gate, 0, depth)
for j := 0; j < depth; j++ {
gates = append(gates, gate())
}
genomes[i].Gates = gates
genomes[i].Width = width
genomes[i].Probabilities = probabilities
}
for g := 0; g < 100; g++ {
for i := range genomes {
genomes[i].Execute()
}
sort.Slice(genomes, func(i, j int) bool {
return genomes[i].Fitness < genomes[j].Fitness
})
genomes = genomes[:100]
fmt.Println(genomes[0].Fitness)
if genomes[0].Fitness == 0 {
break
}
for i := 0; i < 10; i++ {
m1, m2 := rand.Intn(10), rand.Intn(10)
c1, c2 := genomes[m1].Copy(), genomes[m2].Copy()
g1, g2 := rand.Intn(depth), rand.Intn(depth)
c1.Gates[g1], c2.Gates[g2] = c2.Gates[g2], c1.Gates[g1]
genomes = append(genomes, c1, c2)
}
for i := range genomes {
cp := genomes[i].Copy()
g := rand.Intn(depth)
cp.Gates[g] = gate()
genomes = append(genomes, cp)
}
}
} | heisenberg.go | 0.667798 | 0.662469 | heisenberg.go | starcoder |
package linkedlist
// LinkedList represents abstract data structure
type LinkedList struct {
Head *Node
Tail *Node
isEqual func(a, b interface{}) bool
}
// NewList is a function that creates a new "instance" of linked list
func NewList(compare func(a, b interface{}) bool) LinkedList {
return LinkedList{nil, nil, compare}
}
// DefaultCompare is a function that represents integer comparassion
func DefaultCompare(a, b interface{}) bool {
aInt := a.(int)
bInt := b.(int)
return aInt == bInt
}
// Index is a function that gets index of first node where value equal index
func (list LinkedList) Index(value interface{}) int {
if list.Head == nil {
return -1
}
listNode := list.Head
for i := 0; listNode != nil; i++ {
if list.isEqual(listNode.Value, value) {
return i
}
listNode = listNode.next
}
return -1
}
// Find is a function that gets a first node with specific value
func (list LinkedList) Find(value int) *Node {
listNode := list.Head
for listNode != nil {
if list.isEqual(listNode.Value, value) {
return listNode
}
listNode = listNode.next
}
return nil
}
// Append is a function that add new node into the end of linked list
func (list *LinkedList) Append(value interface{}) *Node {
node := NewNode(value)
if list.Head == nil {
list.Head = node
list.Tail = node
return node
}
previous := list.Tail
previous.next = node
list.Tail = node
return node
}
// Prepend is a function that add new node into the begging of linked list
func (list *LinkedList) Prepend(value int) *Node {
node := NewNode(value)
if list.Head == nil {
list.Head = node
return list.Head
}
node.next = list.Head
list.Head = node
return node
}
// Delete is a function that removes node(s) from the linked list
func (list *LinkedList) Delete(value int) {
if list.Head == nil {
return
}
// Remove nodes from the begging of the list
for list.Head != nil && list.isEqual(list.Head.Value, value) {
list.Head = list.Head.next
}
listNode := list.Head
for listNode != nil {
if listNode.next != nil && list.isEqual(listNode.next.Value, value) {
if listNode.next.next != nil {
// Next for current node equal to the node after next one
listNode.next = listNode.next.next
} else {
// Made current node is end of the list
listNode.next = nil
}
}
listNode = listNode.next
}
}
// DeleteHead is a function that removes list head
func (list *LinkedList) DeleteHead() *Node {
if list.Head == nil {
return nil
}
head := list.Head
list.Head = head.next
return head
}
// Map is a function that iterate through list via custom predicate function
func (list LinkedList) Map(predicate func(*Node) bool) *Node {
if list.Head == nil {
return nil
}
listNode := list.Head
for listNode != nil {
if predicate(listNode) == true {
return listNode
}
listNode = listNode.next
}
return nil
} | linkedlist/list.go | 0.735357 | 0.409457 | list.go | starcoder |
package embd
import "time"
// The Direction type indicates the direction of a GPIO pin.
type Direction int

// The Edge trigger for the GPIO Interrupt.
type Edge string

const (
	// In represents read mode.
	In Direction = iota
	// Out represents write mode.
	Out
)

const (
	// Low represents 0.
	Low int = iota
	// High represents 1.
	High
)

// Edge trigger values for interrupt watching.
const (
	EdgeNone    Edge = "none"
	EdgeRising  Edge = "rising"
	EdgeFalling Edge = "falling"
	EdgeBoth    Edge = "both"
)
// InterruptPin implements access to an interrupt-capable GPIO pin.
type InterruptPin interface {
	// Watch starts watching this pin, invoking handler whenever an interrupt
	// is triggered by the given edge.
	Watch(edge Edge, handler func(DigitalPin)) error

	// StopWatching stops watching this pin for interrupts.
	StopWatching() error
}

// DigitalPin implements access to a digital IO capable GPIO pin.
type DigitalPin interface {
	InterruptPin

	// N returns the logical GPIO number.
	N() int

	// Write writes the provided value (Low or High) to the pin.
	Write(val int) error

	// Read reads the value from the pin.
	Read() (int, error)

	// TimePulse measures the duration of a pulse on the pin.
	TimePulse(state int) (time.Duration, error)

	// SetDirection sets the direction of the pin (in/out).
	SetDirection(dir Direction) error

	// ActiveLow makes the pin active low. A low logical state is represented by
	// a high state on the physical pin, and vice-versa.
	ActiveLow(b bool) error

	// PullUp pulls the pin up.
	PullUp() error

	// PullDown pulls the pin down.
	PullDown() error

	// Close releases the resources associated with the pin.
	Close() error
}
// AnalogPin implements access to an analog IO capable GPIO pin.
type AnalogPin interface {
	// N returns the logical GPIO number.
	N() int

	// Read reads the value from the pin.
	Read() (int, error)

	// Close releases the resources associated with the pin.
	Close() error
}

// The Polarity type indicates the polarity of a pwm pin.
type Polarity int

const (
	// Positive represents (default) positive polarity.
	Positive Polarity = iota
	// Negative represents negative polarity.
	Negative
)

// PWMPin implements access to a pwm capable GPIO pin.
type PWMPin interface {
	// N returns the logical PWM id.
	N() string

	// SetPeriod sets the period of a pwm pin, in nanoseconds.
	SetPeriod(ns int) error

	// SetDuty sets the duty of a pwm pin, in nanoseconds.
	SetDuty(ns int) error

	// SetPolarity sets the polarity of a pwm pin.
	SetPolarity(pol Polarity) error

	// SetMicroseconds sends a command to the PWM driver to generate a us wide pulse.
	SetMicroseconds(us int) error

	// SetAnalog allows easy manipulation of the PWM based on a (0-255) range value.
	SetAnalog(value byte) error

	// Close releases the resources associated with the pin.
	Close() error
}
// GPIODriver implements a generic GPIO driver.
type GPIODriver interface {
	// PinMap returns the pinmap for this driver.
	PinMap() PinMap

	// Unregister unregisters the pin from the driver. Should be called when the pin is closed.
	Unregister(string) error

	// DigitalPin returns a pin capable of doing digital IO.
	DigitalPin(key interface{}) (DigitalPin, error)

	// AnalogPin returns a pin capable of doing analog IO.
	AnalogPin(key interface{}) (AnalogPin, error)

	// PWMPin returns a pin capable of generating PWM.
	PWMPin(key interface{}) (PWMPin, error)

	// Close releases the resources associated with the driver.
	Close() error
}

// Package-level singleton driver state, lazily set up by InitGPIO.
var gpioDriverInitialized bool
var gpioDriverInstance GPIODriver
// InitGPIO initializes the GPIO driver for the detected host, caching the
// driver instance for all subsequent calls. It is a no-op once initialization
// has succeeded, and returns ErrFeatureNotSupported when the host provides no
// GPIO driver.
//
// NOTE(review): the initialized flag and driver instance are unguarded
// package-level state; concurrent first calls could race — confirm callers
// initialize from a single goroutine.
func InitGPIO() error {
	if gpioDriverInitialized {
		return nil
	}
	desc, err := DescribeHost()
	if err != nil {
		return err
	}
	if desc.GPIODriver == nil {
		return ErrFeatureNotSupported
	}
	gpioDriverInstance = desc.GPIODriver()
	gpioDriverInitialized = true
	return nil
}
// CloseGPIO releases resources associated with the GPIO driver.
func CloseGPIO() error {
return gpioDriverInstance.Close()
}
// NewDigitalPin returns a DigitalPin interface which allows control over
// the digital GPIO pin identified by key. The GPIO driver is lazily
// initialized on first use.
func NewDigitalPin(key interface{}) (DigitalPin, error) {
	if err := InitGPIO(); err != nil {
		return nil, err
	}
	return gpioDriverInstance.DigitalPin(key)
}
// DigitalWrite writes val (Low or High) to the pin identified by key.
func DigitalWrite(key interface{}, val int) error {
	p, err := NewDigitalPin(key)
	if err != nil {
		return err
	}
	return p.Write(val)
}
// DigitalRead reads the current value of the pin identified by key.
func DigitalRead(key interface{}) (int, error) {
	p, err := NewDigitalPin(key)
	if err != nil {
		return 0, err
	}
	return p.Read()
}
// SetDirection sets the direction (In or Out) of the pin identified by key.
func SetDirection(key interface{}, dir Direction) error {
	p, err := NewDigitalPin(key)
	if err != nil {
		return err
	}
	return p.SetDirection(dir)
}
// ActiveLow makes the pin identified by key active low. A low logical state
// is then represented by a high state on the physical pin, and vice-versa.
func ActiveLow(key interface{}, b bool) error {
	p, err := NewDigitalPin(key)
	if err != nil {
		return err
	}
	return p.ActiveLow(b)
}
// PullUp pulls the pin identified by key up.
func PullUp(key interface{}) error {
	p, err := NewDigitalPin(key)
	if err != nil {
		return err
	}
	return p.PullUp()
}
// PullDown pulls the pin identified by key down.
func PullDown(key interface{}) error {
	p, err := NewDigitalPin(key)
	if err != nil {
		return err
	}
	return p.PullDown()
}
// NewAnalogPin returns an AnalogPin interface which allows control over
// the analog GPIO pin identified by key. The GPIO driver is lazily
// initialized on first use.
func NewAnalogPin(key interface{}) (AnalogPin, error) {
	if err := InitGPIO(); err != nil {
		return nil, err
	}
	return gpioDriverInstance.AnalogPin(key)
}
// AnalogRead reads a value from the analog pin identified by key.
func AnalogRead(key interface{}) (int, error) {
	pin, err := NewAnalogPin(key)
	if err != nil {
		return 0, err
	}
	return pin.Read()
}
// NewPWMPin returns a PWMPin interface which allows PWM signal
// generation over the PWM pin identified by key. The GPIO driver is lazily
// initialized on first use.
func NewPWMPin(key interface{}) (PWMPin, error) {
	if err := InitGPIO(); err != nil {
		return nil, err
	}
	return gpioDriverInstance.PWMPin(key)
}
package common
import (
"regexp"
"regexp/syntax"
)
// MergeREs merges together a list of regexps (this will match any pattern that matches at least one of
// the input regexps).
// the good news is that '^(^value$)$' will match 'value', so we can use the output of this function and give
// it to the collectors of windows_exporter and node_exporter (had this property been false, it would have
// been difficult to merge them, given the way the regexps are processed in windows_exporter:
// https://github.com/prometheus-community/windows_exporter/blob/21a02c4fbec4304f883ed7957bd81045d2f0c133/collector/logical_disk.go#L153)
func MergeREs(regexps []*regexp.Regexp) (string, error) {
	patterns := make([]string, len(regexps))
	for i, re := range regexps {
		patterns[i] = re.String()
	}
	return ReFromREs(patterns)
}
// ReFromPrefix returns a regular expression matching any prefixes in the input.
// e.g. ReFromPrefix("eth") will match eth, eth0, ...
// The prefix is parsed as a literal so regexp metacharacters are escaped.
func ReFromPrefix(prefix string) (string, error) {
	re, err := syntax.Parse(prefix, syntax.Literal)
	if err != nil {
		// Check the error before touching re: the original called re.String()
		// unconditionally, which would dereference a nil regexp on failure.
		return "", err
	}
	return "^" + re.String(), nil
}
// ReFromPathPrefix returns a regular expression matching any path prefix in the input.
// By path-prefix, we means that the path-prefix "/mnt" will match "/mnt", "/mnt/disk" but not "/mnt-disk"
// Only "/" is supported (e.g. only unix).
func ReFromPathPrefix(prefix string) (string, error) {
	re, err := syntax.Parse(prefix, syntax.Literal)
	if err != nil {
		// Check the error before touching re: the original called re.String()
		// unconditionally, which would dereference a nil regexp on failure.
		return "", err
	}
	return "^" + re.String() + "($|/)", nil
}
// ReFromREs returns an RE matching any of the input REs.
// Each input is parsed with Perl syntax and the parse trees are joined into a
// single alternation before being rendered back to a string.
func ReFromREs(input []string) (string, error) {
	subs := make([]*syntax.Regexp, len(input))
	for i, expr := range input {
		parsed, err := syntax.Parse(expr, syntax.Perl)
		if err != nil {
			return "", err
		}
		subs[i] = parsed
	}
	alternation := syntax.Regexp{
		Op:    syntax.OpAlternate,
		Flags: syntax.Perl,
		Sub:   subs,
	}
	return alternation.String(), nil
}
// CompileREs compiles a list of regexps. On failure it returns the error
// together with the partially-filled slice (entries after the failing index
// are nil).
func CompileREs(regexps []string) (out []*regexp.Regexp, err error) {
	out = make([]*regexp.Regexp, len(regexps))
	for i, expr := range regexps {
		if out[i], err = regexp.Compile(expr); err != nil {
			return out, err
		}
	}
	return out, nil
}
package faker
import (
"math"
)
// IntInRange will build a random int between min and max included.
// When min >= max, min is returned unchanged.
func IntInRange(min, max int) int {
	if min >= max {
		return min
	}
	span := max - min + 1
	return min + random.Intn(span)
}

// Int will build a random int (drawn from the int32 range so the behaviour
// is the same on 32- and 64-bit platforms).
func Int() int {
	return IntInRange(math.MinInt32, math.MaxInt32)
}
// Int64InRange will build a random int64 between min and max included.
// When min >= max, min is returned unchanged.
func Int64InRange(min, max int64) int64 {
	if min >= max {
		return min
	}
	// Work in uint64 so that max-min+1 cannot overflow; a span of 0 means the
	// range covers every int64 value. The previous Int63n-based version both
	// excluded max (contradicting the doc) and could overflow for wide ranges.
	span := uint64(max) - uint64(min) + 1
	if span == 0 {
		return int64(random.Uint64())
	}
	// Note: the modulo introduces a negligible bias for very large spans.
	return min + int64(random.Uint64()%span)
}

// Int64 will build a random int64 drawn from the full int64 range.
func Int64() int64 {
	// The previous implementation (Int63n(MaxInt64) + MinInt64) could only
	// ever produce negative values; reinterpreting a uniform uint64 covers
	// the whole signed range.
	return int64(random.Uint64())
}
// Int32InRange will build a random int32 between min and max included.
func Int32InRange(min, max int32) int32 {
	v := Int64InRange(int64(min), int64(max))
	return int32(v)
}

// Int32 will build a random int32.
func Int32() int32 {
	return Int32InRange(math.MinInt32, math.MaxInt32)
}

// Int16InRange will build a random int16 between min and max included.
func Int16InRange(min, max int16) int16 {
	v := Int64InRange(int64(min), int64(max))
	return int16(v)
}

// Int16 will build a random int16.
func Int16() int16 {
	return Int16InRange(math.MinInt16, math.MaxInt16)
}

// Int8InRange will build a random int8 between min and max included.
func Int8InRange(min, max int8) int8 {
	v := Int64InRange(int64(min), int64(max))
	return int8(v)
}

// Int8 will build a random int8.
func Int8() int8 {
	return Int8InRange(math.MinInt8, math.MaxInt8)
}
// UintInRange will build a random uint between min and max included.
// When min >= max, min is returned unchanged.
func UintInRange(min, max uint) uint {
	if min >= max {
		return min
	}
	// Delegate to the 64-bit helper; the previous int conversions could
	// overflow for values above MaxInt.
	return uint(Uint64InRange(uint64(min), uint64(max)))
}

// Uint will build a random uint (bounded to the uint32 range for
// portability across 32- and 64-bit platforms).
func Uint() uint {
	return uint(Uint64InRange(0, math.MaxUint32))
}

// Uint64InRange will build a random uint64 between min and max included.
// When min >= max, min is returned unchanged.
func Uint64InRange(min, max uint64) uint64 {
	if min >= max {
		return min
	}
	// The previous Int63n-based version overflowed int64 for large bounds and
	// excluded max. A span of 0 means the full uint64 range (min=0,
	// max=MaxUint64).
	span := max - min + 1
	if span == 0 {
		return random.Uint64()
	}
	// Note: the modulo introduces a negligible bias for very large spans.
	return min + random.Uint64()%span
}

// Uint64 will build a random uint64 drawn from the full uint64 range.
func Uint64() uint64 {
	// The previous sum of two half-range draws was non-uniform and could
	// never produce MaxUint64; a single Uint64 draw is uniform.
	return random.Uint64()
}
// Uint32InRange will build a random uint32 between min and max included.
func Uint32InRange(min, max uint32) uint32 {
	v := Uint64InRange(uint64(min), uint64(max))
	return uint32(v)
}

// Uint32 will build a random uint32.
func Uint32() uint32 {
	return Uint32InRange(0, math.MaxUint32)
}

// Uint16InRange will build a random uint16 between min and max included.
func Uint16InRange(min, max uint16) uint16 {
	v := Uint64InRange(uint64(min), uint64(max))
	return uint16(v)
}

// Uint16 will build a random uint16.
func Uint16() uint16 {
	return Uint16InRange(0, math.MaxUint16)
}

// Uint8InRange will build a random uint8 between min and max included.
func Uint8InRange(min, max uint8) uint8 {
	v := Uint64InRange(uint64(min), uint64(max))
	return uint8(v)
}

// Uint8 will build a random uint8.
func Uint8() uint8 {
	return Uint8InRange(0, math.MaxUint8)
}
// Float64InRange will build a random float64 between min and max included.
// When min >= max, min is returned unchanged.
func Float64InRange(min, max float64) float64 {
	if min >= max {
		return min
	}
	return min + random.Float64()*(max-min)
}

// Float64 will build a random positive float64.
func Float64() float64 {
	return Float64InRange(math.SmallestNonzeroFloat64, math.MaxFloat64)
}

// Float32InRange will build a random float32 between min and max included.
// When min >= max, min is returned unchanged.
func Float32InRange(min, max float32) float32 {
	if min >= max {
		return min
	}
	return min + random.Float32()*(max-min)
}

// Float32 will build a random positive float32.
func Float32() float32 {
	return Float32InRange(math.SmallestNonzeroFloat32, math.MaxFloat32)
}
// Builder functions.
//
// Each builder adapts one of the typed generators above to the generic
// factory signature (params ...string) -> (interface{}, error) used by the
// faker registry. The *InRange builders parse min/max from params via
// paramsToMinMaxInt / paramsToMinMaxFloat64; the plain builders ignore params.
func intInRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return IntInRange(min, max), nil
}
func intBuilder(params ...string) (interface{}, error) {
	return Int(), nil
}
func int64InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return Int64InRange(int64(min), int64(max)), nil
}
func int64Builder(params ...string) (interface{}, error) {
	return Int64(), nil
}
func int32InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return Int32InRange(int32(min), int32(max)), nil
}
func int32Builder(params ...string) (interface{}, error) {
	return Int32(), nil
}
func int16InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return Int16InRange(int16(min), int16(max)), nil
}
func int16Builder(params ...string) (interface{}, error) {
	return Int16(), nil
}
func int8InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return Int8InRange(int8(min), int8(max)), nil
}
func int8Builder(params ...string) (interface{}, error) {
	return Int8(), nil
}
func uintInRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return UintInRange(uint(min), uint(max)), nil
}
func uintBuilder(params ...string) (interface{}, error) {
	return Uint(), nil
}
func uint64InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return Uint64InRange(uint64(min), uint64(max)), nil
}
func uint64Builder(params ...string) (interface{}, error) {
	return Uint64(), nil
}
func uint32InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return Uint32InRange(uint32(min), uint32(max)), nil
}
func uint32Builder(params ...string) (interface{}, error) {
	return Uint32(), nil
}
func uint16InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return Uint16InRange(uint16(min), uint16(max)), nil
}
func uint16Builder(params ...string) (interface{}, error) {
	return Uint16(), nil
}
func uint8InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxInt(params...)
	if err != nil {
		return nil, err
	}
	return Uint8InRange(uint8(min), uint8(max)), nil
}
func uint8Builder(params ...string) (interface{}, error) {
	return Uint8(), nil
}
func float64InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxFloat64(params...)
	if err != nil {
		return nil, err
	}
	return Float64InRange(min, max), nil
}
func float64Builder(params ...string) (interface{}, error) {
	return Float64(), nil
}
func float32InRangeBuilder(params ...string) (interface{}, error) {
	min, max, err := paramsToMinMaxFloat64(params...)
	if err != nil {
		return nil, err
	}
	return Float32InRange(float32(min), float32(max)), nil
}
func float32Builder(params ...string) (interface{}, error) {
return Float32(), nil
} | number.go | 0.712232 | 0.551574 | number.go | starcoder |
package orbdata
import (
"github.com/emilyselwood/orbcalc/orbcore"
)
// This file will contain orbital information for standard objects. Major planets, moons and so on.
// MercuryOrbit defines the standard Mercury orbit.
// All orbits in this file are heliocentric Keplerian elements at epoch J2000;
// angles are in radians and distances in kilometres.
var MercuryOrbit = orbcore.Orbit{
	ID: "Mercury",
	ParentGrav: SunGrav,
	Epoch: J2000,
	MeanAnomalyEpoch: 0.7363828677023899, // rad
	ArgumentOfPerihelion: 1.290398137330985, // rad
	LongitudeOfTheAscendingNode: 0.19016162418731905, // rad
	InclinationToTheEcliptic: 0.122258, // rad
	OrbitalEccentricity: 0.2161872518335417,
	SemimajorAxis: 5.7909176e7, // km
}
// VenusOrbit defines the standard Venus orbit.
var VenusOrbit = orbcore.Orbit{
	ID: "Venus",
	ParentGrav: SunGrav,
	Epoch: J2000,
	MeanAnomalyEpoch: 6.024347789858294, // rad
	ArgumentOfPerihelion: 1.8790979389622697, // rad
	LongitudeOfTheAscendingNode: 0.13963804205942293, // rad
	InclinationToTheEcliptic: 0.0592489, // rad
	OrbitalEccentricity: 0.017361719534212148,
	SemimajorAxis: 1.0820893e8, // km
}
// EarthOrbit defines the standard Earth orbit.
var EarthOrbit = orbcore.Orbit{
	ID: "Earth",
	ParentGrav: SunGrav,
	Epoch: J2000,
	MeanAnomalyEpoch: 6.039693392708146, // rad
	ArgumentOfPerihelion: 1.4877567222443007, // rad
	LongitudeOfTheAscendingNode: 8.219803446009808e-05, // rad
	InclinationToTheEcliptic: 0.9e-07, // rad
	OrbitalEccentricity: 0.023506053256160484,
	SemimajorAxis: 1.49597887e8, // km
}
// MarsOrbit defines the standard Mars orbit.
var MarsOrbit = orbcore.Orbit{
	ID: "Mars",
	ParentGrav: SunGrav,
	Epoch: J2000,
	MeanAnomalyEpoch: 0.9016227920497925, // rad
	ArgumentOfPerihelion: 5.804221558977953, // rad
	LongitudeOfTheAscendingNode: 0.059136325715984754, // rad
	InclinationToTheEcliptic: 0.0322992, // rad
	OrbitalEccentricity: 0.09853112210172534,
	SemimajorAxis: 2.27936637e8, // km
}
// JupiterOrbit defines the standard Jupiter orbit.
var JupiterOrbit = orbcore.Orbit{
	ID: "Jupiter",
	ParentGrav: SunGrav,
	Epoch: J2000,
	MeanAnomalyEpoch: 3.986624571747394, // rad
	ArgumentOfPerihelion: 0.22894709895829354, // rad
	LongitudeOfTheAscendingNode: 0.056682739190454204, // rad
	InclinationToTheEcliptic: 0.0227818, // rad
	OrbitalEccentricity: 0.05041232826440195,
	SemimajorAxis: 7.78412027e8, // km
}
// SaturnOrbit defines the standard Saturn orbit.
var SaturnOrbit = orbcore.Orbit{
	ID: "Saturn",
	ParentGrav: SunGrav,
	Epoch: J2000,
	MeanAnomalyEpoch: 3.2720797523951766, // rad
	ArgumentOfPerihelion: 1.5276434137035415, // rad
	LongitudeOfTheAscendingNode: 0.10399170848152173, // rad
	InclinationToTheEcliptic: 0.043362, // rad
	OrbitalEccentricity: 0.05853326249640754,
	SemimajorAxis: 1.42672541e9, // km
}
// UranusOrbit defines the standard Uranus orbit.
var UranusOrbit = orbcore.Orbit{
	ID: "Uranus",
	ParentGrav: SunGrav,
	Epoch: J2000,
	MeanAnomalyEpoch: 3.8644829632802806, // rad
	ArgumentOfPerihelion: 2.926548412800625, // rad
	LongitudeOfTheAscendingNode: 0.03235322856941487, // rad
	InclinationToTheEcliptic: 0.013437, // rad
	OrbitalEccentricity: 0.044645557888114,
	SemimajorAxis: 2.87097222e9, // km
}
// NeptuneOrbit defines the standard Neptune orbit.
var NeptuneOrbit = orbcore.Orbit{
	ID: "Neptune",
	ParentGrav: SunGrav,
	Epoch: J2000,
	MeanAnomalyEpoch: 5.100969108525634, // rad
	ArgumentOfPerihelion: 0.8712884041923264, // rad
	LongitudeOfTheAscendingNode: 0.060720496894987035, // rad
	InclinationToTheEcliptic: 0.0308778, // rad
	OrbitalEccentricity: 0.011600603763700122,
	SemimajorAxis: 4.49825291e9, // km
}
// SolarSystem is a collection of all eight major planets, ordered by
// distance from the Sun.
var SolarSystem = []orbcore.Orbit{
	MercuryOrbit,
	VenusOrbit,
	EarthOrbit,
	MarsOrbit,
	JupiterOrbit,
	SaturnOrbit,
	UranusOrbit,
	NeptuneOrbit,
}
// InnerSolarSystem is a collection of major bodies in the inner solar system
// (the four terrestrial planets).
var InnerSolarSystem = []orbcore.Orbit{
	MercuryOrbit,
	VenusOrbit,
	EarthOrbit,
	MarsOrbit,
}
// OuterSolarSystem is a collection of major bodies in the outer solar system
var OuterSolarSystem = []orbcore.Orbit{
JupiterOrbit,
SaturnOrbit,
UranusOrbit,
NeptuneOrbit,
} | orbdata/orbits.go | 0.536799 | 0.498779 | orbits.go | starcoder |
package spreadsheet
import (
"fmt"
"baliance.com/gooxml"
"baliance.com/gooxml/measurement"
"baliance.com/gooxml/schema/soo/sml"
"baliance.com/gooxml/spreadsheet/reference"
)
// Row is a row within a spreadsheet.
type Row struct {
	w *Workbook // parent workbook
	s *sml.Worksheet // worksheet the row belongs to
	x *sml.CT_Row // underlying raw XML row element
}
// X returns the inner wrapped XML type.
func (r Row) X() *sml.CT_Row {
	return r.x
}
// RowNumber returns the row number (1-N), or zero if it is unset.
func (r Row) RowNumber() uint32 {
	if r.x.RAttr == nil {
		return 0
	}
	return *r.x.RAttr
}
// SetHeight sets the row height in points.
func (r Row) SetHeight(d measurement.Distance) {
	r.x.HtAttr = gooxml.Float64(float64(d))
}
// SetHeightAuto sets the row height to be automatically determined, by
// clearing any explicit height.
func (r Row) SetHeightAuto() {
	r.x.HtAttr = nil
}
// IsHidden returns whether the row is hidden or not.
func (r Row) IsHidden() bool {
	if r.x.HiddenAttr == nil {
		return false
	}
	return *r.x.HiddenAttr
}

// SetHidden hides or unhides the row.
func (r Row) SetHidden(hidden bool) {
	if hidden {
		r.x.HiddenAttr = gooxml.Bool(true)
	} else {
		r.x.HiddenAttr = nil
	}
}
// AddCell adds a cell to a spreadsheet.
//
// Fast path: if the last existing cell has the name expected for contiguous
// columns (A1, B1, ...), the new cell's name is derived directly from the
// cell count. Otherwise the row is scanned for the highest used column index
// and the new cell is placed one column after it.
func (r Row) AddCell() Cell {
	numCells := uint32(len(r.x.C))
	var nextCellID *string
	if numCells > 0 {
		prevCellName := gooxml.Stringf("%s%d", reference.IndexToColumn(numCells-1), r.RowNumber())
		// previous cell has an expected name
		if r.x.C[numCells-1].RAttr != nil && *r.x.C[numCells-1].RAttr == *prevCellName {
			nextCellID = gooxml.Stringf("%s%d", reference.IndexToColumn(numCells), r.RowNumber())
		}
	}
	c := sml.NewCT_Cell()
	r.x.C = append(r.x.C, c)
	// fast path failed, so find the last cell and add another
	if nextCellID == nil {
		nextIdx := uint32(0)
		for _, c := range r.x.C {
			if c.RAttr != nil {
				cref, _ := reference.ParseCellReference(*c.RAttr)
				if cref.ColumnIdx >= nextIdx {
					nextIdx = cref.ColumnIdx + 1
				}
			}
		}
		nextCellID = gooxml.Stringf("%s%d", reference.IndexToColumn(nextIdx), r.RowNumber())
	}
	c.RAttr = nextCellID
	return Cell{r.w, r.s, r.x, c}
}
// Cells returns a slice of cells. The cells can be manipulated, but appending
// to the slice will have no effect.
func (r Row) Cells() []Cell {
	cells := make([]Cell, 0, len(r.x.C))
	for _, c := range r.x.C {
		cells = append(cells, Cell{r.w, r.s, r.x, c})
	}
	return cells
}
// AddNamedCell adds a new named cell to a row and returns it. You should
// normally prefer Cell() as it will return the existing cell if the cell
// already exists, while AddNamedCell will duplicate the cell creating an
// invalid spreadsheet.
func (r Row) AddNamedCell(col string) Cell {
	c := sml.NewCT_Cell()
	r.x.C = append(r.x.C, c)
	c.RAttr = gooxml.Stringf("%s%d", col, r.RowNumber())
	return Cell{r.w, r.s, r.x, c}
}
// Cell retrieves or adds a new cell to a row. Col is the column (e.g. 'A', 'B').
func (r Row) Cell(col string) Cell {
	want := fmt.Sprintf("%s%d", col, r.RowNumber())
	for _, c := range r.x.C {
		if c.RAttr == nil || *c.RAttr != want {
			continue
		}
		return Cell{r.w, r.s, r.x, c}
	}
	return r.AddNamedCell(col)
}
// renumberAs assigns a new row number and fixes any cell references within the
// row so they refer to the new row number. This is used when sorting to fix up
// moved rows.
func (r Row) renumberAs(rowNumber uint32) {
r.x.RAttr = gooxml.Uint32(rowNumber)
for _, c := range r.Cells() {
cref, err := reference.ParseCellReference(c.Reference())
if err == nil {
newRef := fmt.Sprintf("%s%d", cref.Column, rowNumber)
c.x.RAttr = gooxml.String(newRef)
}
}
} | spreadsheet/row.go | 0.75183 | 0.42471 | row.go | starcoder |
package chunks
import "io"
// bstream is a stream of bits backed by a byte slice. Bits are written and
// read most-significant-bit first within each byte.
type bstream struct {
	stream []byte // the data stream
	count uint8 // how many bits are valid in current byte
}
// newBReader returns a bstream reading from b, starting at the first bit of
// the first byte (count=8 marks all 8 bits of stream[0] as unread).
func newBReader(b []byte) *bstream {
	return &bstream{stream: b, count: 8}
}
// newBWriter returns an empty bstream for writing, pre-allocating size
// bytes. count=0 forces the first write to append a fresh byte.
func newBWriter(size int) *bstream {
	return &bstream{stream: make([]byte, 0, size), count: 0}
}
// clone returns a deep copy; the copy shares no backing memory with b.
func (b *bstream) clone() *bstream {
	d := make([]byte, len(b.stream))
	copy(d, b.stream)
	return &bstream{stream: d, count: b.count}
}
// bytes returns the underlying byte slice (not a copy).
func (b *bstream) bytes() []byte {
	return b.stream
}
// bit is a single binary digit.
type bit bool
const (
	zero bit = false
	one bit = true
)
// writeBit appends a single bit, filling bytes from the most significant
// bit downward.
func (b *bstream) writeBit(bit bit) {
	if b.count == 0 {
		b.stream = append(b.stream, 0)
		b.count = 8
	}
	i := len(b.stream) - 1
	if bit {
		b.stream[i] |= 1 << (b.count - 1)
	}
	b.count--
}
// writeByte appends 8 bits, splitting the byte across the current partial
// byte and a freshly appended one. Note that a new byte is appended even
// when the write happens to be byte-aligned.
func (b *bstream) writeByte(byt byte) {
	if b.count == 0 {
		b.stream = append(b.stream, 0)
		b.count = 8
	}
	i := len(b.stream) - 1
	// fill up b.b with b.count bits from byt
	b.stream[i] |= byt >> (8 - b.count)
	b.stream = append(b.stream, 0)
	i++
	b.stream[i] = byt << b.count
}
// writeBits appends the lowest nbits of u, most-significant bit first,
// using byte-sized writes where possible.
func (b *bstream) writeBits(u uint64, nbits int) {
	u <<= (64 - uint(nbits))
	for nbits >= 8 {
		byt := byte(u >> 56)
		b.writeByte(byt)
		u <<= 8
		nbits -= 8
	}
	for nbits > 0 {
		b.writeBit((u >> 63) == 1)
		u <<= 1
		nbits--
	}
}
// readBit consumes and returns the next bit, or io.EOF when the stream is
// exhausted.
func (b *bstream) readBit() (bit, error) {
	if len(b.stream) == 0 {
		return false, io.EOF
	}
	if b.count == 0 {
		b.stream = b.stream[1:]
		if len(b.stream) == 0 {
			return false, io.EOF
		}
		b.count = 8
	}
	d := (b.stream[0] << (8 - b.count)) & 0x80
	b.count--
	return d != 0, nil
}
// ReadByte implements io.ByteReader by delegating to readByte.
func (b *bstream) ReadByte() (byte, error) {
	return b.readByte()
}
// readByte consumes and returns the next 8 bits, possibly spanning two
// underlying bytes. It returns io.EOF when fewer than 8 bits remain.
func (b *bstream) readByte() (byte, error) {
	if len(b.stream) == 0 {
		return 0, io.EOF
	}
	if b.count == 0 {
		b.stream = b.stream[1:]
		if len(b.stream) == 0 {
			return 0, io.EOF
		}
		return b.stream[0], nil
	}
	if b.count == 8 {
		b.count = 0
		return b.stream[0], nil
	}
	byt := b.stream[0] << (8 - b.count)
	b.stream = b.stream[1:]
	if len(b.stream) == 0 {
		return 0, io.EOF
	}
	// We just advanced the stream and can assume the shift to be 0.
	byt |= b.stream[0] >> b.count
	return byt, nil
}
// readBits consumes the next nbits bits and returns them in the low end of
// the result, reading whole bytes where possible.
func (b *bstream) readBits(nbits int) (uint64, error) {
	var u uint64
	for nbits >= 8 {
		byt, err := b.readByte()
		if err != nil {
			return 0, err
		}
		u = (u << 8) | uint64(byt)
		nbits -= 8
	}
	if nbits == 0 {
		return u, nil
	}
	if nbits > int(b.count) {
		u = (u << uint(b.count)) | uint64((b.stream[0]<<(8-b.count))>>(8-b.count))
		nbits -= int(b.count)
		b.stream = b.stream[1:]
		if len(b.stream) == 0 {
			return 0, io.EOF
		}
		b.count = 8
	}
	u = (u << uint(nbits)) | uint64((b.stream[0]<<(8-b.count))>>(8-uint(nbits)))
	b.count -= uint8(nbits)
	return u, nil
} | vendor/github.com/prometheus/tsdb/chunks/bstream.go | 0.593374 | 0.427695 | bstream.go | starcoder |
package util
import (
"fmt"
"strings"
"time"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
)
const (
	// MaxCheckpointWeight is the maximum weight that can be stored in
	// HistogramCheckpoint in a single bucket. Bucket weights are scaled so
	// the heaviest bucket maps to this value (see SaveToChekpoint).
	MaxCheckpointWeight uint32 = 10000
)
// Histogram represents an approximate distribution of some variable.
type Histogram interface {
	// Percentile returns an approximation of the given percentile of the
	// distribution. Note: the argument passed to Percentile() is a number
	// between 0 and 1. For example 0.5 corresponds to the median and 0.9 to
	// the 90th percentile.
	// If the histogram is empty, Percentile() returns 0.0.
	Percentile(percentile float64) float64
	// AddSample adds a sample with a given value and weight.
	AddSample(value float64, weight float64, time time.Time)
	// SubtractSample removes a sample with a given value and weight. Note
	// that the total weight of samples with a given value cannot be
	// negative.
	SubtractSample(value float64, weight float64, time time.Time)
	// Merge adds all samples from another histogram. Requires the
	// histograms to be of the exact same type.
	Merge(other Histogram)
	// IsEmpty returns true if the histogram is empty.
	IsEmpty() bool
	// Equals returns true if the histogram is equal to another one. The two
	// histograms must use the same HistogramOptions object (not two
	// different copies).
	// If the two histograms are not of the same runtime type returns false.
	Equals(other Histogram) bool
	// String returns a human-readable text description of the histogram.
	String() string
	// SaveToChekpoint returns a representation of the histogram as a
	// HistogramCheckpoint. During conversion buckets with small weights
	// can be omitted. (The "Chekpoint" misspelling is part of the public
	// API and is kept for compatibility.)
	SaveToChekpoint() (*vpa_types.HistogramCheckpoint, error)
	// LoadFromCheckpoint loads data from the checkpoint into the histogram
	// by appending samples.
	LoadFromCheckpoint(*vpa_types.HistogramCheckpoint) error
}
// NewHistogram returns a new Histogram instance using given options.
func NewHistogram(options HistogramOptions) Histogram {
	n := options.NumBuckets()
	return &histogram{
		options:      options,
		bucketWeight: make([]float64, n),
		totalWeight:  0.0,
		// An empty histogram keeps minBucket past maxBucket until the first
		// sample arrives.
		minBucket: n - 1,
		maxBucket: 0,
	}
}
// Simple bucket-based implementation of the Histogram interface. Each bucket
// holds the total weight of samples that belong to it.
// Percentile() returns the upper bound of the corresponding bucket.
// Resolution (bucket boundaries) of the histogram depends on the options.
// There's no interpolation within buckets (i.e. one sample falls to exactly
// one bucket).
// A bucket is considered empty if its weight is smaller than options.Epsilon().
type histogram struct {
	// Bucketing scheme.
	options HistogramOptions
	// Cumulative weight of samples in each bucket.
	bucketWeight []float64
	// Total cumulative weight of samples in all buckets.
	totalWeight float64
	// Index of the first non-empty bucket if there's any. Otherwise index
	// of the last bucket.
	minBucket int
	// Index of the last non-empty bucket if there's any. Otherwise 0.
	maxBucket int
}
// AddSample adds weight to the bucket that value falls into and widens the
// [minBucket, maxBucket] range if that bucket is now non-empty. Panics on a
// negative weight.
func (h *histogram) AddSample(value float64, weight float64, time time.Time) {
	if weight < 0.0 {
		panic("sample weight must be non-negative")
	}
	idx := h.options.FindBucket(value)
	h.bucketWeight[idx] += weight
	h.totalWeight += weight
	if h.bucketWeight[idx] >= h.options.Epsilon() {
		if idx < h.minBucket {
			h.minBucket = idx
		}
		if idx > h.maxBucket {
			h.maxBucket = idx
		}
	}
}
// safeSubtract returns value-sub, clamped to 0.0 whenever the difference
// drops below epsilon (this is how bucket weights are treated as "empty").
func safeSubtract(value, sub, epsilon float64) float64 {
	remaining := value - sub
	if remaining < epsilon {
		return 0.0
	}
	return remaining
}
// SubtractSample removes weight from the bucket for value, clamping both the
// bucket weight and the total weight at zero (see safeSubtract), then
// restores the minBucket/maxBucket invariants. Panics on a negative weight.
func (h *histogram) SubtractSample(value float64, weight float64, time time.Time) {
	if weight < 0.0 {
		panic("sample weight must be non-negative")
	}
	bucket := h.options.FindBucket(value)
	epsilon := h.options.Epsilon()
	h.totalWeight = safeSubtract(h.totalWeight, weight, epsilon)
	h.bucketWeight[bucket] = safeSubtract(h.bucketWeight[bucket], weight, epsilon)
	h.updateMinAndMaxBucket()
}
// Merge adds all samples from another histogram into h. Panics unless other
// is a *histogram that shares the exact same options value.
func (h *histogram) Merge(other Histogram) {
	o := other.(*histogram)
	if h.options != o.options {
		panic("can't merge histograms with different options")
	}
	// Only the other histogram's non-empty range needs to be visited.
	for bucket := o.minBucket; bucket <= o.maxBucket; bucket++ {
		h.bucketWeight[bucket] += o.bucketWeight[bucket]
	}
	h.totalWeight += o.totalWeight
	if o.minBucket < h.minBucket {
		h.minBucket = o.minBucket
	}
	if o.maxBucket > h.maxBucket {
		h.maxBucket = o.maxBucket
	}
}
// Percentile returns an approximation of the given percentile (0.0-1.0) of
// the distribution, or 0.0 for an empty histogram. The result is the upper
// bound of the first bucket at which the cumulative weight reaches
// percentile*totalWeight (the start of the last bucket if the scan reaches
// it, since the last bucket has no upper bound).
func (h *histogram) Percentile(percentile float64) float64 {
	if h.IsEmpty() {
		return 0.0
	}
	partialSum := 0.0
	threshold := percentile * h.totalWeight
	bucket := h.minBucket
	for ; bucket < h.maxBucket; bucket++ {
		partialSum += h.bucketWeight[bucket]
		if partialSum >= threshold {
			break
		}
	}
	if bucket < h.options.NumBuckets()-1 {
		// Return the end of the bucket.
		return h.options.GetBucketStart(bucket + 1)
	}
	// Return the start of the last bucket (note that the last bucket
	// doesn't have an upper bound).
	return h.options.GetBucketStart(bucket)
}
// IsEmpty returns true if the histogram is empty. It relies on the invariant
// that minBucket points at the first non-empty bucket whenever one exists.
func (h *histogram) IsEmpty() bool {
	return h.bucketWeight[h.minBucket] < h.options.Epsilon()
}
// String returns a human-readable description: the bucket range and total
// weight, followed by the value at every 5th percentile.
func (h *histogram) String() string {
	lines := []string{
		fmt.Sprintf("minBucket: %d, maxBucket: %d, totalWeight: %.3f",
			h.minBucket, h.maxBucket, h.totalWeight),
		"%-tile\tvalue",
	}
	for i := 0; i <= 100; i += 5 {
		lines = append(lines, fmt.Sprintf("%d\t%.3f", i, h.Percentile(0.01*float64(i))))
	}
	return strings.Join(lines, "\n")
}
// Equals returns true if the other histogram is a *histogram sharing the
// same options value, the same min/max buckets, and per-bucket weights that
// agree to within 1e-15.
func (h *histogram) Equals(other Histogram) bool {
	o, sameType := other.(*histogram)
	if !sameType {
		return false
	}
	if h.options != o.options || h.minBucket != o.minBucket || h.maxBucket != o.maxBucket {
		return false
	}
	for i := h.minBucket; i <= h.maxBucket; i++ {
		delta := h.bucketWeight[i] - o.bucketWeight[i]
		if delta > 1e-15 || delta < -1e-15 {
			return false
		}
	}
	return true
}
// updateMinAndMaxBucket adjusts minBucket and maxBucket after any operation
// that decreases weights, walking them inward past buckets whose weight has
// dropped below epsilon.
func (h *histogram) updateMinAndMaxBucket() {
	eps := h.options.Epsilon()
	last := h.options.NumBuckets() - 1
	for h.minBucket < last && h.bucketWeight[h.minBucket] < eps {
		h.minBucket++
	}
	for h.maxBucket > 0 && h.bucketWeight[h.maxBucket] < eps {
		h.maxBucket--
	}
}
// SaveToChekpoint returns a representation of the histogram as a
// HistogramCheckpoint. Bucket weights are scaled so the heaviest bucket maps
// to MaxCheckpointWeight; buckets whose scaled weight rounds to zero are
// omitted. (The method name's misspelling is mandated by the Histogram
// interface.)
func (h *histogram) SaveToChekpoint() (*vpa_types.HistogramCheckpoint, error) {
	result := vpa_types.HistogramCheckpoint{
		BucketWeights: make(map[int]uint32),
	}
	result.TotalWeight = h.totalWeight
	// Find max
	max := 0.
	for bucket := h.minBucket; bucket <= h.maxBucket; bucket++ {
		if h.bucketWeight[bucket] > max {
			max = h.bucketWeight[bucket]
		}
	}
	// Compute ratio
	ratio := float64(MaxCheckpointWeight) / max
	// Convert weights and drop near-zero weights
	for bucket := h.minBucket; bucket <= h.maxBucket; bucket++ {
		newWeight := uint32(round(h.bucketWeight[bucket] * ratio))
		if newWeight > 0 {
			result.BucketWeights[bucket] = newWeight
		}
	}
	return &result, nil
}
// LoadFromCheckpoint merges the checkpoint's buckets into the histogram,
// rescaling the integer bucket weights so that they sum to the checkpoint's
// TotalWeight. It returns an error for a nil checkpoint, a negative total
// weight, or bucket indices outside this histogram's range; a checkpoint
// whose weights sum to zero is a no-op.
func (h *histogram) LoadFromCheckpoint(checkpoint *vpa_types.HistogramCheckpoint) error {
	if checkpoint == nil {
		return fmt.Errorf("Cannot load from empty checkpoint")
	}
	if checkpoint.TotalWeight < 0.0 {
		return fmt.Errorf("Cannot load checkpoint with negative weight %v", checkpoint.TotalWeight)
	}
	// Validate all buckets (and compute the integer weight sum) before
	// mutating any state.
	sum := int64(0)
	for bucket, weight := range checkpoint.BucketWeights {
		sum += int64(weight)
		if bucket >= h.options.NumBuckets() {
			return fmt.Errorf("Checkpoint has bucket %v that is exceeding histogram buckets %v", bucket, h.options.NumBuckets())
		}
		if bucket < 0 {
			return fmt.Errorf("Checkpoint has a negative bucket %v", bucket)
		}
	}
	if sum == 0 {
		return nil
	}
	ratio := checkpoint.TotalWeight / float64(sum)
	for bucket, weight := range checkpoint.BucketWeights {
		if bucket < h.minBucket {
			h.minBucket = bucket
		}
		if bucket > h.maxBucket {
			h.maxBucket = bucket
		}
		h.bucketWeight[bucket] += float64(weight) * ratio
	}
	h.totalWeight += checkpoint.TotalWeight
	return nil
}
// Multiplies all weights by a given factor. The factor must be non-negative.
// (note: this operation does not affect the percentiles of the distribution)
func (h *histogram) scale(factor float64) {
if factor < 0.0 {
panic("scale factor must be non-negative")
}
for bucket := h.minBucket; bucket <= h.maxBucket; bucket++ {
h.bucketWeight[bucket] *= factor
}
h.totalWeight *= factor
// Some buckets might become empty (weight < epsilon), so adjust min and max buckets.
h.updateMinAndMaxBucket()
} | vertical-pod-autoscaler/pkg/recommender/util/histogram.go | 0.861844 | 0.493164 | histogram.go | starcoder |
package encoder
// circle.go assists in calculation of points and angles on a circle.
import (
"image"
"math"
"github.com/mum4k/termdash/private/canvas/braille"
)
// startEndAngles given progress indicators and the desired start angle and
// direction, returns the starting and the ending angle of the partial circle
// that represents this progress.
func startEndAngles(current, total, startAngle, direction int) (start, end int) {
const fullCircle = 360
if total == 0 {
return startAngle, startAngle + 1
}
mult := float64(current) / float64(total)
angleSize := math.Round(float64(360) * mult)
if angleSize == fullCircle {
return 0, fullCircle
}
end = startAngle + int(math.Round(float64(direction)*angleSize))
if end < 0 {
end += fullCircle
if startAngle == 0 {
startAngle = fullCircle
}
return end, startAngle
}
if end < startAngle {
return end, startAngle
}
if end > fullCircle {
end = end % fullCircle
}
return startAngle, end
}
// midAndRadius given an area of a braille canvas, determines the mid point in
// pixels and radius to draw the largest circle that fits.
// The circle's mid point is always snapped onto the {0,1} pixel in the chosen
// cell so that any text inside of it can be visually centered.
func midAndRadius(ar image.Rectangle) (image.Point, int) {
	mid := image.Point{X: ar.Dx() / 2, Y: ar.Dy() / 2}
	if mid.X%2 == 1 {
		mid.X--
	}
	switch mid.Y % 4 {
	case 0:
		mid.Y++
	case 2:
		mid.Y--
	case 3:
		mid.Y -= 2
	}

	// The radius is limited by the shorter axis: the distance from the
	// (snapped) mid point to the nearer edge on that axis.
	var radius int
	if ar.Dx() < ar.Dy() {
		if mid.X < ar.Dx()/2 {
			radius = mid.X
		} else {
			radius = ar.Dx() - mid.X - 1
		}
	} else {
		if mid.Y < ar.Dy()/2 {
			radius = mid.Y
		} else {
			radius = ar.Dy() - mid.Y - 1
		}
	}
	return mid, radius
}
// availableCells given a radius returns the number of cells that are available
// within the circle and the coordinates of the first cell.
// These coordinates are for a normal (non-braille) canvas.
// That is the cells that do not contain any of the circle points. This is
// important since normal characters and braille characters cannot share the
// same cell.
func availableCells(mid image.Point, radius int) (int, image.Point) {
if radius < 3 {
return 0, image.Point{0, 0}
}
// Pixels available for the text only.
// Subtract one for the circle itself.
pixels := radius*2 - 1
startPixel := image.Point{mid.X - pixels/2, mid.Y}
startCell := image.Point{
startPixel.X / braille.ColMult,
mid.Y / braille.RowMult,
}
return pixels / braille.ColMult, startCell
} | internal/encoder/circle.go | 0.859059 | 0.594198 | circle.go | starcoder |
package solar
import (
"image/color"
"math"
"github.com/golang/geo/r2"
)
// DrawRotatingLine renders a line that rotates around a fixed anchor point.
type DrawRotatingLine struct {
	// where the line starts (the fixed anchor point)
	startPosition r2.Point
	// end of the line, updated after each call to Animate
	endPosition r2.Point
	// length of the line
	length float64
	// time in seconds for the line to complete one full rotation
	// (Animate advances currentAngle by 2*pi*dt/traverseTime)
	traverseTime float64
	// current angle of the line, in radians
	currentAngle float64
	// width of the line
	lineWidth float64
	// color of the line
	color color.RGBA
	// z position of line
	zindex ZIndex
}
var _ Drawable = &DrawLine{}
// NewRotatingLine constructs a rotating line anchored at the given planet's
// current position in the solar system, with default length, speed, width,
// color and z-index.
func NewRotatingLine(planet PlanetIndex, solarSystem *System) *DrawRotatingLine {
	return &DrawRotatingLine{
		startPosition: solarSystem.planets[planet].position,
		length: 7.0,
		traverseTime: 4.0,
		currentAngle: 0.0,
		lineWidth: 3.0,
		color: color.RGBA{R: 255, G: 255, B: 255, A: 100},
		zindex: 2,
	}
}
// Affects reports whether the line's bounding circle (radius = line length,
// centered on the anchor) overlaps the circle at position with the given
// radius.
func (line *DrawRotatingLine) Affects(position r2.Point, radius float64) bool {
	gap := position.Sub(line.startPosition).Norm()
	return gap < line.length+radius
}
// ColorAt returns the color at position blended on top of baseColor. The
// line's alpha fades linearly from full on the line to zero at lineWidth
// away from it.
func (line *DrawRotatingLine) ColorAt(position r2.Point, baseColor RGBA) (color RGBA) {
	distance := line.distanceToPoint(position)
	if distance > line.lineWidth {
		return baseColor
	}
	// Normalize to [0,1] for the alpha falloff.
	distance = distance / line.lineWidth
	color = RGBA{line.color.R, line.color.G, line.color.B, uint8((1.0 - distance) * 255.0)}
	// NOTE(review): this paints the line red during the first ~0.5 rad of
	// every rotation; it looks like leftover debug code — confirm whether
	// the red flash is intentional before removing.
	if (line.currentAngle < 0.5) {
		color = RGBA{255,0,0, uint8((1.0 - distance) * 255.0)}
	}
	result := color.BlendWith(baseColor)
	return result
}
// distanceToPoint returns the shortest distance from position to the line
// segment between startPosition and endPosition.
// See https://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
func (line *DrawRotatingLine) distanceToPoint(position r2.Point) float64 {
	segLenSq := line.startPosition.Sub(line.endPosition).Norm()
	segLenSq = segLenSq * segLenSq
	if segLenSq == 0 {
		// Degenerate segment: distance to the single point.
		return line.startPosition.Sub(position).Norm()
	}
	seg := line.endPosition.Sub(line.startPosition)
	// Projection parameter along the segment, clamped to [0, 1].
	t := position.Sub(line.startPosition).Dot(seg) / segLenSq
	switch {
	case t > 1:
		t = 1
	case t < 0:
		t = 0
	}
	nearest := line.startPosition.Add(seg.Mul(t))
	return position.Sub(nearest).Norm()
}
// ZIndex of the circle
func (line *DrawRotatingLine) ZIndex() ZIndex {
return line.zindex
}
// Animate circle
func (line *DrawRotatingLine) Animate(dt float64) bool {
line.currentAngle += 2 * math.Pi * dt / line.traverseTime
if line.currentAngle > 2*math.Pi {
line.currentAngle -= 2 * math.Pi
}
endOffset := r2.Point{X: math.Cos(line.currentAngle) * line.length, Y: math.Sin(line.currentAngle) * line.length}
line.endPosition = line.startPosition.Add(endOffset)
//fmt.Println("Animated to ", line.currentAngle, line.endPosition, dt)
return true
} | solar/drawRotatingLine.go | 0.896891 | 0.485112 | drawRotatingLine.go | starcoder |
package plaid
import (
"encoding/json"
)
// RecipientBACS An object containing a BACS account number and sort code. If an IBAN is not provided or if this recipient needs to accept domestic GBP-denominated payments, BACS data is required.
type RecipientBACS struct {
	// The account number of the account. Maximum of 10 characters.
	Account *string `json:"account,omitempty"`
	// The 6-character sort code of the account.
	SortCode *string `json:"sort_code,omitempty"`
	// AdditionalProperties collects any JSON keys not bound to a field above.
	AdditionalProperties map[string]interface{}
}

// _RecipientBACS mirrors RecipientBACS without its UnmarshalJSON method so the
// custom unmarshaller can decode the typed fields without infinite recursion.
type _RecipientBACS RecipientBACS
// NewRecipientBACS instantiates a new RecipientBACS object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewRecipientBACS() *RecipientBACS {
	this := RecipientBACS{}
	return &this
}

// NewRecipientBACSWithDefaults instantiates a new RecipientBACS object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewRecipientBACSWithDefaults() *RecipientBACS {
	this := RecipientBACS{}
	return &this
}
// GetAccount returns the Account field value if set, zero value otherwise.
// A nil receiver or unset field yields the empty string.
func (o *RecipientBACS) GetAccount() string {
	if o == nil || o.Account == nil {
		var ret string
		return ret
	}
	return *o.Account
}

// GetAccountOk returns a tuple with the Account field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecipientBACS) GetAccountOk() (*string, bool) {
	if o == nil || o.Account == nil {
		return nil, false
	}
	return o.Account, true
}

// HasAccount returns a boolean if a field has been set.
func (o *RecipientBACS) HasAccount() bool {
	if o != nil && o.Account != nil {
		return true
	}
	return false
}

// SetAccount gets a reference to the given string and assigns it to the Account field.
func (o *RecipientBACS) SetAccount(v string) {
	o.Account = &v
}

// GetSortCode returns the SortCode field value if set, zero value otherwise.
// A nil receiver or unset field yields the empty string.
func (o *RecipientBACS) GetSortCode() string {
	if o == nil || o.SortCode == nil {
		var ret string
		return ret
	}
	return *o.SortCode
}

// GetSortCodeOk returns a tuple with the SortCode field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecipientBACS) GetSortCodeOk() (*string, bool) {
	if o == nil || o.SortCode == nil {
		return nil, false
	}
	return o.SortCode, true
}

// HasSortCode returns a boolean if a field has been set.
func (o *RecipientBACS) HasSortCode() bool {
	if o != nil && o.SortCode != nil {
		return true
	}
	return false
}

// SetSortCode gets a reference to the given string and assigns it to the SortCode field.
func (o *RecipientBACS) SetSortCode(v string) {
	o.SortCode = &v
}
// MarshalJSON serializes the typed fields (when set) and then merges in
// AdditionalProperties. Note: an additional property whose key collides with
// a typed field overwrites it in the output, since it is written last.
func (o RecipientBACS) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Account != nil {
		toSerialize["account"] = o.Account
	}
	if o.SortCode != nil {
		toSerialize["sort_code"] = o.SortCode
	}
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON decodes bytes into o. The typed fields ("account",
// "sort_code") are bound first; any remaining JSON keys are collected into
// AdditionalProperties. A failure to decode the typed fields is returned
// immediately — previously that error could be silently overwritten by a
// later successful decode of the raw map, returning nil on bad input.
func (o *RecipientBACS) UnmarshalJSON(bytes []byte) (err error) {
	varRecipientBACS := _RecipientBACS{}
	if err = json.Unmarshal(bytes, &varRecipientBACS); err != nil {
		return err
	}
	*o = RecipientBACS(varRecipientBACS)
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
		// Drop keys already represented by typed fields.
		delete(additionalProperties, "account")
		delete(additionalProperties, "sort_code")
		o.AdditionalProperties = additionalProperties
	}
	return err
}
// NullableRecipientBACS wraps a RecipientBACS that can be explicitly null:
// isSet distinguishes "never assigned" from "assigned (possibly nil)".
type NullableRecipientBACS struct {
	value *RecipientBACS
	isSet bool
}

// Get returns the wrapped value (possibly nil).
func (v NullableRecipientBACS) Get() *RecipientBACS {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableRecipientBACS) Set(val *RecipientBACS) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value has been assigned via Set or UnmarshalJSON.
func (v NullableRecipientBACS) IsSet() bool {
	return v.isSet
}

// Unset clears the wrapped value and marks the wrapper as not set.
func (v *NullableRecipientBACS) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableRecipientBACS returns a wrapper holding val, already marked set.
func NewNullableRecipientBACS(val *RecipientBACS) *NullableRecipientBACS {
	return &NullableRecipientBACS{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableRecipientBACS) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableRecipientBACS) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_recipient_bacs.go | 0.771413 | 0.58353 | model_recipient_bacs.go | starcoder |
package qrcode
import (
"errors"
"image/color"
"github.com/ajstarks/svgo"
"github.com/boombuler/barcode"
)
// QrSVG holds the data related to the size, location,
// and block size of the QR Code. Holds unexported fields.
type QrSVG struct {
	qr        barcode.Barcode
	qrWidth   int // module count along one side (qr.Bounds().Max.X)
	blockSize int // rendered size of one module, in SVG units
	startingX int // top-left X of the code area, set via SetStartPoint
	startingY int // top-left Y of the code area, set via SetStartPoint
}

// NewQrSVG constructs a QrSVG struct. It takes a QR Code in the form
// of barcode.Barcode and sets the "pixel" or block size of QR Code in
// the SVG file.
func NewQrSVG(qr barcode.Barcode, blockSize int) QrSVG {
	return QrSVG{
		qr:        qr,
		qrWidth:   qr.Bounds().Max.X,
		blockSize: blockSize,
		startingX: 0,
		startingY: 0,
	}
}
// WriteQrSVG writes the QR Code to SVG, one <rect> per module, tagging dark
// modules with CSS class "color" and light modules with "bg-color".
// Returns an error if the barcode is not a QR code.
// NOTE(review): the outer loop iterates x while advancing currY, and samples
// qr.At(x, y) — this transposes rows and columns; confirm the intended
// orientation is preserved for non-symmetric codes.
func (qs *QrSVG) WriteQrSVG(s *svg.SVG) error {
	if qs.qr.Metadata().CodeKind == "QR Code" {
		currY := qs.startingY
		for x := 0; x < qs.qrWidth; x++ {
			currX := qs.startingX
			for y := 0; y < qs.qrWidth; y++ {
				if qs.qr.At(x, y) == color.Black {
					s.Rect(currX, currY, qs.blockSize, qs.blockSize, "class=\"color\"")
				} else if qs.qr.At(x, y) == color.White {
					s.Rect(currX, currY, qs.blockSize, qs.blockSize, "class=\"bg-color\"")
				}
				currX += qs.blockSize
			}
			currY += qs.blockSize
		}
		return nil
	}
	return errors.New("can not write to SVG: Not a QR code")
}
// SetStartPoint sets the top left start point of QR Code.
// This takes an X and Y value and then adds four white "blocks"
// (the standard quiet zone) of margin around the QR Code.
func (qs *QrSVG) SetStartPoint(x, y int) {
	qs.startingX = x + (qs.blockSize * 4)
	qs.startingY = y + (qs.blockSize * 4)
}
// StartQrSVG creates a start for writing an SVG file that
// only contains a barcode. This is similar to the svg.Start() method.
// This function should only be used if you only want to write a QR code
// to the SVG. Otherwise use the regular svg.Start() method to start your
// SVG file.
func (qs *QrSVG) StartQrSVG(s *svg.SVG) {
	// Total width = QR modules plus a 4-module quiet zone on each side.
	width := (qs.qrWidth * qs.blockSize) + (qs.blockSize * 8)
	qs.SetStartPoint(0, 0)
	s.Start(width, width)
} | internal/qrcode/qr_svg.go | 0.648021 | 0.507202 | qr_svg.go | starcoder |
package gosql
import (
"reflect"
"sort"
"time"
)
// inSlice reports whether k appears in s.
func inSlice(k string, s []string) bool {
	for _, item := range s {
		if item == k {
			return true
		}
	}
	return false
}
//IsZero assert value is zero value
func IsZero(val reflect.Value) bool {
if !val.IsValid() {
return true
}
kind := val.Kind()
switch kind {
case reflect.String:
return val.Len() == 0
case reflect.Bool:
return val.Bool() == false
case reflect.Float32, reflect.Float64:
return val.Float() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return val.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return val.Uint() == 0
case reflect.Ptr, reflect.Chan, reflect.Func, reflect.Interface, reflect.Slice, reflect.Map:
return val.IsNil()
case reflect.Array:
for i := 0; i < val.Len(); i++ {
if !IsZero(val.Index(i)) {
return false
}
}
return true
case reflect.Struct:
if t, ok := val.Interface().(time.Time); ok {
return t.IsZero()
} else {
valid := val.FieldByName("Valid")
if valid.IsValid() {
va, ok := valid.Interface().(bool)
return ok && !va
}
return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
}
default:
return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
}
}
// zeroValueFilter drops fields whose (dereferenced) value is the zero value,
// except for field names listed in zv, which are always kept.
func zeroValueFilter(fields map[string]reflect.Value, zv []string) map[string]interface{} {
	m := make(map[string]interface{})
	for k, v := range fields {
		v = reflect.Indirect(v)
		if inSlice(k, zv) || !IsZero(v) {
			m[k] = v.Interface()
		}
	}
	return m
}
//summer modify 20211209
func ValueFilter(fields map[string]reflect.Value, zv []string) map[string]interface{} {
m := make(map[string]interface{})
for k, v := range fields {
v = reflect.Indirect(v)
m[k] = v.Interface()
}
return m
}
// structAutoTime auto-sets created_at / updated_at style fields: every field
// named in f that still holds its zero value is stamped with the current
// time, encoded to match the field's kind (formatted string, time.Time, or
// Unix seconds for int/uint kinds).
func structAutoTime(fields map[string]reflect.Value, f []string) {
	for k, v := range fields {
		v = reflect.Indirect(v)
		if v.IsValid() && inSlice(k, f) && IsZero(v) {
			switch v.Kind() {
			case reflect.String:
				v.SetString(time.Now().Format("2006-01-02 15:04:05"))
			case reflect.Struct:
				// Truncate to whole seconds so a value written now compares
				// equal to the same value read back from the database.
				v.Set(reflect.ValueOf(time.Now().Truncate(1 * time.Second)))
			case reflect.Int, reflect.Int32, reflect.Int64:
				v.SetInt(time.Now().Unix())
			case reflect.Uint, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
				v.SetUint(uint64(time.Now().Unix()))
			}
		}
	}
}
// structToMap
func structToMap(fields map[string]reflect.Value) map[string]interface{} {
m := make(map[string]interface{})
for k, v := range fields {
v = reflect.Indirect(v)
m[k] = v.Interface()
}
return m
}
// fillPrimaryKey is created fill primary key
func fillPrimaryKey(v reflect.Value, value int64) {
v = reflect.Indirect(v)
if v.IsValid() {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
v.SetInt(value)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
v.SetUint(uint64(value))
}
}
}
// sortedParamKeys Sorts the param names given - map iteration order is explicitly random in Go
// but we need params in a defined order to avoid unexpected results.
func sortedParamKeys(params map[string]interface{}) []string {
sortedKeys := make([]string, len(params))
i := 0
for k := range params {
sortedKeys[i] = k
i++
}
sort.Strings(sortedKeys)
return sortedKeys
} | util.go | 0.596433 | 0.478955 | util.go | starcoder |
package ring
import (
"math/bits"
"unsafe"
"github.com/tuneinsight/lattigo/v3/utils"
)
// GenGaloisParams generates the generators for the Galois endomorphisms:
// successive powers of gen taken modulo 2n, for n/2 entries starting at 1.
func GenGaloisParams(n, gen uint64) (galElRotCol []uint64) {
	mask := (n << 1) - 1 // 2n is a power of two, so & mask reduces mod 2n
	galElRotCol = make([]uint64, n>>1)
	power := uint64(1)
	for i := range galElRotCol {
		galElRotCol[i] = power
		power = (power * gen) & mask
	}
	return galElRotCol
}
// PermuteNTTIndex computes the index table for PermuteNTT: entry i gives the
// source slot whose coefficient maps onto slot i under the Galois element
// galEl, accounting for the bit-reversed coefficient ordering of the NTT.
func (r *Ring) PermuteNTTIndex(galEl uint64) (index []uint64) {
	var mask, tmp1, tmp2, logNthRoot uint64
	logNthRoot = uint64(bits.Len64(r.NthRoot) - 2)
	mask = r.NthRoot - 1
	index = make([]uint64, r.N)
	for i := uint64(0); i < uint64(r.N); i++ {
		// Odd exponent associated with slot i (slots are bit-reversed).
		tmp1 = 2*utils.BitReverse64(i, logNthRoot) + 1
		// Apply x^e -> x^(galEl*e) modulo NthRoot, then map back to a slot.
		tmp2 = ((galEl * tmp1 & mask) - 1) >> 1
		index[i] = utils.BitReverse64(tmp2, logNthRoot)
	}
	return
}
// PermuteNTT applies the Galois transform on a polynomial in the NTT domain.
// It maps the coefficients x^i to x^(gen*i), operating up to the smaller of
// the two polynomials' levels.
// It must be noted that the result cannot be in-place.
func (r *Ring) PermuteNTT(polIn *Poly, gen uint64, polOut *Poly) {
	r.PermuteNTTLvl(utils.MinInt(polIn.Level(), polOut.Level()), polIn, gen, polOut)
}

// PermuteNTTLvl applies the Galois transform on a polynomial in the NTT domain, up to a given level.
// It maps the coefficients x^i to x^(gen*i); the index table is recomputed on
// every call — use PermuteNTTWithIndexLvl with a cached table in hot paths.
// It must be noted that the result cannot be in-place.
func (r *Ring) PermuteNTTLvl(level int, polIn *Poly, gen uint64, polOut *Poly) {
	r.PermuteNTTWithIndexLvl(level, polIn, r.PermuteNTTIndex(gen), polOut)
}
// PermuteNTTWithIndexLvl applies the Galois transform on a polynomial in the NTT domain, up to a given level.
// It maps the coefficients x^i to x^(gen*i) using the PermuteNTTIndex table.
// The gather is manually unrolled 8-wide through unsafe array views, so it
// assumes r.N is a multiple of 8 — TODO confirm this invariant is enforced
// at Ring construction.
// It must be noted that the result cannot be in-place.
func (r *Ring) PermuteNTTWithIndexLvl(level int, polIn *Poly, index []uint64, polOut *Poly) {
	for j := 0; j < r.N; j = j + 8 {
		// View the next 8 table entries as a fixed-size array (no bounds checks).
		x := (*[8]uint64)(unsafe.Pointer(&index[j]))
		for i := 0; i < level+1; i++ {
			z := (*[8]uint64)(unsafe.Pointer(&polOut.Coeffs[i][j]))
			y := polIn.Coeffs[i]
			z[0] = y[x[0]]
			z[1] = y[x[1]]
			z[2] = y[x[2]]
			z[3] = y[x[3]]
			z[4] = y[x[4]]
			z[5] = y[x[5]]
			z[6] = y[x[6]]
			z[7] = y[x[7]]
		}
	}
}

// PermuteNTTWithIndexAndAddNoModLvl applies the Galois transform on a polynomial in the NTT domain, up to a given level,
// and adds the result to the output polynomial without modular reduction.
// It maps the coefficients x^i to x^(gen*i) using the PermuteNTTIndex table.
// Same 8-wide unsafe unrolling as PermuteNTTWithIndexLvl (r.N multiple of 8).
// It must be noted that the result cannot be in-place.
func (r *Ring) PermuteNTTWithIndexAndAddNoModLvl(level int, polIn *Poly, index []uint64, polOut *Poly) {
	for j := 0; j < r.N; j = j + 8 {
		x := (*[8]uint64)(unsafe.Pointer(&index[j]))
		for i := 0; i < level+1; i++ {
			z := (*[8]uint64)(unsafe.Pointer(&polOut.Coeffs[i][j]))
			y := polIn.Coeffs[i]
			z[0] += y[x[0]]
			z[1] += y[x[1]]
			z[2] += y[x[2]]
			z[3] += y[x[3]]
			z[4] += y[x[4]]
			z[5] += y[x[5]]
			z[6] += y[x[6]]
			z[7] += y[x[7]]
		}
	}
}
// Permute applies the Galois transform on a polynomial outside of the NTT domain.
// It maps the coefficients x^i to x^(gen*i)
// It must be noted that the result cannot be in-place.
func (r *Ring) Permute(polIn *Poly, gen uint64, polOut *Poly) {
	var mask, index, indexRaw, logN, tmp uint64
	mask = uint64(r.N - 1)
	logN = uint64(bits.Len64(mask))
	for i := uint64(0); i < uint64(r.N); i++ {
		indexRaw = i * gen
		index = indexRaw & mask
		// tmp is 1 when the exponent wrapped an odd number of times past N;
		// in that case the coefficient picks up a sign flip (applied as qi-c).
		tmp = (indexRaw >> logN) & 1
		for j, qi := range r.Modulus {
			// Branchless select: c when tmp==0, qi-c when tmp==1.
			polOut.Coeffs[j][index] = polIn.Coeffs[j][i]*(tmp^1) | (qi-polIn.Coeffs[j][i])*tmp
		}
	}
} | ring/ring_automorphism.go | 0.636805 | 0.512815 | ring_automorphism.go | starcoder |
package gocuke
import (
"github.com/cockroachdb/apd/v3"
"github.com/cucumber/common/messages/go/v17"
"math/big"
"reflect"
)
// DataTable wraps a data table step argument
type DataTable struct {
	t     TestingT
	table *messages.PickleTable
}

// NumRows returns the number of rows in the data table.
func (d DataTable) NumRows() int {
	return len(d.table.Rows)
}

// NumCols returns the number of columns in the data table.
// The count is taken from row 0; an empty table fails the test.
func (d DataTable) NumCols() int {
	if len(d.table.Rows) == 0 {
		d.t.Fatalf("no table rows")
	}
	return len(d.table.Rows[0].Cells)
}

// Cell returns the cell at the provided 0-based row and col offset.
// Out-of-range indices fail the test via t.Fatalf.
func (d DataTable) Cell(row, col int) *Cell {
	if row >= len(d.table.Rows) {
		d.t.Fatalf("table row %d out of range", row)
	}
	r := d.table.Rows[row]
	if col >= len(r.Cells) {
		d.t.Fatalf("table column %d out of range", col)
	}
	return &Cell{
		t:     d.t,
		value: r.Cells[col].Value,
	}
}

// Cell represents a data table cell.
type Cell struct {
	t     TestingT
	value string
}

// String returns the cell value as a string.
func (c Cell) String() string {
	return c.value
}

// Int64 returns the cell as an int64.
func (c Cell) Int64() int64 {
	return toInt64(c.t, c.value)
}

// BigInt returns the cell as a *big.Int.
func (c Cell) BigInt() *big.Int {
	return toBigInt(c.t, c.value)
}

// Decimal returns the cell value as an *apd.Decimal.
func (c Cell) Decimal() *apd.Decimal {
	return toDecimal(c.t, c.value)
}
// HeaderTable returns the data table as a header table which is a wrapper
// around the table which assumes that the first row is the table header.
func (d DataTable) HeaderTable() *HeaderTable {
	headers := map[string]int{}
	for i := 0; i < d.NumCols(); i++ {
		headers[d.Cell(0, i).String()] = i
	}
	return &HeaderTable{headers: headers, DataTable: d}
}

// HeaderTable is a wrapper around a table which assumes that the first row
// is the table header.
type HeaderTable struct {
	DataTable
	headers map[string]int // column name -> 0-based column index
}
// Get returns the cell at the provided row offset (skipping the header row)
// and column name (as indicated in the header). An unknown column name fails
// the test — previously a map miss silently resolved to index 0 and returned
// the wrong cell.
func (h *HeaderTable) Get(row int, col string) *Cell {
	idx, ok := h.headers[col]
	if !ok {
		h.t.Fatalf("unknown table column %q", col)
	}
	return h.DataTable.Cell(row+1, idx)
}
// NumRows returns the number of data rows in the table, i.e. the total row
// count excluding the header row.
func (h *HeaderTable) NumRows() int {
	return h.DataTable.NumRows() - 1
}
var dataTableType = reflect.TypeOf(DataTable{}) | datatable.go | 0.868576 | 0.652823 | datatable.go | starcoder |
package tuple
// Couple is a 2-tuple struct.
type Couple[T1, T2 any] struct {
	V1 T1
	V2 T2
}

// NewCouple returns a new Couple containing v1 and v2.
func NewCouple[T1, T2 any](v1 T1, v2 T2) Couple[T1, T2] {
	return Couple[T1, T2]{v1, v2}
}

// Pair is an alternative name of Couple.
type Pair[T1, T2 any] Couple[T1, T2]

// NewPair returns a new Pair containing v1 and v2.
func NewPair[T1, T2 any](v1 T1, v2 T2) Pair[T1, T2] {
	return Pair[T1, T2]{v1, v2}
}

// Dyad is an alternative name of Couple.
type Dyad[T1, T2 any] Couple[T1, T2]

// NewDyad returns a new Dyad containing v1 and v2.
func NewDyad[T1, T2 any](v1 T1, v2 T2) Dyad[T1, T2] {
	return Dyad[T1, T2]{v1, v2}
}
// Triple is a 3-tuple struct.
type Triple[T1, T2, T3 any] struct {
	V1 T1
	V2 T2
	V3 T3
}

// NewTriple returns a new Triple containing v1, v2 and v3.
func NewTriple[T1, T2, T3 any](v1 T1, v2 T2, v3 T3) Triple[T1, T2, T3] {
	return Triple[T1, T2, T3]{v1, v2, v3}
}

// Triplet is an alternative name of Triple.
type Triplet[T1, T2, T3 any] Triple[T1, T2, T3]

// NewTriplet returns a new Triplet containing v1, v2 and v3.
func NewTriplet[T1, T2, T3 any](v1 T1, v2 T2, v3 T3) Triplet[T1, T2, T3] {
	return Triplet[T1, T2, T3]{v1, v2, v3}
}

// Triad is an alternative name of Triple.
type Triad[T1, T2, T3 any] Triple[T1, T2, T3]

// NewTriad returns a new Triad containing v1, v2 and v3.
func NewTriad[T1, T2, T3 any](v1 T1, v2 T2, v3 T3) Triad[T1, T2, T3] {
	return Triad[T1, T2, T3]{v1, v2, v3}
}
// Quadruple is a 4-tuple struct.
type Quadruple[T1, T2, T3, T4 any] struct {
	V1 T1
	V2 T2
	V3 T3
	V4 T4
}

// NewQuadruple returns a new Quadruple containing v1, v2, v3 and v4.
func NewQuadruple[T1, T2, T3, T4 any](v1 T1, v2 T2, v3 T3, v4 T4) Quadruple[T1, T2, T3, T4] {
	return Quadruple[T1, T2, T3, T4]{v1, v2, v3, v4}
}

// Quartet is an alternative name of Quadruple.
type Quartet[T1, T2, T3, T4 any] Quadruple[T1, T2, T3, T4]

// NewQuartet returns a new Quartet containing v1, v2, v3 and v4.
func NewQuartet[T1, T2, T3, T4 any](v1 T1, v2 T2, v3 T3, v4 T4) Quartet[T1, T2, T3, T4] {
	return Quartet[T1, T2, T3, T4]{v1, v2, v3, v4}
}

// Quad is an alternative name of Quadruple.
type Quad[T1, T2, T3, T4 any] Quadruple[T1, T2, T3, T4]

// NewQuad returns a new Quad containing v1, v2, v3 and v4.
func NewQuad[T1, T2, T3, T4 any](v1 T1, v2 T2, v3 T3, v4 T4) Quad[T1, T2, T3, T4] {
	return Quad[T1, T2, T3, T4]{v1, v2, v3, v4}
}

// Tetrad is an alternative name of Quadruple.
type Tetrad[T1, T2, T3, T4 any] Quadruple[T1, T2, T3, T4]

// NewTetrad returns a new Tetrad containing v1, v2, v3 and v4.
func NewTetrad[T1, T2, T3, T4 any](v1 T1, v2 T2, v3 T3, v4 T4) Tetrad[T1, T2, T3, T4] {
	return Tetrad[T1, T2, T3, T4]{v1, v2, v3, v4}
}
// Quintuple is a 5-tuple struct.
type Quintuple[T1, T2, T3, T4, T5 any] struct {
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
}

// NewQuintuple returns a new Quintuple containing v1, v2, v3, v4 and v5.
func NewQuintuple[T1, T2, T3, T4, T5 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5) Quintuple[T1, T2, T3, T4, T5] {
	return Quintuple[T1, T2, T3, T4, T5]{v1, v2, v3, v4, v5}
}

// Quint is an alternative name of Quintuple.
type Quint[T1, T2, T3, T4, T5 any] Quintuple[T1, T2, T3, T4, T5]

// NewQuint returns a new Quint containing v1, v2, v3, v4 and v5.
func NewQuint[T1, T2, T3, T4, T5 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5) Quint[T1, T2, T3, T4, T5] {
	return Quint[T1, T2, T3, T4, T5]{v1, v2, v3, v4, v5}
}

// Pentuple is an alternative name of Quintuple.
type Pentuple[T1, T2, T3, T4, T5 any] Quintuple[T1, T2, T3, T4, T5]

// NewPentuple returns a new Pentuple containing v1, v2, v3, v4 and v5.
func NewPentuple[T1, T2, T3, T4, T5 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5) Pentuple[T1, T2, T3, T4, T5] {
	return Pentuple[T1, T2, T3, T4, T5]{v1, v2, v3, v4, v5}
}

// Pentad is an alternative name of Quintuple.
type Pentad[T1, T2, T3, T4, T5 any] Quintuple[T1, T2, T3, T4, T5]

// NewPentad returns a new Pentad containing v1, v2, v3, v4 and v5.
func NewPentad[T1, T2, T3, T4, T5 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5) Pentad[T1, T2, T3, T4, T5] {
	return Pentad[T1, T2, T3, T4, T5]{v1, v2, v3, v4, v5}
}
// Sextuple is a 6-tuple struct.
type Sextuple[T1, T2, T3, T4, T5, T6 any] struct {
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
}

// NewSextuple returns a new Sextuple containing v1, v2, v3, v4, v5 and v6.
func NewSextuple[T1, T2, T3, T4, T5, T6 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6) Sextuple[T1, T2, T3, T4, T5, T6] {
	return Sextuple[T1, T2, T3, T4, T5, T6]{v1, v2, v3, v4, v5, v6}
}

// Hextuple is an alternative name of Sextuple.
type Hextuple[T1, T2, T3, T4, T5, T6 any] Sextuple[T1, T2, T3, T4, T5, T6]

// NewHextuple returns a new Hextuple containing v1, v2, v3, v4, v5 and v6.
func NewHextuple[T1, T2, T3, T4, T5, T6 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6) Hextuple[T1, T2, T3, T4, T5, T6] {
	return Hextuple[T1, T2, T3, T4, T5, T6]{v1, v2, v3, v4, v5, v6}
}

// Hexad is an alternative name of Sextuple.
type Hexad[T1, T2, T3, T4, T5, T6 any] Sextuple[T1, T2, T3, T4, T5, T6]

// NewHexad returns a new Hexad containing v1, v2, v3, v4, v5 and v6.
func NewHexad[T1, T2, T3, T4, T5, T6 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6) Hexad[T1, T2, T3, T4, T5, T6] {
	return Hexad[T1, T2, T3, T4, T5, T6]{v1, v2, v3, v4, v5, v6}
}
// Septuple is a 7-tuple struct.
type Septuple[T1, T2, T3, T4, T5, T6, T7 any] struct {
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
}

// NewSeptuple returns a new Septuple containing v1, v2, v3, v4, v5, v6 and v7.
func NewSeptuple[T1, T2, T3, T4, T5, T6, T7 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7) Septuple[T1, T2, T3, T4, T5, T6, T7] {
	return Septuple[T1, T2, T3, T4, T5, T6, T7]{v1, v2, v3, v4, v5, v6, v7}
}

// Heptuple is an alternative name of Septuple.
type Heptuple[T1, T2, T3, T4, T5, T6, T7 any] Septuple[T1, T2, T3, T4, T5, T6, T7]

// NewHeptuple returns a new Heptuple containing v1, v2, v3, v4, v5, v6 and v7.
func NewHeptuple[T1, T2, T3, T4, T5, T6, T7 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7) Heptuple[T1, T2, T3, T4, T5, T6, T7] {
	return Heptuple[T1, T2, T3, T4, T5, T6, T7]{v1, v2, v3, v4, v5, v6, v7}
}

// Heptad is an alternative name of Septuple.
type Heptad[T1, T2, T3, T4, T5, T6, T7 any] Septuple[T1, T2, T3, T4, T5, T6, T7]

// NewHeptad returns a new Heptad containing v1, v2, v3, v4, v5, v6 and v7.
func NewHeptad[T1, T2, T3, T4, T5, T6, T7 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7) Heptad[T1, T2, T3, T4, T5, T6, T7] {
	return Heptad[T1, T2, T3, T4, T5, T6, T7]{v1, v2, v3, v4, v5, v6, v7}
}
// Octuple is a 8-tuple struct.
type Octuple[T1, T2, T3, T4, T5, T6, T7, T8 any] struct {
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
}

// NewOctuple returns a new Octuple containing v1, v2, v3, v4, v5, v6, v7 and v8.
func NewOctuple[T1, T2, T3, T4, T5, T6, T7, T8 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8) Octuple[T1, T2, T3, T4, T5, T6, T7, T8] {
	return Octuple[T1, T2, T3, T4, T5, T6, T7, T8]{v1, v2, v3, v4, v5, v6, v7, v8}
}

// Octet is an alternative name of Octuple.
type Octet[T1, T2, T3, T4, T5, T6, T7, T8 any] Octuple[T1, T2, T3, T4, T5, T6, T7, T8]

// NewOctet returns a new Octet containing v1, v2, v3, v4, v5, v6, v7 and v8.
func NewOctet[T1, T2, T3, T4, T5, T6, T7, T8 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8) Octet[T1, T2, T3, T4, T5, T6, T7, T8] {
	return Octet[T1, T2, T3, T4, T5, T6, T7, T8]{v1, v2, v3, v4, v5, v6, v7, v8}
}

// Octad is an alternative name of Octuple.
type Octad[T1, T2, T3, T4, T5, T6, T7, T8 any] Octuple[T1, T2, T3, T4, T5, T6, T7, T8]

// NewOctad returns a new Octad containing v1, v2, v3, v4, v5, v6, v7 and v8.
func NewOctad[T1, T2, T3, T4, T5, T6, T7, T8 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8) Octad[T1, T2, T3, T4, T5, T6, T7, T8] {
	return Octad[T1, T2, T3, T4, T5, T6, T7, T8]{v1, v2, v3, v4, v5, v6, v7, v8}
}
// Nonuple is a 9-tuple struct.
type Nonuple[T1, T2, T3, T4, T5, T6, T7, T8, T9 any] struct {
	V1 T1
	V2 T2
	V3 T3
	V4 T4
	V5 T5
	V6 T6
	V7 T7
	V8 T8
	V9 T9
}

// NewNonuple returns a new Nonuple containing v1, v2, v3, v4, v5, v6, v7, v8 and v9.
func NewNonuple[T1, T2, T3, T4, T5, T6, T7, T8, T9 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9) Nonuple[T1, T2, T3, T4, T5, T6, T7, T8, T9] {
	return Nonuple[T1, T2, T3, T4, T5, T6, T7, T8, T9]{v1, v2, v3, v4, v5, v6, v7, v8, v9}
}

// Nonad is an alternative name of Nonuple.
type Nonad[T1, T2, T3, T4, T5, T6, T7, T8, T9 any] Nonuple[T1, T2, T3, T4, T5, T6, T7, T8, T9]

// NewNonad returns a new Nonad containing v1, v2, v3, v4, v5, v6, v7, v8 and v9.
func NewNonad[T1, T2, T3, T4, T5, T6, T7, T8, T9 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9) Nonad[T1, T2, T3, T4, T5, T6, T7, T8, T9] {
	return Nonad[T1, T2, T3, T4, T5, T6, T7, T8, T9]{v1, v2, v3, v4, v5, v6, v7, v8, v9}
}
// Decuple is a 10-tuple struct.
type Decuple[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10 any] struct {
	V1  T1
	V2  T2
	V3  T3
	V4  T4
	V5  T5
	V6  T6
	V7  T7
	V8  T8
	V9  T9
	V10 T10
}

// NewDecuple returns a new Decuple containing v1, v2, v3, v4, v5, v6, v7, v8, v9 and v10.
func NewDecuple[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10) Decuple[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10] {
	return Decuple[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]{v1, v2, v3, v4, v5, v6, v7, v8, v9, v10}
}

// Decad is an alternative name of Decuple.
type Decad[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10 any] Decuple[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]

// NewDecad returns a new Decad containing v1, v2, v3, v4, v5, v6, v7, v8, v9 and v10.
func NewDecad[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10 any](v1 T1, v2 T2, v3 T3, v4 T4, v5 T5, v6 T6, v7 T7, v8 T8, v9 T9, v10 T10) Decad[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10] {
	return Decad[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]{v1, v2, v3, v4, v5, v6, v7, v8, v9, v10}
} | tuple.go | 0.843315 | 0.529324 | tuple.go | starcoder |
package parser
import "fmt"
// NewPos creates a new initialized Pos with the values supplied.
func NewPos(filename string, line int) *Pos {
return &Pos{
Filename: filename,
Line: line,
}
}
// Pos defines a position in a B++ program, commonly used in debug prints
type Pos struct {
Filename string
Line int
}
// String allows Pos to implement the Stringer interface
func (p *Pos) String() string {
return fmt.Sprintf("%s:%d", p.Filename, p.Line)
}
// Statement stores the data for everything in B++
type Statement interface {
Pos() *Pos
Type() DataType
}
// Block is a statement that supports being multiple types
type Block interface {
Pos() *Pos
Type() DataType
Keywords() []string
EndSignature() []DataType
End(keyword string, arguments []Statement, statements []Statement) (bool, error) // Returns whether closed or not
}
// StatementParser defines the type for a statement parser - a cusotm function that can parse the statement and a signature of its parameters
type StatementParser struct {
Parse func(args []Statement, pos *Pos) (Statement, error)
Signature []DataType
}
// BlockParser defines the type of a block parser - a function to parse the first statement of a block and return a block object based on that, and the signature of the first statement in the block
type BlockParser struct {
Parse func(args []Statement, pos *Pos) (Block, error)
Signature []DataType
}
// BasicStatement allows other statements to implement the Statement interface
type BasicStatement struct {
pos *Pos
}
// Pos gives the pos of a basic statement
func (b *BasicStatement) Pos() *Pos {
return b.pos
}
// Type gives NULL for a basic statement, this method is usually overwritten by the statement embedding this struct
func (b *BasicStatement) Type() DataType {
return NULL
}
// Parsers
var parsers = make(map[string]StatementParser)
var blocks = make(map[string]BlockParser)
// Operator is an enum for all the B++ math and comparison operators
type Operator int
const (
EQUAL Operator = iota // =
NOTEQUAL // !=
GREATER // >
LESS // <
GREATEREQUAL // >=
LESSEQUAL // <=
ADDITION // +
SUBTRACTION // -
MULTIPLICATION // *
DIVISION // /
POWER // ^
)
var operatorNames = map[Operator]string{
EQUAL: "EQUAL",
NOTEQUAL: "NOTEQUAL",
GREATER: "GREATER",
LESS: "LESS",
GREATEREQUAL: "GREATEREQUAL",
LESSEQUAL: "LESSEQUAL",
ADDITION: "ADDITION",
SUBTRACTION: "SUBTRACTION",
MULTIPLICATION: "MULTIPLICATION",
DIVISION: "DIVISION",
POWER: "POWER",
}
func (o Operator) String() string {
return operatorNames[o]
}
// DataType is an enum for all B++ data types. It also supports combining multiple data types through bit masks. The data type for the B++ Data struct is commented next to the enum value
type DataType int
// IsEqual compares data types using bit masks
func (a DataType) IsEqual(b DataType) bool {
return (a&b) != 0 || (a&b) == a
}
const (
STRING DataType = 1 << iota // string
INT // int
FLOAT // float64
ARRAY // []Data
IDENTIFIER // string
NULL // nil
VARIADIC // Multiple args
PARAMETER // For PARAM statements
NUMBER = INT | FLOAT // interface{}
ANY = STRING | INT | FLOAT | ARRAY // interface{}
)
// Data represents a piece of Data in B++, most often a literal. It implements the Statement interface.
type Data struct {
*BasicStatement
kind DataType
Data interface{}
}
// Type returns the type of a piece of data
func (d *Data) Type() DataType {
return d.kind
} | old/parser/types.go | 0.720467 | 0.4856 | types.go | starcoder |
package fastxml
import (
"bytes"
"errors"
"io"
)
// Allocate the errors once and return the same structs
var (
	errCDATASuffix   = errors.New("expected Token to end with ']]>'")
	errElementSuffix = errors.New("expected Token to end with '>'")
)

// Allocate these once instead of on each bytes.Index/HasPrefix/HasSuffix call
var (
	prefixCDATA = []byte("<![CDATA[")
	suffixCDATA = []byte("]]>")
)

// Scanner reads a []byte emitting each "token" as a slice.
// Tokens are sub-slices of buf (no copying); the zero value is an empty,
// exhausted scanner.
type Scanner struct {
	buf []byte // immutable slice of data
	pos int    // pos is the current offset in buf
}

// Offset outputs the internal position the Scanner is at.
func (s *Scanner) Offset() int {
	return s.pos
}
// Seek implements the io.Seeker interface.
// On error the position is left unchanged and returned alongside the error.
func (s *Scanner) Seek(offset int64, whence int) (int64, error) {
	var abs int
	switch whence {
	case io.SeekStart:
		abs = int(offset)
	case io.SeekCurrent:
		abs = s.pos + int(offset)
	case io.SeekEnd:
		abs = len(s.buf) + int(offset)
	default:
		return int64(s.pos), errors.New("invalid whence")
	}
	if abs < 0 {
		return int64(s.pos), errors.New("negative position")
	} else if abs > len(s.buf) {
		return int64(s.pos), errors.New("seek past end of buffer")
	}
	s.pos = abs
	return int64(s.pos), nil
}
// Next produces the next token from the scanner.
// A token is either CharData (chardata=true; plain text or a CDATA section)
// or markup starting with '<' (element, ProcInst, Directive, comment).
// When no more tokens are available io.EOF is returned AND the trailing token (if any).
func (s *Scanner) Next() (token []byte, chardata bool, err error) {
	// EOF, no more data
	if s.pos == len(s.buf) {
		err = io.EOF
		return
	}
	// Find the next (potential) element start
	// Doing a lookup on first byte avoids a duplicate call to bytes.IndexByte
	if s.buf[s.pos] != '<' {
		next := bytes.IndexByte(s.buf[s.pos+1:], '<')
		// If we are at the EOF
		if next == -1 {
			// Trailing CharData returned here if present
			if s.pos < len(s.buf) {
				token = s.buf[s.pos:]
				s.pos = len(s.buf)
				chardata = true
				return
			}
			err = io.EOF
			return
		}
		// If there's a gap between next and current pos, that's CharData
		next++ // account for the +1 in IndexByte
		token = s.buf[s.pos : s.pos+next]
		s.pos += next
		chardata = true
		return
	}
	// If it starts with the CDATA prefix it's actually CharData (special case)
	if bytes.HasPrefix(s.buf[s.pos:], prefixCDATA) {
		chardata = true
		// Find the end of the CDATA section
		end := bytes.Index(s.buf[s.pos+8:], suffixCDATA)
		if end == -1 {
			// Unterminated CDATA: emit the remainder with an error.
			token = s.buf[s.pos:]
			err = errCDATASuffix
			return
		}
		end += 11 // len(prefixCDATA) + len(suffixCDATA)
		token = s.buf[s.pos : s.pos+end]
		s.pos += end
		return
	}
	// Find the end of the element
	end := bytes.IndexByte(s.buf[s.pos:], '>')
	if end == -1 {
		// Unterminated markup: emit the remainder with an error.
		token = s.buf[s.pos:]
		err = errElementSuffix
		return
	}
	end++ // len('>')
	token = s.buf[s.pos : s.pos+end]
	s.pos += end
	return
}
// NextElement repeatedly calls Next and returns the first token that is
// an element, discarding CharData and non-element tokens along the way.
func (s *Scanner) NextElement() (elemToken []byte, err error) {
	for {
		tok, isChar, nextErr := s.Next()
		if nextErr != nil {
			return nil, nextErr
		}
		if !isChar && IsElement(tok) {
			return tok, nil
		}
	}
}
// Skip consumes tokens until the end tag closing the most recently
// processed element, tracking element nesting depth as it goes.
func (s *Scanner) Skip() error {
	depth := 1
	for depth > 0 {
		tok, isChar, err := s.Next()
		if err != nil {
			return err
		}
		// CharData, ProcInst and Directive tokens do not affect depth.
		if isChar || !IsElement(tok) {
			continue
		}
		// Self-closing elements open and close in a single token.
		if IsSelfClosing(tok) {
			continue
		}
		// A start tag deepens the nesting, an end tag unwinds it.
		if IsEndElement(tok) {
			depth--
		} else {
			depth++
		}
	}
	return nil
}
// SkipElement extends Skip with a helper for self-closed elements.
// It is faster than SkipToken as it assumes the token is an element.
func (s *Scanner) SkipElement(elemToken []byte) error {
	if elemToken == nil || !IsSelfClosing(elemToken) {
		return s.Skip()
	}
	// A self-closed element has no body to skip.
	return nil
}
// SkipToken extends Skip with a helper for self-closed elements.
// token is an _optional_ parameter; when present and it denotes a
// self-closed element there is nothing left to skip and SkipToken
// returns immediately.
func (s *Scanner) SkipToken(token []byte) error {
	if token == nil || !IsElement(token) || !IsSelfClosing(token) {
		return s.Skip()
	}
	return nil
}
// Reset points the scanner at a new byte slice and rewinds its position.
func (s *Scanner) Reset(buf []byte) {
	s.pos = 0
	s.buf = buf
}
// NewScanner creates a *Scanner for a given byte slice.
// The scanner starts at position 0 of buf.
func NewScanner(buf []byte) *Scanner {
	return &Scanner{buf: buf, pos: 0}
} | scanner.go | 0.562898 | 0.421909 | scanner.go | starcoder |
package geojson
import "github.com/tidwall/tile38/pkg/geojson/geohash"
// LineString is a geojson object with the type "LineString"
type LineString struct {
	// Coordinates holds the line's positions; at least two are required.
	Coordinates []Position
	// BBox is the bounding box, either user supplied or calculated.
	BBox        *BBox
	// bboxDefined records whether BBox came from the input document.
	bboxDefined bool
}
// fillLineString assembles a LineString from parsed coordinates and an
// optional bbox, computing a bbox when none was supplied. A previously
// encountered parse error is passed through; otherwise the coordinate
// count is validated (a LineString needs at least two positions).
func fillLineString(coordinates []Position, bbox *BBox, err error) (LineString, error) {
	if err == nil && len(coordinates) < 2 {
		err = errLineStringInvalidCoordinates
	}
	defined := bbox != nil
	if bbox == nil {
		calculated := level2CalculatedBBox(coordinates, nil)
		bbox = &calculated
	}
	g := LineString{
		Coordinates: coordinates,
		BBox:        bbox,
		bboxDefined: defined,
	}
	return g, err
}
// CalculatedBBox is exterior bbox containing the object.
func (g LineString) CalculatedBBox() BBox {
	return level2CalculatedBBox(g.Coordinates, g.BBox)
}
// CalculatedPoint is a point representation of the object.
func (g LineString) CalculatedPoint() Position {
	return g.CalculatedBBox().center()
}
// Geohash converts the object to a geohash value.
func (g LineString) Geohash(precision int) (string, error) {
	p := g.CalculatedPoint()
	// geohash.Encode takes latitude (Y) first, then longitude (X).
	return geohash.Encode(p.Y, p.X, precision)
}
// PositionCount return the number of coordinates.
func (g LineString) PositionCount() int {
	return level2PositionCount(g.Coordinates, g.BBox)
}
// Weight returns the in-memory size of the object.
func (g LineString) Weight() int {
	return level2Weight(g.Coordinates, g.BBox)
}
// appendJSON appends the GeoJSON encoding of the object to json and
// returns the extended slice.
func (g LineString) appendJSON(json []byte) []byte {
	return appendLevel2JSON(json, "LineString", g.Coordinates, g.BBox, g.bboxDefined)
}
// MarshalJSON allows the object to be encoded in json.Marshal calls.
func (g LineString) MarshalJSON() ([]byte, error) {
	return g.appendJSON(nil), nil
}
// JSON is the json representation of the object. This might not be exactly the same as the original.
func (g LineString) JSON() string {
	return string(g.appendJSON(nil))
}
// String returns a string representation of the object. This might be JSON or something else.
func (g LineString) String() string {
	return g.JSON()
}
// bboxPtr returns the object's bbox pointer (nil when none was supplied).
func (g LineString) bboxPtr() *BBox {
	return g.BBox
}
// hasPositions reports whether the object carries any positions or a
// user-defined bbox.
func (g LineString) hasPositions() bool {
	return g.bboxDefined || len(g.Coordinates) > 0
}
// WithinBBox detects if the object is fully contained inside a bbox.
func (g LineString) WithinBBox(bbox BBox) bool {
	// A user-defined bbox stands in for the geometry itself.
	if g.bboxDefined {
		return rectBBox(g.CalculatedBBox()).InsideRect(rectBBox(bbox))
	}
	return polyPositions(g.Coordinates).InsideRect(rectBBox(bbox))
}
// IntersectsBBox detects if the object intersects a bbox.
func (g LineString) IntersectsBBox(bbox BBox) bool {
	if g.bboxDefined {
		return rectBBox(g.CalculatedBBox()).IntersectsRect(rectBBox(bbox))
	}
	return polyPositions(g.Coordinates).IntersectsRect(rectBBox(bbox))
}
// Within detects if the object is fully contained inside another object.
func (g LineString) Within(o Object) bool {
	return withinObjectShared(g, o,
		func(v Polygon) bool {
			return polyPositions(g.Coordinates).Inside(polyExteriorHoles(v.Coordinates))
		},
	)
}
// Intersects detects if the object intersects another object.
func (g LineString) Intersects(o Object) bool {
	return intersectsObjectShared(g, o,
		func(v Polygon) bool {
			return polyPositions(g.Coordinates).LineStringIntersects(polyExteriorHoles(v.Coordinates))
		},
	)
}
// Nearby detects if the object is nearby a position.
func (g LineString) Nearby(center Position, meters float64) bool {
	return nearbyObjectShared(g, center.X, center.Y, meters)
}
// IsBBoxDefined returns true if the object has a defined bbox.
func (g LineString) IsBBoxDefined() bool {
	return g.bboxDefined
}
// IsGeometry return true if the object is a geojson geometry object. false if it something else.
func (g LineString) IsGeometry() bool {
	return true
} | pkg/geojson/linestring.go | 0.80502 | 0.508117 | linestring.go | starcoder |
package mino
import (
"sort"
)
// Filter is a set of parameters for the Players.Take function.
type Filter struct {
	// Indices indicates the indexes of the elements that must be included.
	// This list is updated based on the filter that we apply. For example,
	// [0,3] tells that this filter keeps 2 elements from the underlying
	// data structure we filter that are stored at indexes 0, 3. This list
	// is always sorted and can be shifted in a circular way.
	Indices []int
}
// ApplyFilters runs every updater in order over a fresh filter and
// returns the resulting filter.
func ApplyFilters(filters []FilterUpdater) *Filter {
	result := &Filter{Indices: []int{}}
	for _, update := range filters {
		update(result)
	}
	return result
}
// FilterUpdater is a function that mutates a filter in place.
type FilterUpdater func(*Filter)
// RotateFilter is a filter that rotates the indices by n steps. A
// positive n rotates to the left, a negative n to the right. The
// behaviour is unknown if not used as the last filter, as subsequent
// updaters could change the order.
func RotateFilter(n int) FilterUpdater {
	return func(filter *Filter) {
		size := len(filter.Indices)
		if size == 0 {
			return
		}
		// Normalize the shift into [0, size).
		shift := ((n % size) + size) % size
		rotated := make([]int, 0, size)
		rotated = append(rotated, filter.Indices[shift:]...)
		rotated = append(rotated, filter.Indices[:shift]...)
		filter.Indices = rotated
	}
}
// IndexFilter is a filter that inserts the given index while keeping
// the list sorted. A duplicate index is ignored.
func IndexFilter(index int) FilterUpdater {
	return func(filters *Filter) {
		current := filters.Indices
		pos := sort.SearchInts(current, index)
		if pos < len(current) && current[pos] == index {
			// already present, nothing to do
			return
		}
		filters.Indices = append(current, index)
		sort.Ints(filters.Indices)
	}
}
// RangeFilter is a filter that includes every index in the half-open
// interval [start, end), skipping indices already present.
func RangeFilter(start, end int) FilterUpdater {
	return func(filters *Filter) {
		existing := filters.Indices
		missing := []int{}
		// Walk the sorted existing list in lock-step with the range.
		cursor := sort.SearchInts(existing, start)
		for idx := start; idx < end; idx++ {
			if cursor < len(existing) && existing[cursor] == idx {
				cursor++
				continue
			}
			missing = append(missing, idx)
		}
		filters.Indices = append(existing, missing...)
		sort.Ints(filters.Indices)
	}
}
// ListFilter is a filter to set the list of indices. It will override any
// index previously set. The slice is stored as-is (not copied), so the
// caller should not mutate it afterwards.
func ListFilter(indices []int) FilterUpdater {
	return func(filters *Filter) {
		filters.Indices = indices
	}
} | mino/option.go | 0.727492 | 0.54359 | option.go | starcoder |
package bls12381
import (
"fmt"
"math"
"math/big"
)
// PointG1 is type for point in G1.
// PointG1 is both used for Affine and Jacobian point representation.
// If z is equal to one the point is accounted as in affine form.
type PointG1 [3]fe
// Set copies p2 into p coordinate-wise and returns p.
func (p *PointG1) Set(p2 *PointG1) *PointG1 {
	p[0].set(&p2[0])
	p[1].set(&p2[1])
	p[2].set(&p2[2])
	return p
}
// tempG1 holds scratch field elements reused by group operations to
// avoid per-call allocations.
type tempG1 struct {
	t [9]*fe
}
// G1 is struct for G1 group.
type G1 struct {
	tempG1
}
// NewG1 constructs a new G1 instance.
func NewG1() *G1 {
	cfgArch()
	t := newTempG1()
	return &G1{t}
}
// newTempG1 allocates the nine scratch field elements used by G1 methods.
func newTempG1() tempG1 {
	t := [9]*fe{}
	for i := 0; i < 9; i++ {
		t[i] = &fe{}
	}
	return tempG1{t}
}
// Q returns a copy of the group order as a big.Int, so callers cannot
// mutate the package-level value.
func (g *G1) Q() *big.Int {
	order := new(big.Int)
	order.Set(q)
	return order
}
// FromUncompressed expects byte slice larger than 96 bytes and given bytes returns a new point in G1.
// Serialization rules are in line with zcash library. See below for details.
// https://github.com/zcash/librustzcash/blob/master/pairing/src/bls12_381/README.md#serialization
// https://docs.rs/bls12_381/0.1.1/bls12_381/notes/serialization/index.html
func (g *G1) FromUncompressed(uncompressed []byte) (*PointG1, error) {
	if len(uncompressed) < 96 {
		return nil, fmt.Errorf("input string should be equal or larger than 96")
	}
	var in [96]byte
	copy(in[:], uncompressed[:96])
	// Bit 7 (compression flag) must be clear for the uncompressed form.
	if in[0]&(1<<7) != 0 {
		return nil, fmt.Errorf("compression flag should be zero")
	}
	// Bit 5 (sort flag) is only meaningful for compressed points.
	if in[0]&(1<<5) != 0 {
		return nil, fmt.Errorf("sort flag should be zero")
	}
	// Bit 6 marks the point at infinity; all other bits must be zero.
	if in[0]&(1<<6) != 0 {
		for i, v := range in {
			if (i == 0 && v != 0x40) || (i != 0 && v != 0x00) {
				return nil, fmt.Errorf("input string should be zero when infinity flag is set")
			}
		}
		return g.Zero(), nil
	}
	// Mask away the three flag bits before decoding x.
	in[0] &= 0x1f
	x, err := fromBytes(in[:48])
	if err != nil {
		return nil, err
	}
	y, err := fromBytes(in[48:])
	if err != nil {
		return nil, err
	}
	z := one()
	p := &PointG1{*x, *y, *z}
	if !g.IsOnCurve(p) {
		return nil, fmt.Errorf("point is not on curve")
	}
	if !g.InCorrectSubgroup(p) {
		return nil, fmt.Errorf("point is not on correct subgroup")
	}
	return p, nil
}
// ToUncompressed given a G1 point returns bytes in uncompressed (x, y) form of the point.
// Serialization rules are in line with zcash library. See below for details.
// https://github.com/zcash/librustzcash/blob/master/pairing/src/bls12_381/README.md#serialization
// https://docs.rs/bls12_381/0.1.1/bls12_381/notes/serialization/index.html
func (g *G1) ToUncompressed(p *PointG1) []byte {
	out := make([]byte, 96)
	if g.IsZero(p) {
		// Point at infinity: only the infinity flag bit is set.
		out[0] |= 1 << 6
		return out
	}
	// Normalize so x and y can be copied out directly.
	g.Affine(p)
	copy(out[:48], toBytes(&p[0]))
	copy(out[48:], toBytes(&p[1]))
	return out
}
// FromCompressed expects byte slice equal or larger than 48 bytes and given bytes returns a new point in G1.
// Serialization rules are in line with zcash library. See below for details.
// https://github.com/zcash/librustzcash/blob/master/pairing/src/bls12_381/README.md#serialization
// https://docs.rs/bls12_381/0.1.1/bls12_381/notes/serialization/index.html
func (g *G1) FromCompressed(compressed []byte) (*PointG1, error) {
	if len(compressed) < 48 {
		return nil, fmt.Errorf("input string should be equal or larger than 48")
	}
	var in [48]byte
	copy(in[:], compressed[:])
	// Bit 7 (compression flag) must be set for the compressed form.
	if in[0]&(1<<7) == 0 {
		return nil, fmt.Errorf("compression flag should be set")
	}
	if in[0]&(1<<6) != 0 {
		// in[0] == (1 << 6) + (1 << 7)
		// Point at infinity; the rest of the input must be zero.
		for i, v := range in {
			if (i == 0 && v != 0xc0) || (i != 0 && v != 0x00) {
				return nil, fmt.Errorf("input string should be zero when infinity flag is set")
			}
		}
		return g.Zero(), nil
	}
	// a is the sort flag selecting which square root of y^2 to take.
	a := in[0]&(1<<5) != 0
	in[0] &= 0x1f
	x, err := fromBytes(in[:])
	if err != nil {
		return nil, err
	}
	// solve curve equation y^2 = x^3 + b for y
	y := &fe{}
	square(y, x)
	mul(y, y, x)
	add(y, y, b)
	if ok := sqrt(y, y); !ok {
		return nil, fmt.Errorf("point is not on curve")
	}
	// Pick the root whose sign matches the sort flag.
	if y.signBE() == a {
		neg(y, y)
	}
	z := one()
	p := &PointG1{*x, *y, *z}
	if !g.InCorrectSubgroup(p) {
		return nil, fmt.Errorf("point is not on correct subgroup")
	}
	return p, nil
}
// ToCompressed given a G1 point returns bytes in compressed form of the point.
// Serialization rules are in line with zcash library. See below for details.
// https://github.com/zcash/librustzcash/blob/master/pairing/src/bls12_381/README.md#serialization
// https://docs.rs/bls12_381/0.1.1/bls12_381/notes/serialization/index.html
func (g *G1) ToCompressed(p *PointG1) []byte {
	out := make([]byte, 48)
	g.Affine(p)
	if g.IsZero(p) {
		out[0] |= 1 << 6
	} else {
		copy(out[:], toBytes(&p[0]))
		// Record the sign of y in the sort flag bit.
		if !p[1].signBE() {
			out[0] |= 1 << 5
		}
	}
	out[0] |= 1 << 7
	return out
}
// fromBytesUnchecked deserializes a point from 96 bytes of x||y without
// performing curve or subgroup membership checks. The caller must
// guarantee the input is at least 96 bytes and encodes a valid point.
func (g *G1) fromBytesUnchecked(in []byte) (*PointG1, error) {
	x, err := fromBytes(in[:48])
	if err != nil {
		return nil, err
	}
	y, err := fromBytes(in[48:])
	if err != nil {
		return nil, err
	}
	z := one()
	return &PointG1{*x, *y, *z}, nil
}
// FromBytes constructs a new point given uncompressed byte input.
// FromBytes does not take zcash flags into account.
// Byte input expected to be larger than 96 bytes.
// First 96 bytes should be concatenation of x and y values.
// Point (0, 0) is considered as infinity.
func (g *G1) FromBytes(in []byte) (*PointG1, error) {
	if len(in) < 96 {
		return nil, fmt.Errorf("input string should be equal or larger than 96")
	}
	p0, err := fromBytes(in[:48])
	if err != nil {
		return nil, err
	}
	p1, err := fromBytes(in[48:])
	if err != nil {
		return nil, err
	}
	// check if given input points to infinity
	if p0.isZero() && p1.isZero() {
		return g.Zero(), nil
	}
	p2 := one()
	p := &PointG1{*p0, *p1, *p2}
	if !g.IsOnCurve(p) {
		return nil, fmt.Errorf("point is not on curve")
	}
	// NOTE(review): unlike FromUncompressed, no subgroup membership check
	// is performed here — confirm callers rely on that difference.
	return p, nil
}
// ToBytes serializes a point into bytes in uncompressed form.
// ToBytes does not take zcash flags into account.
// ToBytes returns (0, 0) if point is infinity.
func (g *G1) ToBytes(p *PointG1) []byte {
	out := make([]byte, 96)
	if g.IsZero(p) {
		return out
	}
	// Normalize so x and y can be copied out directly.
	g.Affine(p)
	copy(out[:48], toBytes(&p[0]))
	copy(out[48:], toBytes(&p[1]))
	return out
}
// New creates a new G1 Point which is equal to zero in other words point at infinity.
func (g *G1) New() *PointG1 {
	return g.Zero()
}
// Zero returns a new G1 Point which is equal to point at infinity.
// In Jacobian coordinates any point with z == 0 is the identity.
func (g *G1) Zero() *PointG1 {
	return &PointG1{
		*zero(),
		*one(),
		*zero(),
	}
}
// One returns a new G1 Point which is equal to generator point.
func (g *G1) One() *PointG1 {
	return g.Copy(&PointG1{}, &g1One)
}
// Copy copies source point to destination point and returns the destination.
func (g *G1) Copy(dst *PointG1, src *PointG1) *PointG1 {
	return dst.Set(src)
}
// IsZero returns true if given point is equal to zero (z == 0).
func (g *G1) IsZero(p *PointG1) bool {
	return isZero(&p[2])
}
// Equal checks if given two G1 point is equal in their affine form.
func (g *G1) Equal(p1, p2 *PointG1) bool {
	if g.IsZero(p1) {
		return g.IsZero(p2)
	}
	if g.IsZero(p2) {
		return g.IsZero(p1)
	}
	t := g.t
	// Cross-multiply so the comparison works without normalizing either
	// point: x1*z2^2 == x2*z1^2 and y1*z2^3 == y2*z1^3.
	square(t[0], &p1[2])
	square(t[1], &p2[2])
	mul(t[2], t[0], &p2[0])
	mul(t[3], t[1], &p1[0])
	mul(t[0], t[0], &p1[2])
	mul(t[1], t[1], &p2[2])
	mul(t[1], t[1], &p1[1])
	mul(t[0], t[0], &p2[1])
	return equal(t[0], t[1]) && equal(t[2], t[3])
}
// InCorrectSubgroup checks whether the given point lies in the correct
// subgroup by verifying that scaling it by the group order yields the
// identity.
func (g *G1) InCorrectSubgroup(p *PointG1) bool {
	scaled := &PointG1{}
	g.MulScalar(scaled, p, q)
	return g.IsZero(scaled)
}
// IsOnCurve checks a G1 point is on curve, i.e. that it satisfies the
// Jacobian form of y^2 = x^3 + b.
func (g *G1) IsOnCurve(p *PointG1) bool {
	if g.IsZero(p) {
		return true
	}
	t := g.t
	// Check y^2 == x^3 + b*z^6 in Jacobian coordinates.
	square(t[0], &p[1])
	square(t[1], &p[0])
	mul(t[1], t[1], &p[0])
	square(t[2], &p[2])
	square(t[3], t[2])
	mul(t[2], t[2], t[3])
	mul(t[2], b, t[2])
	add(t[1], t[1], t[2])
	return equal(t[0], t[1])
}
// IsAffine checks a G1 point whether it is in affine form (z == 1).
func (g *G1) IsAffine(p *PointG1) bool {
	return equal(&p[2], one())
}
// Affine normalizes the point to affine form (z == 1) in place and
// returns it. The point at infinity is returned unchanged.
func (g *G1) Affine(p *PointG1) *PointG1 {
	if g.IsZero(p) {
		return p
	}
	if !g.IsAffine(p) {
		t := g.t
		// x' = x / z^2, y' = y / z^3, z' = 1
		inverse(t[0], &p[2])
		square(t[1], t[0])
		mul(&p[0], &p[0], t[1])
		mul(t[0], t[0], t[1])
		mul(&p[1], &p[1], t[0])
		p[2].set(one())
	}
	return p
}
// Add adds two G1 points p1, p2 and assigns the result to point at first argument.
func (g *G1) Add(r, p1, p2 *PointG1) *PointG1 {
	// http://www.hyperelliptic.org/EFD/gp/auto-shortw-jacobian-0.html#addition-add-2007-bl
	// Adding the identity is a no-op in either direction.
	if g.IsZero(p1) {
		g.Copy(r, p2)
		return r
	}
	if g.IsZero(p2) {
		g.Copy(r, p1)
		return r
	}
	t := g.t
	square(t[7], &p1[2])
	mul(t[1], &p2[0], t[7])
	mul(t[2], &p1[2], t[7])
	mul(t[0], &p2[1], t[2])
	square(t[8], &p2[2])
	mul(t[3], &p1[0], t[8])
	mul(t[4], &p2[2], t[8])
	mul(t[2], &p1[1], t[4])
	// Same x coordinate: either a doubling (same y) or inverse points
	// (result is the identity).
	if equal(t[1], t[3]) {
		if equal(t[0], t[2]) {
			return g.Double(r, p1)
		} else {
			return g.Copy(r, infinity)
		}
	}
	sub(t[1], t[1], t[3])
	double(t[4], t[1])
	square(t[4], t[4])
	mul(t[5], t[1], t[4])
	sub(t[0], t[0], t[2])
	double(t[0], t[0])
	square(t[6], t[0])
	sub(t[6], t[6], t[5])
	mul(t[3], t[3], t[4])
	double(t[4], t[3])
	sub(&r[0], t[6], t[4])
	sub(t[4], t[3], &r[0])
	mul(t[6], t[2], t[5])
	double(t[6], t[6])
	mul(t[0], t[0], t[4])
	sub(&r[1], t[0], t[6])
	add(t[0], &p1[2], &p2[2])
	square(t[0], t[0])
	sub(t[0], t[0], t[7])
	sub(t[0], t[0], t[8])
	mul(&r[2], t[0], t[1])
	return r
}
// Double doubles a G1 point p and assigns the result to the point at first argument.
func (g *G1) Double(r, p *PointG1) *PointG1 {
	// http://www.hyperelliptic.org/EFD/gp/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
	// Doubling the identity yields the identity.
	if g.IsZero(p) {
		g.Copy(r, p)
		return r
	}
	t := g.t
	square(t[0], &p[0])
	square(t[1], &p[1])
	square(t[2], t[1])
	add(t[1], &p[0], t[1])
	square(t[1], t[1])
	sub(t[1], t[1], t[0])
	sub(t[1], t[1], t[2])
	double(t[1], t[1])
	double(t[3], t[0])
	add(t[0], t[3], t[0])
	square(t[4], t[0])
	double(t[3], t[1])
	sub(&r[0], t[4], t[3])
	sub(t[1], t[1], &r[0])
	double(t[2], t[2])
	double(t[2], t[2])
	double(t[2], t[2])
	mul(t[0], t[0], t[1])
	sub(t[1], t[0], t[2])
	mul(t[0], &p[1], &p[2])
	r[1].set(t[1])
	double(&r[2], t[0])
	return r
}
// Neg negates a G1 point p and assigns the result to the point at first argument.
func (g *G1) Neg(r, p *PointG1) *PointG1 {
	// Negation only flips the sign of the y coordinate.
	r[0].set(&p[0])
	r[2].set(&p[2])
	neg(&r[1], &p[1])
	return r
}
// Sub subtracts two G1 points p1, p2 and assigns the result to point at first argument.
func (g *G1) Sub(c, a, b *PointG1) *PointG1 {
	// a - b == a + (-b)
	d := &PointG1{}
	g.Neg(d, b)
	g.Add(c, a, d)
	return c
}
// MulScalar multiplies a point by given scalar value in big.Int and assigns the result to point at first argument.
// NOTE(review): this is a plain right-to-left double-and-add, so its
// timing depends on the scalar; confirm constant-time behaviour is not
// required by callers before using it with secret scalars.
func (g *G1) MulScalar(c, p *PointG1, e *big.Int) *PointG1 {
	q, n := &PointG1{}, &PointG1{}
	g.Copy(n, p)
	l := e.BitLen()
	for i := 0; i < l; i++ {
		if e.Bit(i) == 1 {
			g.Add(q, q, n)
		}
		g.Double(n, n)
	}
	return g.Copy(c, q)
}
// ClearCofactor maps given a G1 point to correct subgroup
// by multiplying it with the effective cofactor.
func (g *G1) ClearCofactor(p *PointG1) {
	g.MulScalar(p, p, cofactorEFFG1)
}
// MultiExp calculates multi exponentiation. Given pairs of G1 point and scalar values
// (P_0, e_0), (P_1, e_1), ... (P_n, e_n) calculates r = e_0 * P_0 + e_1 * P_1 + ... + e_n * P_n
// Length of points and scalars are expected to be equal, otherwise an error is returned.
// Result is assigned to point at first argument.
// Note: the scalars in powers are consumed (right-shifted in place), so
// the caller must not rely on their values afterwards.
func (g *G1) MultiExp(r *PointG1, points []*PointG1, powers []*big.Int) (*PointG1, error) {
	if len(points) != len(powers) {
		return nil, fmt.Errorf("point and scalar vectors should be in same length")
	}
	// Window width c: fixed at 3 for small inputs, grown with input size
	// otherwise.
	var c uint32 = 3
	if len(powers) >= 32 {
		c = uint32(math.Ceil(math.Log10(float64(len(powers)))))
	}
	bucketSize, numBits := (1<<c)-1, uint32(g.Q().BitLen())
	windows := make([]*PointG1, numBits/c+1)
	acc, sum := g.New(), g.New()
	mask := (uint64(1) << c) - 1
	j := 0
	var cur uint32
	for cur <= numBits {
		g.Copy(acc, g.Zero())
		// Fresh buckets for this window, one per non-zero digit value.
		// (The original also allocated a bucket slice before the loop,
		// which was immediately discarded here on the first iteration.)
		bucket := make([]*PointG1, bucketSize)
		for i := 0; i < bucketSize; i++ {
			bucket[i] = g.New()
		}
		for i := 0; i < len(powers); i++ {
			s0 := powers[i].Uint64()
			index := uint(s0 & mask)
			if index != 0 {
				g.Add(bucket[index-1], bucket[index-1], points[i])
			}
			powers[i] = new(big.Int).Rsh(powers[i], uint(c))
		}
		// Accumulate buckets from highest digit to lowest; sum is the
		// running suffix sum so each bucket is effectively weighted by
		// its digit value.
		g.Copy(sum, g.Zero())
		for i := bucketSize - 1; i >= 0; i-- {
			g.Add(sum, sum, bucket[i])
			g.Add(acc, acc, sum)
		}
		windows[j] = g.New()
		g.Copy(windows[j], acc)
		j++
		cur += c
	}
	// Combine the windows Horner-style, shifting by c doublings per window.
	g.Copy(acc, g.Zero())
	for i := len(windows) - 1; i >= 0; i-- {
		for j := uint32(0); j < c; j++ {
			g.Double(acc, acc)
		}
		g.Add(acc, acc, windows[i])
	}
	g.Copy(r, acc)
	return r, nil
}
// MapToCurve given a byte slice returns a valid G1 point.
// This mapping function implements the Simplified Shallue-van de Woestijne-Ulas method.
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06
// Input byte slice should be a valid field element, otherwise an error is returned.
func (g *G1) MapToCurve(in []byte) (*PointG1, error) {
	u, err := fromBytes(in)
	if err != nil {
		return nil, err
	}
	// Map the field element onto the isogenous curve, then onto E.
	x, y := swuMapG1(u)
	isogenyMapG1(x, y)
	one := one()
	p := &PointG1{*x, *y, *one}
	g.ClearCofactor(p)
	return g.Affine(p), nil
}
// EncodeToCurve given a message and domain separator tag returns the hash result
// which is a valid curve point.
// Implementation follows BLS12381G1_XMD:SHA-256_SSWU_NU_ suite at
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06
func (g *G1) EncodeToCurve(msg, domain []byte) (*PointG1, error) {
	// Non-uniform encoding: a single field element is hashed and mapped.
	hashRes, err := hashToFpXMDSHA256(msg, domain, 1)
	if err != nil {
		return nil, err
	}
	u := hashRes[0]
	x, y := swuMapG1(u)
	isogenyMapG1(x, y)
	one := one()
	p := &PointG1{*x, *y, *one}
	g.ClearCofactor(p)
	return g.Affine(p), nil
}
// HashToCurve given a message and domain separator tag returns the hash result
// which is a valid curve point.
// Implementation follows BLS12381G1_XMD:SHA-256_SSWU_RO_ suite at
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06
func (g *G1) HashToCurve(msg, domain []byte) (*PointG1, error) {
	// Random-oracle construction: two field elements are mapped and the
	// resulting points are added before clearing the cofactor.
	hashRes, err := hashToFpXMDSHA256(msg, domain, 2)
	if err != nil {
		return nil, err
	}
	u0, u1 := hashRes[0], hashRes[1]
	x0, y0 := swuMapG1(u0)
	x1, y1 := swuMapG1(u1)
	one := one()
	p0, p1 := &PointG1{*x0, *y0, *one}, &PointG1{*x1, *y1, *one}
	g.Add(p0, p0, p1)
	g.Affine(p0)
	isogenyMapG1(&p0[0], &p0[1])
	g.ClearCofactor(p0)
	return g.Affine(p0), nil
} | g1.go | 0.760917 | 0.54462 | g1.go | starcoder |
package cli
import (
"fmt"
"os"
"strconv"
"strings"
"time"
)
// EnvAttribute describes expected environmental attributes associated with the cli app.
// It also provides the default value of the environmental attribute if missing from the environment.
// Only the value field matching Type is meaningful for a given attribute.
type EnvAttribute struct {
	// Name is the environment variable (e.g. DATASET, USERNAME)
	Name string
	// Type holds the type name of the attribute, e.g. int, int64, float64, string, bool, uint, uint64, time.Duration
	Type string
	// BoolValue holds the default boolean
	BoolValue bool
	// IntValue holds the default int
	IntValue int
	// Int64Value holds the default int64
	Int64Value int64
	// UintValue holds the default uint
	UintValue uint
	// Uint64Value holds the default uint64
	Uint64Value uint64
	// Float64Value holds the default float64
	Float64Value float64
	// DurationValue holds the default time.Duration
	DurationValue time.Duration
	// StringValue holds the default string
	StringValue string
	// Usage describes the environment variable role and expected setting
	Usage string
}
// EnvBool adds an environment variable which is evaluated before
// evaluating options and returns a pointer to the value. The pointer
// refers into the attribute stored in c.env, so values parsed later by
// ParseEnv are visible through it.
func (c *Cli) EnvBool(name string, value bool, usage string) *bool {
	attr := &EnvAttribute{
		Name:      name,
		Type:      fmt.Sprintf("%T", value),
		BoolValue: value,
		Usage:     usage,
	}
	c.env[name] = attr
	return &attr.BoolValue
}

// EnvBoolVar adds an environment variable which is evaluated before
// evaluating options and stores the default value in *p.
// It is the environment counterpart to flag.BoolVar().
func (c *Cli) EnvBoolVar(p *bool, name string, value bool, usage string) error {
	attr := &EnvAttribute{
		Name:      name,
		Type:      fmt.Sprintf("%T", value),
		BoolValue: value,
		Usage:     usage,
	}
	c.env[name] = attr
	// Write through the pointer: the previous `p = &...` form only
	// rebound the local parameter and never reached the caller's variable.
	*p = attr.BoolValue
	return nil
}

// EnvInt adds an environment variable which is evaluated before
// evaluating options and returns a pointer to the value stored in c.env.
func (c *Cli) EnvInt(name string, value int, usage string) *int {
	attr := &EnvAttribute{
		Name:     name,
		Type:     fmt.Sprintf("%T", value),
		IntValue: value,
		Usage:    usage,
	}
	c.env[name] = attr
	return &attr.IntValue
}

// EnvIntVar adds an environment variable which is evaluated before
// evaluating options and stores the default value in *p.
// It is the environment counterpart to flag.IntVar().
func (c *Cli) EnvIntVar(p *int, name string, value int, usage string) error {
	attr := &EnvAttribute{
		Name:     name,
		Type:     fmt.Sprintf("%T", value),
		IntValue: value,
		Usage:    usage,
	}
	c.env[name] = attr
	// Assign through the pointer so the caller's variable is updated.
	*p = attr.IntValue
	return nil
}
// EnvInt64 adds an environment variable which is evaluated before
// evaluating options and returns a pointer to the value stored in c.env.
func (c *Cli) EnvInt64(name string, value int64, usage string) *int64 {
	attr := &EnvAttribute{
		Name:       name,
		Type:       fmt.Sprintf("%T", value),
		Int64Value: value,
		Usage:      usage,
	}
	c.env[name] = attr
	return &attr.Int64Value
}

// EnvInt64Var adds an environment variable which is evaluated before
// evaluating options and stores the default value in *p.
// It is the environment counterpart to flag.Int64Var().
func (c *Cli) EnvInt64Var(p *int64, name string, value int64, usage string) error {
	attr := &EnvAttribute{
		Name:       name,
		Type:       fmt.Sprintf("%T", value),
		Int64Value: value,
		Usage:      usage,
	}
	c.env[name] = attr
	// Assign through the pointer: the previous `p = &...` form was a
	// no-op for the caller.
	*p = attr.Int64Value
	return nil
}

// EnvUint adds an environment variable which is evaluated before
// evaluating options and returns a pointer to the value stored in c.env.
func (c *Cli) EnvUint(name string, value uint, usage string) *uint {
	attr := &EnvAttribute{
		Name:      name,
		Type:      fmt.Sprintf("%T", value),
		UintValue: value,
		Usage:     usage,
	}
	c.env[name] = attr
	return &attr.UintValue
}

// EnvUintVar adds an environment variable which is evaluated before
// evaluating options and stores the default value in *p.
// It is the environment counterpart to flag.UintVar().
func (c *Cli) EnvUintVar(p *uint, name string, value uint, usage string) error {
	attr := &EnvAttribute{
		Name:      name,
		Type:      fmt.Sprintf("%T", value),
		UintValue: value,
		Usage:     usage,
	}
	c.env[name] = attr
	// Assign through the pointer so the caller's variable is updated.
	*p = attr.UintValue
	return nil
}
// EnvUint64 adds an environment variable which is evaluated before
// evaluating options and returns a pointer to the value stored in c.env.
func (c *Cli) EnvUint64(name string, value uint64, usage string) *uint64 {
	attr := &EnvAttribute{
		Name:        name,
		Type:        fmt.Sprintf("%T", value),
		Uint64Value: value,
		Usage:       usage,
	}
	c.env[name] = attr
	return &attr.Uint64Value
}

// EnvFloat64 adds an environment variable which is evaluated before
// evaluating options and returns a pointer to the value stored in c.env.
func (c *Cli) EnvFloat64(name string, value float64, usage string) *float64 {
	attr := &EnvAttribute{
		Name:         name,
		Type:         fmt.Sprintf("%T", value),
		Float64Value: value,
		Usage:        usage,
	}
	c.env[name] = attr
	return &attr.Float64Value
}

// EnvUint64Var adds an environment variable which is evaluated before
// evaluating options and stores the default value in *p.
// It is the environment counterpart to flag.Uint64Var().
func (c *Cli) EnvUint64Var(p *uint64, name string, value uint64, usage string) error {
	attr := &EnvAttribute{
		Name:        name,
		Type:        fmt.Sprintf("%T", value),
		Uint64Value: value,
		Usage:       usage,
	}
	c.env[name] = attr
	// Assign through the pointer: the previous `p = &...` form was a
	// no-op for the caller.
	*p = attr.Uint64Value
	return nil
}
// EnvString adds an environment variable which is evaluated before
// evaluating options and returns a pointer to the value stored in c.env.
func (c *Cli) EnvString(name string, value string, usage string) *string {
	attr := &EnvAttribute{
		Name:        name,
		Type:        fmt.Sprintf("%T", value),
		StringValue: value,
		Usage:       usage,
	}
	c.env[name] = attr
	return &attr.StringValue
}

// EnvStringVar adds an environment variable which is evaluated before
// evaluating options and stores the default value in *p.
// It is the environment counterpart to flag.StringVar().
func (c *Cli) EnvStringVar(p *string, name string, value string, usage string) error {
	attr := &EnvAttribute{
		Name:        name,
		Type:        fmt.Sprintf("%T", value),
		StringValue: value,
		Usage:       usage,
	}
	c.env[name] = attr
	*p = attr.StringValue
	return nil
}

// EnvDuration adds an environment variable which is evaluated before
// evaluating options and returns a pointer to the value stored in c.env.
func (c *Cli) EnvDuration(name string, value time.Duration, usage string) *time.Duration {
	attr := &EnvAttribute{
		Name:          name,
		Type:          fmt.Sprintf("%T", value),
		DurationValue: value,
		Usage:         usage,
	}
	c.env[name] = attr
	return &attr.DurationValue
}

// EnvDurationVar adds an environment variable which is evaluated before
// evaluating options and stores the default value in *p.
// It is the environment counterpart to flag.DurationVar().
func (c *Cli) EnvDurationVar(p *time.Duration, name string, value time.Duration, usage string) error {
	attr := &EnvAttribute{
		Name:          name,
		Type:          fmt.Sprintf("%T", value),
		DurationValue: value,
		Usage:         usage,
	}
	c.env[name] = attr
	// Assign through the pointer: the previous `p = &...` form was a
	// no-op for the caller (compare EnvStringVar, which was correct).
	*p = attr.DurationValue
	return nil
}
// EnvAttribute returns the attribute registered under name, or an error
// when no such environment attribute exists.
func (c *Cli) EnvAttribute(name string) (*EnvAttribute, error) {
	if e, ok := c.env[name]; ok {
		return e, nil
	}
	return nil, fmt.Errorf("%q not defined for environment", name)
}
// Env returns the usage string documenting the named environment
// attribute, or a placeholder message for unknown names.
func (c *Cli) Env(name string) string {
	if e, ok := c.env[name]; ok {
		return e.Usage
	}
	return fmt.Sprintf("%q not documented for environment", name)
}
// Getenv returns the current value of the named environment attribute
// rendered as a string. An empty string is returned for unknown names.
func (c *Cli) Getenv(name string) string {
	e, err := c.EnvAttribute(name)
	if err != nil {
		return ""
	}
	switch e.Type {
	case "bool":
		return strconv.FormatBool(e.BoolValue)
	case "int":
		return strconv.Itoa(e.IntValue)
	case "int64":
		return strconv.FormatInt(e.Int64Value, 10)
	case "uint":
		return strconv.FormatUint(uint64(e.UintValue), 10)
	case "uint64":
		return strconv.FormatUint(e.Uint64Value, 10)
	case "float64":
		// fmt's %f keeps the original six-decimal rendering.
		return fmt.Sprintf("%f", e.Float64Value)
	case "time.Duration":
		// Duration has its own String method; Sprintf("%s", ...) was
		// redundant (gosimple S1025).
		return e.DurationValue.String()
	}
	// Any other declared type falls back to the string value.
	return e.StringValue
}
// ParseEnv loops through the os environment using os.Getenv() and updates
// the registered EnvAttribute values in c.env. Returns an error if a set
// variable cannot be parsed as its declared type. Unset or empty
// variables leave the defaults untouched.
func (c *Cli) ParseEnv() error {
	var (
		err error
		u64 uint64
	)
	for k, e := range c.env {
		s := strings.TrimSpace(os.Getenv(k))
		// NOTE: we only parse the environment if it is not an empty string
		if s != "" {
			switch e.Type {
			case "bool":
				e.BoolValue, err = strconv.ParseBool(s)
			case "int":
				e.IntValue, err = strconv.Atoi(s)
			case "int64":
				e.Int64Value, err = strconv.ParseInt(s, 10, 64)
			case "uint":
				// NOTE(review): bitSize 32 caps uint parsing at 32 bits even
				// on 64-bit platforms where uint is 64-bit — confirm whether
				// that restriction is intended.
				u64, err = strconv.ParseUint(s, 10, 32)
				e.UintValue = uint(u64)
			case "uint64":
				e.Uint64Value, err = strconv.ParseUint(s, 10, 64)
			case "float64":
				e.Float64Value, err = strconv.ParseFloat(s, 64)
			case "time.Duration":
				e.DurationValue, err = time.ParseDuration(s)
			default:
				// Unknown declared types are stored verbatim as strings.
				e.StringValue = s
			}
			if err != nil {
				return fmt.Errorf("%q should be type %q, %s", e.Name, e.Type, err)
			}
		}
		c.env[k] = e
	}
	return err
} | cli/env.go | 0.543348 | 0.410106 | env.go | starcoder |
package expr
import (
"fmt"
"math"
"strings"
"github.com/jesperkha/Fizz/env"
"github.com/jesperkha/Fizz/lexer"
"github.com/jesperkha/Fizz/util"
)
// EvaluateExpression evaluates an expression tree. It hands off to helper
// functions which can recursively call back to resolve nested
// expressions. The returned value is the result of the expression as a
// Go literal.
func EvaluateExpression(expr *Expression) (value interface{}, err error) {
	switch expr.Type {
	case Literal:
		return evalLiteral(expr)
	case Unary:
		return evalUnary(expr)
	case Binary:
		return evalBinary(expr)
	case Group:
		// Parentheses only group; evaluate the inner expression directly.
		return EvaluateExpression(expr.Inner)
	case Variable:
		return env.Get(expr.Name)
	case Call:
		return evalCall(expr)
	case Getter:
		return evalGetter(expr)
	case Array:
		return evalArray(expr)
	case Index:
		return evalIndex(expr)
	}
	// Fallthrough for expression types without a handler.
	return expr, ErrInvalidExpression
}
// equal reports whether two Fizz values are equal. Arrays and objects are
// pointer types, so comparing them with == would compare addresses; they
// are instead compared element-wise through their IsEqual methods. All
// other values use Go's == semantics.
func equal(left, right interface{}) bool {
	leftType, rightType := util.GetType(left), util.GetType(right)
	switch {
	case leftType == "array" && rightType == "array":
		a, _ := left.(*env.Array)
		b, _ := right.(*env.Array)
		return a.IsEqual(b)
	case leftType == "object" && rightType == "object":
		a, _ := left.(*env.Object)
		b, _ := right.(*env.Object)
		return a.IsEqual(b)
	default:
		return left == right
	}
}
// evalLiteral unwraps a literal expression. Token types at or above
// lexer.STRING are the valid literal kinds; anything below is malformed.
func evalLiteral(literal *Expression) (value interface{}, err error) {
	if literal.Value.Type < lexer.STRING {
		return value, ErrInvalidExpression
	}
	return literal.Value.Literal, err
}
// evalUnary evaluates a unary expression (-x, !x, type x) by first
// evaluating the operand and then applying the operator.
func evalUnary(unary *Expression) (value interface{}, err error) {
	right, err := EvaluateExpression(unary.Right)
	if err != nil {
		return value, err
	}

	// Matches to operator
	switch unary.Operand.Type {
	case lexer.MINUS:
		// Arithmetic negation is only defined for numbers (float64).
		if isNumber(right) {
			return -right.(float64), err
		}
		op, typ, line := unary.Operand.Lexeme, util.GetType(right), unary.Line
		return nil, fmt.Errorf(ErrInvalidOperatorType.Error(), op, typ, line)
	case lexer.NOT:
		// Logical not of the operand's truthiness.
		return !isTruthy(right), err
	case lexer.TYPE:
		// The 'type' operator yields the name of the operand's Fizz type.
		return util.GetType(right), err
	}

	// If none of the mentioned operators are present it's an invalid one
	op, line := unary.Operand.Lexeme, unary.Line
	return value, fmt.Errorf(ErrInvalidUnaryOperator.Error(), op, line)
}
// evalBinary evaluates a binary expression by recursively evaluating the
// left and right operands and then applying the operator. Arithmetic and
// ordering operators require number operands; equality, and/or, string
// concatenation, and the array 'in' operator are handled separately below.
func evalBinary(binary *Expression) (value interface{}, err error) {
	opType := binary.Operand.Type

	// Recursively evaluates left and right expressions
	left, err := EvaluateExpression(binary.Left)
	if err != nil {
		return nil, err
	}
	right, err := EvaluateExpression(binary.Right)
	if err != nil {
		return nil, err
	}

	// Operations if both are number types
	if isNumber(right) && isNumber(left) {
		vl, vr := left.(float64), right.(float64)
		switch opType {
		case lexer.PLUS:
			return vl + vr, err
		case lexer.MINUS:
			return vl - vr, err
		case lexer.STAR:
			return vl * vr, err
		case lexer.HAT:
			return math.Pow(vl, vr), err
		case lexer.GREATER:
			return vl > vr, err
		case lexer.LESS:
			return vl < vr, err
		case lexer.LESS_EQUAL:
			return vl <= vr, err
		case lexer.GREATER_EQUAL:
			return vl >= vr, err
		case lexer.MODULO:
			// Bug fix: int(vl) % int(vr) panics with a Go runtime
			// "integer divide by zero" when vr is 0, so guard it and
			// report a Fizz divide-by-zero error like SLASH does.
			if vr == 0 {
				return nil, util.FormatError(ErrDivideByZero, binary.Line)
			}
			return float64(int(vl) % int(vr)), err
		case lexer.SLASH:
			if vr == 0 {
				return nil, util.FormatError(ErrDivideByZero, binary.Line)
			}
			return vl / vr, err
		}
	}

	// Types do not need to match for comparisons
	switch opType {
	case lexer.EQUAL_EQUAL:
		return equal(left, right), err
	case lexer.NOT_EQUAL:
		return !equal(left, right), err
	case lexer.AND:
		return isTruthy(left) && isTruthy(right), err
	case lexer.OR:
		return isTruthy(left) || isTruthy(right), err
	}

	// Support string addition
	if util.GetType(left) == "string" && util.GetType(right) == "string" && opType == lexer.PLUS {
		return strings.Join([]string{left.(string), right.(string)}, ""), err
	}

	// Binary 'in' operator: membership test over an array's values.
	if util.GetType(right) == "array" && opType == lexer.IN {
		arr, _ := right.(*env.Array)
		for _, v := range arr.Values {
			if equal(v, left) {
				return true, err
			}
		}
		return false, err
	}

	// If none of the previous checks worked the expression is invalid
	typeLeft, typeRight := util.GetType(left), util.GetType(right)
	op, line := binary.Operand.Lexeme, binary.Line
	return nil, fmt.Errorf(ErrInvalidOperatorTypes.Error(), op, typeLeft, typeRight, line)
}
// evalCall evaluates a function call expression. The callee (left side)
// must evaluate to an *env.Callable; the inner expression supplies zero,
// one, or an Args list of argument expressions. The argument count is
// checked against the callable's declared NumArgs before calling.
func evalCall(call *Expression) (value interface{}, err error) {
	callee, err := EvaluateExpression(call.Left)
	if err != nil {
		return value, err
	}

	// Function should be of type env.Callable
	if f, ok := callee.(*env.Callable); ok {
		argToken := call.Inner.Inner
		args := []interface{}{}

		// Single argument
		if argToken.Type != Args && argToken.Type != EmptyExpression {
			arg, err := EvaluateExpression(call.Inner)
			if err != nil {
				return value, err
			}
			args = append(args, arg)
		}

		// Argument list: evaluate each expression in order of appearance.
		if argToken.Type == Args {
			for _, arg := range argToken.Exprs {
				val, err := EvaluateExpression(&arg)
				if err != nil {
					return value, err
				}
				args = append(args, val)
			}
		}

		// -1 is set from /lib and should be ignored as it is handled there
		if len(args) != f.NumArgs && f.NumArgs != -1 {
			return value, fmt.Errorf(ErrIncorrectArgs.Error(), f.Name, f.NumArgs, len(args), call.Line)
		}

		// Errors from lib need line format
		value, err = f.Call(args...)
		return value, util.FormatError(err, call.Line)
	}

	// Callee did not evaluate to a callable value.
	return value, fmt.Errorf(ErrNotFunction.Error(), util.GetType(callee), call.Line)
}
// evalGetter evaluates a dot/getter expression (parent.name). The left
// side must evaluate to an *env.Object that defines the requested name.
func evalGetter(getter *Expression) (value interface{}, err error) {
	line := getter.Line
	name := getter.Right.Name

	// No name before dot raises error here. No name after dot raises error in lexer.
	if getter.Left.Type == EmptyExpression {
		return value, fmt.Errorf(ErrInvalidExpression.Error(), line)
	}

	// Recursively get parent expression, must be object
	parent, err := EvaluateExpression(getter.Left)
	if err != nil {
		return value, err
	}

	// Only objects allow getter expressions
	if obj, ok := parent.(*env.Object); ok {
		value, err = obj.Get(name)
		if err != nil {
			// Get's error template expects object name, field name, line.
			return value, fmt.Errorf(err.Error(), obj.Name, name, line)
		}
		return value, err
	}

	return value, fmt.Errorf(ErrNotObject.Error(), util.GetType(parent), line)
}
// evalArray builds an *env.Array from an array literal expression. The
// inner expression may be empty ([]), a single element, or an Args list.
func evalArray(array *Expression) (value interface{}, err error) {
	inner := array.Inner
	if inner.Type == EmptyExpression {
		// Empty array literal
		return &env.Array{}, err
	}

	values := []interface{}{}
	if inner.Type == Args {
		// Comma separated list of element expressions
		for idx := range inner.Exprs {
			element, evalErr := EvaluateExpression(&inner.Exprs[idx])
			if evalErr != nil {
				return value, evalErr
			}
			values = append(values, element)
		}
	} else {
		// Single element array
		element, evalErr := EvaluateExpression(inner)
		if evalErr != nil {
			return value, evalErr
		}
		values = append(values, element)
	}

	return &env.Array{Values: values, Length: len(values)}, err
}
// evalIndex evaluates an index expression (target[i]) for arrays and
// strings. The index expression must evaluate to an integer. String
// indexing yields a one-byte string.
func evalIndex(array *Expression) (value interface{}, err error) {
	line := array.Line
	arr, err := EvaluateExpression(array.Left)
	if err != nil {
		return value, err
	}
	index, err := EvaluateExpression(array.Right)
	if err != nil {
		return value, err
	}

	// Get index as integer. If not return error
	indexInt, ok := util.IsInt(index)
	if !ok {
		return value, fmt.Errorf(ErrNotInteger.Error(), line)
	}

	if a, ok := arr.(*env.Array); ok {
		// Env handles getting index and errors for out of range etc
		value, err = a.Get(indexInt)
		if err != nil {
			return value, fmt.Errorf(err.Error(), line)
		}
		return value, err
	}

	// Get string index
	if s, ok := arr.(string); ok {
		// Bug fix: the old check (indexInt > len(s)) let indexInt ==
		// len(s) and negative indexes through, panicking on s[indexInt].
		if indexInt < 0 || indexInt >= len(s) {
			return value, env.ErrIndexOutOfRange
		}
		return string(s[indexInt]), err
	}

	// arr is not array (or string)
	return value, fmt.Errorf(env.ErrNotArray.Error(), util.GetType(arr), line)
}
// isTruthy reports whether a Fizz value counts as true in a boolean
// context: everything except the literal false and nil is truthy.
func isTruthy(value interface{}) bool {
	switch value {
	case false, nil:
		return false
	default:
		return true
	}
}
func isNumber(value interface{}) bool {
return util.GetType(value) == "number"
} | expr/evaluate.go | 0.736495 | 0.45538 | evaluate.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.