// NOTE: dataset-export table header removed; the Go sources begin below.
package speculative
import (
	"bufio"
	"fmt"
	"log"
	"os"
	"reflect"
	"strconv"
	"strings"

	sp "github.com/sensssz/spinner"
)
// min returns the smaller of the two integers.
func min(num1 int, num2 int) int {
	if num2 < num1 {
		return num2
	}
	return num1
}
// max returns the larger of the two integers.
func max(num1 int, num2 int) int {
	if num2 > num1 {
		return num2
	}
	return num1
}
// nonNegative clamps num from below at zero: negative inputs become 0.
func nonNegative(num int) int {
	if num <= 0 {
		return 0
	}
	return num
}
// Prediction represents a prediction on how *all* the parameters
// of a query are calculated.
type Prediction struct {
	QueryID  int         // template ID of the predicted query
	ParamOps []Operation // one operation per query parameter
	HitCount int         // number of observed matches for this prediction
	IsRandom bool        // true when any parameter operation is a RandomOperation
}
// NewPrediction creates a new Prediction object. The prediction is flagged
// as random when any of its parameter operations is a RandomOperation.
func NewPrediction(queryID int, parameters []Operation) *Prediction {
	prediction := Prediction{queryID, parameters, 0, false}
	for _, param := range parameters {
		// One random parameter makes the whole prediction random; stop early.
		// (The original used `break` inside a `switch`, which in Go only
		// exits the switch, so the loop kept scanning after the flag was set.)
		if _, ok := param.(RandomOperation); ok {
			prediction.IsRandom = true
			break
		}
	}
	return &prediction
}
// NewRandomPrediction creates a prediction whose every parameter is random.
func NewRandomPrediction(queryID int, numOps int) *Prediction {
	ops := make([]Operation, numOps)
	for i := range ops {
		ops[i] = RandomOperation{}
	}
	return &Prediction{queryID, ops, 0, true}
}
// Hit increases the HitCount of this prediction by one.
func (prediction *Prediction) Hit() {
	prediction.HitCount++
}
// MatchesQuery reports whether this prediction perfectly matches the given
// query in the context of the transaction trx observed so far. A random
// prediction matches any query with the same QueryID.
//
// NOTE(review): ParamOps is indexed by the query's argument positions and is
// assumed to hold at least len(query.Arguments) entries — confirm callers
// keep the two in sync, otherwise the loop panics.
func (prediction *Prediction) MatchesQuery(trx []*Query, query *Query) bool {
	if prediction.QueryID != query.QueryID {
		return false
	}
	if prediction.IsRandom {
		return true
	}
	// Every argument must be reproduced by its predicted operation.
	for i := 0; i < len(query.Arguments); i++ {
		if !prediction.ParamOps[i].MatchesValue(trx, query.Arguments[i]) {
			return false
		}
	}
	return true
}
// PredictionTrees contains all the trees for prediction, keyed by the
// QueryID of the transaction's first query.
type PredictionTrees struct {
	trees map[int]*Node
}

// NewPredictionTrees creates a new, empty set of prediction trees.
func NewPredictionTrees() *PredictionTrees {
	return &PredictionTrees{make(map[int]*Node)}
}
// Predictor does prediction using the prediction trees.
type Predictor struct {
	pt          *PredictionTrees // the shared prediction trees
	newTrx      bool             // true when the next query starts a new transaction
	currentNode *Node            // position in the active tree; nil when lost
	currentTrx  []*Query         // queries observed so far in the current transaction
	queryParser *QueryParser
	manager     QueryManager // maps query IDs to SQL templates
}
// PrintCurrentTree prints out the tree in a pretty format.
func (pt *Predictor) PrintCurrentTree() {
	// Climb to the root first so the whole tree is printed.
	root := pt.currentNode
	for root.Parent != nil {
		root = root.Parent
	}
	fmt.Println(root.ToString())
}
// PredictNextSQL returns the most probable next query rendered as SQL, or
// the empty string when no prediction is available.
func (pt *Predictor) PredictNextSQL() string {
	if query := pt.PredictNextQuery(); query != nil {
		return fillTemplate(query.QueryID, pt.manager, query.Arguments)
	}
	return ""
}
// PredictNextQuery returns the most possible next query, or nil when there
// is no usable prediction: no current position, the winning template is not
// a SELECT, or the best candidate prediction is random.
func (pt *Predictor) PredictNextQuery() *Query {
	if pt.currentNode == nil ||
		len(pt.currentNode.Children) == 0 {
		pt.currentNode = nil
		return nil
	}
	// First pass: find the QueryID that appears most often among children.
	// queryFrequencies[-1] is never written, so the first child always beats
	// the initial sentinel.
	queryFrequencies := make(map[int]int)
	mostFrequentQuery := -1
	for _, child := range pt.currentNode.Children {
		queryID := child.Payload.(*Prediction).QueryID
		queryFrequencies[queryID]++
		if queryFrequencies[queryID] > queryFrequencies[mostFrequentQuery] {
			mostFrequentQuery = queryID
		}
	}
	// Only speculate on reads.
	if !strings.HasPrefix(pt.manager.GetTemplate(mostFrequentQuery), "SELECT") {
		return nil
	}
	// Second pass: among non-random children, keep the one with the highest
	// HitCount.
	// NOTE(review): the condition replaces maxHitChild whenever the current
	// holder's QueryID differs from mostFrequentQuery without checking the
	// candidate's own QueryID — confirm this selection is intended.
	var maxHitChild *Node
	maxHitChild = nil
	for _, child := range pt.currentNode.Children {
		if !child.Payload.(*Prediction).IsRandom &&
			(maxHitChild == nil ||
				maxHitChild.Payload.(*Prediction).QueryID != mostFrequentQuery ||
				maxHitChild.Payload.(*Prediction).HitCount < child.Payload.(*Prediction).HitCount) {
			maxHitChild = child
		}
	}
	// All children were random: repeat the scan without the IsRandom filter.
	if maxHitChild == nil {
		maxHitChild = pt.currentNode.Children[0]
		for _, child := range pt.currentNode.Children {
			if maxHitChild.Payload.(*Prediction).QueryID != mostFrequentQuery ||
				maxHitChild.Payload.(*Prediction).HitCount < child.Payload.(*Prediction).HitCount {
				maxHitChild = child
			}
		}
	}
	prediction := maxHitChild.Payload.(*Prediction)
	if prediction.IsRandom {
		return nil
	}
	// Materialize the predicted argument values from the transaction so far.
	arguments := make([]interface{}, len(prediction.ParamOps))
	for i, paramOp := range prediction.ParamOps {
		arguments[i] = paramOp.GetValue(pt.currentTrx)
	}
	return &Query{prediction.QueryID, [][]interface{}{}, arguments, true}
}
// MoveToNext advances the predictor to the given observed query, updating
// the current transaction and the position in the prediction tree.
func (pt *Predictor) MoveToNext(query *Query) {
	sql := query.GetSQL(pt.manager)
	// BEGIN/COMMIT delimit transactions: reset and wait for the first real
	// query of the next transaction.
	if sql == "BEGIN" || sql == "COMMIT" {
		pt.currentTrx = []*Query{}
		pt.currentNode = nil
		pt.newTrx = true
	}
	// First query of a new transaction: enter (or lazily create) the tree
	// rooted at this query.
	if pt.currentNode == nil && pt.newTrx {
		pt.newTrx = false
		pt.currentTrx = append(pt.currentTrx, query)
		pt.currentNode = pt.pt.GetTreeWithRoot(query.QueryID, len(query.Arguments))
		return
	}
	// We fell off the tree earlier in this transaction: stay lost until it
	// ends.
	if pt.currentNode == nil {
		return
	}
	pt.currentTrx = append(pt.currentTrx, query)
	// Descend to the matching child with the highest HitCount; currentNode
	// remains nil when no child matches.
	children := pt.currentNode.Children
	pt.currentNode = nil
	for _, child := range children {
		prediction := child.Payload.(*Prediction)
		if !prediction.MatchesQuery(pt.currentTrx, query) {
			continue
		}
		if pt.currentNode == nil ||
			prediction.HitCount > pt.currentNode.Payload.(*Prediction).HitCount {
			pt.currentNode = child
		}
	}
}
// PrintCurrntTrx prints the query templates of the current transaction.
// (sic: the exported name carries a typo; kept for backward compatibility.)
func (pt *Predictor) PrintCurrntTrx() {
	for _, query := range pt.currentTrx {
		fmt.Printf("%d, %s\n", query.QueryID, pt.manager.GetTemplate(query.QueryID))
	}
}
// EndTransaction resets the predictor state so the next query is treated as
// the start of a fresh transaction.
func (pt *Predictor) EndTransaction() {
	pt.newTrx = true
	pt.currentTrx = []*Query{}
	pt.currentNode = nil
}
// NewPredictor creates a Predictor backed by this set of prediction trees.
func (pt *PredictionTrees) NewPredictor(manager QueryManager) *Predictor {
	return &Predictor{pt, true, nil, []*Query{}, NewQueryParser(manager), manager}
}
// GetTreeWithRoot returns the tree rooted at the given query, creating a
// random-prediction root node on first use.
func (pt *PredictionTrees) GetTreeWithRoot(queryID int, numOps int) *Node {
	if tree, ok := pt.trees[queryID]; ok && tree != nil {
		return tree
	}
	tree := NewNode(NewRandomPrediction(queryID, numOps), nil)
	pt.trees[queryID] = tree
	return tree
}
// ModelBuilder takes in a workload trace and generates a prediction
// model from it.
type ModelBuilder struct {
	QuerySet     *QuerySet    // distinct query templates seen in the trace
	Queries      []*Query     // all parsed queries, in trace order
	Transactions [][]*Query   // queries grouped into transactions
	Clusters     [][][]*Query // transactions grouped by identical query-ID signature
}
// NewModelBuilder creates a ModelBuilder from the workload trace file at
// path: queries are parsed, split into transactions, and clustered by
// query-ID signature.
func NewModelBuilder(path string) *ModelBuilder {
	builder := &ModelBuilder{NewQuerySet(), []*Query{}, [][]*Query{}, [][][]*Query{}}
	builder.parseQueriesFromFile(path)
	builder.splitTransactions(true)
	builder.clusterTransactions()
	return builder
}
// NewModelBuilderFromContent creates a ModelBuilder from an in-memory,
// newline-separated query trace. The pipeline matches NewModelBuilder:
// parse, split into transactions, cluster.
func NewModelBuilderFromContent(queries string) *ModelBuilder {
	builder := &ModelBuilder{NewQuerySet(), []*Query{}, [][]*Query{}, [][][]*Query{}}
	builder.parseQueries(queries)
	builder.splitTransactions(true)
	builder.clusterTransactions()
	return builder
}
// parseQueriesFromFile parses all queries from the workload trace at path,
// appending them to builder.Queries. The process exits via log.Fatal when
// the file cannot be opened.
func (builder *ModelBuilder) parseQueriesFromFile(path string) {
	queryFile, err := os.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	defer queryFile.Close()
	scanner := bufio.NewScanner(queryFile)
	queryParser := NewQueryParser(builder.QuerySet)
	spinner := sp.NewSpinnerWithProgress(19, "Parsing query %d...", -1)
	// Adjust the capacity to your need (max characters in a line).
	const maxCapacity = 1024 * 1024 * 1024
	buf := make([]byte, maxCapacity)
	scanner.Buffer(buf, maxCapacity)
	spinner.SetCompletionMessage("All queries parsed.")
	spinner.Start()
	i := 0
	for scanner.Scan() {
		spinner.UpdateProgress(i)
		i++
		line := scanner.Text()
		// Skip blank/trivial lines rather than feeding them to the parser.
		if len(line) <= 1 {
			continue
		}
		builder.Queries = append(builder.Queries, queryParser.ParseQuery(line))
	}
	spinner.Stop()
}
// parseQueries parses a newline-separated query trace into builder.Queries.
func (builder *ModelBuilder) parseQueries(queries string) {
	queryParser := NewQueryParser(builder.QuerySet)
	for _, line := range strings.Split(queries, "\n") {
		// Skip blank/trivial lines (e.g. the empty tail produced by a
		// trailing newline); this matches parseQueriesFromFile, which the
		// original version of this function did not.
		if len(line) <= 1 {
			continue
		}
		builder.Queries = append(builder.Queries, queryParser.ParseQuery(line))
	}
}
// queryIs reports whether the query's SQL text equals sql exactly.
func (builder *ModelBuilder) queryIs(query *Query, sql string) bool {
	return query.GetSQL(builder.QuerySet) == sql
}
// trxEnds reports whether query terminates the current transaction: COMMIT
// for a BEGIN-started transaction, otherwise a new BEGIN.
func (builder *ModelBuilder) trxEnds(query *Query, startsWithBegin bool) bool {
	if startsWithBegin {
		return builder.queryIs(query, "COMMIT")
	}
	return builder.queryIs(query, "BEGIN")
}
// moveToNextQuery advances *queryIndex past the current transaction
// delimiter and updates *startsWithBegin for the next transaction. It
// returns true on success and false when the end of the queries is reached.
func (builder *ModelBuilder) moveToNextQuery(queryIndex *int, startsWithBegin *bool) bool {
	query := builder.Queries[*queryIndex]
	if builder.queryIs(query, "COMMIT") {
		// Skip the COMMIT, then a following BEGIN if one is present.
		(*queryIndex)++
		if *queryIndex >= len(builder.Queries) {
			return false
		}
		*startsWithBegin = builder.queryIs(builder.Queries[*queryIndex], "BEGIN")
		if *startsWithBegin {
			(*queryIndex)++
		}
	} else {
		// The terminator was a BEGIN: it opens the next transaction.
		*startsWithBegin = true
		(*queryIndex)++
	}
	return true
}
// splitTransactions groups builder.Queries into builder.Transactions.
// If clusterSingle is true, consecutive single-query transactions (queries
// not wrapped in BEGIN/COMMIT) are also recorded as transactions.
func (builder *ModelBuilder) splitTransactions(clusterSingle bool) {
	currentTrx := []*Query{}
	startsWithBegin := builder.queryIs(builder.Queries[0], "BEGIN")
	queryIndex := 0
	if startsWithBegin {
		queryIndex++
	}
	for queryIndex < len(builder.Queries) {
		query := builder.Queries[queryIndex]
		if builder.trxEnds(query, startsWithBegin) {
			if len(currentTrx) > 0 && (startsWithBegin || clusterSingle) {
				builder.Transactions = append(builder.Transactions, currentTrx)
			}
			currentTrx = make([]*Query, 0)
			if !builder.moveToNextQuery(&queryIndex, &startsWithBegin) {
				break
			}
		} else {
			currentTrx = append(currentTrx, query)
			queryIndex++
		}
	}
	// Flush a trailing transaction that never saw its terminator.
	if len(currentTrx) > 0 {
		builder.Transactions = append(builder.Transactions, currentTrx)
	}
}
// trxToString builds a signature for a transaction by joining the decimal
// QueryIDs of its queries with commas, e.g. "3,17,3".
func (builder *ModelBuilder) trxToString(trx []*Query) string {
	idStrings := make([]string, len(trx))
	for i, query := range trx {
		// string(int) would interpret the ID as a Unicode code point
		// (go vet flags it); strconv.Itoa yields the intended decimal form.
		idStrings[i] = strconv.Itoa(query.QueryID)
	}
	return strings.Join(idStrings, ",")
}
// clusterTransactions groups transactions sharing the same query-ID
// signature into builder.Clusters.
func (builder *ModelBuilder) clusterTransactions() {
	grouped := make(map[string][][]*Query)
	for _, trx := range builder.Transactions {
		key := builder.trxToString(trx)
		grouped[key] = append(grouped[key], trx)
	}
	builder.Clusters = make([][][]*Query, 0, len(grouped))
	for _, cluster := range grouped {
		builder.Clusters = append(builder.Clusters, cluster)
	}
}
// enumerateConstOperand records the literal argument values of query as
// ConstOperands, appending one slice of numeric and one slice of string
// operands (one pair per query) to the accumulators.
func (builder *ModelBuilder) enumerateConstOperand(query *Query, numOpsAllQueries *[][]Operand, strOpsAllQueries *[][]Operand) {
	numOps := make([]Operand, 0, len(query.Arguments))
	strOps := make([]Operand, 0, len(query.Arguments))
	for _, arg := range query.Arguments {
		op := ConstOperand{arg}
		// Route by runtime type; other types are ignored.
		switch arg.(type) {
		case string:
			strOps = append(strOps, op)
		case float64:
			numOps = append(numOps, op)
		}
	}
	*numOpsAllQueries = append(*numOpsAllQueries, numOps)
	*strOpsAllQueries = append(*strOpsAllQueries, strOps)
}
// enumerateResultOperand records each cell of the query's result set as a
// QueryResultOperand, but only when the result has exactly one row (a
// multi-row result is ambiguous as a single-value source).
func (builder *ModelBuilder) enumerateResultOperand(queryIndex int, query *Query, numOps *[]Operand, strOps *[]Operand) {
	if len(query.ResultSet) != 1 {
		return
	}
	for j, cell := range query.ResultSet[0] {
		op := QueryResultOperand{query.QueryID, queryIndex, 0, j}
		switch cell.(type) {
		case string:
			*strOps = append(*strOps, op)
		case float64:
			*numOps = append(*numOps, op)
		}
	}
}
// enumerateAggregationOperand emits one AggregationOperand per
// (numeric result column, aggregator) pair for the query at queryIndex.
// Only the first result row is inspected to decide which columns are numeric.
func (builder *ModelBuilder) enumerateAggregationOperand(queryIndex int, query *Query, numOps *[]Operand, aggregators []Aggregator) {
	if len(query.ResultSet) == 0 {
		return
	}
	for i, cell := range query.ResultSet[0] {
		// Aggregations only apply to numeric columns. (The original used a
		// switch with a no-op `break` plus a `default: continue` to express
		// this type check; a plain assertion is equivalent and clearer.)
		if _, ok := cell.(float64); !ok {
			continue
		}
		for _, aggregator := range aggregators {
			aggregation := AggregationOperand{queryIndex, aggregator, i}
			*numOps = append(*numOps, aggregation)
		}
	}
}
// enumerateArgumentOperand emits a QueryArgumentOperand for every string or
// numeric argument of the query at queryIndex, routed by argument type.
func (builder *ModelBuilder) enumerateArgumentOperand(queryIndex int, query *Query, numOps *[]Operand, strOps *[]Operand) {
	for i := range query.Arguments {
		op := QueryArgumentOperand{query.QueryID, queryIndex, i}
		switch query.Arguments[i].(type) {
		case float64:
			*numOps = append(*numOps, op)
		case string:
			*strOps = append(*strOps, op)
		}
	}
}
// enumerateArgumentListOperand records an ArgumentListOperand for every
// non-empty set-valued argument, typed by the set's first element.
// NOTE(review): assumes each UnorderedSet holds homogeneously typed
// elements — confirm with the set's producers.
func (builder *ModelBuilder) enumerateArgumentListOperand(queryIndex int, query *Query, numLists *[]Operand, strLists *[]Operand) {
	for i, arg := range query.Arguments {
		if set, ok := arg.(*UnorderedSet); ok {
			if set.Size() == 0 {
				continue
			}
			op := ArgumentListOperand{query.QueryID, queryIndex, i}
			switch set.Elements()[0].(type) {
			case string:
				*strLists = append(*strLists, op)
			case float64:
				*numLists = append(*numLists, op)
			}
		}
	}
}
// getColumnType returns the reflect.Kind of the first non-nil value found
// in the given result column, or the zero Kind (reflect.Invalid) when the
// column holds only nils (or no rows exist).
func (builder *ModelBuilder) getColumnType(query *Query, columnIndex int) reflect.Kind {
	for _, row := range query.ResultSet {
		if cell := row[columnIndex]; cell != nil {
			return reflect.TypeOf(cell).Kind()
		}
	}
	return reflect.Invalid
}
// enumerateColumnListOperand records a ColumnListOperand per result column,
// typed by the first non-nil value found in that column.
func (builder *ModelBuilder) enumerateColumnListOperand(queryIndex int, query *Query, numLists *[]Operand, strLists *[]Operand) {
	if len(query.ResultSet) == 0 {
		return
	}
	// The first row fixes the column count.
	firstRow := query.ResultSet[0]
	for i := 0; i < len(firstRow); i++ {
		kind := builder.getColumnType(query, i)
		op := ColumnListOperand{query.QueryID, queryIndex, i}
		switch kind {
		case reflect.String:
			*strLists = append(*strLists, op)
		case reflect.Float64:
			*numLists = append(*numLists, op)
		}
	}
}
// enumerateAllOperands collects every operand source for the query at
// queryIndex: single-value operands (result cells, arguments) are appended
// to the query's existing per-query slices (created earlier by
// enumerateConstOperand), while list operands get fresh per-query slices.
func (builder *ModelBuilder) enumerateAllOperands(queryIndex int, query *Query, numOpsAllQueries *[][]Operand, strOpsAllQueries *[][]Operand, numListOpsAllQueries *[][]Operand, strListOpsAllQueries *[][]Operand) {
	numOps := []Operand{}
	strOps := []Operand{}
	numListOps := []Operand{}
	strListOps := []Operand{}
	builder.enumerateResultOperand(queryIndex, query, &numOps, &strOps)
	builder.enumerateArgumentOperand(queryIndex, query, &numOps, &strOps)
	// builder.enumerateAggregationOperand(queryIndex, query, Aggregators, &numOps)
	builder.enumerateArgumentListOperand(queryIndex, query, &numListOps, &strListOps)
	builder.enumerateColumnListOperand(queryIndex, query, &numListOps, &strListOps)
	// For numOps and strOps, the slice for the query at queryIndex has been inserted at enumerateConstOperands
	(*numOpsAllQueries)[queryIndex] = append((*numOpsAllQueries)[queryIndex], numOps...)
	(*strOpsAllQueries)[queryIndex] = append((*strOpsAllQueries)[queryIndex], strOps...)
	*numListOpsAllQueries = append(*numListOpsAllQueries, numListOps)
	*strListOpsAllQueries = append(*strListOpsAllQueries, strListOps)
}
// searchForUnaryOps searches for unary operations that reproduce the
// columnIndex-th parameter of the queryIndex-th query across ALL supplied
// transactions. Operands are scanned from the most recent query backwards.
// When nothing matches, a single RandomOperation is returned as a fallback.
func (builder *ModelBuilder) searchForUnaryOps(transactions [][]*Query, operands [][]Operand, queryIndex int, columnIndex int) []Operation {
	unaryOperations := make([]Operation, 0, len(operands))
	for i := len(operands) - 1; i >= 0; i-- {
		for j := 0; j < len(operands[i]); j++ {
			operand := operands[i][j]
			matches := true
			// The operand must predict the target argument in every transaction.
			for trxIndex := 0; trxIndex < len(transactions); trxIndex++ {
				targetQuery := transactions[trxIndex][queryIndex]
				if !valueEqual(operand.GetValue(transactions[trxIndex]), targetQuery.Arguments[columnIndex]) {
					matches = false
					break
				}
			}
			if matches {
				unaryOperations = append(unaryOperations, UnaryOperation{operand})
			}
		}
	}
	if len(unaryOperations) == 0 {
		unaryOperations = append(unaryOperations, RandomOperation{})
	}
	return unaryOperations
}
// enumeratePredictionsFromParaOps expands the per-parameter operation lists
// into one Prediction per combination. numCombis is the product of the list
// lengths; zero (some parameter has no candidates) yields no predictions.
func (builder *ModelBuilder) enumeratePredictionsFromParaOps(paraOps [][]Operation, queryID int) []*Prediction {
	var numCombis int64
	numCombis = 1
	for _, para := range paraOps {
		numCombis *= int64(len(para))
		if numCombis == 0 {
			break
		}
	}
	predictions := make([]*Prediction, 0, numCombis)
	if numCombis > 0 {
		currentCombi := []Operation{}
		builder.operationCombinations(paraOps, queryID, 0, currentCombi, &predictions)
	}
	return predictions
}
// operationCombinations recursively enumerates every combination of one
// operation per parameter and appends a Prediction for each complete
// combination to allPredictions.
func (builder *ModelBuilder) operationCombinations(paraOps [][]Operation, queryID int, paraIndex int, currentCombi []Operation, allPredictions *[]*Prediction) {
	if paraIndex >= len(paraOps) {
		// Snapshot the combination: currentCombi's backing array is shared
		// with sibling branches, so storing it directly would let a later
		// append overwrite elements of an already-recorded prediction.
		combi := make([]Operation, len(currentCombi))
		copy(combi, currentCombi)
		*allPredictions = append(*allPredictions, NewPrediction(queryID, combi))
		return
	}
	for i := 0; i < len(paraOps[paraIndex]); i++ {
		builder.operationCombinations(paraOps, queryID, paraIndex+1, append(currentCombi, paraOps[paraIndex][i]), allPredictions)
	}
}
// collapseArgOperand follows a chain of argument-style operands back to its
// ultimate source: if the target argument of op was itself predicted from an
// earlier operand, that earlier operand is returned instead. op is returned
// unchanged when the chain cannot be followed (no matching ancestor, or the
// target's prediction is random).
func (builder *ModelBuilder) collapseArgOperand(parent *Node, parentLevel int, op Operand) Operand {
	if parent == nil {
		return op
	}
	targetQueryIndex := 0
	targetArgIndex := 0
	// Extract the (query, argument) the operand points at.
	switch op.(type) {
	case QueryArgumentOperand:
		argOp := op.(QueryArgumentOperand)
		targetQueryIndex = argOp.QueryIndex
		targetArgIndex = argOp.ArgIndex
	case ArgumentListOperand:
		argOp := op.(ArgumentListOperand)
		targetQueryIndex = argOp.QueryIndex
		targetArgIndex = argOp.ArgIndex
	}
	// Climb the tree until we reach the node at the operand's query level.
	for parentLevel > targetQueryIndex {
		parent = parent.Parent
		parentLevel--
		if parent == nil {
			return op
		}
	}
	prediction := parent.Payload.(*Prediction)
	argOperation := prediction.ParamOps[targetArgIndex]
	if _, ok := argOperation.(RandomOperation); ok {
		// Prediction for the target arg is random, no need to collapse.
		return op
	}
	argOperand := argOperation.(UnaryOperation).Operand
	// Keep following argument-to-argument chains recursively.
	if _, ok := argOperand.(QueryArgumentOperand); ok {
		return builder.collapseArgOperand(parent, parentLevel, argOperand)
	}
	if _, ok := argOperand.(ArgumentListOperand); ok {
		return builder.collapseArgOperand(parent, parentLevel, argOperand)
	}
	return argOperand
}
// collapseOperands rewrites argument-style operands to their ultimate
// sources (see collapseArgOperand) and then deduplicates the operand pools,
// scanning from the most recent query backwards so the later occurrence of
// a duplicate operand is the one kept.
func (builder *ModelBuilder) collapseOperands(parent *Node, parentLevel int, operands [][]Operand) [][]Operand {
	// First pass: collapse in place.
	for i := len(operands) - 1; i >= 0; i-- {
		ops := operands[i]
		for j, op := range ops {
			switch op.(type) {
			case QueryArgumentOperand, ArgumentListOperand:
				ops[j] = builder.collapseArgOperand(parent, parentLevel, op)
			}
		}
	}
	// Second pass: drop operands already seen in a more recent query.
	seen := make(map[Operand]bool)
	deduplicatedOps := make([][]Operand, len(operands))
	for i := len(operands) - 1; i >= 0; i-- {
		kept := make([]Operand, 0, len(operands[i]))
		for _, op := range operands[i] {
			if seen[op] {
				continue
			}
			seen[op] = true
			kept = append(kept, op)
		}
		deduplicatedOps[i] = kept
	}
	return deduplicatedOps
}
// enumeratePredictionsForQuery builds candidate prediction nodes (children
// of parent) for the query at queryIndex, using the supplied operand pools.
// Argument-chained operands are first collapsed to their ultimate sources.
// When no combination can be enumerated, a single random prediction is used.
func (builder *ModelBuilder) enumeratePredictionsForQuery(parent *Node, transactions [][]*Query, queryIndex int, numOps [][]Operand, strOps [][]Operand, numListOps [][]Operand, strListOps [][]Operand) []*Node {
	query := transactions[0][queryIndex]
	numOps = builder.collapseOperands(parent, queryIndex-1, numOps)
	strOps = builder.collapseOperands(parent, queryIndex-1, strOps)
	numListOps = builder.collapseOperands(parent, queryIndex-1, numListOps)
	strListOps = builder.collapseOperands(parent, queryIndex-1, strListOps)
	opsForArgs := make([][]Operation, len(query.Arguments))
	for i, arg := range query.Arguments {
		// Pick the operand pool matching this argument's type; an empty-set
		// argument leaves candidateOps nil.
		var candidateOps [][]Operand
		switch arg.(type) {
		case float64:
			candidateOps = numOps
		case string:
			candidateOps = strOps
		case *UnorderedSet:
			if len(arg.(*UnorderedSet).Elements()) == 0 {
				break
			}
			switch arg.(*UnorderedSet).Elements()[0].(type) {
			case float64:
				candidateOps = numListOps
			case string:
				candidateOps = strListOps
			}
		}
		ops := builder.searchForUnaryOps(transactions, candidateOps, queryIndex, i)
		opsForArgs[i] = append(opsForArgs[i], ops...)
	}
	predictions := builder.enumeratePredictionsFromParaOps(opsForArgs, query.QueryID)
	if len(predictions) == 0 {
		predictions = append(predictions, NewRandomPrediction(query.QueryID, len(query.Arguments)))
	}
	nodes := make([]*Node, len(predictions))
	for i, prediction := range predictions {
		nodes[i] = NewNode(prediction, parent)
	}
	return nodes
}
// UpdateModel updates the model using the supplied transactions.
func (builder *ModelBuilder) UpdateModel(transactions [][]*Query, pt *PredictionTrees) {
exampleTrx := transactions[0]
numTrx := min(10, len(transactions))
firstTen := transactions[:numTrx]
numOpsAllQueries := [][]Operand{}
strOpsAllQueries := [][]Operand{}
numListOpsAllQueries := [][]Operand{}
strListOpsAllQueries := [][]Operand{}
root := pt.GetTreeWithRoot(exampleTrx[0].QueryID, len(exampleTrx[0].Arguments))
currentLevel := []*Node{root}
nextLevel := []*Node{}
builder.enumerateConstOperand(exampleTrx[0], &numOpsAllQueries, &strOpsAllQueries)
builder.enumerateAllOperands(0, exampleTrx[0], &numOpsAllQueries, &strOpsAllQueries, &numListOpsAllQueries, &strListOpsAllQueries)
for index, query := range exampleTrx[1:] {
i := index + 1
builder.enumerateConstOperand(query, &numOpsAllQueries, &strOpsAllQueries)
for _, node := range currentLevel {
predictionsForThisQuery := node.FilterChildren(func(payload interface{}) bool {
if prediction, ok := payload.(*Prediction); ok {
return prediction.QueryID == query.QueryID
}
return false
})
if len(predictionsForThisQuery) == 0 {
numOpsLen := len(numOpsAllQueries)
strOpsLen := len(strOpsAllQueries)
numListOpsLen := len(numListOpsAllQueries)
strListOpsLen := len(strListOpsAllQueries)
lastN := 7
numOpsLastN := numOpsAllQueries[nonNegative(numOpsLen-lastN):numOpsLen]
strOpsLastN := strOpsAllQueries[nonNegative(strOpsLen-lastN):strOpsLen]
numListOpsLastN := numListOpsAllQueries[nonNegative(numListOpsLen-lastN):numListOpsLen]
strListOpsLastN := strListOpsAllQueries[nonNegative(strListOpsLen-lastN):strListOpsLen]
predictionsForThisQuery = builder.enumeratePredictionsForQuery(node, firstTen, i, numOpsLastN, strOpsLastN, numListOpsLastN, strListOpsLastN)
node.AddChildren(predictionsForThisQuery)
}
matchedPredictions := make([]*Node, 0, len(predictionsForThisQuery))
for _, node := range predictionsForThisQuery {
hits := false
prediction := node.Payload.(*Prediction)
for _, trx := range transactions {
if prediction.MatchesQuery(trx, trx[i]) {
hits = true
prediction.Hit()
}
}
if hits {
matchedPredictions = append(matchedPredictions, node)
}
}
if len(matchedPredictions) == 0 {
newChild := []*Node{NewNode(NewRandomPrediction(i, len(query.Arguments)), node)}
node.AddChildren(newChild)
matchedPredictions = append(matchedPredictions, newChild[0])
}
nextLevel = append(nextLevel, matchedPredictions...)
if len(nextLevel) > 10000 {
break
}
}
currentLevel = nextLevel
nextLevel = []*Node{}
builder.enumerateAllOperands(i, exampleTrx[i], &numOpsAllQueries, &strOpsAllQueries, &numListOpsAllQueries, &strListOpsAllQueries)
}
} | prediction.go | 0.682679 | 0.495606 | prediction.go | starcoder |
package plain
import (
"bytes"
"image"
"image/draw"
"image/gif"
"image/jpeg"
"image/png"
"io"
"github.com/kpacha/treemap"
)
// NewPNG returns the image of the received tree encoded as a PNG.
func NewPNG(tree *treemap.Block, width, height float64) (io.WriterTo, error) {
	return newEncoder(tree, width, height, pngEncode)
}

// NewJPEG returns the image of the received tree encoded as a JPEG
// (default encoder options).
func NewJPEG(tree *treemap.Block, width, height float64) (io.WriterTo, error) {
	return newEncoder(tree, width, height, jpegEncode)
}

// NewGIF returns the image of the received tree encoded as a GIF
// (default encoder options).
func NewGIF(tree *treemap.Block, width, height float64) (io.WriterTo, error) {
	return newEncoder(tree, width, height, gifEncode)
}
// encodeFunc encodes an image to w in one concrete format.
type encodeFunc func(io.Writer, image.Image) error

func pngEncode(w io.Writer, i image.Image) error  { return png.Encode(w, i) }
func jpegEncode(w io.Writer, i image.Image) error { return jpeg.Encode(w, i, nil) }
func gifEncode(w io.Writer, i image.Image) error  { return gif.Encode(w, i, nil) }
// newEncoder renders the tree to an image and encodes it with enc,
// returning an in-memory buffer ready to be written out.
func newEncoder(block *treemap.Block, width, height float64, enc encodeFunc) (io.WriterTo, error) {
	rectImage, err := Image(block, width, height)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := enc(&buf, rectImage); err != nil {
		return nil, err
	}
	return &buf, nil
}
// Image returns an image of the tree using a vertical projection of the
// treemap, scaled so the root block fills width x height pixels and
// centered on the origin (see bounds).
func Image(tree *treemap.Block, width, height float64) (image.Image, error) {
	dst := image.NewRGBA(bounds(width, height))
	if err := drawSubBlock(tree, dst, image.ZP, treemap.Position{X: width / tree.Width, Y: height / tree.Depth}); err != nil {
		return nil, err
	}
	return dst, nil
}
// drawSubBlock fills b's rectangle (scaled by p and shifted by offset) with
// its color, then recursively draws its children on top.
// NOTE(review): b.Color[2:] assumes the color string carries a 2-character
// prefix — confirm against treemap.Color's expected format.
func drawSubBlock(b *treemap.Block, dst draw.Image, offset image.Point, p treemap.Position) error {
	off := offset.Add(image.Pt(int(b.Position.X*p.X), int(b.Position.Y*p.Y)))
	color, err := treemap.Color(b.Color[2:]).Decode()
	if err != nil {
		return err
	}
	draw.Draw(dst, bounds(b.Width*p.X, b.Depth*p.Y).Add(off), &image.Uniform{color}, image.ZP, draw.Src)
	// Children are positioned relative to this block's offset.
	for _, c := range b.Children {
		if err = drawSubBlock(c, dst, off, p); err != nil {
			return err
		}
	}
	return nil
}
func bounds(x, y float64) image.Rectangle {
return image.Rectangle{
Min: image.Point{X: int(-x / 2), Y: int(-y / 2)},
Max: image.Point{X: int(x / 2), Y: int(y / 2)},
}
} | plain/render.go | 0.848062 | 0.4016 | render.go | starcoder |
package tuple
import (
"math"
"github.com/anolson/rtc/util"
)
const (
	// W component values distinguishing points from vectors.
	pointType  = float64(1)
	vectorType = float64(0)
)

// Tuple represents a position (W == 1) or a direction vector (W == 0).
type Tuple struct {
	X float64
	Y float64
	Z float64
	W float64
}
// New returns a new Tuple with the given components.
func New(x, y, z, w float64) *Tuple {
	return &Tuple{x, y, z, w}
}
// isPoint reports whether t is a point (W == 1).
func (t *Tuple) isPoint() bool {
	return t.W == pointType
}

// isVector reports whether t is a vector (W == 0).
func (t *Tuple) isVector() bool {
	return t.W == vectorType
}
// Equal returns true if a Tuple is approximately equal to another,
// comparing each component with util.Approx's tolerance.
func (t *Tuple) Equal(other *Tuple) bool {
	return util.Approx(t.X, other.X) &&
		util.Approx(t.Y, other.Y) &&
		util.Approx(t.Z, other.Z) &&
		util.Approx(t.W, other.W)
}
// Add returns the component-wise sum of a and b as a new Tuple.
func Add(a, b *Tuple) *Tuple {
	sum := Tuple{a.X + b.X, a.Y + b.Y, a.Z + b.Z, a.W + b.W}
	return &sum
}
// Subtract returns the component-wise difference a - b as a new Tuple.
func Subtract(a, b *Tuple) *Tuple {
	diff := Tuple{a.X - b.X, a.Y - b.Y, a.Z - b.Z, a.W - b.W}
	return &diff
}
// Negate returns a new Tuple with every component subtracted from zero
// (kept as a subtraction, matching the original's signed-zero behavior).
func Negate(t *Tuple) *Tuple {
	return &Tuple{
		X: 0 - t.X,
		Y: 0 - t.Y,
		Z: 0 - t.Z,
		W: 0 - t.W,
	}
}
// Multiply scales every component of t by value, returning a new Tuple.
func Multiply(t *Tuple, value float64) *Tuple {
	return New(t.X*value, t.Y*value, t.Z*value, t.W*value)
}
// Divide returns a new Tuple with every component of t divided by value.
// A zero value yields IEEE-754 Inf/NaN components rather than panicking.
func Divide(t *Tuple, value float64) *Tuple {
	return &Tuple{
		X: t.X / value,
		Y: t.Y / value,
		Z: t.Z / value,
		W: t.W / value,
	}
}
// Magnitude returns the length (Euclidean norm) of the tuple, including
// the W component.
func (t *Tuple) Magnitude() float64 {
	// Direct multiplication is cheaper and at least as accurate as
	// math.Pow(x, 2) for squaring.
	return math.Sqrt(t.X*t.X + t.Y*t.Y + t.Z*t.Z + t.W*t.W)
}
// Normalize converts t to a unit-length tuple by dividing each component by
// the magnitude. A zero tuple yields NaN components (division by zero).
func (t *Tuple) Normalize() *Tuple {
	magnitude := t.Magnitude()
	return &Tuple{
		X: t.X / magnitude,
		Y: t.Y / magnitude,
		Z: t.Z / magnitude,
		W: t.W / magnitude,
	}
}
// Dot calculates the dot product of two tuples (vectors), including the
// W components.
func Dot(a, b *Tuple) float64 {
	sum := a.X * b.X
	sum += a.Y * b.Y
	sum += a.Z * b.Z
	sum += a.W * b.W
	return sum
}
// Cross calculates the cross product of two vectors. The result is built
// with the Vector constructor (defined elsewhere in this package); the W
// components of a and b are not used.
func Cross(a, b *Tuple) *Tuple {
	return Vector(
		(a.Y*b.Z)-(a.Z*b.Y),
		(a.Z*b.X)-(a.X*b.Z),
		(a.X*b.Y)-(a.Y*b.X),
	)
}
// Reflect returns the result of the in vector around the normal
func Reflect(in, normal *Tuple) *Tuple {
dot := Dot(in, normal)
return Subtract(in, Multiply(Multiply(normal, 2), dot))
} | tuple/tuple.go | 0.885186 | 0.637327 | tuple.go | starcoder |
package basic
import "strings"
// FilterMapIONumber is template to generate itself for different combination of data type.
func FilterMapIONumber() string {
return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
// Test : some logic
expectedList := []<OUTPUT_TYPE>{3, 4}
newList := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(notOne<FINPUT_TYPE><FOUTPUT_TYPE>, plusOne<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{1, 2, 3})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, nil)) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, []<INPUT_TYPE>{})) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) bool {
return num != 1
}
`
}
// FilterMapIOStrNumber is template to generate itself for different combination of data type.
func FilterMapIOStrNumber() string {
return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{10}
newList := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(notOne<FINPUT_TYPE><FOUTPUT_TYPE>, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{"one", "ten"})
if newList[0] != expectedList[0] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, nil)) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, []<INPUT_TYPE>{})) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) bool {
return num != "one"
}
`
}
// FilterMapIONumberStr is template to generate itself for different combination of data type.
func FilterMapIONumberStr() string {
return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{"10"}
newList := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(notOne<FINPUT_TYPE><FOUTPUT_TYPE>, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{1, 10})
if newList[0] != expectedList[0] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, nil)) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, []<INPUT_TYPE>{})) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) bool {
return num != 1
}
`
}
// FilterMapIONumberBool is template to generate itself for different combination of data type.
func FilterMapIONumberBool() string {
return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{true, false}
newList := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(notOne<FINPUT_TYPE><FOUTPUT_TYPE>, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{1, 10, 0})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, nil)) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, []<INPUT_TYPE>{})) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) bool {
return num != 1
}
`
}
// FilterMapIOStrBool is template to generate itself for different combination of data type.
func FilterMapIOStrBool() string {
return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{true, false}
newList := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(notOne<FINPUT_TYPE><FOUTPUT_TYPE>, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{"1", "10", "0"})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, nil)) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, []<INPUT_TYPE>{})) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) bool {
return num != "1"
}
`
}
// FilterMapIOBoolNumber returns the generated-test template for FilterMap with
// a bool input type and a numeric output type. The code generator substitutes
// the <FINPUT_TYPE>, <FOUTPUT_TYPE>, <INPUT_TYPE> and <OUTPUT_TYPE> placeholders.
func FilterMapIOBoolNumber() string {
	return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{10, 10}
newList := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(notOne<FINPUT_TYPE><FOUTPUT_TYPE>, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{true, true, false})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, nil)) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, []<INPUT_TYPE>{})) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) bool {
return num == true
}
`
}
// FilterMapIOBoolStr returns the generated-test template for FilterMap with a
// bool input type and a string output type. The code generator substitutes the
// <FINPUT_TYPE>, <FOUTPUT_TYPE>, <INPUT_TYPE> and <OUTPUT_TYPE> placeholders.
func FilterMapIOBoolStr() string {
	return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(t *testing.T) {
// Test : someLogic
expectedList := []<OUTPUT_TYPE>{"10", "10"}
newList := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(notOne<FINPUT_TYPE><FOUTPUT_TYPE>, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>, []<INPUT_TYPE>{true, true, false})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed. expected=%v, actual=%v", expectedList, newList)
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, nil)) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
if len(FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>(nil, nil, []<INPUT_TYPE>{})) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE> failed")
}
reflect.TypeOf("Nandeshwar") // Leaving it here to make use of import reflect
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>(num <INPUT_TYPE>) bool {
return num == true
}
`
}
//**********************************Err***********************************
// FilterMapIONumberErrTest is template to generate itself for different combination of data type.
func FilterMapIONumberErrTest() string {
return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : some logic
var v1 <INPUT_TYPE> = 1
var v2 <INPUT_TYPE> = 2
var v3 <INPUT_TYPE> = 3
var v4 <INPUT_TYPE> = 4
var v5 <INPUT_TYPE> = 5
var vo5 <OUTPUT_TYPE> = 5
var vo6 <OUTPUT_TYPE> = 6
expectedList := []<OUTPUT_TYPE>{vo5, vo6}
newList, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{v1, v4, v5})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, nil)
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>PtrErr failed")
}
_, err := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{v2, v3})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{v3})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err(num <INPUT_TYPE>) (bool, error) {
if num == 2 {
return false, errors.New(" 2 is not valid number for this test")
}
return num != 1, nil
}
func plusOne<FINPUT_TYPE><FOUTPUT_TYPE>Err(num <INPUT_TYPE>) (<OUTPUT_TYPE>, error) {
if num == 3 {
return 0, errors.New("3 is not valid number for this test")
}
c := <OUTPUT_TYPE>(num + 1)
return c, nil
}
`
}
// FilterMapIOStrNumberErrTest returns the generated-test template for the
// error-returning FilterMap variant with a string input type and a numeric
// output type. The code generator substitutes the <FINPUT_TYPE>,
// <FOUTPUT_TYPE>, <INPUT_TYPE> and <OUTPUT_TYPE> placeholders.
func FilterMapIOStrNumberErrTest() string {
	return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
var vo10 <OUTPUT_TYPE> = 10
var vOne <INPUT_TYPE> = "one"
var vTwo <INPUT_TYPE> = "two"
var vThree <INPUT_TYPE> = "three"
var vTen <INPUT_TYPE> = "ten"
expectedList := []<OUTPUT_TYPE>{vo10}
newList, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>StrErr, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>StrErr, []<INPUT_TYPE>{vOne, vTen})
if newList[0] != expectedList[0] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, nil)
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>StrErr, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>StrErr, []<INPUT_TYPE>{vTwo, vThree})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>StrErr, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>StrErr, []<INPUT_TYPE>{vThree})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>StrErr(num <INPUT_TYPE>) (bool, error) {
if num == "two" {
return false, errors.New("Two is not valid for this test")
}
return num != "one", nil
}
func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>StrErr(num string) (<OUTPUT_TYPE>, error) {
var r <OUTPUT_TYPE> = <OUTPUT_TYPE>(0)
if num == "three" {
return 0, errors.New("three is not valid value for this test")
}
if num == "ten" {
r = <OUTPUT_TYPE>(10)
return r, nil
}
return r, nil
}
`
}
// FilterMapIONumberStrErrTest returns the generated-test template for the
// error-returning FilterMap variant with a numeric input type and a string
// output type. The code generator substitutes the <FINPUT_TYPE>,
// <FOUTPUT_TYPE>, <INPUT_TYPE> and <OUTPUT_TYPE> placeholders.
func FilterMapIONumberStrErrTest() string {
	return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
var ov10 <OUTPUT_TYPE> = "10"
var iv1 <INPUT_TYPE> = 1
var iv2 <INPUT_TYPE> = 2
var iv3 <INPUT_TYPE> = 3
var iv10 <INPUT_TYPE> = 10
expectedList := []<OUTPUT_TYPE>{ov10}
newList, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>NumErr, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>NumErr, []<INPUT_TYPE>{iv1, iv10})
if newList[0] != expectedList[0] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, nil)
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>NumErr, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>NumErr, []<INPUT_TYPE>{iv1, iv2, iv10})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>NumErr, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>NumErr, []<INPUT_TYPE>{iv1, iv3, iv10})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>NumErr(num <INPUT_TYPE>) (bool, error) {
if num == 2 {
return false, errors.New("2 is not valid number for this test")
}
return num != 1, nil
}
func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>NumErr(num <INPUT_TYPE>) (<OUTPUT_TYPE>, error) {
var r <OUTPUT_TYPE> = <OUTPUT_TYPE>(0)
if num == 3 {
return "0", errors.New("3 is not valid number for this test")
}
if num == 10 {
r = "10"
return r, nil
}
return r, nil
}
`
}
// FilterMapIONumberBoolErrTest is template to generate itself for different combination of data type.
func FilterMapIONumberBoolErrTest() string {
return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
var vto <OUTPUT_TYPE> = true
var vfo <OUTPUT_TYPE> = false
var vi1 <INPUT_TYPE> = 1
var vi2 <INPUT_TYPE> = 2
var vi3 <INPUT_TYPE> = 3
var vi10 <INPUT_TYPE> = 10
var vi0 <INPUT_TYPE>
expectedList := []<OUTPUT_TYPE>{vto, vfo}
newList, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vi1, vi10, vi0})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, nil)
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>PtrErr failed")
}
_, err := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vi2, vi10, vi0})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vi3, vi10, vi0})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err(num <INPUT_TYPE>) (bool, error) {
if num == 2 {
return false, errors.New("2 is not valid number for this test")
}
return num != 1, nil
}
func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err(num <INPUT_TYPE>) (bool, error) {
if num == 3 {
return false, errors.New("3 is not valid number for this test")
}
r := num > 0
return r, nil
}
`
}
// FilterMapIOStrBoolErrTest returns the generated-test template for the
// error-returning FilterMap variant with a string input type and a bool
// output type. The code generator substitutes the <FINPUT_TYPE>,
// <FOUTPUT_TYPE>, <INPUT_TYPE> and <OUTPUT_TYPE> placeholders.
func FilterMapIOStrBoolErrTest() string {
	return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
var vto <OUTPUT_TYPE> = true
var vfo <OUTPUT_TYPE> = false
var vi1 <INPUT_TYPE> = "1"
var vi2 <INPUT_TYPE> = "2"
var vi3 <INPUT_TYPE> = "3"
var vi10 <INPUT_TYPE> = "10"
var vi0 <INPUT_TYPE> = "0"
expectedList := []<OUTPUT_TYPE>{vto, vfo}
newList, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vi1, vi10, vi0})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, nil)
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vi1, vi10, vi2})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vi1, vi10, vi3})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err(num <INPUT_TYPE>) (bool, error) {
if num == "2" {
return false, errors.New("2 is not valid value for this test")
}
return num != "1", nil
}
func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err(num <INPUT_TYPE>) (bool, error) {
if num == "3" {
return false, errors.New("3 is not valid value for this test")
}
var t bool = true
var f bool = false
if num == "10" {
return t, nil
}
return f, nil
}
`
}
// FilterMapIOBoolNumberErrTest returns the generated-test template for the
// error-returning FilterMap variant with a bool input type and a numeric
// output type. The code generator substitutes the <FINPUT_TYPE>,
// <FOUTPUT_TYPE>, <INPUT_TYPE> and <OUTPUT_TYPE> placeholders.
func FilterMapIOBoolNumberErrTest() string {
	return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
var vo10 <OUTPUT_TYPE> = 10
var vit <INPUT_TYPE> = true
var vif <INPUT_TYPE> = false
expectedList := []<OUTPUT_TYPE>{vo10, vo10}
newList, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vit, vit})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, nil)
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vit, vit, vif})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err2, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vit, vit, vif})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err(num <INPUT_TYPE>) (bool, error) {
if num == false {
return false, errors.New("nil is error in this test")
}
return num == true, nil
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err2(num <INPUT_TYPE>) (bool, error) {
if num == false {
return true, nil
}
return true, nil
}
func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err(num bool) (<OUTPUT_TYPE>, error) {
if num == false {
return 0, errors.New("false is error for this test")
}
var v10 <OUTPUT_TYPE> = 10
var v0 <OUTPUT_TYPE>
if num == true {
return v10, nil
}
return v0, nil
}
`
}
// FilterMapIOBoolStrErrTest returns the generated-test template for the
// error-returning FilterMap variant with a bool input type and a string
// output type. The code generator substitutes the <FINPUT_TYPE>,
// <FOUTPUT_TYPE>, <INPUT_TYPE> and <OUTPUT_TYPE> placeholders.
func FilterMapIOBoolStrErrTest() string {
	return `
func TestFilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(t *testing.T) {
// Test : someLogic
var vo10 <OUTPUT_TYPE> = "10"
var vit <INPUT_TYPE> = true
var vif <INPUT_TYPE> = false
expectedList := []<OUTPUT_TYPE>{vo10, vo10}
newList, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vit, vit})
if newList[0] != expectedList[0] || newList[1] != expectedList[1] {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed. expected=%v, actual=%v", expectedList, newList)
}
r, _ := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, nil)
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
r, _ = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(nil, nil, []<INPUT_TYPE>{})
if len(r) > 0 {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err := FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vit, vit, vif})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
_, err = FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err(notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err2, someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err, []<INPUT_TYPE>{vit, vit, vif})
if err == nil {
t.Errorf("FilterMap<FINPUT_TYPE><FOUTPUT_TYPE>Err failed")
}
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err(num <INPUT_TYPE>) (bool, error) {
if num == false {
return false, errors.New("nil is error in this test")
}
return num == true, nil
}
func notOne<FINPUT_TYPE><FOUTPUT_TYPE>Err2(num <INPUT_TYPE>) (bool, error) {
if num == false {
return false, errors.New("nil is error in this test")
}
return true, nil
}
func someLogic<FINPUT_TYPE><FOUTPUT_TYPE>Err(num bool) (<OUTPUT_TYPE>, error) {
if num == false {
return "", errors.New("false is error in this test")
}
var v10 <OUTPUT_TYPE> = "10"
var v0 <OUTPUT_TYPE> = "0"
if num == true {
return v10, nil
}
return v0, nil
}
`
}
// ReplaceActivityFilterMapIOErr replaces ...
func ReplaceActivityFilterMapIOErr(code string) string {
s1 := `_ "errors"
"reflect"
"testing"
)
func TestFilterMapIntInt64Err(t *testing.T) {`
s2 := `"errors"
"testing"
)
func TestFilterMapIntInt64Err(t *testing.T) {`
code = strings.Replace(code, s1, s2, -1)
return code
} | internal/template/basic/filtermapiotest.go | 0.575707 | 0.415136 | filtermapiotest.go | starcoder |
package leetcode
/**
* @title 设计循环队列
*
* 设计你的循环队列实现。 循环队列是一种线性数据结构,其操作表现基于 FIFO(先进先出)原则并且队尾被连接在队首之后以形成一个循环。它也被称为“环形缓冲器”。
* 循环队列的一个好处是我们可以利用这个队列之前用过的空间。在一个普通队列里,一旦一个队列满了,我们就不能插入下一个元素,即使在队列前面仍有空间。
* 但是使用循环队列,我们能使用这些空间去存储新的值。
*
* 你的实现应该支持如下操作:
* MyCircularQueue(k): 构造器,设置队列长度为 k 。
* Front: 从队首获取元素。如果队列为空,返回 -1 。
* Rear: 获取队尾元素。如果队列为空,返回 -1 。
* enQueue(value): 向循环队列插入一个元素。如果成功插入则返回真。
* deQueue(): 从循环队列中删除一个元素。如果成功删除则返回真。
* isEmpty(): 检查循环队列是否为空。
* isFull(): 检查循环队列是否已满。
*
* 示例:
* MyCircularQueue circularQueue = new MycircularQueue(3); // 设置长度为3
* circularQueue.enQueue(1); // 返回true
* circularQueue.enQueue(2); // 返回true
* circularQueue.enQueue(3); // 返回true
* circularQueue.enQueue(4); // 返回false,队列已满
* circularQueue.Rear(); // 返回3
* circularQueue.isFull(); // 返回true
* circularQueue.deQueue(); // 返回true
* circularQueue.enQueue(4); // 返回true
* circularQueue.Rear(); // 返回4
*
* 提示:
* 所有的值都在 1 至 1000 的范围内;
* 操作数将在 1 至 1000 的范围内;
* 请不要使用内置的队列库。
*
* @see https://leetcode-cn.com/problems/design-circular-queue/description/
* @difficulty Easy
*/
// MyCircularQueue is a fixed-capacity FIFO ring buffer.
// Head and Tail are indices into Data; both are -1 while the queue is empty.
type MyCircularQueue struct {
	Data []int // backing storage of length Size
	Size int   // fixed capacity
	Head int   // index of the front element, -1 when empty
	Tail int   // index of the last element, -1 when empty
}

// Constructor622 creates an empty circular queue with capacity k.
func Constructor622(k int) MyCircularQueue {
	return MyCircularQueue{make([]int, k), k, -1, -1}
}

// EnQueue inserts value at the tail. It returns false when the queue is full.
func (q *MyCircularQueue) EnQueue(value int) bool {
	if q.IsFull() {
		return false
	}
	if q.Head == -1 {
		// First element of an empty queue: the front starts at slot 0.
		q.Head = 0
	}
	if q.Tail == q.Size-1 {
		q.Tail = 0 // wrap around to the start of the buffer
	} else {
		q.Tail++
	}
	q.Data[q.Tail] = value
	return true
}

// DeQueue removes the front element. It returns false when the queue is empty.
func (q *MyCircularQueue) DeQueue() bool {
	if q.IsEmpty() {
		return false
	}
	q.Data[q.Head] = 0 // clear the vacated slot
	switch {
	case q.Head == q.Tail:
		// Removing the only element leaves the queue empty.
		q.Head = -1
		q.Tail = -1
	case q.Head == q.Size-1:
		q.Head = 0 // wrap around to the start of the buffer
	default:
		q.Head++
	}
	return true
}

// Front returns the front element, or -1 if the queue is empty.
func (q *MyCircularQueue) Front() int {
	if q.Head > -1 {
		return q.Data[q.Head]
	}
	return -1
}

// Rear returns the last element, or -1 if the queue is empty.
func (q *MyCircularQueue) Rear() int {
	if q.Tail > -1 {
		return q.Data[q.Tail]
	}
	return -1
}

// IsEmpty reports whether the queue holds no elements.
func (q *MyCircularQueue) IsEmpty() bool {
	return q.Tail == -1
}

// IsFull reports whether the queue holds Size elements, i.e. the tail is
// immediately behind the head (with or without wrap-around).
func (q *MyCircularQueue) IsFull() bool {
	return q.Tail-q.Head == q.Size-1 || q.Head-q.Tail == 1
}
/**
* Your MyCircularQueue object will be instantiated and called as such:
* obj := Constructor(k);
* param_1 := obj.EnQueue(value);
* param_2 := obj.DeQueue();
* param_3 := obj.Front();
* param_4 := obj.Rear();
* param_5 := obj.IsEmpty();
* param_6 := obj.IsFull();
*/ | src/0622.design-circular-queue.go | 0.620966 | 0.681952 | 0622.design-circular-queue.go | starcoder |
package world
import "math"
// Globe is centered at (0,0) with radius 1.0

// InRadian converts degrees to radians when used as a multiplier.
const InRadian = (math.Pi / 180.0)

// InDegree converts radians to degrees when used as a multiplier.
const InDegree = (180.0 / math.Pi)
// ------------------------------------------------------------------------
// Longitude/Latitude => X/Y/Z
// ------------------------------------------------------------------------
// GetXYZFromLonLat converts a longitude(λ)/latitude(φ) pair given in degree
// into XYZ world coordinates on a sphere of the given radius.
func GetXYZFromLonLat(lon_in_degree float32, lat_in_degree float32, radius float32) [3]float32 {
	// Convert the angles to radian and delegate to GetXYZFromLL.
	return GetXYZFromLL(lon_in_degree*InRadian, lat_in_degree*InRadian, radius)
}
// GetXYZFromLL converts a longitude(λ)/latitude(φ) pair given in radian into
// XYZ world coordinates on a sphere of the given radius:
// x = r·cosλ·cosφ, y = r·sinλ·cosφ, z = r·sinφ.
func GetXYZFromLL(lon_in_radian float32, lat_in_radian float32, radius float32) [3]float32 {
	lambda := float64(lon_in_radian) // λ: longitude
	phi := float64(lat_in_radian)    // φ: latitude
	x := float32(math.Cos(lambda) * math.Cos(phi))
	y := float32(math.Sin(lambda) * math.Cos(phi))
	z := float32(math.Sin(phi))
	return [3]float32{radius * x, radius * y, radius * z}
}
// ------------------------------------------------------------------------
// X/Y/Z => Longitude/Latitude/R
// ------------------------------------------------------------------------
// TODO : Compare the implementation of 'getLLFromXYZ()' with the below:
// Ref: https://en.wikipedia.org/wiki/Vector_fields_in_cylindrical_and_spherical_coordinates
// radius = Math.sqrt(x*x + y*y + z*z);
// λ = arctan( y / x ) 0 <= λ <= 2π
// (π - φ) = arccos( z / radius ) 0 <= (π - φ) <= π
// GetLLFromXYZ returns [longitude(λ), latitude(φ), radius] with the angles in
// radian for the given XYZ world coordinates. The zero vector maps to [0, 0, 0].
//
// Fix: longitude and latitude were previously swapped — Atan2(y, x) is the
// longitude and Asin(z/r) is the latitude (see the reference formulas above
// and the inverse function GetXYZFromLL), but the old code assigned them to
// the opposite variables and returned [lat, lon, radius].
func GetLLFromXYZ(x float32, y float32, z float32) [3]float32 {
	radius := math.Sqrt(float64(x*x + y*y + z*z))
	if radius == 0 {
		return [3]float32{0, 0, 0}
	}
	lon := math.Atan2(float64(y), float64(x)) // λ in (-π, π]
	lat := math.Asin(float64(z) / radius)     // φ in [-π/2, π/2]
	return [3]float32{float32(lon), float32(lat), float32(radius)}
}
func GetLonLatFromXYZ(x float32, y float32, z float32) [3]float32 {
// Get longitude(λ)/latitude(φ) in degree + radius from XYZ world coordinates
llr := GetLLFromXYZ(x, y, z)
return [3]float32{llr[0] * InDegree, llr[1] * InDegree, llr[2]}
} | world/geography.go | 0.641085 | 0.580501 | geography.go | starcoder |
package iso20022
// OvernightIndexSwapTransaction3 provides the details of each individual overnight index swap transaction.
type OvernightIndexSwapTransaction3 struct {

	// Defines the status of the reported transaction, that is details on whether the transaction is a new transaction, an amendment of a previously reported transaction, a cancellation of a previously reported transaction or a correction to a previously reported and rejected transaction.
	ReportedTransactionStatus *TransactionOperationType1Code `xml:"RptdTxSts"`

	// Unique and unambiguous legal entity identification of the branch of the reporting agent in which the transaction has been booked.
	//
	// Usage: This field must only be provided if the transaction has been conducted and booked by a branch of the reporting agent and only if this branch has its own LEI that the reporting agent can clearly identify.
	// Where the transaction has been booked by the head office or the reporting agent cannot be identified by a unique branch-specific LEI, the reporting agent must provide the LEI of the head office.
	BranchIdentification *LEIIdentifier `xml:"BrnchId,omitempty"`

	// Unique transaction identifier will be created at the time a transaction is first executed, shared with all registered entities and counterparties involved in the transaction, and used to track that particular transaction during its lifetime.
	UniqueTransactionIdentifier *Max105Text `xml:"UnqTxIdr,omitempty"`

	// Internal unique transaction identifier used by the reporting agent for each transaction.
	ProprietaryTransactionIdentification *Max105Text `xml:"PrtryTxId"`

	// Internal unique proprietary transaction identifier as assigned by the counterparty of the reporting agent for each transaction.
	CounterpartyProprietaryTransactionIdentification *Max105Text `xml:"CtrPtyPrtryTxId,omitempty"`

	// Identification of the counterparty of the reporting agent for the reported transaction.
	CounterpartyIdentification *CounterpartyIdentification2Choice `xml:"CtrPtyId"`

	// Date and time on which the parties entered into the reported transaction.
	//
	// Usage: when time is available, it must be reported.
	//
	// It is to be reported with only the date when the time of the transaction is not available.
	//
	// The reported time is the execution time when available or otherwise the time at which the transaction entered the trading system of the reporting agent.
	TradeDate *DateAndDateTimeChoice `xml:"TradDt"`

	// Represents the date as of which the overnight rate of the floating leg is computed.
	StartDate *ISODate `xml:"StartDt"`

	// Last date of the term over which the compounded overnight rate is calculated.
	MaturityDate *ISODate `xml:"MtrtyDt"`

	// Fixed rate used for the calculation of the overnight index swap pay out.
	FixedInterestRate *Rate2 `xml:"FxdIntrstRate"`

	// Defines whether the fixed interest rate is paid or received by the reporting agent.
	TransactionType *OvernightIndexSwapType1Code `xml:"TxTp"`

	// Notional amount of the overnight index swap.
	TransactionNominalAmount *ActiveCurrencyAndAmount `xml:"TxNmnlAmt"`

	// Additional information that can not be captured in the structured fields and/or any other specific block.
	SupplementaryData []*SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// SetReportedTransactionStatus sets the reported transaction status code.
func (o *OvernightIndexSwapTransaction3) SetReportedTransactionStatus(value string) {
	o.ReportedTransactionStatus = (*TransactionOperationType1Code)(&value)
}

// SetBranchIdentification sets the LEI of the booking branch.
func (o *OvernightIndexSwapTransaction3) SetBranchIdentification(value string) {
	o.BranchIdentification = (*LEIIdentifier)(&value)
}

// SetUniqueTransactionIdentifier sets the unique transaction identifier.
func (o *OvernightIndexSwapTransaction3) SetUniqueTransactionIdentifier(value string) {
	o.UniqueTransactionIdentifier = (*Max105Text)(&value)
}

// SetProprietaryTransactionIdentification sets the reporting agent's internal transaction identifier.
func (o *OvernightIndexSwapTransaction3) SetProprietaryTransactionIdentification(value string) {
	o.ProprietaryTransactionIdentification = (*Max105Text)(&value)
}

// SetCounterpartyProprietaryTransactionIdentification sets the counterparty's internal transaction identifier.
func (o *OvernightIndexSwapTransaction3) SetCounterpartyProprietaryTransactionIdentification(value string) {
	o.CounterpartyProprietaryTransactionIdentification = (*Max105Text)(&value)
}
// AddCounterpartyIdentification initializes and returns the counterparty identification choice.
func (o *OvernightIndexSwapTransaction3) AddCounterpartyIdentification() *CounterpartyIdentification2Choice {
	o.CounterpartyIdentification = new(CounterpartyIdentification2Choice)
	return o.CounterpartyIdentification
}

// AddTradeDate initializes and returns the trade date.
func (o *OvernightIndexSwapTransaction3) AddTradeDate() *DateAndDateTimeChoice {
	o.TradeDate = new(DateAndDateTimeChoice)
	return o.TradeDate
}

// SetStartDate sets the start date of the floating-leg computation.
func (o *OvernightIndexSwapTransaction3) SetStartDate(value string) {
	o.StartDate = (*ISODate)(&value)
}

// SetMaturityDate sets the maturity date.
func (o *OvernightIndexSwapTransaction3) SetMaturityDate(value string) {
	o.MaturityDate = (*ISODate)(&value)
}

// AddFixedInterestRate initializes and returns the fixed interest rate.
func (o *OvernightIndexSwapTransaction3) AddFixedInterestRate() *Rate2 {
	o.FixedInterestRate = new(Rate2)
	return o.FixedInterestRate
}
// SetTransactionType sets whether the fixed interest rate is paid or received.
func (o *OvernightIndexSwapTransaction3) SetTransactionType(value string) {
	o.TransactionType = (*OvernightIndexSwapType1Code)(&value)
}

// SetTransactionNominalAmount sets the notional amount and its currency.
func (o *OvernightIndexSwapTransaction3) SetTransactionNominalAmount(value, currency string) {
	o.TransactionNominalAmount = NewActiveCurrencyAndAmount(value, currency)
}

// AddSupplementaryData appends and returns a new supplementary data entry.
func (o *OvernightIndexSwapTransaction3) AddSupplementaryData() *SupplementaryData1 {
	newValue := new(SupplementaryData1)
	o.SupplementaryData = append(o.SupplementaryData, newValue)
	return newValue
}
package ast
// exp ::= `nil` | `false` | `true` | Numeral | LiteralString | `...` | functiondef |
// prefixexp | tableconstructor | exp binop exp | unop exp
// Exp is the interface implemented by every expression AST node.
type Exp interface{}

// NilExp is a `nil` literal expression.
type NilExp struct {
	Line int // source line of the literal
}

// TrueExp is a `true` literal expression.
type TrueExp struct {
	Line int // source line of the literal
}

// FalseExp is a `false` literal expression.
type FalseExp struct {
	Line int // source line of the literal
}

// IntegerExp is an integer literal expression.
type IntegerExp struct {
	Line int   // source line of the literal
	Val  int64 // literal value
}

// FloatExp is a floating point literal expression.
type FloatExp struct {
	Line int     // source line of the literal
	Val  float64 // literal value
}

// StringExp is a string literal expression.
type StringExp struct {
	Line int    // source line of the literal
	Str  string // literal value
}

// VarargExp is a `...` (vararg) expression.
type VarargExp struct {
	Line int // source line of the `...` token
}
// TableConstructionExp is a table constructor expression.
// tableconstructor ::= `{` [fieldlist] `}`
// fieldlist ::= field {fieldsep field} [fieldsep]
// field ::= `[` exp `]` `=` exp | Name `=` exp | exp
// fieldsep ::= `,` | `;`
type TableConstructionExp struct {
	FirstLine int   // line of `{`
	LastLine  int   // line of `}`
	KeyExps   []Exp // key expression of each field, parallel to ValExps
	ValExps   []Exp // value expression of each field
}

// FuncDefExp is a function definition expression.
// functiondef ::= `function` funcbody
// funcbody ::= `(` [parlist] `)` block end
// parlist ::= namelist [`,` `...`] | `...`
// namelist ::= Name {`,` Name}
type FuncDefExp struct {
	FirstLine int      // first line of the definition
	LastLine  int      // line of `end`
	ParList   []string // parameter names
	IsVararg  bool     // whether the parameter list ends with `...`
	MBlock    *Block   // function body
}
// prefixexp includes var expression, function call expression
// and parentheses expression
// prefixexp ::= var | functioncall | `(` exp `)`
// var ::= Name | prefixexp `[` exp `]` | prefixexp `.` Name
// functioncall ::= prefixexp args | prefixexp `:` Name args
// =>
// prefixexp ::= Name |
// `(` exp `)`
// prefixexp `[` exp `]`
// prefixexp `.` Name
// prefixexp [`:` Name] args
// NameExp is an identifier (Name) expression.
type NameExp struct {
	Line int    // source line of the identifier
	Name string // identifier text
}

// TableAccessExp is a table access expression:
// prefixexp `[` exp `]` or prefixexp `.` Name.
type TableAccessExp struct {
	LastLine  int // last line of the access expression
	PrefixExp Exp // expression yielding the table
	Key       Exp // key expression
}

// FuncCallExp is a function call expression:
// prefixexp [`:` Name] args.
type FuncCallExp struct {
	FirstLine int        // first line of the call
	LastLine  int        // last line of the call
	PrefixExp Exp        // expression yielding the callee
	FNameExp  *StringExp // method name for `obj:name(...)` calls (nil for plain calls)
	Args      []Exp      // argument expressions
}

// ParensExp is a parenthesized expression: `(` exp `)`.
type ParensExp struct {
	MExp Exp // the wrapped expression
}
// UnOpExp is a unary operator expression.
// unop ::= `-` | `not` | `#` | `~`
type UnOpExp struct {
	Line int // source line of the operator
	Op   int // operator token kind
	MExp Exp // operand
}

// BinOpExp is a binary operator expression.
// binop ::= `+` | `-` | `*` | `/` | `//` | `^` | `%` |
// `&` | `~` | `|` | `>>` | `<<` | `..` |
// `<` | `<=` | `>` | `>=` | `==` | `~=` |
// `and` | `or`
type BinOpExp struct {
	Line int // source line of the operator
	Op   int // operator token kind
	Exp1 Exp // left operand
	Exp2 Exp // right operand
}

// ConcatExp is a `..` expression; consecutive concatenations are kept in one
// node, for optimizing the concatenating operation.
type ConcatExp struct {
	Line int   // source line
	Exps []Exp // operands, in order
}
package trie
import (
"context"
"encoding/hex"
"github.com/pkg/errors"
)
// TwoLayerTrie is a trie data structure with two layers: a layer-one trie
// whose values are the root hashes of per-key layer-two tries.
type TwoLayerTrie struct {
	layerOne Trie            // top-level trie mapping a key to a layer-two root hash
	layerTwo map[string]Trie // layer-two tries keyed by hex encoding of the layer-one key
	kvStore  KVStore         // backing key-value store shared by all tries
	rootKey  string          // root key of the layer-one trie
}
// NewTwoLayerTrie creates a two-layer trie backed by the given key-value
// store, with the layer-one root stored under rootKey. Call Start before use.
func NewTwoLayerTrie(dbForTrie KVStore, rootKey string) *TwoLayerTrie {
	tlt := &TwoLayerTrie{}
	tlt.kvStore = dbForTrie
	tlt.rootKey = rootKey
	return tlt
}
// layerTwoTrie returns the layer-two trie anchored under key, creating and
// caching it on first access. The sub-trie's root hash is looked up in the
// layer-one trie; ErrNotExist means the sub-trie is empty, so a fresh trie
// is started.
func (tlt *TwoLayerTrie) layerTwoTrie(key []byte, layerTwoTrieKeyLen int) (Trie, error) {
	hk := hex.EncodeToString(key)
	if lt, ok := tlt.layerTwo[hk]; ok {
		return lt, nil
	}
	opts := []Option{KVStoreOption(tlt.kvStore), KeyLengthOption(layerTwoTrieKeyLen)}
	value, err := tlt.layerOne.Get(key)
	switch errors.Cause(err) {
	case ErrNotExist:
		// start an empty trie
	case nil:
		opts = append(opts, RootHashOption(value))
	default:
		return nil, err
	}
	lt, err := NewTrie(opts...)
	if err != nil {
		return nil, err
	}
	if err := lt.Start(context.Background()); err != nil {
		return nil, err
	}
	// Fix: cache the constructed sub-trie so subsequent calls reuse it and
	// Stop can shut it down; previously the cache map was never populated.
	tlt.layerTwo[hk] = lt
	return lt, nil
}
// Start opens the layer-one trie rooted at rootKey and resets the layer-two
// cache.
func (tlt *TwoLayerTrie) Start(ctx context.Context) error {
	t, err := NewTrie(
		KVStoreOption(tlt.kvStore),
		RootKeyOption(tlt.rootKey),
	)
	if err != nil {
		return errors.Wrapf(err, "failed to generate trie for %s", tlt.rootKey)
	}
	tlt.layerTwo = map[string]Trie{}
	tlt.layerOne = t
	return t.Start(ctx)
}
// Stop stops every trie in the layer-two cache and then the layer-one trie.
// The first error encountered aborts the shutdown.
func (tlt *TwoLayerTrie) Stop(ctx context.Context) error {
	for _, lt := range tlt.layerTwo {
		if err := lt.Stop(ctx); err != nil {
			return err
		}
	}
	return tlt.layerOne.Stop(ctx)
}
// RootHash returns the root hash of the layer-one trie.
func (tlt *TwoLayerTrie) RootHash() []byte {
	return tlt.layerOne.RootHash()
}

// SetRootHash sets the root hash of the layer-one trie.
func (tlt *TwoLayerTrie) SetRootHash(rh []byte) error {
	return tlt.layerOne.SetRootHash(rh)
}
// Get returns the value stored under layerTwoKey in the layer-two trie
// anchored at layerOneKey.
func (tlt *TwoLayerTrie) Get(layerOneKey []byte, layerTwoKey []byte) ([]byte, error) {
	lt, err := tlt.layerTwoTrie(layerOneKey, len(layerTwoKey))
	if err != nil {
		return nil, err
	}
	return lt.Get(layerTwoKey)
}
// Upsert inserts or updates an item in the layer-two trie anchored at
// layerOneKey, then records the sub-trie's new root hash in layer one.
func (tlt *TwoLayerTrie) Upsert(layerOneKey []byte, layerTwoKey []byte, value []byte) error {
	lt, err := tlt.layerTwoTrie(layerOneKey, len(layerTwoKey))
	if err != nil {
		return err
	}
	if err = lt.Upsert(layerTwoKey, value); err != nil {
		return err
	}
	return tlt.layerOne.Upsert(layerOneKey, lt.RootHash())
}
// Delete deletes an item in layer two
func (tlt *TwoLayerTrie) Delete(layerOneKey []byte, layerTwoKey []byte) error {
layerTwo, err := tlt.layerTwoTrie(layerOneKey, len(layerTwoKey))
if err != nil {
return err
}
if err := layerTwo.Delete(layerTwoKey); err != nil {
return err
}
if !layerTwo.IsEmpty() {
return tlt.layerOne.Upsert(layerOneKey, layerTwo.RootHash())
}
return tlt.layerOne.Delete(layerOneKey)
} | db/trie/twolayertrie.go | 0.771672 | 0.475179 | twolayertrie.go | starcoder |
package rui
import (
"strings"
"unicode"
)
// DataValue interface of a data node value
type DataValue interface {
	// IsObject reports whether this value is a DataObject.
	IsObject() bool
	// Object returns the value as a DataObject, or nil for a text value.
	Object() DataObject
	// Value returns the text of the value ("" for an object).
	Value() string
}

// DataObject interface of a data object
type DataObject interface {
	DataValue
	// Tag returns the object's tag.
	Tag() string
	// PropertyCount returns the number of properties.
	PropertyCount() int
	// Property returns the property at index, or nil if out of range.
	Property(index int) DataNode
	// PropertyWithTag returns the property with the given tag, or nil.
	PropertyWithTag(tag string) DataNode
	// PropertyValue returns the text of the tagged property and true,
	// or ("", false) when there is no such text property.
	PropertyValue(tag string) (string, bool)
	// PropertyObject returns the object of the tagged property, or nil.
	PropertyObject(tag string) DataObject
	// SetPropertyValue sets (or replaces) a text property.
	SetPropertyValue(tag, value string)
	// SetPropertyObject sets (or replaces) an object property.
	SetPropertyObject(tag string, object DataObject)
}

const (
	// TextNode - node is the pair "tag - text value". Syntax: <tag> = <text>
	TextNode = 0
	// ObjectNode - node is the pair "tag - object". Syntax: <tag> = <object name>{...}
	ObjectNode = 1
	// ArrayNode - node is the pair "tag - object". Syntax: <tag> = [...]
	ArrayNode = 2
)

// DataNode interface of a data node
type DataNode interface {
	// Tag returns the node's tag.
	Tag() string
	// Type returns TextNode, ObjectNode or ArrayNode.
	Type() int
	// Text returns the node's text (meaningful for TextNode).
	Text() string
	// Object returns the node's object (meaningful for ObjectNode).
	Object() DataObject
	// ArraySize returns the number of array elements (0 unless ArrayNode).
	ArraySize() int
	// ArrayElement returns the array element at index, or nil.
	ArrayElement(index int) DataValue
	// ArrayElements returns all array elements (never nil).
	ArrayElements() []DataValue
}
/******************************************************************************/

// dataStringValue is the DataValue implementation for plain text.
type dataStringValue struct {
	value string
}

// Value returns the stored text.
func (value *dataStringValue) Value() string {
	return value.value
}

// IsObject always reports false for a text value.
func (value *dataStringValue) IsObject() bool {
	return false
}

// Object returns nil: a text value holds no object.
func (value *dataStringValue) Object() DataObject {
	return nil
}
/******************************************************************************/

// dataObject is the default DataObject implementation: a tag plus an
// ordered list of properties.
type dataObject struct {
	tag      string
	property []DataNode
}

// NewDataObject create new DataObject with the tag and empty property list
func NewDataObject(tag string) DataObject {
	return &dataObject{
		tag:      tag,
		property: []DataNode{},
	}
}
// Value implements DataValue; an object carries no text value.
func (object *dataObject) Value() string {
	return ""
}

// IsObject always reports true for dataObject.
func (object *dataObject) IsObject() bool {
	return true
}

// Object implements DataValue by returning the object itself.
func (object *dataObject) Object() DataObject {
	return object
}

// Tag returns the object's tag.
func (object *dataObject) Tag() string {
	return object.tag
}

// PropertyCount returns the number of properties (len of a nil slice is 0).
func (object *dataObject) PropertyCount() int {
	return len(object.property)
}
// Property returns the property node at index, or nil when the index is
// out of range.
func (object *dataObject) Property(index int) DataNode {
	if index >= 0 && index < len(object.property) {
		return object.property[index]
	}
	return nil
}

// PropertyWithTag returns the first property whose tag matches, or nil.
func (object *dataObject) PropertyWithTag(tag string) DataNode {
	for _, node := range object.property {
		if node.Tag() == tag {
			return node
		}
	}
	return nil
}

// PropertyValue returns the text of the tagged property and true, or
// ("", false) when the property is missing or not a text node.
func (object *dataObject) PropertyValue(tag string) (string, bool) {
	node := object.PropertyWithTag(tag)
	if node == nil || node.Type() != TextNode {
		return "", false
	}
	return node.Text(), true
}

// PropertyObject returns the object of the tagged property, or nil when
// the property is missing or not an object node.
func (object *dataObject) PropertyObject(tag string) DataObject {
	node := object.PropertyWithTag(tag)
	if node == nil || node.Type() != ObjectNode {
		return nil
	}
	return node.Object()
}
// setNode appends node to the property list, replacing any existing
// property that carries the same tag.
//
// Simplified from the original: the explicit nil/empty special case was
// redundant — ranging over and appending to a nil slice are both
// well-defined, so the single loop covers every case (gosimple S1009).
func (object *dataObject) setNode(node DataNode) {
	tag := node.Tag()
	for i, p := range object.property {
		if p.Tag() == tag {
			object.property[i] = node
			return
		}
	}
	object.property = append(object.property, node)
}
// SetPropertyValue - set a string property with tag by value
func (object *dataObject) SetPropertyValue(tag, value string) {
	object.setNode(&dataNode{
		tag:   tag,
		value: &dataStringValue{value: value},
	})
}

// SetPropertyObject - set a property with tag by object
func (object *dataObject) SetPropertyObject(tag string, obj DataObject) {
	object.setNode(&dataNode{tag: tag, value: obj})
}
/******************************************************************************/

// dataNode is the default DataNode implementation: a tag plus either a
// single value or an array of values.
type dataNode struct {
	tag   string
	value DataValue
	array []DataValue
}

// Tag returns the node's tag.
func (node *dataNode) Tag() string {
	return node.tag
}

// Type reports ArrayNode when an array is present, ObjectNode when the
// single value is an object, and TextNode otherwise.
func (node *dataNode) Type() int {
	switch {
	case node.array != nil:
		return ArrayNode
	case node.value.IsObject():
		return ObjectNode
	default:
		return TextNode
	}
}

// Text returns the node's text value, or "" when no value is set.
func (node *dataNode) Text() string {
	if node.value == nil {
		return ""
	}
	return node.value.Value()
}

// Object returns the node's object value, or nil when no value is set.
func (node *dataNode) Object() DataObject {
	if node.value == nil {
		return nil
	}
	return node.value.Object()
}

// ArraySize returns the number of array elements (len of nil is 0).
func (node *dataNode) ArraySize() int {
	return len(node.array)
}

// ArrayElement returns the array element at index, or nil if out of range.
func (node *dataNode) ArrayElement(index int) DataValue {
	if index >= 0 && index < len(node.array) {
		return node.array[index]
	}
	return nil
}

// ArrayElements returns the array elements; never nil.
func (node *dataNode) ArrayElements() []DataValue {
	if node.array == nil {
		return []DataValue{}
	}
	return node.array
}
// ParseDataText parses the rui data-description text and returns the
// root DataObject, or nil on a syntax error. Errors are reported via
// ErrorLog/ErrorLogF rather than returned. (Note: despite the original
// comment, the result is a DataObject, not a DataNode.)
//
// The parser is a hand-written recursive descent over a rune slice; the
// closures below share the mutable cursor state (pos, line, lineStart).
func ParseDataText(text string) DataObject {
	if strings.ContainsAny(text, "\r") {
		text = strings.Replace(text, "\r\n", "\n", -1)
		text = strings.Replace(text, "\r", "\n", -1)
	}
	// A zero-rune sentinel is appended so single-rune lookahead past the
	// logical end (size excludes the sentinel) is always in bounds.
	data := append([]rune(text), rune(0))
	pos := 0
	size := len(data) - 1
	line := 1
	lineStart := 0
	// skipSpaces advances pos past whitespace and '//' / '/* */' comments,
	// maintaining the line counter; newlines stop it unless skipNewLine.
	skipSpaces := func(skipNewLine bool) {
		for pos < size {
			switch data[pos] {
			case '\n':
				if !skipNewLine {
					return
				}
				line++
				lineStart = pos + 1
			case '/':
				if pos+1 < size {
					switch data[pos+1] {
					case '/':
						pos += 2
						for pos < size && data[pos] != '\n' {
							pos++
						}
						pos--
					case '*':
						pos += 3
						for {
							if pos >= size {
								ErrorLog("Unexpected end of file")
								return
							}
							if data[pos-1] == '*' && data[pos] == '/' {
								break
							}
							if data[pos-1] == '\n' {
								line++
								lineStart = pos
							}
							pos++
						}
					default:
						return
					}
				}
			case ' ', '\t':
				// do nothing
			default:
				if !unicode.IsSpace(data[pos]) {
					return
				}
			}
			pos++
		}
	}
	// parseTag reads one token: a backquoted raw string, a single- or
	// double-quoted string (with escape processing), or a bare word ended
	// by a stop symbol. Returns the token and ok=false on error.
	parseTag := func() (string, bool) {
		skipSpaces(true)
		startPos := pos
		if data[pos] == '`' {
			// Raw string: everything up to the closing backquote, verbatim.
			pos++
			startPos++
			for data[pos] != '`' {
				pos++
				if pos >= size {
					ErrorLog("Unexpected end of text")
					return string(data[startPos:size]), false
				}
			}
			str := string(data[startPos:pos])
			pos++
			return str, true
		} else if data[pos] == '\'' || data[pos] == '"' {
			stopSymbol := data[pos]
			pos++
			startPos++
			slash := false
			// First pass: find the closing quote, noting whether any
			// escape sequences need a second decoding pass.
			for stopSymbol != data[pos] {
				if data[pos] == '\\' {
					pos += 2
					slash = true
				} else {
					pos++
				}
				if pos >= size {
					ErrorLog("Unexpected end of text")
					return string(data[startPos:size]), false
				}
			}
			if !slash {
				str := string(data[startPos:pos])
				pos++
				skipSpaces(false)
				return str, true
			}
			// Second pass: decode \n \r \t \" \' \\ \xHH \uHHHH into buffer.
			buffer := make([]rune, pos-startPos+1)
			n1 := 0
			n2 := startPos
			invalidEscape := func() (string, bool) {
				str := string(data[startPos:pos])
				pos++
				ErrorLogF("Invalid escape sequence in \"%s\" (position %d)", str, n2-2-startPos)
				return str, false
			}
			for n2 < pos {
				if data[n2] != '\\' {
					buffer[n1] = data[n2]
					n2++
				} else {
					n2 += 2
					switch data[n2-1] {
					case 'n':
						buffer[n1] = '\n'
					case 'r':
						buffer[n1] = '\r'
					case 't':
						buffer[n1] = '\t'
					case '"':
						buffer[n1] = '"'
					case '\'':
						buffer[n1] = '\''
					case '\\':
						buffer[n1] = '\\'
					case 'x', 'X':
						// two hex digits
						if n2+2 > pos {
							return invalidEscape()
						}
						x := 0
						for i := 0; i < 2; i++ {
							ch := data[n2]
							if ch >= '0' && ch <= '9' {
								x = x*16 + int(ch-'0')
							} else if ch >= 'a' && ch <= 'f' {
								x = x*16 + int(ch-'a'+10)
							} else if ch >= 'A' && ch <= 'F' {
								x = x*16 + int(ch-'A'+10)
							} else {
								return invalidEscape()
							}
							n2++
						}
						buffer[n1] = rune(x)
					case 'u', 'U':
						// four hex digits
						if n2+4 > pos {
							return invalidEscape()
						}
						x := 0
						for i := 0; i < 4; i++ {
							ch := data[n2]
							if ch >= '0' && ch <= '9' {
								x = x*16 + int(ch-'0')
							} else if ch >= 'a' && ch <= 'f' {
								x = x*16 + int(ch-'a'+10)
							} else if ch >= 'A' && ch <= 'F' {
								x = x*16 + int(ch-'A'+10)
							} else {
								return invalidEscape()
							}
							n2++
						}
						buffer[n1] = rune(x)
					default:
						str := string(data[startPos:pos])
						ErrorLogF("Invalid escape sequence in \"%s\" (position %d)", str, n2-2-startPos)
						return str, false
					}
				}
				n1++
			}
			pos++
			skipSpaces(false)
			return string(buffer[0:n1]), true
		}
		// Bare word: read until a delimiter.
		stopSymbol := func(symbol rune) bool {
			if unicode.IsSpace(symbol) {
				return true
			}
			for _, sym := range []rune{'=', '{', '}', '[', ']', ',', ' ', '\t', '\n', '\'', '"', '`', '/'} {
				if sym == symbol {
					return true
				}
			}
			return false
		}
		for pos < size && !stopSymbol(data[pos]) {
			pos++
		}
		endPos := pos
		skipSpaces(false)
		if startPos == endPos {
			ErrorLog("empty tag")
			return "", false
		}
		return string(data[startPos:endPos]), true
	}
	// Declared before assignment so parseNode can recurse into both.
	var parseObject func(tag string) DataObject
	var parseArray func() []DataValue
	// parseNode parses one "<tag> = <value>" pair, where value is an
	// array, an anonymous object, a named object, or a text token.
	parseNode := func() DataNode {
		var tag string
		var ok bool
		if tag, ok = parseTag(); !ok {
			return nil
		}
		skipSpaces(true)
		if data[pos] != '=' {
			ErrorLogF("expected '=' after a tag name (line: %d, position: %d)", line, pos-lineStart)
			return nil
		}
		pos++
		skipSpaces(true)
		switch data[pos] {
		case '[':
			node := new(dataNode)
			node.tag = tag
			if node.array = parseArray(); node.array == nil {
				return nil
			}
			return node
		case '{':
			// Anonymous object: gets the placeholder tag "_".
			node := new(dataNode)
			node.tag = tag
			if node.value = parseObject("_"); node.value == nil {
				return nil
			}
			return node
		case '}', ']', '=':
			ErrorLogF("Expected '[', '{' or a tag name after '=' (line: %d, position: %d)", line, pos-lineStart)
			return nil
		default:
			var str string
			if str, ok = parseTag(); !ok {
				return nil
			}
			node := new(dataNode)
			node.tag = tag
			if data[pos] == '{' {
				// "<name>{...}": str is the object's tag.
				if node.value = parseObject(str); node.value == nil {
					return nil
				}
			} else {
				val := new(dataStringValue)
				val.value = str
				node.value = val
			}
			return node
		}
	}
	// parseObject parses "{ <node> [, | \n] ... }" into a dataObject.
	parseObject = func(tag string) DataObject {
		if data[pos] != '{' {
			ErrorLogF("Expected '{' (line: %d, position: %d)", line, pos-lineStart)
			return nil
		}
		pos++
		obj := new(dataObject)
		obj.tag = tag
		obj.property = []DataNode{}
		for pos < size {
			var node DataNode
			skipSpaces(true)
			if data[pos] == '}' {
				pos++
				skipSpaces(false)
				return obj
			}
			if node = parseNode(); node == nil {
				return nil
			}
			obj.property = append(obj.property, node)
			if data[pos] == '}' {
				pos++
				skipSpaces(true)
				return obj
			} else if data[pos] != ',' && data[pos] != '\n' {
				ErrorLogF(`Expected '}', '\n' or ',' (line: %d, position: %d)`, line, pos-lineStart)
				return nil
			}
			if data[pos] != '\n' {
				pos++
			}
			skipSpaces(true)
			for data[pos] == ',' {
				pos++
				skipSpaces(true)
			}
		}
		ErrorLog("Unexpected end of text")
		return nil
	}
	// parseArray parses "[ <value> [,] ... ]" into a DataValue slice;
	// elements are text tokens or named objects.
	parseArray = func() []DataValue {
		pos++
		skipSpaces(true)
		array := []DataValue{}
		for pos < size {
			var tag string
			var ok bool
			skipSpaces(true)
			for data[pos] == ',' && pos < size {
				pos++
				skipSpaces(true)
			}
			if pos >= size {
				break
			}
			if data[pos] == ']' {
				pos++
				skipSpaces(true)
				return array
			}
			if tag, ok = parseTag(); !ok {
				return nil
			}
			if data[pos] == '{' {
				obj := parseObject(tag)
				if obj == nil {
					return nil
				}
				array = append(array, obj)
			} else {
				val := new(dataStringValue)
				val.value = tag
				array = append(array, val)
			}
			switch data[pos] {
			case ']', ',', '\n':
			default:
				ErrorLogF("Expected ']' or ',' (line: %d, position: %d)", line, pos-lineStart)
				return nil
			}
			/*
				if data[pos] == ']' {
					pos++
					skipSpaces()
					return array, nil
				} else if data[pos] != ',' {
					return nil, fmt.Errorf("Expected ']' or ',' (line: %d, position: %d)", line, pos-lineStart)
				}
				pos++
				skipSpaces()
			*/
		}
		ErrorLog("Unexpected end of text")
		return nil
	}
	if tag, ok := parseTag(); ok {
		return parseObject(tag)
	}
	return nil
}
package taskmaster
import (
"time"
"github.com/go-ole/go-ole"
"github.com/rickb777/date/period"
)
// Day is a bit flag for a day of the week; values may be combined into
// a mask with bitwise OR.
type Day int

const (
	Sunday    Day = 1 << iota // 0x01
	Monday                    // 0x02
	Tuesday                   // 0x04
	Wednesday                 // 0x08
	Thursday                  // 0x10
	Friday                    // 0x20
	Saturday                  // 0x40
)

// DayInterval specifies if a task runs every day or every other day.
type DayInterval int

const (
	EveryDay      DayInterval = iota + 1 // 1
	EveryOtherDay                        // 2
)

// DayOfMonth is a day of a month.
type DayOfMonth int

const (
	// LastDayOfMonth is the sentinel value denoting the last day of a month.
	LastDayOfMonth = 32
)

// Month is a bit flag for one of the 12 months; values may be combined
// into a mask with bitwise OR.
type Month int

const (
	January   Month = 1 << iota // 0x01
	February                    // 0x02
	March                       // 0x04
	April                       // 0x08
	May                         // 0x10
	June                        // 0x20
	July                        // 0x40
	August                      // 0x80
	September                   // 0x100
	October                     // 0x200
	November                    // 0x400
	December                    // 0x800
)

// Week is a bit flag for which week of the month a task will run on.
type Week int

const (
	First  Week = 1 << iota // 0x01
	Second                  // 0x02
	Third                   // 0x04
	Fourth                  // 0x08
	Last                    // 0x10
)

// WeekInterval specifies if a task runs every week or every other week.
type WeekInterval int

const (
	EveryWeek      WeekInterval = iota + 1 // 1
	EveryOtherWeek                         // 2
)
// TaskActionType specifies the type of a task action.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_action_type
type TaskActionType int

const (
	TASK_ACTION_EXEC         TaskActionType = 0
	TASK_ACTION_COM_HANDLER  TaskActionType = 5
	TASK_ACTION_SEND_EMAIL   TaskActionType = 6
	TASK_ACTION_SHOW_MESSAGE TaskActionType = 7
)

// String returns a short human-readable name for the action type, or
// "" for an unknown value.
func (t TaskActionType) String() string {
	names := map[TaskActionType]string{
		TASK_ACTION_EXEC:         "exec",
		TASK_ACTION_COM_HANDLER:  "com handler",
		TASK_ACTION_SEND_EMAIL:   "send email",
		TASK_ACTION_SHOW_MESSAGE: "show message",
	}
	return names[t]
}
// TaskCompatibility specifies the compatibility of a registered task.
// Values mirror the Windows TASK_COMPATIBILITY enumeration (AT, V1,
// V2, V2_1 ... V2_4).
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_compatibility
type TaskCompatibility int

const (
	TASK_COMPATIBILITY_AT TaskCompatibility = iota
	TASK_COMPATIBILITY_V1
	TASK_COMPATIBILITY_V2
	TASK_COMPATIBILITY_V2_1
	TASK_COMPATIBILITY_V2_2
	TASK_COMPATIBILITY_V2_3
	TASK_COMPATIBILITY_V2_4
)

// TaskCreationFlags specifies how a task will be created.
// These are bit flags; note TASK_CREATE_OR_UPDATE is the union of
// TASK_CREATE and TASK_UPDATE.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_creation
type TaskCreationFlags int

const (
	TASK_VALIDATE_ONLY                TaskCreationFlags = 0x01
	TASK_CREATE                       TaskCreationFlags = 0x02
	TASK_UPDATE                       TaskCreationFlags = 0x04
	TASK_CREATE_OR_UPDATE             TaskCreationFlags = 0x06
	TASK_DISABLE                      TaskCreationFlags = 0x08
	TASK_DONT_ADD_PRINCIPAL_ACE       TaskCreationFlags = 0x10
	TASK_IGNORE_REGISTRATION_TRIGGERS TaskCreationFlags = 0x20
)

// TaskEnumFlags specifies how tasks will be enumerated.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_enum_flags
type TaskEnumFlags int

const (
	TASK_ENUM_HIDDEN TaskEnumFlags = 1 // enumerate all tasks, including tasks that are hidden
)
// TaskInstancesPolicy specifies what the Task Scheduler service will do when
// multiple instances of a task are triggered or operating at once.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_instances_policy
type TaskInstancesPolicy int

const (
	TASK_INSTANCES_PARALLEL      TaskInstancesPolicy = iota // start new instance while an existing instance is running
	TASK_INSTANCES_QUEUE                                    // start a new instance after all other instances complete
	TASK_INSTANCES_IGNORE_NEW                               // do not start a new instance if one is already running
	TASK_INSTANCES_STOP_EXISTING                            // stop the existing instance before starting a new one
)

// String returns a short name for the policy, or "" for unknown values.
func (t TaskInstancesPolicy) String() string {
	names := [...]string{
		"run parallel",
		"queue instances",
		"ignore new",
		"stop existing",
	}
	if t < 0 || int(t) >= len(names) {
		return ""
	}
	return names[t]
}
// TaskLogonType specifies how a registered task will authenticate when it executes.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_logon_type
type TaskLogonType int

const (
	TASK_LOGON_NONE                          TaskLogonType = iota // logon method not specified; used for non-NT credentials
	TASK_LOGON_PASSWORD                                           // log on with a password supplied at registration time
	TASK_LOGON_S4U                                                // Service For User logon: non-interactive desktop, no password stored, no network/EFS access
	TASK_LOGON_INTERACTIVE_TOKEN                                  // run only in an existing interactive session of an already-logged-on user
	TASK_LOGON_GROUP                                              // group activation
	TASK_LOGON_SERVICE_ACCOUNT                                    // run as Local System, Local Service, or Network Service
	TASK_LOGON_INTERACTIVE_TOKEN_OR_PASSWORD                      // interactive token if the user is logged on, otherwise the registered password; less reliable than TASK_LOGON_PASSWORD
)

// String returns a short name for the logon type, or "" for unknown values.
func (t TaskLogonType) String() string {
	names := [...]string{
		"none",
		"password",
		"s4u",
		"interactive token",
		"group",
		"service account",
		"interactive token or password",
	}
	if t < 0 || int(t) >= len(names) {
		return ""
	}
	return names[t]
}
// TaskRunFlags specifies how a task will be executed. These are bit
// flags and may be combined.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_run_flags
type TaskRunFlags int

const (
	TASK_RUN_NO_FLAGS           TaskRunFlags = 0 // the task is run with all flags ignored
	TASK_RUN_AS_SELF            TaskRunFlags = 1 // the task is run as the user who is calling the Run method
	TASK_RUN_IGNORE_CONSTRAINTS TaskRunFlags = 2 // the task is run regardless of constraints such as "do not run on batteries" or "run only if idle"
	TASK_RUN_USE_SESSION_ID     TaskRunFlags = 4 // the task is run using a terminal server session identifier
	TASK_RUN_USER_SID           TaskRunFlags = 8 // the task is run using a security identifier
)
// TaskRunLevel specifies whether the task will be run with full permissions or not.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_runlevel_type
type TaskRunLevel int

const (
	TASK_RUNLEVEL_LUA     TaskRunLevel = iota // task will be run with the least privileges
	TASK_RUNLEVEL_HIGHEST                     // task will be run with the highest privileges
)

// String returns "least" or "highest", or "" for unknown values.
func (t TaskRunLevel) String() string {
	names := [...]string{"least", "highest"}
	if t < 0 || int(t) >= len(names) {
		return ""
	}
	return names[t]
}
// TaskSessionStateChangeType specifies the type of session state change that a
// SessionStateChange trigger will trigger on.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_session_state_change_type
type TaskSessionStateChangeType int

const (
	TASK_CONSOLE_CONNECT    TaskSessionStateChangeType = 1 // Terminal Server console connection, e.g. switching users on the local computer
	TASK_CONSOLE_DISCONNECT TaskSessionStateChangeType = 2 // Terminal Server console disconnection
	TASK_REMOTE_CONNECT     TaskSessionStateChangeType = 3 // remote connection via Remote Desktop from another computer
	TASK_REMOTE_DISCONNECT  TaskSessionStateChangeType = 4 // remote disconnection of a Remote Desktop session
	TASK_SESSION_LOCK       TaskSessionStateChangeType = 7 // session locked, e.g. the computer is locked
	TASK_SESSION_UNLOCK     TaskSessionStateChangeType = 8 // session unlocked, e.g. the computer is unlocked
)

// String returns a short name for the change type, or "" for unknown values.
func (t TaskSessionStateChangeType) String() string {
	names := map[TaskSessionStateChangeType]string{
		TASK_CONSOLE_CONNECT:    "console connect",
		TASK_CONSOLE_DISCONNECT: "console disconnect",
		TASK_REMOTE_CONNECT:     "remote connect",
		TASK_REMOTE_DISCONNECT:  "remote disconnect",
		TASK_SESSION_LOCK:       "session lock",
		TASK_SESSION_UNLOCK:     "session unlock",
	}
	return names[t]
}
// TaskState specifies the state of a running or registered task.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_state
type TaskState int

const (
	TASK_STATE_UNKNOWN  TaskState = iota // the state of the task is unknown
	TASK_STATE_DISABLED                  // registered but disabled; no instances queued or running
	TASK_STATE_QUEUED                    // instances of the task are queued
	TASK_STATE_READY                     // ready to be executed; no instances queued or running
	TASK_STATE_RUNNING                   // one or more instances of the task is running
)

// String returns a short name for the state, or "" for unknown values.
func (t TaskState) String() string {
	names := [...]string{"unknown", "disabled", "queued", "ready", "running"}
	if t < 0 || int(t) >= len(names) {
		return ""
	}
	return names[t]
}
// TaskTriggerType specifies the type of a task trigger.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_trigger_type2
type TaskTriggerType int

const (
	TASK_TRIGGER_EVENT                TaskTriggerType = 0
	TASK_TRIGGER_TIME                 TaskTriggerType = 1
	TASK_TRIGGER_DAILY                TaskTriggerType = 2
	TASK_TRIGGER_WEEKLY               TaskTriggerType = 3
	TASK_TRIGGER_MONTHLY              TaskTriggerType = 4
	TASK_TRIGGER_MONTHLYDOW           TaskTriggerType = 5
	TASK_TRIGGER_IDLE                 TaskTriggerType = 6
	TASK_TRIGGER_REGISTRATION         TaskTriggerType = 7
	TASK_TRIGGER_BOOT                 TaskTriggerType = 8
	TASK_TRIGGER_LOGON                TaskTriggerType = 9
	TASK_TRIGGER_SESSION_STATE_CHANGE TaskTriggerType = 11
	TASK_TRIGGER_CUSTOM_TRIGGER_01    TaskTriggerType = 12
)

// String returns a short name for the trigger type, or "" for unknown
// values (including the unused value 10).
func (t TaskTriggerType) String() string {
	names := map[TaskTriggerType]string{
		TASK_TRIGGER_EVENT:                "event",
		TASK_TRIGGER_TIME:                 "time",
		TASK_TRIGGER_DAILY:                "daily",
		TASK_TRIGGER_WEEKLY:               "weekly",
		TASK_TRIGGER_MONTHLY:              "monthly",
		TASK_TRIGGER_MONTHLYDOW:           "monthly day of the week",
		TASK_TRIGGER_IDLE:                 "idle",
		TASK_TRIGGER_REGISTRATION:         "registration",
		TASK_TRIGGER_BOOT:                 "boot",
		TASK_TRIGGER_LOGON:                "logon",
		TASK_TRIGGER_SESSION_STATE_CHANGE: "session state change",
		TASK_TRIGGER_CUSTOM_TRIGGER_01:    "custom",
	}
	return names[t]
}
// TaskService wraps the COM objects and connection state used to talk
// to the Windows Task Scheduler service.
type TaskService struct {
	taskServiceObj *ole.IDispatch // COM object for the Task Scheduler service
	rootFolderObj  *ole.IDispatch // COM object for the root task folder

	isInitialized bool // presumably set once COM initialization succeeds — confirm against the methods that set it
	isConnected   bool // presumably set once a connection to the service is established — confirm against Connect

	// Identity of the machine/user this service instance is connected to.
	connectedDomain       string
	connectedComputerName string
	connectedUser         string
}

// TaskFolder is a folder in the Task Scheduler hierarchy, together with
// its subfolders and the tasks registered directly inside it.
type TaskFolder struct {
	Name            string
	Path            string
	SubFolders      []TaskFolder
	RegisteredTasks []RegisteredTask
}

// RunningTask is a task that is currently running.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-irunningtask
type RunningTask struct {
	taskObj       *ole.IDispatch // underlying IRunningTask COM object
	isReleased    bool           // whether the COM object has been released
	CurrentAction string         // the name of the current action that the running task is performing
	EnginePID     int            // the process ID for the engine (process) which is running the task
	InstanceGUID  string         // the GUID identifier for this instance of the task
	Name          string         // the name of the task
	Path          string         // the path to where the task is stored
	State         TaskState      // an identifier for the state of the running task
}

// RegisteredTask is a task that is registered in the Task Scheduler database.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iregisteredtask
type RegisteredTask struct {
	taskObj        *ole.IDispatch // underlying IRegisteredTask COM object
	Name           string         // the name of the registered task
	Path           string         // the path to where the registered task is stored
	Definition     Definition
	Enabled        bool
	State          TaskState // the operational state of the registered task
	MissedRuns     int       // the number of times the registered task has missed a scheduled run
	NextRunTime    time.Time // the time when the registered task is next scheduled to run
	LastRunTime    time.Time // the time the registered task was last run
	LastTaskResult int       // the results that were returned the last time the registered task was run
}

// Definition defines all the components of a task, such as the task settings, triggers, actions, and registration information
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-itaskdefinition
type Definition struct {
	Actions          []Action
	Context          string // specifies the security context under which the actions of the task are performed
	Data             string // the data that is associated with the task
	Principal        Principal
	RegistrationInfo RegistrationInfo
	Settings         TaskSettings
	Triggers         []Trigger
	XMLText          string // the XML-formatted definition of the task
}
// Action is the common interface satisfied by every task action type.
type Action interface {
	// GetID returns the user-defined identifier of the action.
	GetID() string
	// GetType returns the concrete TaskActionType of the action.
	GetType() TaskActionType
}

// taskActionTypeHolder stores the concrete action type; it is embedded
// so each action struct carries its type tag.
type taskActionTypeHolder struct {
	actionType TaskActionType
}

// TaskAction holds the fields common to every action type.
type TaskAction struct {
	ID string // user-defined identifier of the action
	taskActionTypeHolder
}

// ExecAction is an action that performs a command-line operation.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iexecaction
type ExecAction struct {
	TaskAction
	Path       string // program or script to run
	Args       string // arguments passed to the program
	WorkingDir string // working directory for the command
}

// ComHandlerAction is an action that fires a COM handler. Can only be used if TASK_COMPATIBILITY_V2 or above is set.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-icomhandleraction
type ComHandlerAction struct {
	TaskAction
	ClassID string // CLSID of the COM handler to fire
	Data    string // data passed to the handler
}
// Principal provides security credentials that define the security context for the tasks that are associated with it.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iprincipal
type Principal struct {
	Name      string        // the name of the principal
	GroupID   string        // the identifier of the user group that is required to run the tasks
	ID        string        // the identifier of the principal
	LogonType TaskLogonType // the security logon method that is required to run the tasks
	RunLevel  TaskRunLevel  // the identifier that is used to specify the privilege level that is required to run the tasks
	UserID    string        // the user identifier that is required to run the tasks
}

// RegistrationInfo provides the administrative information that can be used to describe the task
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iregistrationinfo
type RegistrationInfo struct {
	Author             string    // author of the task
	Date               time.Time // registration date of the task
	Description        string
	Documentation      string
	SecurityDescriptor string
	Source             string
	URI                string
	Version            string
}

// TaskSettings provides the settings that the Task Scheduler service uses to perform the task
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-itasksettings
type TaskSettings struct {
	AllowDemandStart       bool              // indicates that the task can be started by using either the Run command or the Context menu
	AllowHardTerminate     bool              // indicates that the task may be terminated by the Task Scheduler service using TerminateProcess
	Compatibility          TaskCompatibility // indicates which version of Task Scheduler a task is compatible with
	DeleteExpiredTaskAfter string            // the amount of time that the Task Scheduler will wait before deleting the task after it expires
	DontStartOnBatteries   bool              // indicates that the task will not be started if the computer is running on batteries
	Enabled                bool              // indicates that the task is enabled
	TimeLimit              period.Period     // the amount of time that is allowed to complete the task
	Hidden                 bool              // indicates that the task will not be visible in the UI
	IdleSettings
	MultipleInstances TaskInstancesPolicy // defines how the Task Scheduler deals with multiple instances of the task
	NetworkSettings
	Priority                  int           // the priority level of the task, ranging from 0 - 10, where 0 is the highest priority, and 10 is the lowest. Only applies to ComHandler, Email, and MessageBox actions
	RestartCount              int           // the number of times that the Task Scheduler will attempt to restart the task
	RestartInterval           period.Period // specifies how long the Task Scheduler will attempt to restart the task
	RunOnlyIfIdle             bool          // indicates that the Task Scheduler will run the task only if the computer is in an idle condition
	RunOnlyIfNetworkAvailable bool          // indicates that the Task Scheduler will run the task only when a network is available
	StartWhenAvailable        bool          // indicates that the Task Scheduler can start the task at any time after its scheduled time has passed
	StopIfGoingOnBatteries    bool          // indicates that the task will be stopped if the computer is going onto batteries
	WakeToRun                 bool          // indicates that the Task Scheduler will wake the computer when it is time to run the task, and keep the computer awake until the task is completed
}

// IdleSettings specifies how the Task Scheduler performs tasks when the computer is in an idle condition.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iidlesettings
type IdleSettings struct {
	IdleDuration  period.Period // the amount of time that the computer must be in an idle state before the task is run
	RestartOnIdle bool          // whether the task is restarted when the computer cycles into an idle condition more than once
	StopOnIdleEnd bool          // indicates that the Task Scheduler will terminate the task if the idle condition ends before the task is completed
	WaitTimeout   period.Period // the amount of time that the Task Scheduler will wait for an idle condition to occur
}

// NetworkSettings provides the settings that the Task Scheduler service uses to obtain a network profile.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-inetworksettings
type NetworkSettings struct {
	ID   string // a GUID value that identifies a network profile
	Name string // the name of a network profile
}
// Trigger is the common interface satisfied by every trigger type; the
// getters mirror the shared TaskTrigger fields below.
type Trigger interface {
	GetEnabled() bool
	GetEndBoundary() time.Time
	GetExecutionTimeLimit() period.Period
	GetID() string
	GetRepetitionDuration() period.Period
	GetRepetitionInterval() period.Period
	GetStartBoundary() time.Time
	GetStopAtDurationEnd() bool
	GetType() TaskTriggerType
}

// taskTriggerTypeHolder stores the concrete trigger type; it is embedded
// so each trigger struct carries its type tag.
type taskTriggerTypeHolder struct {
	triggerType TaskTriggerType
}

// TaskTrigger provides the common properties that are inherited by all trigger objects.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-itrigger
type TaskTrigger struct {
	Enabled            bool          // indicates whether the trigger is enabled
	EndBoundary        time.Time     // the date and time when the trigger is deactivated
	ExecutionTimeLimit period.Period // the maximum amount of time that the task launched by this trigger is allowed to run
	ID                 string        // the identifier for the trigger
	RepetitionPattern
	StartBoundary time.Time // the date and time when the trigger is activated
	taskTriggerTypeHolder
}

// RepetitionPattern defines how often the task is run and how long the repetition pattern is repeated after the task is started.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-irepetitionpattern
type RepetitionPattern struct {
	RepetitionDuration period.Period // how long the pattern is repeated
	RepetitionInterval period.Period // the amount of time between each restart of the task. Required if RepetitionDuration is specified. Minimum time is one minute
	StopAtDurationEnd  bool          // indicates if a running instance of the task is stopped at the end of the repetition pattern duration
}

// BootTrigger triggers the task when the computer boots. Only Administrators can create tasks with a BootTrigger.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iboottrigger
type BootTrigger struct {
	TaskTrigger
	Delay period.Period // indicates the amount of time between when the system is booted and when the task is started
}

// DailyTrigger triggers the task on a daily schedule. For example, the task starts at a specific time every day, every other day, or every third day. The time of day that the task is started is set by StartBoundary, which must be set.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-idailytrigger
type DailyTrigger struct {
	TaskTrigger
	DayInterval DayInterval   // the interval between the days in the schedule
	RandomDelay period.Period // a delay time that is randomly added to the start time of the trigger
}

// EventTrigger triggers the task when a specific event occurs. A maximum of 500 tasks with event subscriptions can be created.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-ieventtrigger
type EventTrigger struct {
	TaskTrigger
	Delay        period.Period     // indicates the amount of time between when the event occurs and when the task is started
	Subscription string            // a query string that identifies the event that fires the trigger
	ValueQueries map[string]string // a collection of named XPath queries. Each query in the collection is applied to the last matching event XML returned from the subscription query
}

// IdleTrigger triggers the task when the computer goes into an idle state. An IdleTrigger will only trigger a task action if the computer goes into an idle state after the start boundary of the trigger
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iidletrigger
type IdleTrigger struct {
	TaskTrigger
}

// LogonTrigger triggers the task when a specific user logs on. When the Task Scheduler service starts, all logged-on users are enumerated and any tasks registered with logon triggers that match the logged on user are run.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-ilogontrigger
type LogonTrigger struct {
	TaskTrigger
	Delay  period.Period // indicates the amount of time between when the user logs on and when the task is started
	UserID string        // the identifier of the user. If left empty, the trigger will fire when any user logs on
}
// MonthlyDOWTrigger triggers the task on a monthly day-of-week schedule. For example, the task starts on a specific days of the week, weeks of the month, and months of the year. The time of day that the task is started is set by StartBoundary, which must be set.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-imonthlydowtrigger
type MonthlyDOWTrigger struct {
	TaskTrigger
	DaysOfWeek           Day           // the days of the week during which the task runs
	MonthsOfYear         Month         // the months of the year during which the task runs
	RandomDelay          period.Period // a delay time that is randomly added to the start time of the trigger
	RunOnLastWeekOfMonth bool          // indicates that the task runs on the last week of the month
	WeeksOfMonth         Week          // the weeks of the month during which the task runs
}

// MonthlyTrigger triggers the task on a monthly schedule. For example, the task starts on specific days of specific months.
// The time of day that the task is started is set by StartBoundary, which must be set.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-imonthlytrigger
type MonthlyTrigger struct {
	TaskTrigger
	DaysOfMonth  DayOfMonth    // the days of the month during which the task runs
	MonthsOfYear Month         // the months of the year during which the task runs
	RandomDelay  period.Period // a delay time that is randomly added to the start time of the trigger
	// NOTE(review): the underlying IMonthlyTrigger exposes a "run on last
	// *day* of month" property; confirm whether this field name/comment
	// should say day rather than week.
	RunOnLastWeekOfMonth bool // indicates that the task runs on the last week of the month
}

// RegistrationTrigger triggers the task when the task is registered.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iregistrationtrigger
type RegistrationTrigger struct {
	TaskTrigger
	Delay period.Period // the amount of time between when the task is registered and when the task is started
}

// SessionStateChangeTrigger triggers the task when a specific user session state changes.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-isessionstatechangetrigger
type SessionStateChangeTrigger struct {
	TaskTrigger
	Delay       period.Period              // indicates how long of a delay takes place before a task is started after a Terminal Server session state change is detected
	StateChange TaskSessionStateChangeType // the kind of Terminal Server session change that would trigger a task launch
	// NOTE(review): name breaks the Go initialism convention (UserID), but
	// renaming would break callers of this exported field.
	UserId string // the user for the Terminal Server session. When a session state change is detected for this user, a task is started
}

// TimeTrigger triggers the task at a specific time of day. StartBoundary determines when the trigger fires.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-itimetrigger
type TimeTrigger struct {
	TaskTrigger
	RandomDelay period.Period // a delay time that is randomly added to the start time of the trigger
}

// WeeklyTrigger triggers the task on a weekly schedule. The time of day that the task is started is set by StartBoundary, which must be set.
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/nn-taskschd-iweeklytrigger
type WeeklyTrigger struct {
	TaskTrigger
	DaysOfWeek   Day           // the days of the week in which the task runs
	RandomDelay  period.Period // a delay time that is randomly added to the start time of the trigger
	WeekInterval WeekInterval  // the interval between the weeks in the schedule
}

// CustomTrigger is a trigger whose concrete kind this package does not model
// explicitly; only the common TaskTrigger properties are exposed.
type CustomTrigger struct {
	TaskTrigger
}
// IsConnected returns the service's stored connected flag.
func (t TaskService) IsConnected() bool {
	return t.isConnected
}

// GetConnectedDomain returns the domain recorded for the current connection.
func (t TaskService) GetConnectedDomain() string {
	return t.connectedDomain
}

// GetConnectedComputerName returns the computer name recorded for the current connection.
func (t TaskService) GetConnectedComputerName() string {
	return t.connectedComputerName
}

// GetConnectedUser returns the user recorded for the current connection.
func (t TaskService) GetConnectedUser() string {
	return t.connectedUser
}

// GetID returns the action's identifier.
func (a TaskAction) GetID() string {
	return a.ID
}

// GetType returns the concrete action type stored in the holder.
func (t taskActionTypeHolder) GetType() TaskActionType {
	return t.actionType
}

// GetType returns the concrete trigger type stored in the holder.
func (t taskTriggerTypeHolder) GetType() TaskTriggerType {
	return t.triggerType
}
// GetRepetitionDuration returns how long the repetition pattern is repeated.
func (t TaskTrigger) GetRepetitionDuration() period.Period {
	return t.RepetitionDuration
}

// GetEnabled returns whether the trigger is enabled.
func (t TaskTrigger) GetEnabled() bool {
	return t.Enabled
}

// GetEndBoundary returns the date and time when the trigger is deactivated.
func (t TaskTrigger) GetEndBoundary() time.Time {
	return t.EndBoundary
}

// GetExecutionTimeLimit returns the maximum time the triggered task may run.
func (t TaskTrigger) GetExecutionTimeLimit() period.Period {
	return t.ExecutionTimeLimit
}

// GetID returns the trigger's identifier.
func (t TaskTrigger) GetID() string {
	return t.ID
}

// GetRepetitionInterval returns the time between each restart of the task.
func (t TaskTrigger) GetRepetitionInterval() period.Period {
	return t.RepetitionInterval
}

// GetStartBoundary returns the date and time when the trigger is activated.
func (t TaskTrigger) GetStartBoundary() time.Time {
	return t.StartBoundary
}
func (t TaskTrigger) GetStopAtDurationEnd() bool {
return t.StopAtDurationEnd
} | types.go | 0.594434 | 0.470311 | types.go | starcoder |
package matrix
import (
"bytes"
"fmt"
)
// weight is a 256-bit lookup table spread over four uint64 words: bit i of
// the table is the parity (popcount mod 2) of the byte value i. It is used
// by Row.DotProduct to fold a byte's parity in constant time.
var weight [4]uint64 = [4]uint64{
	0x6996966996696996, 0x9669699669969669,
	0x9669699669969669, 0x6996966996696996,
}

// Row is a binary row / vector in GF(2)^n, packed eight bits per byte.
type Row []byte

// NewRow returns an empty n-component row.
func NewRow(n int) Row {
	return Row(make([]byte, rowsToColumns(n)))
}
// LessThan returns true if row i is "less than" row j. If you sort a
// permutation matrix according to LessThan, you'll always get the identity
// matrix.
//
// Concretely: at the first byte position where either row is non-zero, a row
// with a zero byte sorts after a non-zero one, and otherwise the row whose
// lowest set bit (x & -x) is smaller sorts first. Equal rows are not less.
// (Fix: `for k, _ := range` replaced with the idiomatic `for k := range`.)
func LessThan(i, j Row) bool {
	if i.Size() != j.Size() {
		panic("Can't compare rows that are different sizes!")
	}
	for k := range i {
		if i[k] != 0x00 || j[k] != 0x00 {
			switch {
			case i[k] == 0x00:
				return false
			case j[k] == 0x00:
				return true
			default:
				// x & -x isolates the lowest set bit of x.
				return i[k]&-i[k] < j[k]&-j[k]
			}
		}
	}
	return false
}
// Add adds (XORs) two vectors and returns the result; both inputs must have
// the same length. Neither operand is modified.
func (e Row) Add(f Row) Row {
	if len(e) != len(f) {
		panic("Can't add rows that are different sizes!")
	}
	sum := make([]byte, len(e))
	for i := range e {
		sum[i] = e[i] ^ f[i]
	}
	return Row(sum)
}
// Mul component-wise multiplies (ANDs) two vectors of equal length and
// returns the result. Neither operand is modified.
func (e Row) Mul(f Row) Row {
	if len(e) != len(f) {
		panic("Can't multiply rows that are different sizes!")
	}
	prod := make([]byte, len(e))
	for i := range e {
		prod[i] = e[i] & f[i]
	}
	return Row(prod)
}
// DotProduct computes the dot product of two vectors over GF(2): it is true
// iff the component-wise AND of the rows has odd hamming weight.
func (e Row) DotProduct(f Row) bool {
	parity := uint64(0)
	for _, g_i := range e.Mul(f) {
		// weight is a 256-entry parity table; this indexes bit g_i of it,
		// i.e. the parity of the byte value g_i.
		parity ^= (weight[g_i/64] >> (g_i % 64)) & 1
	}
	return parity == 1
}
// Weight returns the hamming weight (number of set bits) of this row.
// (Fix: `w += 1` replaced with the idiomatic increment statement `w++`.)
func (e Row) Weight() (w int) {
	for i := 0; i < e.Size(); i++ {
		if e.GetBit(i) == 1 {
			w++
		}
	}
	return
}
// Cancels returns true if e should be used to cancel out a bit in f: the
// lowest set bit of e's first non-zero byte (e[i] & -e[i]) must also be set
// in the corresponding byte of f. A zero row cancels nothing.
// (Fix: `for i, _ := range` replaced with `for i := range`; the if/else pair
// collapsed into a single boolean return.)
func (e Row) Cancels(f Row) bool {
	for i := range e {
		if e[i] != 0x00 {
			return e[i]&-e[i]&f[i] != 0x00
		}
	}
	return false
}
// GetBit returns the ith component of the vector: 0x00 or 0x01.
// Bits are packed least-significant-first within each byte, so bit i lives
// at position i%8 of byte i/8.
func (e Row) GetBit(i int) byte {
	return (e[i/8] >> (uint(i) % 8)) & 1
}
// SetBit sets the ith component of the vector to 0x01 if x = true and 0x00
// if x = false. The byte is only touched when the stored bit differs from x.
func (e Row) SetBit(i int, x bool) {
	set := e.GetBit(i) == 1
	if set != x {
		e[i/8] ^= 1 << (uint(i) % 8)
	}
}
// IsZero returns true if the row is identically zero.
func (e Row) IsZero() bool {
	for i := range e {
		if e[i] != 0x00 {
			return false
		}
	}
	return true
}
// Height returns the position of the first non-zero entry in the row, or -1
// if the row is zero.
func (e Row) Height() int {
	for pos, n := 0, e.Size(); pos < n; pos++ {
		if e.GetBit(pos) == 1 {
			return pos
		}
	}
	return -1
}
// Equals returns true if two rows are equal (same length, same bytes) and
// false otherwise.
func (e Row) Equals(f Row) bool {
	return bytes.Equal(e, f)
}

// Size returns the dimension of the vector in bits (eight per backing byte).
func (e Row) Size() int {
	return 8 * len(e)
}
// Dup returns a duplicate of this row that shares no storage with the
// original.
func (e Row) Dup() Row {
	out := make(Row, len(e))
	copy(out, e)
	return out
}
// String converts the row into space-and-dot notation, delimited by '|' and
// terminated with a newline: a '•' marks a set bit, a space a clear one.
func (e Row) String() string {
	out := []rune{'|'}
	for _, elem := range e {
		// %8.8b renders the byte most-significant-bit first; walking the
		// runes backwards prints the least significant bit first, matching
		// the little-endian packing used by GetBit.
		b := []rune(fmt.Sprintf("%8.8b", elem))
		for pos := 7; pos >= 0; pos-- {
			if b[pos] == '0' {
				out = append(out, ' ')
			} else {
				out = append(out, '•')
			}
		}
	}
	return string(append(out, '|', '\n'))
}
// OctaveString converts the row into a string that can be imported into Octave.
func (e Row) OctaveString() string {
out := []rune{}
for _, elem := range e {
b := []rune(fmt.Sprintf("%8.8b", elem))
for pos := 7; pos >= 0; pos-- {
if b[pos] == '0' {
out = append(out, '0', ' ')
} else {
out = append(out, '1', ' ')
}
}
}
return string(append(out, '\n'))
} | matrix/row.go | 0.722723 | 0.449211 | row.go | starcoder |
package polo
import (
"fmt"
"math/rand"
"strings"
"github.com/emicklei/dot"
)
// State is a string naming one state of the chain.
type State = string

// Chain is a Sequence of random states -> probabilities. For an order-k
// chain, each key of StateTransitions is k space-joined states.
type Chain struct {
	StateTransitions map[State]Probabilities
	Order            int
}

// Probabilities gives the probabilities of going to the next state given
// some state.
type Probabilities map[State]float64
// New is a constructor of Chain; it returns an empty chain of the given
// order with an initialized (non-nil) transition map.
func New(order int) Chain {
	c := Chain{Order: order}
	c.StateTransitions = make(map[State]Probabilities)
	return c
}
// Set sets the probability matrix of the to state coming from the from
// states, i.e. it records P(to | from...). Exactly c.Order from-states must
// be supplied; otherwise Set panics. The from-states are joined with a
// single space to form the transition key.
func (c Chain) Set(to State, probability float64, from ...State) {
	if len(from) != c.Order {
		panic("Wrong amount of states provided")
	}
	current := strings.Join(from, " ")
	// If the key state doesn't exist, initialize it
	if _, ok := c.StateTransitions[current]; !ok {
		c.StateTransitions[current] = Probabilities{}
	}
	c.StateTransitions[current][to] = probability
}
// String renders every stored transition as a "P('next'|'current') = p"
// line, with a blank line after each current-state group. Ordering is
// nondeterministic because it follows Go's map iteration order.
func (c Chain) String() string {
	sb := strings.Builder{}
	for current, matrix := range c.StateTransitions {
		for next, probability := range matrix {
			sb.WriteString(fmt.Sprintf("P('%s'|'%s') = %.02f\n", next, current, probability))
		}
		sb.WriteString("\n")
	}
	return sb.String()
}
// Probability returns the probability of the next state happening given the
// current one. Unknown states yield 0: looking up a missing key returns a
// nil Probabilities map, and indexing a nil map returns the zero value.
func (c Chain) Probability(next State, current State) float64 {
	return c.StateTransitions[current][next]
}
// Next gives the next state given the current state, sampled by inverse-CDF:
// a uniform draw in [0,1) is compared against the cumulative sums of the
// transition probabilities. Map iteration order is random, but that does not
// bias the sample. If the draw exceeds the total probability mass (e.g. the
// stored probabilities sum to less than 1, or current is unknown), the
// current state is returned unchanged.
func (c Chain) Next(current State) State {
	probs := []float64{}
	states := []State{}
	for state, probability := range c.StateTransitions[current] {
		probs = append(probs, probability)
		states = append(states, state)
	}
	sum := cumsum(probs)
	sample := rand.Float64()
	for index, val := range sum {
		if sample <= val {
			return states[index]
		}
	}
	return current
}
// Graph renders the chain as a Graphviz DOT digraph with one node per state
// and one edge per transition, labelled with its probability to two decimals.
func (c Chain) Graph() string {
	g := dot.NewGraph(dot.Directed)
	for from, probabilities := range c.StateTransitions {
		fromNode := g.Node(from)
		for to, prob := range probabilities {
			toNode := g.Node(to)
			g.Edge(fromNode, toNode, fmt.Sprintf("%.2f", prob))
		}
	}
	return g.String()
}
// NextUntilEnd generates states until it reaches either itself or EndState
// (a sentinel declared elsewhere in this package). Each generated state is
// appended to the result with a trailing space; the sliding window of the
// last c.Order words forms the key for the next draw.
// NOTE(review): the input is assumed to contain exactly c.Order
// space-separated words — confirm against callers.
func (c Chain) NextUntilEnd(input State) State {
	final := input + " "
	prev := input
	next := ""
	words := strings.Fields(prev)
	for next != EndState {
		next = c.Next(strings.Join(words, " "))
		// A state that transitions to itself would loop forever; stop.
		if next == prev {
			return final
		}
		if next != EndState {
			final += next + " "
		}
		// Slide the window: drop the oldest word, append the new state.
		words = append(words[1:], next)
		prev = next
	}
	return final
}
// RandomState returns a uniformly random key of StateTransitions, using the
// idiomatic "count down while ranging" trick over the map.
// NOTE(review): rand.Intn(0) panics, so this panics on a chain with no
// transitions — confirm callers guarantee a non-empty chain.
func (c Chain) RandomState() State {
	i := rand.Intn(len(c.StateTransitions))
	for k := range c.StateTransitions {
		if i == 0 {
			return k
		}
		i--
	}
	return ""
}
// cumsum returns the cumulative (prefix) sums of p: out[i] = p[0]+...+p[i].
// A nil or empty input yields an empty slice.
// (Fixes: the loop variable previously shadowed the parameter p, and
// dataset-table residue was fused onto the closing brace.)
func cumsum(p []float64) []float64 {
	sums := make([]float64, len(p))
	sum := 0.0
	for i, v := range p {
		sum += v
		sums[i] = sum
	}
	return sums
}
package main
import (
"errors"
"fmt"
"os"
)
const (
	rows, columns = 9, 9 // grid dimensions
	empty         = 0    // sentinel digit for an unoccupied cell
)

// Cell is a square on the Sudoku grid.
type Cell struct {
	digit int8 // current digit, or empty
	fixed bool // set for the puzzle's initial digits, which cannot be changed
}

// Grid is a Sudoku grid.
type Grid [rows][columns]Cell

// Errors that could occur.
var (
	ErrBounds     = errors.New("out of bounds")
	ErrDigit      = errors.New("invalid digit")
	ErrInRow      = errors.New("digit already present in this row")
	ErrInColumn   = errors.New("digit already present in this column")
	ErrInRegion   = errors.New("digit already present in this region")
	ErrFixedDigit = errors.New("initial digits cannot be overwritten")
)
// NewSudoku makes a new Sudoku grid from the given starting digits; every
// non-empty digit is marked fixed so it cannot later be changed or cleared.
func NewSudoku(digits [rows][columns]int8) *Grid {
	var grid Grid
	for r := range digits {
		for c := range digits[r] {
			if d := digits[r][c]; d != empty {
				grid[r][c] = Cell{digit: d, fixed: true}
			}
		}
	}
	return &grid
}
// Set a digit on a Sudoku grid. It validates, in order: bounds, digit range,
// fixed-cell protection, then the three Sudoku constraints (row, column,
// 3x3 region), returning the first matching error or nil on success.
func (g *Grid) Set(row, column int, digit int8) error {
	switch {
	case !inBounds(row, column):
		return ErrBounds
	case !validDigit(digit):
		return ErrDigit
	case g.isFixed(row, column):
		return ErrFixedDigit
	case g.inRow(row, digit):
		return ErrInRow
	case g.inColumn(column, digit):
		return ErrInColumn
	case g.inRegion(row, column, digit):
		return ErrInRegion
	}
	g[row][column].digit = digit
	return nil
}
// Clear a cell from the Sudoku grid, returning ErrBounds for an off-grid
// position and ErrFixedDigit for one of the puzzle's initial digits.
func (g *Grid) Clear(row, column int) error {
	if !inBounds(row, column) {
		return ErrBounds
	}
	if g.isFixed(row, column) {
		return ErrFixedDigit
	}
	g[row][column].digit = empty
	return nil
}
// inBounds reports whether (row, column) addresses a cell on the grid.
// (Fix: collapsed the `if cond { return false }; return true` pattern into a
// single boolean expression.)
func inBounds(row, column int) bool {
	return row >= 0 && row < rows && column >= 0 && column < columns
}
// validDigit reports whether digit is a legal Sudoku digit (1 through 9).
func validDigit(digit int8) bool {
	switch {
	case digit < 1, digit > 9:
		return false
	default:
		return true
	}
}
// inRow reports whether digit already appears anywhere in the given row.
func (g *Grid) inRow(row int, digit int8) bool {
	for c := range g[row] {
		if g[row][c].digit == digit {
			return true
		}
	}
	return false
}
// inColumn reports whether digit already appears anywhere in the given column.
func (g *Grid) inColumn(column int, digit int8) bool {
	for r := range g {
		if g[r][column].digit == digit {
			return true
		}
	}
	return false
}
// inRegion reports whether digit already appears in the 3x3 region
// containing (row, column).
func (g *Grid) inRegion(row, column int, digit int8) bool {
	// Integer division snaps to the region's top-left corner (0, 3, or 6).
	startRow, startColumn := row/3*3, column/3*3
	for r := startRow; r < startRow+3; r++ {
		for c := startColumn; c < startColumn+3; c++ {
			if g[r][c].digit == digit {
				return true
			}
		}
	}
	return false
}
// isFixed reports whether the cell holds one of the puzzle's initial digits.
// Callers must have bounds-checked (row, column) already.
func (g *Grid) isFixed(row, column int) bool {
	return g[row][column].fixed
}
func main() {
s := NewSudoku([rows][columns]int8{
{5, 3, 0, 0, 7, 0, 0, 0, 0},
{6, 0, 0, 1, 9, 5, 0, 0, 0},
{0, 9, 8, 0, 0, 0, 0, 6, 0},
{8, 0, 0, 0, 6, 0, 0, 0, 3},
{4, 0, 0, 8, 0, 3, 0, 0, 1},
{7, 0, 0, 0, 2, 0, 0, 0, 6},
{0, 6, 0, 0, 0, 0, 2, 8, 0},
{0, 0, 0, 4, 1, 9, 0, 0, 5},
{0, 0, 0, 0, 8, 0, 0, 7, 9},
})
err := s.Set(1, 1, 4)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for _, row := range s {
fmt.Println(row)
}
} | solutions/capstone29/sudoku/sudoku.go | 0.572006 | 0.437824 | sudoku.go | starcoder |
//go:build go1.18
// +build go1.18
/*
Command govulncheck reports known vulnerabilities that affect Go code. It uses
static analysis or a binary's symbol table to narrow down reports to only those
that potentially affect the application. For more information about the API
behind govulncheck, see https://go.dev/security/vulncheck.
By default, govulncheck uses the Go vulnerability database at
https://vuln.go.dev. Set the GOVULNDB environment variable to specify a different database.
The database must follow the specification at https://go.dev/security/vulndb.
Govulncheck requires Go version 1.18 or higher to run.
WARNING: govulncheck is still EXPERIMENTAL and neither its output nor the vulnerability
database should be relied on to be stable or comprehensive.
Usage
To analyze source code, run govulncheck from the module directory, using the
same package path syntax that the go command uses:
$ cd my-module
$ govulncheck ./...
If no vulnerabilities are found, govulncheck produces no output and exits with code 0.
If there are vulnerabilities, each is displayed briefly, with a summary of a call stack,
and govulncheck exits with code 3.
The call stack summary shows in brief how the package calls a vulnerable function.
For example, it might say
mypackage.main calls golang.org/x/text/language.Parse
For more detailed call path that resemble Go panic stack traces, use the -v flag.
To control which files are processed, use the -tags flag to provide a
comma-separated list of build tags, and the -tests flag to indicate that test
files should be included.
To run govulncheck on a compiled binary, pass it the path to the binary file:
$ govulncheck $HOME/go/bin/my-go-program
Govulncheck uses the binary's symbol information to find mentions of vulnerable functions.
Its output and exit codes are as described above, except that without source it cannot
produce call stacks.
Other Modes
A few flags control govulncheck's output. Regardless of output, govulncheck
exits with code 0 if there are no vulnerabilities and 3 if there are.
The -v flag outputs more information about call stacks when run on source. It has
no effect when run on a binary.
The -html flag outputs HTML instead of plain text.
The -json flag outputs a JSON object with vulnerability information. The output
corresponds to the type golang.org/x/vuln/vulncheck.Result.
Weaknesses
Govulncheck uses static analysis, which is inherently imprecise. If govulncheck
identifies a sequence of calls in your program that leads to a vulnerable
function, that path may never be executed because of conditions in the code, or
it may call the vulnerable function with harmless input.
The call graph analysis that govulncheck performs cannot find calls that use
Go's reflect or unsafe packages. It is possible for govulncheck to miss
vulnerabilities in programs that call functions in these unusual ways.
*/
package main
package output
import (
"fmt"
"github.com/Jeffail/benthos/v3/internal/component/output"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
"github.com/Jeffail/benthos/v3/lib/broker"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
)
//------------------------------------------------------------------------------
// init registers the fallback output constructor and its documentation with
// the package-level Constructors registry.
// NOTE(review): the YAML indentation inside the doc strings was reconstructed
// (source formatting was lost) — confirm against the rendered docs.
func init() {
	Constructors[TypeFallback] = TypeSpec{
		constructor: newFallback,
		Version:     "3.58.0",
		Summary: `
Attempts to send each message to a child output, starting from the first output on the list. If an output attempt fails then the next output in the list is attempted, and so on.`,
		Description: `
This pattern is useful for triggering events in the case where certain output targets have broken. For example, if you had an output type ` + "`http_client`" + ` but wished to reroute messages whenever the endpoint becomes unreachable you could use this pattern:
` + "```yaml" + `
output:
  fallback:
    - http_client:
        url: http://foo:4195/post/might/become/unreachable
        retries: 3
        retry_period: 1s
    - http_client:
        url: http://bar:4196/somewhere/else
        retries: 3
        retry_period: 1s
      processors:
        - bloblang: 'root = "failed to send this message to foo: " + content()'
    - file:
        path: /usr/local/benthos/everything_failed.jsonl
` + "```" + `
### Batching
When an output within a fallback sequence uses batching, like so:
` + "```yaml" + `
output:
  fallback:
    - aws_dynamodb:
        table: foo
        string_columns:
          id: ${!json("id")}
          content: ${!content()}
        batching:
          count: 10
          period: 1s
    - file:
        path: /usr/local/benthos/failed_stuff.jsonl
` + "```" + `
Benthos makes a best attempt at inferring which specific messages of the batch failed, and only propagates those individual messages to the next fallback tier.
However, depending on the output and the error returned it is sometimes not possible to determine the individual messages that failed, in which case the whole batch is passed to the next tier in order to preserve at-least-once delivery guarantees.`,
		Categories: []Category{
			CategoryUtility,
		},
		config: docs.FieldComponent().Array().HasType(docs.FieldTypeOutput),
	}
}
//------------------------------------------------------------------------------
// newFallback constructs the fallback broker output from config: each child
// output in conf.Fallback is built in order, then wrapped in a broker.Try
// that attempts them sequentially until one succeeds. Returns
// ErrBrokerNoOutputs when no children are configured.
func newFallback(
	conf Config,
	mgr types.Manager,
	log log.Modular,
	stats metrics.Type,
	pipelines ...types.PipelineConstructorFunc,
) (Type, error) {
	pipelines = AppendProcessorsFromConfig(conf, mgr, log, stats, pipelines...)
	outputConfs := conf.Fallback
	if len(outputConfs) == 0 {
		return nil, ErrBrokerNoOutputs
	}
	outputs := make([]types.Output, len(outputConfs))
	maxInFlight := 1
	var err error
	for i, oConf := range outputConfs {
		// Namespace each child's manager/logger/metrics under "fallback.<i>".
		oMgr, oLog, oStats := interop.LabelChild(fmt.Sprintf("fallback.%v", i), mgr, log, stats)
		oStats = metrics.Combine(stats, oStats)
		if outputs[i], err = New(oConf, oMgr, oLog, oStats); err != nil {
			return nil, fmt.Errorf("failed to create output '%v' type '%v': %v", i, oConf.Type, err)
		}
		// Adopt the largest in-flight hint reported by any child.
		if mif, ok := output.GetMaxInFlight(outputs[i]); ok && mif > maxInFlight {
			maxInFlight = mif
		}
	}
	// NOTE(review): when no child reports a hint above 1, the broker defaults
	// to 50 in-flight messages — confirm this matches intended behaviour.
	if maxInFlight <= 1 {
		maxInFlight = 50
	}
	var t *broker.Try
	if t, err = broker.NewTry(outputs, stats); err != nil {
		return nil, err
	}
	t.WithMaxInFlight(maxInFlight)
	t.WithOutputMetricsPrefix("fallback.outputs")
	return WrapWithPipelines(t, pipelines...)
}
//------------------------------------------------------------------------------
package checkup
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"time"
"github.com/sourcegraph/checkup/utils"
)
/*
```
Summary: Get node information.
https://docs.binance.org/api-reference/node-rpc.html#node-rpc
URL for mainnet: http://dataseed1.binance.org:80/status
URL for testnet: http://data-seed-pre-0-s1.binance.org:80/status
latest_block_height
{
"jsonrpc": "2.0",
"id": "",
"result": {
"node_info": {
"protocol_version": {
"p2p": "7",
"block": "10",
"app": "0"
},
"id": "782303c9060d46211225662fdd1dd411c638263a",
"listen_addr": "172.16.17.32:27146",
"network": "Binance-Chain-Tigris",
"version": "0.30.1",
"channels": "354020212223303800",
"moniker": "data-seed-0",
"other": {
"tx_index": "on",
"rpc_address": "tcp://0.0.0.0:27147"
}
},
"sync_info": {
"latest_block_hash": "FF42CE48AC5987F7CD4A051B757A1B58B066081A2DDC006AA8F168CD5045C835",
"latest_app_hash": "D7C80CE18D1D1D5103CFA3221DF5C51EE8D3F5949DA3E943E782B7B227123D96",
"latest_block_height": "12766888",
"latest_block_time": "2019-06-13T06:37:04.78651439Z",
"catching_up": false
},
"validator_info": {
"address": "A88BAB486162E44380AA456DFA7C1DCD997985D9",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "<KEY>
},
"voting_power": "0"
}
}
}
```
*/
// BNBStatus models the JSON-RPC envelope returned by a Binance Chain node's
// /status endpoint; only the sync-related result fields are decoded.
// (Fix: the jsonrpc struct tag was malformed — `json:"jsonrpc` was missing
// its closing quote, so encoding/json ignored the tag entirely.)
type BNBStatus struct {
	StatusRPCVer string             `json:"jsonrpc"`
	StatusResult StatusResultStruct `json:"result"`
	// TODO: Transactions
}
/*
{
"node_info": {
"protocol_version": {
"p2p": 7,
"block": 10,
"app": 0
},
"id": "f52252fcda9c161c0089d971c9f1b941a26023ef",
"listen_addr": "10.211.33.206:27146",
"network": "Binance-Chain-Tigris",
"version": "0.30.1",
"channels": "3540202122233038",
"moniker": "Everest",
"other": {
"tx_index": "on",
"rpc_address": "tcp://0.0.0.0:27147"
}
},
"sync_info": {
"latest_block_hash": "0A82DF62E127C346DF3227A2E11B880A05FC4BEE3D1D97A24577B3506CCF9FD7",
"latest_app_hash": "C427816633C9B631B8059A3391B94F3ADEDD716DE6E682325B55AB7A929D47A2",
"latest_block_height": 16723750,
"latest_block_time": "2019-06-30T03:27:11.31300562Z",
"catching_up": false
},*/
// BNBDEXAPIStatus models the response of the Binance DEX node-info API,
// where sync_info appears at the top level (no JSON-RPC envelope).
type BNBDEXAPIStatus struct {
	SyncInfoData SyncInfoStruct `json:"sync_info"`
	// TODO: Transactions
}

// StatusResultStruct is the "result" object of the node-RPC /status reply.
type StatusResultStruct struct {
	SyncInfoData SyncInfoStruct `json:"sync_info"`
}

// SyncInfoStruct carries a node's chain-sync state. Field names mirror the
// JSON keys (underscored, non-idiomatic Go) and cannot be renamed without
// breaking the accessor methods below.
type SyncInfoStruct struct {
	SyncInfo_latest_block_hash   string `json:"latest_block_hash"`
	SyncInfo_latest_app_hash     string `json:"latest_app_hash"`
	SyncInfo_latest_block_height string `json:"latest_block_height"` // decimal string in RPC replies
	SyncInfo_latest_block_time   string `json:"latest_block_time"`
	SyncInfo_catching_up         bool   `json:"catching_up"`
}
// GetLatestBlockHeight returns the latest block height as a decimal string.
func (bc *BNBStatus) GetLatestBlockHeight() string {
	return bc.StatusResult.SyncInfoData.SyncInfo_latest_block_height
}

// GetLatestBlockTimestamp returns the latest block's timestamp string.
func (bc *BNBStatus) GetLatestBlockTimestamp() string {
	return bc.StatusResult.SyncInfoData.SyncInfo_latest_block_time
}

// GetLatestBlockHashString returns the hex-encoded string of the latest
// block hash.
func (bc *BNBStatus) GetLatestBlockHashString() string {
	return bc.StatusResult.SyncInfoData.SyncInfo_latest_block_hash
}
// BNCChecker implements a Checker for Binance chain endpoints: it POSTs to
// the endpoint, decodes the node's reported block height, and compares it
// against a reference node's height.
type BNCChecker struct {
	// Name is the name of the endpoint.
	Name string `json:"endpoint_name"`
	// URL is the URL of the endpoint.
	URL string `json:"endpoint_url"`
	// User is the user name used for HTTP basic auth (optional).
	User string `json:"user"`
	// Password is the password used for HTTP basic auth (optional).
	Password string `json:"password"`
	// BlockHeightBehind is the threshold (in blocks) by which the checked
	// node may lag the reference node before being reported down.
	BlockHeightBehind uint32 `json:"blockHeightBehind"`
	// ReferURL is the reference node queried for the expected block height.
	ReferURL string `json:"refer_url,omitempty"`
	// UpStatus is the HTTP status code expected by
	// a healthy endpoint. Default is http.StatusOK.
	UpStatus int `json:"up_status,omitempty"`
	// ThresholdRTT is the maximum round trip time to
	// allow for a healthy endpoint. If non-zero and a
	// request takes longer than ThresholdRTT, the
	// endpoint will be considered unhealthy. Note that
	// this duration includes any in-between network
	// latency.
	ThresholdRTT time.Duration `json:"threshold_rtt,omitempty"`
	// MustContain is a string that the response body
	// must contain in order to be considered up.
	// NOTE: If set, the entire response body will
	// be consumed, which has the potential of using
	// lots of memory and slowing down checks if the
	// response body is large.
	MustContain string `json:"must_contain,omitempty"`
	// MustNotContain is a string that the response
	// body must NOT contain in order to be considered
	// up. If both MustContain and MustNotContain are
	// set, they are and-ed together. NOTE: If set,
	// the entire response body will be consumed, which
	// has the potential of using lots of memory and
	// slowing down checks if the response body is large.
	MustNotContain string `json:"must_not_contain,omitempty"`
	// Attempts is how many requests the client will
	// make to the endpoint in a single check.
	Attempts int `json:"attempts,omitempty"`
	// AttemptSpacing spaces out each attempt in a check
	// by this duration to avoid hitting a remote too
	// quickly in succession. By default, no waiting
	// occurs between attempts.
	AttemptSpacing time.Duration `json:"attempt_spacing,omitempty"`
	// Client is the http.Client with which to make
	// requests. If not set, DefaultHTTPClient is
	// used.
	Client *http.Client `json:"-"`
	// Headers contains headers to added to the request
	// that is sent for the check
	Headers http.Header `json:"headers,omitempty"`
}
// Check performs checks using c according to its configuration.
// An error is only returned if there is a configuration error.
// (Fix: the http.NewRequest error is now checked before req is used — the
// original set headers and basic auth on req first, which panics on a nil
// request when the URL is invalid.)
func (c BNCChecker) Check() (Result, error) {
	// Apply defaults for unset configuration.
	if c.Attempts < 1 {
		c.Attempts = 1
	}
	if c.Client == nil {
		c.Client = DefaultHTTPClient
	}
	if c.UpStatus == 0 {
		c.UpStatus = http.StatusOK
	}
	result := Result{Title: c.Name, Endpoint: c.URL, Timestamp: Timestamp()}
	req, err := http.NewRequest("POST", c.URL, bytes.NewBuffer([]byte("")))
	if err != nil {
		return result, err
	}
	req.Header.Set("Content-Type", "application/json")
	if c.User != "" && c.Password != "" {
		req.SetBasicAuth(c.User, c.Password)
	}
	if c.Headers != nil {
		for key, header := range c.Headers {
			req.Header.Add(key, strings.Join(header, ", "))
		}
	}
	result.Times = c.doChecks(req)
	return c.conclude(result), nil
}
// doChecks executes req using c.Client and returns each attempt. The
// recorded RTT covers only the HTTP round trip, not checkDown's body
// processing.
func (c BNCChecker) doChecks(req *http.Request) Attempts {
	checks := make(Attempts, c.Attempts)
	for i := 0; i < c.Attempts; i++ {
		start := time.Now()
		resp, err := c.Client.Do(req)
		checks[i].RTT = time.Since(start)
		if err != nil {
			// NOTE(review): transport errors skip the AttemptSpacing sleep
			// below (continue) — confirm that is intentional.
			checks[i].Error = err.Error()
			continue
		}
		err = c.checkDown(resp)
		if err != nil {
			checks[i].Error = err.Error()
		}
		// checkDown has fully read the body; close it so the connection can
		// be reused.
		resp.Body.Close()
		if c.AttemptSpacing > 0 {
			time.Sleep(c.AttemptSpacing)
		}
	}
	return checks
}
// conclude takes the data in result from the attempts and
// computes remaining values needed to fill out the result.
// It detects degraded (high-latency) responses and makes
// the conclusion about the result's status: any attempt error marks the
// endpoint down; otherwise a median RTT above the threshold marks it
// degraded; otherwise it is healthy.
func (c BNCChecker) conclude(result Result) Result {
	result.ThresholdRTT = c.ThresholdRTT
	// Check errors (down) — the first attempt error wins.
	for i := range result.Times {
		if result.Times[i].Error != "" {
			result.Down = true
			result.Message = result.Times[i].Error
			return result
		}
	}
	// Check round trip time (degraded)
	if c.ThresholdRTT > 0 {
		stats := result.ComputeStats()
		if stats.Median > c.ThresholdRTT {
			result.Notice = fmt.Sprintf("median round trip time exceeded threshold (%s)", c.ThresholdRTT)
			result.Degraded = true
			return result
		}
	}
	result.Healthy = true
	return result
}
// checkDown checks whether the endpoint is down based on resp and
// the configuration of c. It returns a non-nil error if down.
// Note that it does not check for degraded response.
// (Fix: the reference-height fetch error was previously discarded, so a
// failed reference lookup looked like height 0 and the lag check silently
// passed.)
func (c BNCChecker) checkDown(resp *http.Response) error {
	// Check status code
	if resp.StatusCode != c.UpStatus {
		return fmt.Errorf("response status %s", resp.Status)
	}
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("reading response body: %v", err)
	}
	// Parse the checked node's reported block height.
	var checkresult = new(BNBStatus)
	err = json.Unmarshal(bodyBytes, &checkresult)
	if err != nil {
		return fmt.Errorf("Unmarshal response body: %v", err)
	}
	var lastBNBBlockNum int64
	if c.ReferURL == "" {
		c.ReferURL = "https://dex.binance.org/api/v1/node-info"
		log.Printf("use default, set c.ReferURL:%s\n", c.ReferURL)
	}
	if c.ReferURL == "http://dataseed1.binance.org:80/status" {
		lastBNBBlockNum, err = c.getHeightDataseed1()
	} else if c.ReferURL == "https://dex.binance.org/api/v1/node-info" {
		lastBNBBlockNum, err = c.getHeightDEXAPI()
	} else {
		return fmt.Errorf("unsupported c.ReferURL:%s", c.ReferURL)
	}
	if err != nil {
		return fmt.Errorf("fetching reference height from %s: %v", c.ReferURL, err)
	}
	// NOTE(review): a malformed height string parses as 0 and passes the
	// comparison below — confirm whether that should also be reported.
	lastBNBCheckBlockNum, _ := strconv.ParseInt(checkresult.GetLatestBlockHeight(), 10, 64)
	blockDiff := lastBNBBlockNum - lastBNBCheckBlockNum
	log.Printf("BNC(%s) %s BlockHeight:%d, check url:%s last block BlockHeight:%d\n", c.Name, c.ReferURL, lastBNBBlockNum, c.URL, lastBNBCheckBlockNum)
	if (lastBNBBlockNum > lastBNBCheckBlockNum) && (blockDiff > int64(c.BlockHeightBehind)) {
		return fmt.Errorf("blockheight(%d) was behind BNC %s (%d) > %d blocks, threshold(%d)", lastBNBCheckBlockNum, c.ReferURL, lastBNBBlockNum, blockDiff, c.BlockHeightBehind)
	}
	return nil
}
// getHeightDataseed1 returns the reference block height from a node-RPC
// /status endpoint (dataseed1 by default, or c.ReferURL when set). Responses
// are memoized in the global cache keyed by URL.
// (Fixes: the http.NewRequest error was previously unchecked before
// client.Do, and string(responsebody[:]) used a redundant full slice.)
func (c BNCChecker) getHeightDataseed1() (int64, error) {
	var responsebody []byte
	url := "http://dataseed1.binance.org:80/status"
	if c.ReferURL != "" {
		log.Printf("BNC(%s) height ReferURL:%s\n", c.Name, c.ReferURL)
		url = c.ReferURL
	}
	responsebodyStr := utils.GlobalCacheGetString(url)
	if responsebodyStr == "" {
		client := &http.Client{}
		req, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte("")))
		if err != nil {
			return 0, err
		}
		req.Header.Set("Content-Type", "application/json")
		res, err := client.Do(req)
		if err != nil {
			return 0, err
		}
		defer res.Body.Close()
		responsebody, err = ioutil.ReadAll(res.Body)
		if err != nil {
			return 0, err
		}
		utils.GlobalCacheSetString(url, string(responsebody))
	} else {
		log.Printf("BNC(%s) use cache key: %s", c.Name, url)
		responsebody = []byte(responsebodyStr)
	}
	var bnbresult = new(BNBStatus)
	err := json.Unmarshal(responsebody, &bnbresult)
	if err != nil {
		return 0, fmt.Errorf("url:%s Unmarshal response body: %v", url, err)
	}
	lastBNBBlockNum, err := strconv.ParseInt(bnbresult.GetLatestBlockHeight(), 10, 64)
	return lastBNBBlockNum, err
}
func (c BNCChecker) getHeightDEXAPI() (int64, error) {
var responsebody []byte
url := "https://dex.binance.org/api/v1/node-info"
if c.ReferURL != "" {
log.Printf("BNC(%s) height ReferURL:%s\n", c.Name, c.ReferURL)
url = c.ReferURL
}
responsebodyStr := utils.GlobalCacheGetString(url)
if responsebodyStr == "" {
client := &http.Client{}
req, err := http.NewRequest("POST", url, bytes.NewBuffer([]byte("")))
req.Header.Set("Content-Type", "application/json")
res, err := client.Do(req)
if err != nil {
return 0, err
}
defer res.Body.Close()
responsebody, err = ioutil.ReadAll(res.Body)
if err != nil {
return 0, err
}
str := string(responsebody[:])
utils.GlobalCacheSetString(url, str)
} else {
log.Printf("BNC(%s) use cache key: %s", c.Name, url)
responsebody = []byte(responsebodyStr)
}
var bnbresult = new(BNBDEXAPIStatus)
err := json.Unmarshal(responsebody, &bnbresult)
if err != nil {
return 0, fmt.Errorf("url:%s Unmarshal response body: %v", url, err)
}
lastBNBBlockNum, err := strconv.ParseInt(bnbresult.SyncInfoData.SyncInfo_latest_block_height, 10, 64)
return lastBNBBlockNum, err
} | bncchecker.go | 0.686055 | 0.440409 | bncchecker.go | starcoder |
package money
import (
"errors"
)
// Amount is a datastructure that stores the amount being used for calculations.
type Amount struct {
	val int64 // raw value; presumably in the currency's smallest unit (e.g. cents) — see Round/Fraction
}

// Money represents monetary value information, stores
// currency and amount value.
type Money struct {
	amount   *Amount
	currency *Currency
}
// New creates and returns new instance of Money.
// NOTE(review): amount appears to be in the currency's smallest unit
// (cents-style), given the integer representation — confirm with callers.
func New(amount int64, code string) *Money {
	return &Money{
		amount:   &Amount{val: amount},
		currency: newCurrency(code).get(),
	}
}

// Currency returns the currency used by Money.
func (m *Money) Currency() *Currency {
	return m.currency
}

// Amount returns a copy of the internal monetary value as an int64.
func (m *Money) Amount() int64 {
	return m.amount.val
}

// SameCurrency check if given Money is equals by currency.
func (m *Money) SameCurrency(om *Money) bool {
	return m.currency.equals(om.currency)
}
// assertSameCurrency returns nil when om shares m's currency, and an error
// otherwise; comparison methods call it before touching amounts.
func (m *Money) assertSameCurrency(om *Money) error {
	if m.SameCurrency(om) {
		return nil
	}
	return errors.New("currencies don't match")
}

// compare orders the two amounts: 1 when m is larger, -1 when smaller,
// 0 when equal. Currency equality is the caller's responsibility.
func (m *Money) compare(om *Money) int {
	if m.amount.val > om.amount.val {
		return 1
	}
	if m.amount.val < om.amount.val {
		return -1
	}
	return 0
}
// Equals checks equality between two Money types; errors if currencies differ.
func (m *Money) Equals(om *Money) (bool, error) {
	if err := m.assertSameCurrency(om); err != nil {
		return false, err
	}
	return m.compare(om) == 0, nil
}

// GreaterThan checks whether the value of Money is greater than the other;
// errors if currencies differ.
func (m *Money) GreaterThan(om *Money) (bool, error) {
	if err := m.assertSameCurrency(om); err != nil {
		return false, err
	}
	return m.compare(om) == 1, nil
}

// GreaterThanOrEqual checks whether the value of Money is greater or equal than the other;
// errors if currencies differ.
func (m *Money) GreaterThanOrEqual(om *Money) (bool, error) {
	if err := m.assertSameCurrency(om); err != nil {
		return false, err
	}
	return m.compare(om) >= 0, nil
}

// LessThan checks whether the value of Money is less than the other;
// errors if currencies differ.
func (m *Money) LessThan(om *Money) (bool, error) {
	if err := m.assertSameCurrency(om); err != nil {
		return false, err
	}
	return m.compare(om) == -1, nil
}

// LessThanOrEqual checks whether the value of Money is less or equal than the other;
// errors if currencies differ.
func (m *Money) LessThanOrEqual(om *Money) (bool, error) {
	if err := m.assertSameCurrency(om); err != nil {
		return false, err
	}
	return m.compare(om) <= 0, nil
}
// IsZero returns boolean of whether the value of Money is equals to zero.
func (m *Money) IsZero() bool {
	return m.amount.val == 0
}

// IsPositive returns boolean of whether the value of Money is positive
// (strictly greater than zero).
func (m *Money) IsPositive() bool {
	return m.amount.val > 0
}

// IsNegative returns boolean of whether the value of Money is negative.
func (m *Money) IsNegative() bool {
	return m.amount.val < 0
}
// Absolute returns new Money struct from given Money using absolute monetary value.
func (m *Money) Absolute() *Money {
	return &Money{amount: mutate.calc.absolute(m.amount), currency: m.currency}
}

// Negative returns new Money struct from given Money using negative monetary value.
func (m *Money) Negative() *Money {
	return &Money{amount: mutate.calc.negative(m.amount), currency: m.currency}
}

// Add returns new Money struct with value representing sum of Self and Other Money;
// errors if currencies differ.
func (m *Money) Add(om *Money) (*Money, error) {
	if err := m.assertSameCurrency(om); err != nil {
		return nil, err
	}
	return &Money{amount: mutate.calc.add(m.amount, om.amount), currency: m.currency}, nil
}

// Subtract returns new Money struct with value representing difference of Self and Other Money;
// errors if currencies differ.
func (m *Money) Subtract(om *Money) (*Money, error) {
	if err := m.assertSameCurrency(om); err != nil {
		return nil, err
	}
	return &Money{amount: mutate.calc.subtract(m.amount, om.amount), currency: m.currency}, nil
}

// Multiply returns new Money struct with value representing Self multiplied value by multiplier.
func (m *Money) Multiply(mul int64) *Money {
	return &Money{amount: mutate.calc.multiply(m.amount, mul), currency: m.currency}
}

// Divide returns new Money struct with value representing Self division value by given divider.
func (m *Money) Divide(div int64) *Money {
	return &Money{amount: mutate.calc.divide(m.amount, div), currency: m.currency}
}

// Round returns new Money struct with value rounded according to the
// currency's Fraction (number of decimal digits).
func (m *Money) Round() *Money {
	return &Money{amount: mutate.calc.round(m.amount, m.currency.Fraction), currency: m.currency}
}
// Split returns slice of Money structs with split Self value in given number.
// After division leftover pennies will be distributed round-robin amongst the parties.
// This means that parties listed first will likely receive more pennies than ones that are listed later.
func (m *Money) Split(n int) ([]*Money, error) {
	if n <= 0 {
		return nil, errors.New("split must be higher than zero")
	}
	a := mutate.calc.divide(m.amount, int64(n))
	ms := make([]*Money, n)
	for i := 0; i < n; i++ {
		// NOTE(review): all parties initially share the same *Amount; this is
		// only safe if calc.add below returns a fresh Amount rather than
		// mutating in place — confirm against the calculator implementation.
		ms[i] = &Money{amount: a, currency: m.currency}
	}
	// Leftover after integer division: one penny per party until exhausted.
	l := mutate.calc.modulus(m.amount, int64(n)).val
	// Add leftovers to the first parties.
	for p := 0; l != 0; p++ {
		ms[p].amount = mutate.calc.add(ms[p].amount, &Amount{1})
		l--
	}
	return ms, nil
}
// Allocate returns slice of Money structs with split Self value in given ratios.
// It lets split money by given ratios without losing pennies and as Split operations distributes
// leftover pennies amongst the parties with round-robin principle.
func (m *Money) Allocate(rs ...int) ([]*Money, error) {
	if len(rs) == 0 {
		return nil, errors.New("no ratios specified")
	}
	// Calculate sum of ratios.
	var sum int
	for _, r := range rs {
		sum += r
	}
	var total int64
	var ms []*Money
	for _, r := range rs {
		party := &Money{
			amount:   mutate.calc.allocate(m.amount, r, sum),
			currency: m.currency,
		}
		ms = append(ms, party)
		total += party.amount.val
	}
	// Calculate leftover value and divide to first parties.
	// The leftover can be negative if allocation rounded up, hence sub's sign.
	lo := m.amount.val - total
	sub := int64(1)
	if lo < 0 {
		sub = -sub
	}
	for p := 0; lo != 0; p++ {
		ms[p].amount = mutate.calc.add(ms[p].amount, &Amount{sub})
		lo -= sub
	}
	return ms, nil
}
// Display lets represent Money struct as string in given Currency value.
func (m *Money) Display() string {
	c := m.currency.get()
	return c.Formatter().Format(m.amount.val)
}

// ToWords spells the monetary amount out in words for the currency code
// (delegates to GetCurrencyAmountWords).
func (m *Money) ToWords() string {
	c := m.currency.get()
	return GetCurrencyAmountWords(float64(m.Amount()), c.Code)
}
package usecase
import (
"context"
"strings"
"github.com/orvosi/api/entity"
)
// CreateMedicalRecord defines the business logic
// to create a medical record.
type CreateMedicalRecord interface {
	// Create creates a new medical record.
	// It returns a domain error when validation or persistence fails.
	Create(ctx context.Context, record *entity.MedicalRecord) *entity.Error
}

// InsertMedicalRecordRepository defines the business logic
// to insert a medical record into a repository.
type InsertMedicalRecordRepository interface {
	// Insert inserts the medical record into the repository.
	// This operation MUST set the inserted ID back to the medical record object.
	Insert(ctx context.Context, record *entity.MedicalRecord) *entity.Error
}
// MedicalRecordCreator is responsible for the medical record creation workflow.
type MedicalRecordCreator struct {
	repo InsertMedicalRecordRepository
}

// NewMedicalRecordCreator creates an instance of MedicalRecordCreator.
func NewMedicalRecordCreator(repo InsertMedicalRecordRepository) *MedicalRecordCreator {
	return &MedicalRecordCreator{
		repo: repo,
	}
}
// Create creates a new medical record and persist it into a repository.
// Validation (which also trims whitespace in place) happens before any
// repository call is made.
func (mrc *MedicalRecordCreator) Create(ctx context.Context, record *entity.MedicalRecord) *entity.Error {
	if err := validateMedicalRecord(record); err != nil {
		return err
	}
	return mrc.repo.Insert(ctx, record)
}
func validateMedicalRecord(record *entity.MedicalRecord) *entity.Error {
if record == nil {
return entity.ErrEmptyMedicalRecord
}
sanitizeMedicalRecord(record)
if !isMedicalRecordAttributesValid(record) {
return entity.ErrInvalidMedicalRecordAttribute
}
return nil
}
func sanitizeMedicalRecord(record *entity.MedicalRecord) {
record.Symptom = strings.TrimSpace(record.Symptom)
record.Diagnosis = strings.TrimSpace(record.Diagnosis)
record.Therapy = strings.TrimSpace(record.Therapy)
}
func isMedicalRecordAttributesValid(record *entity.MedicalRecord) bool {
return record.Symptom != "" &&
record.Diagnosis != "" &&
record.Therapy != "" &&
record.User != nil
} | usecase/medical_record_creator.go | 0.609757 | 0.473536 | medical_record_creator.go | starcoder |
package main
import (
	"log"
	"net/http"
	"time"

	chart "github.com/regorov/go-chart"
)
// drawChart renders a simple daily time-series line chart as a PNG response.
func drawChart(res http.ResponseWriter, req *http.Request) {
	/*
	   This is an example of using the `TimeSeries` to automatically coerce time.Time values into a continuous xrange.
	   Note: chart.TimeSeries implements `ValueFormatterProvider` and as a result gives the XAxis the appropriate formatter to use for the ticks.
	*/
	graph := chart.Chart{
		Series: []chart.Series{
			chart.TimeSeries{
				XValues: []time.Time{
					time.Now().AddDate(0, 0, -10),
					time.Now().AddDate(0, 0, -9),
					time.Now().AddDate(0, 0, -8),
					time.Now().AddDate(0, 0, -7),
					time.Now().AddDate(0, 0, -6),
					time.Now().AddDate(0, 0, -5),
					time.Now().AddDate(0, 0, -4),
					time.Now().AddDate(0, 0, -3),
					time.Now().AddDate(0, 0, -2),
					time.Now().AddDate(0, 0, -1),
					time.Now(),
				},
				YValues: []float64{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0},
			},
		},
	}
	res.Header().Set("Content-Type", "image/png")
	// The render error was previously discarded; headers are already written,
	// so the best we can do is log the failure.
	if err := graph.Render(chart.PNG, res); err != nil {
		log.Printf("rendering chart: %v", err)
	}
}
// drawCustomChart renders an hourly time-series chart with a custom XAxis
// tick formatter as a PNG response.
func drawCustomChart(res http.ResponseWriter, req *http.Request) {
	/*
	   This is basically the other timeseries example, except we switch to hour intervals and specify a different formatter from default for the xaxis tick labels.
	*/
	graph := chart.Chart{
		XAxis: chart.XAxis{
			ValueFormatter: chart.TimeHourValueFormatter,
		},
		Series: []chart.Series{
			chart.TimeSeries{
				XValues: []time.Time{
					time.Now().Add(-10 * time.Hour),
					time.Now().Add(-9 * time.Hour),
					time.Now().Add(-8 * time.Hour),
					time.Now().Add(-7 * time.Hour),
					time.Now().Add(-6 * time.Hour),
					time.Now().Add(-5 * time.Hour),
					time.Now().Add(-4 * time.Hour),
					time.Now().Add(-3 * time.Hour),
					time.Now().Add(-2 * time.Hour),
					time.Now().Add(-1 * time.Hour),
					time.Now(),
				},
				YValues: []float64{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0},
			},
		},
	}
	res.Header().Set("Content-Type", "image/png")
	// The render error was previously discarded; log it since headers are sent.
	if err := graph.Render(chart.PNG, res); err != nil {
		log.Printf("rendering custom chart: %v", err)
	}
}
func main() {
http.HandleFunc("/", drawChart)
http.HandleFunc("/favicon.ico", func(res http.ResponseWriter, req *http.Request) {
res.Write([]byte{})
})
http.HandleFunc("/custom", drawCustomChart)
http.ListenAndServe(":8080", nil)
} | examples/timeseries/main.go | 0.705684 | 0.430147 | main.go | starcoder |
package geojson
import "github.com/tidwall/tile38/geojson/geohash"
// MultiPolygon is a geojson object with the type "MultiPolygon"
type MultiPolygon struct {
	Coordinates [][][]Position // polygons -> rings (first is exterior) -> positions
	BBox        *BBox          // optional explicit bounding box
}
// fillMultiPolygon validates the coordinates (each polygon must have at least
// one ring, and every ring must be a linear ring) and wraps them in a
// MultiPolygon. A pre-existing err skips validation and is passed through.
func fillMultiPolygon(coordinates [][][]Position, bbox *BBox, err error) (MultiPolygon, error) {
	if err == nil {
	outer:
		for _, ps := range coordinates {
			if len(ps) == 0 {
				err = errMustBeALinearRing
				break
			}
			for _, ps := range ps {
				// the inner ps deliberately shadows the polygon slice: it is one ring
				if !isLinearRing(ps) {
					err = errMustBeALinearRing
					break outer
				}
			}
		}
	}
	return MultiPolygon{
		Coordinates: coordinates,
		BBox:        bbox,
	}, err
}
// CalculatedBBox is exterior bbox containing the object.
func (g MultiPolygon) CalculatedBBox() BBox {
	return level4CalculatedBBox(g.Coordinates, g.BBox)
}

// CalculatedPoint is a point representation of the object (the bbox center).
func (g MultiPolygon) CalculatedPoint() Position {
	return g.CalculatedBBox().center()
}

// Geohash converts the object to a geohash value at the given precision.
func (g MultiPolygon) Geohash(precision int) (string, error) {
	p := g.CalculatedPoint()
	return geohash.Encode(p.Y, p.X, precision)
}
// PositionCount returns the number of coordinates.
func (g MultiPolygon) PositionCount() int {
	return level4PositionCount(g.Coordinates, g.BBox)
}

// Weight returns the in-memory size of the object.
func (g MultiPolygon) Weight() int {
	return level4Weight(g.Coordinates, g.BBox)
}

// MarshalJSON allows the object to be encoded in json.Marshal calls.
func (g MultiPolygon) MarshalJSON() ([]byte, error) {
	return []byte(g.JSON()), nil
}

// JSON is the json representation of the object. This might not be exactly the same as the original.
func (g MultiPolygon) JSON() string {
	return level4JSON("MultiPolygon", g.Coordinates, g.BBox)
}

// String returns a string representation of the object. This might be JSON or something else.
func (g MultiPolygon) String() string {
	return g.JSON()
}

// bboxPtr exposes the raw bbox pointer for shared helpers.
func (g MultiPolygon) bboxPtr() *BBox {
	return g.BBox
}
// hasPositions reports whether the object carries any coordinate data at all:
// an explicit bbox counts, otherwise at least one ring must be non-empty.
func (g MultiPolygon) hasPositions() bool {
	if g.BBox != nil {
		return true
	}
	for _, polygon := range g.Coordinates {
		for _, ring := range polygon {
			if len(ring) > 0 {
				return true
			}
		}
	}
	return false
}
// WithinBBox detects if the object is fully contained inside a bbox.
func (g MultiPolygon) WithinBBox(bbox BBox) bool {
	// With an explicit member bbox, containment is decided on the bbox alone.
	if g.BBox != nil {
		return rectBBox(g.CalculatedBBox()).InsideRect(rectBBox(bbox))
	}
	if len(g.Coordinates) == 0 {
		return false
	}
	// Otherwise every member polygon must be inside the bbox.
	for _, p := range g.Coordinates {
		if !(Polygon{Coordinates: p}).WithinBBox(bbox) {
			return false
		}
	}
	return true
}

// IntersectsBBox detects if the object intersects a bbox.
func (g MultiPolygon) IntersectsBBox(bbox BBox) bool {
	if g.BBox != nil {
		return rectBBox(g.CalculatedBBox()).IntersectsRect(rectBBox(bbox))
	}
	// A single intersecting member polygon is sufficient.
	for _, p := range g.Coordinates {
		if (Polygon{Coordinates: p}).IntersectsBBox(bbox) {
			return true
		}
	}
	return false
}
// Within detects if the object is fully contained inside another object.
func (g MultiPolygon) Within(o Object) bool {
	return withinObjectShared(g, o,
		func(v Polygon) bool {
			// Every member polygon's exterior ring (p[0]) must lie inside v,
			// with v's holes excluded.
			if len(g.Coordinates) == 0 {
				return false
			}
			for _, p := range g.Coordinates {
				if len(p) > 0 {
					if !polyPositions(p[0]).Inside(polyExteriorHoles(v.Coordinates)) {
						return false
					}
				}
			}
			return true
		},
		func(v MultiPolygon) bool {
			// NOTE(review): this requires each member polygon to be inside
			// *every* polygon of v, not at least one — confirm the intended
			// multipolygon-within-multipolygon semantics upstream.
			if len(g.Coordinates) == 0 {
				return false
			}
			for _, p := range g.Coordinates {
				if len(p) > 0 {
					for _, c := range v.Coordinates {
						if !polyPositions(p[0]).Inside(polyExteriorHoles(c)) {
							return false
						}
					}
				}
			}
			return true
		},
	)
}
// Intersects detects if the object intersects another object.
func (g MultiPolygon) Intersects(o Object) bool {
	return intersectsObjectShared(g, o,
		func(v Polygon) bool {
			// Any member polygon's exterior ring intersecting v suffices.
			if len(g.Coordinates) == 0 {
				return false
			}
			for _, p := range g.Coordinates {
				if len(p) > 0 {
					if polyPositions(p[0]).Intersects(polyExteriorHoles(v.Coordinates)) {
						return true
					}
				}
			}
			return false
		},
		func(v MultiPolygon) bool {
			// Any pair of member polygons (one from each side) intersecting suffices.
			if len(g.Coordinates) == 0 {
				return false
			}
			for _, p := range g.Coordinates {
				if len(p) > 0 {
					for _, c := range v.Coordinates {
						if polyPositions(p[0]).Intersects(polyExteriorHoles(c)) {
							return true
						}
					}
				}
			}
			return false
		},
	)
}
// Nearby detects if the object is nearby a position (within meters).
func (g MultiPolygon) Nearby(center Position, meters float64) bool {
	return nearbyObjectShared(g, center.X, center.Y, meters)
}

// IsBBoxDefined returns true if the object has a defined bbox.
func (g MultiPolygon) IsBBoxDefined() bool {
	return g.BBox != nil
}

// IsGeometry return true if the object is a geojson geometry object. false if it something else.
func (g MultiPolygon) IsGeometry() bool {
	return true
}
package main
import "math"
func simpleGreyscale(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
col := float64(255*iterations) / float64(iterationCap)
return col, col, col, 255
}
func simpleGreyscaleShip(iterations, iterationCap int, z complex) (R, G, B, A float64) {
col := float64(255*iterations) / float64(iterationCap)
return col, col, col, 255
}
func whackyGrayscale(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
if iterations%2 == 0 {
return 0, 0, 0, 255
}
return 255, 255, 255, 255
}
func whackyGrayscaleShip(iterations, iterationCap int, z complex) (R, G, B, A float64) {
if iterations%2 == 0 {
return 0, 0, 0, 255
}
return 255, 255, 255, 255
}
// zGreyscale shades by the magnitude of the final z value rather than the
// iteration count: |z| mod 2 is mapped onto the 0-255 grey range.
func zGreyscale(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
	col := 255.0 * (math.Mod(z.abs(), 2.0) / 2.0)
	return col, col, col, 255
}
// smoothGreyscale shades by the normalized (fractional) iteration count so
// that adjacent bands blend smoothly; the grey ramp is reversed on alternating
// integer bands so the shade ping-pongs instead of jumping at band edges.
func smoothGreyscale(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
	// Two extra z = z*z + c steps before applying the log-log smoothing term.
	z = z.mul(z).add(c)
	iterations++
	z = z.mul(z).add(c)
	iterations++
	i := float64(iterations)
	if iterations < iterationCap {
		// Normalized iteration count: i - log2(log|z|).
		i = i - (math.Log(math.Log(z.abs())) / math.Log(2))
	}
	if int(math.Floor(i))%2 == 0 {
		col := 255 * (math.Mod(i, 1))
		return col, col, col, 255
	}
	col := 255 - (255 * math.Mod(i, 1))
	return col, col, col, 255
}
// smoothColour maps the normalized iteration count onto a smooth three-phase
// colour cycle (band mod 3 picks the phase; the fractional part blends within it).
func smoothColour(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
	// Two extra z = z*z + c steps before applying the log-log smoothing term.
	z = z.mul(z).add(c)
	iterations++
	z = z.mul(z).add(c)
	iterations++
	i := float64(iterations)
	if iterations < iterationCap {
		i = i - (math.Log(math.Log(z.abs())) / math.Log(2))
	}
	// nu is the fractional position inside the current colour band.
	nu := math.Mod(i, 1)
	switch {
	case int(math.Floor(i))%3 == 0:
		return 255 * nu, 255 * (1 - nu), 255, 255
	case int(math.Floor(i))%3 == 1:
		return 255, 255 * nu, 255 * (1 - nu), 255
	case int(math.Floor(i))%3 == 2:
		return 255 * (1 - nu), 255, 255 * nu, 255
	}
	return 0, 0, 0, 255
}
// smoothColour2 is smoothColour with an alternative three-phase palette
// (orange/green/blue rotation instead of cyan/red/green).
func smoothColour2(iterations, iterationCap int, z, c complex) (R, G, B, A float64) {
	// Two extra z = z*z + c steps before applying the log-log smoothing term.
	z = z.mul(z).add(c)
	iterations++
	z = z.mul(z).add(c)
	iterations++
	i := float64(iterations)
	if iterations < iterationCap {
		i = i - (math.Log(math.Log(z.abs())) / math.Log(2))
	}
	// nu is the fractional position inside the current colour band.
	nu := math.Mod(i, 1)
	switch {
	case int(math.Floor(i))%3 == 0:
		return 255 * (1 - nu), 255 * nu, 0, 255
	case int(math.Floor(i))%3 == 1:
		return 0, 255 * (1 - nu), 255 * nu, 255
	case int(math.Floor(i))%3 == 2:
		return 255 * nu, 0, 255 * (1 - nu), 255
	}
	return 0, 0, 0, 255
}
package memo
import (
plannercore "github.com/pingcap/tidb/planner/core"
)
// Operand is the node of a pattern tree, it represents a logical expression operator.
// Different from logical plan operator which holds the full information about an expression
// operator, Operand only stores the type information.
// An Operand may correspond to a concrete logical plan operator, or it can have special meaning,
// e.g, a placeholder for any logical plan operator.
type Operand int

const (
	// OperandAny is a placeholder for any Operand.
	OperandAny Operand = iota
	// OperandJoin for LogicalJoin.
	OperandJoin
	// OperandAggregation for LogicalAggregation.
	OperandAggregation
	// OperandProjection for LogicalProjection.
	OperandProjection
	// OperandSelection for LogicalSelection.
	OperandSelection
	// OperandApply for LogicalApply.
	OperandApply
	// OperandMaxOneRow for LogicalMaxOneRow.
	OperandMaxOneRow
	// OperandTableDual for LogicalTableDual.
	OperandTableDual
	// OperandDataSource for DataSource.
	OperandDataSource
	// OperandUnionScan for LogicalUnionScan.
	OperandUnionScan
	// OperandUnionAll for LogicalUnionAll.
	OperandUnionAll
	// OperandSort for LogicalSort.
	OperandSort
	// OperandTopN for LogicalTopN.
	OperandTopN
	// OperandLock for LogicalLock.
	OperandLock
	// OperandLimit for LogicalLimit.
	OperandLimit
	// OperandTableGather for TableGather.
	OperandTableGather
	// OperandTableScan for TableScan.
	OperandTableScan
	// OperandUnsupported is the upper bound of the Operands defined so far.
	OperandUnsupported
)
// GetOperand maps logical plan operator to Operand.
// Unrecognized plan types map to OperandUnsupported.
func GetOperand(p plannercore.LogicalPlan) Operand {
	switch p.(type) {
	case *plannercore.LogicalJoin:
		return OperandJoin
	case *plannercore.LogicalAggregation:
		return OperandAggregation
	case *plannercore.LogicalProjection:
		return OperandProjection
	case *plannercore.LogicalSelection:
		return OperandSelection
	case *plannercore.LogicalApply:
		return OperandApply
	case *plannercore.LogicalMaxOneRow:
		return OperandMaxOneRow
	case *plannercore.LogicalTableDual:
		return OperandTableDual
	case *plannercore.DataSource:
		return OperandDataSource
	case *plannercore.LogicalUnionScan:
		return OperandUnionScan
	case *plannercore.LogicalUnionAll:
		return OperandUnionAll
	case *plannercore.LogicalSort:
		return OperandSort
	case *plannercore.LogicalTopN:
		return OperandTopN
	case *plannercore.LogicalLock:
		return OperandLock
	case *plannercore.LogicalLimit:
		return OperandLimit
	case *plannercore.TableGather:
		return OperandTableGather
	case *plannercore.TableScan:
		return OperandTableScan
	default:
		return OperandUnsupported
	}
}
// Match checks if current Operand matches specified one: OperandAny matches
// anything, and concrete operands match only themselves.
func (o Operand) Match(t Operand) bool {
	return o == OperandAny || t == OperandAny || o == t
}
// Pattern defines the Match pattern for a rule.
// It describes a piece of logical expression.
// It's a tree-like structure and each node in the tree is an Operand.
type Pattern struct {
	Operand
	Children []*Pattern
}

// NewPattern creates a pattern node according to the Operand.
func NewPattern(operand Operand) *Pattern {
	return &Pattern{Operand: operand}
}

// SetChildren sets the Children information for a pattern node.
func (p *Pattern) SetChildren(children ...*Pattern) {
	p.Children = children
}
// BuildPattern builds a Pattern from Operand and child Patterns.
// Used in GetPattern() of Transformation interface to generate a Pattern.
func BuildPattern(operand Operand, children ...*Pattern) *Pattern {
p := &Pattern{Operand: operand}
p.Children = children
return p
} | planner/memo/pattern.go | 0.698329 | 0.675576 | pattern.go | starcoder |
package processors
import (
"regexp"
"github.com/golangci/golangci-lint/pkg/result"
)
// replacePattern pairs a regexp source string with its replacement template.
type replacePattern struct {
	re   string
	repl string
}

// replaceRegexp is a replacePattern whose regexp has been compiled.
type replaceRegexp struct {
	re   *regexp.Regexp
	repl string
}

// replacePatterns maps well-known linter message shapes onto markdown-friendly
// versions with identifiers wrapped in backticks. Grouped by source linter.
var replacePatterns = []replacePattern{
	// unparam
	{`^(\S+) - (\S+) is unused$`, "`${1}` - `${2}` is unused"},
	{`^(\S+) - (\S+) always receives (\S+) \((.*)\)$`, "`${1}` - `${2}` always receives `${3}` (`${4}`)"},
	{`^(\S+) - (\S+) always receives (.*)$`, "`${1}` - `${2}` always receives `${3}`"},
	{`^(\S+) - result (\S+) is always (\S+)`, "`${1}` - result `${2}` is always `${3}`"},
	// interfacer
	{`^(\S+) can be (\S+)$`, "`${1}` can be `${2}`"},
	// govet
	{`^printf: (\S+) arg list ends with redundant newline$`, "printf: `${1}` arg list ends with redundant newline"},
	{`^composites: (\S+) composite literal uses unkeyed fields$`, "composites: `${1}` composite literal uses unkeyed fields"},
	// gosec
	{`^(\S+): Blacklisted import (\S+): weak cryptographic primitive$`,
		"${1}: Blacklisted import `${2}`: weak cryptographic primitive"},
	{`^TLS InsecureSkipVerify set true.$`, "TLS `InsecureSkipVerify` set true."},
	// gosimple
	{`should replace loop with (.*)$`, "should replace loop with `${1}`"},
	{`should use a simple channel send/receive instead of select with a single case`,
		"should use a simple channel send/receive instead of `select` with a single case"},
	{`should omit comparison to bool constant, can be simplified to (.+)$`,
		"should omit comparison to bool constant, can be simplified to `${1}`"},
	{`should write (.+) instead of (.+)$`, "should write `${1}` instead of `${2}`"},
	{`redundant return statement$`, "redundant `return` statement"},
	{`should replace this if statement with an unconditional strings.TrimPrefix`,
		"should replace this `if` statement with an unconditional `strings.TrimPrefix`"},
	// staticcheck
	{`this value of (\S+) is never used$`, "this value of `${1}` is never used"},
	{`should use time.Since instead of time.Now\(\).Sub$`,
		"should use `time.Since` instead of `time.Now().Sub`"},
	{`should check returned error before deferring response.Close\(\)$`,
		"should check returned error before deferring `response.Close()`"},
	{`no value of type uint is less than 0$`, "no value of type `uint` is less than `0`"},
	// unused
	{`(func|const|field|type|var) (\S+) is unused$`, "${1} `${2}` is unused"},
	// typecheck
	{`^unknown field (\S+) in struct literal$`, "unknown field `${1}` in struct literal"},
	{`^invalid operation: (\S+) \(variable of type (\S+)\) has no field or method (\S+)$`,
		"invalid operation: `${1}` (variable of type `${2}`) has no field or method `${3}`"},
	{`^undeclared name: (\S+)$`, "undeclared name: `${1}`"},
	{`^cannot use addr \(variable of type (\S+)\) as (\S+) value in argument to (\S+)$`,
		"cannot use addr (variable of type `${1}`) as `${2}` value in argument to `${3}`"},
	{`^other declaration of (\S+)$`, "other declaration of `${1}`"},
	{`^(\S+) redeclared in this block$`, "`${1}` redeclared in this block"},
	// golint
	{`^exported (type|method|function|var|const) (\S+) should have comment or be unexported$`,
		"exported ${1} `${2}` should have comment or be unexported"},
	{`^comment on exported (type|method|function|var|const) (\S+) should be of the form "(\S+) ..."$`,
		"comment on exported ${1} `${2}` should be of the form `${3} ...`"},
	{`^should replace (.+) with (.+)$`, "should replace `${1}` with `${2}`"},
	{`^if block ends with a return statement, so drop this else and outdent its block$`,
		"`if` block ends with a `return` statement, so drop this `else` and outdent its block"},
	{`^(struct field|var|range var|const|type|(?:func|method|interface method) (?:parameter|result)) (\S+) should be (\S+)$`,
		"${1} `${2}` should be `${3}`"},
	{`^don't use underscores in Go names; var (\S+) should be (\S+)$`,
		"don't use underscores in Go names; var `${1}` should be `${2}`"},
}
// IdentifierMarker is an issue processor that rewrites known linter messages
// so identifiers appear in markdown backticks.
type IdentifierMarker struct {
	replaceRegexps []replaceRegexp
}
// NewIdentifierMarker builds an IdentifierMarker with every replacement
// pattern compiled up front, so Process does no regexp compilation per issue.
func NewIdentifierMarker() *IdentifierMarker {
	// Preallocate: the pattern table is fixed, so the capacity is known exactly.
	replaceRegexps := make([]replaceRegexp, 0, len(replacePatterns))
	for _, p := range replacePatterns {
		replaceRegexps = append(replaceRegexps, replaceRegexp{
			re:   regexp.MustCompile(p.re),
			repl: p.repl,
		})
	}
	return &IdentifierMarker{
		replaceRegexps: replaceRegexps,
	}
}
// Process rewrites each issue's text so identifiers referenced in known
// linter messages are wrapped in backticks. Issues are copied; the input
// slice elements are not mutated.
func (im IdentifierMarker) Process(issues []result.Issue) ([]result.Issue, error) {
	return transformIssues(issues, func(i *result.Issue) *result.Issue {
		iCopy := *i
		iCopy.Text = im.markIdentifiers(iCopy.Text)
		return &iCopy
	}), nil
}

// markIdentifiers applies the first pattern that changes the message;
// at most one pattern ever rewrites a given string.
func (im IdentifierMarker) markIdentifiers(s string) string {
	for _, rr := range im.replaceRegexps {
		rs := rr.re.ReplaceAllString(s, rr.repl)
		if rs != s {
			return rs
		}
	}
	return s
}
// Name identifies this processor in the pipeline.
func (im IdentifierMarker) Name() string {
	return "identifier_marker"
}

// Finish implements the processor interface; there is nothing to clean up.
func (im IdentifierMarker) Finish() {}
package trie
import (
"fmt"
"sort"
"go.skia.org/infra/go/util"
)
// Trie is a struct used for efficient searching on sets of strings.
// Key sets are sorted on entry, so lookups are order-insensitive.
type Trie struct {
	root *trieNode
}

// New returns a Trie instance.
func New() *Trie {
	return &Trie{
		root: newTrieNode(),
	}
}
// sorted returns a lexicographically sorted copy of s, leaving s untouched.
func sorted(s []string) []string {
	cpy := append([]string(nil), s...)
	sort.Strings(cpy)
	return cpy
}
// Insert inserts the data into the trie with the given string keys.
// Keys are sorted first, so key order at insertion time does not matter.
func (t *Trie) Insert(strs []string, data interface{}) {
	t.root.Insert(sorted(strs), data)
}

// Delete removes the data from the trie; it must have been inserted under
// the same key set. Unknown keys or data are ignored.
func (t *Trie) Delete(strs []string, data interface{}) {
	t.root.Delete(sorted(strs), data)
}

// Search returns all inserted data which exactly matches the given string keys.
func (t *Trie) Search(strs []string) []interface{} {
	return t.root.Search(sorted(strs))
}
// searchContext accumulates results during a subset search, tracking the
// total count so the flattened result slice can be allocated exactly once.
type searchContext struct {
	count int
	data  [][]interface{}
}

// SearchSubset returns all inserted data which matches a subset of the given
// string keys.
func (t *Trie) SearchSubset(strs []string) []interface{} {
	// Build a key set for O(1) membership checks during traversal.
	keys := make(map[string]bool, len(strs))
	for _, k := range strs {
		keys[k] = true
	}
	ctx := searchContext{
		count: 0,
		data:  [][]interface{}{},
	}
	t.root.SearchSubset(keys, &ctx)
	// Flatten the collected per-node slices into one pre-sized result.
	rv := make([]interface{}, ctx.count)
	idx := 0
	for _, d := range ctx.data {
		copy(rv[idx:idx+len(d)], d)
		idx += len(d)
	}
	return rv
}
// String returns a string representation of the Trie.
func (t *Trie) String() string {
	return fmt.Sprintf("Trie(%s)", t.root.String(0))
}

// Len returns the total number of data items stored in the Trie.
func (t *Trie) Len() int {
	return t.root.Len()
}
// trieNode is a single node of the Trie: children are keyed by the next
// string in the (sorted) key sequence, data holds values inserted at
// exactly this key path.
type trieNode struct {
	children map[string]*trieNode
	data     []interface{}
}

// newTrieNode returns an empty, ready-to-use node.
func newTrieNode() *trieNode {
	return &trieNode{
		children: map[string]*trieNode{},
		data:     []interface{}{},
	}
}
// Insert walks (and creates as needed) the child chain for strs and appends
// data at the terminal node.
func (n *trieNode) Insert(strs []string, data interface{}) {
	cur := n
	for _, key := range strs {
		child, ok := cur.children[key]
		if !ok {
			child = newTrieNode()
			cur.children[key] = child
		}
		cur = child
	}
	cur.data = append(cur.data, data)
}
// Delete removes one stored instance of data registered under the exact key
// sequence strs. Missing keys or missing data are silently ignored.
func (n *trieNode) Delete(strs []string, data interface{}) {
	if len(strs) == 0 {
		// Scan backward and stop at the first hit: the original forward scan
		// always walked the whole slice just to find the last matching index.
		for i := len(n.data) - 1; i >= 0; i-- {
			if n.data[i] == data {
				n.data = append(n.data[:i], n.data[i+1:]...)
				return
			}
		}
	} else if child, ok := n.children[strs[0]]; ok {
		child.Delete(strs[1:], data)
	}
}
// Search follows the child chain for strs and returns the data stored at the
// terminal node; a broken chain yields an empty (non-nil) slice.
func (n *trieNode) Search(strs []string) []interface{} {
	cur := n
	for _, key := range strs {
		child, ok := cur.children[key]
		if !ok {
			return []interface{}{}
		}
		cur = child
	}
	return cur.data
}
// SearchSubset gathers data from this node and, recursively, from every child
// whose key is a member of strs.
func (n *trieNode) SearchSubset(strs map[string]bool, ctx *searchContext) {
	ctx.count += len(n.data)
	ctx.data = append(ctx.data, n.data)
	for k, c := range n.children {
		if strs[k] {
			c.SearchSubset(strs, ctx)
		}
	}
}

// String renders the subtree rooted at this node at the given indent depth;
// child keys are sorted for deterministic output.
func (n *trieNode) String(indent int) string {
	rv := fmt.Sprintf("Node(%v, {", n.data)
	if len(n.children) == 0 {
		return rv + "})"
	}
	rv += "\n"
	childKeys := make([]string, 0, len(n.children))
	for k := range n.children {
		childKeys = append(childKeys, k)
	}
	sort.Strings(childKeys)
	for _, k := range childKeys {
		// util.RepeatJoin presumably repeats the indent unit indent+1 times — confirm its signature.
		rv += fmt.Sprintf("%s\"%s\": %s,\n", util.RepeatJoin(" ", "", indent+1), k, n.children[k].String(indent+1))
	}
	rv += fmt.Sprintf("%s})", util.RepeatJoin(" ", "", indent))
	return rv
}

// Len counts the data items stored in this subtree.
func (n *trieNode) Len() int {
	rv := len(n.data)
	for _, child := range n.children {
		rv += child.Len()
	}
	return rv
}
package utils
import (
"reflect"
"strconv"
"strings"
)
const (
	StringType = "string"
	NumberType = "number"
	BoolType   = "bool"
	Unknown    = "unknown"
)

// TypeOf reports the coarse category ("string", "number", "bool" or
// "unknown") of obj's dynamic type; nil yields the empty string.
func TypeOf(obj interface{}) string {
	if obj == nil {
		return ""
	}
	return typeOf(reflect.TypeOf(obj))
}

// typeOf classifies a reflect.Type, descending into array/slice element types.
func typeOf(t reflect.Type) string {
	switch t.Kind() {
	case reflect.String:
		return StringType
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Float32, reflect.Float64:
		return NumberType
	case reflect.Bool:
		return BoolType
	case reflect.Array, reflect.Slice:
		// Byte slices/arrays are treated as strings; otherwise classify by element.
		if elem := t.Elem(); elem.Kind() != reflect.Uint8 {
			return typeOf(elem)
		}
		return StringType
	default:
		return Unknown
	}
}
// ConvertDataType converts obj to the requested coarse data type
// (StringType, NumberType or BoolType). Unsupported combinations, or an
// obj whose type cannot be classified, yield (nil, nil).
func ConvertDataType(obj interface{}, dataType string) (interface{}, error) {
	switch TypeOf(obj) {
	case StringType:
		value, _ := ConvertString(obj)
		switch dataType {
		case StringType:
			return value, nil
		case NumberType:
			// bitSize 64: parse with full float64 precision (the previous
			// bitSize of 10 was treated as 64 by strconv anyway).
			return strconv.ParseFloat(value, 64)
		case BoolType:
			// BUG FIX: the parsed bool was previously discarded (no
			// return), so string->bool conversions fell through to
			// (nil, nil).
			return strconv.ParseBool(value)
		}
	case NumberType:
		value, _ := ConvertFloat64(obj)
		switch dataType {
		case StringType:
			return strconv.FormatFloat(value, 'f', -1, 64), nil
		case NumberType:
			return value, nil
		case BoolType:
			// Zero maps to false, every other value to true.
			return value != 0, nil
		}
	case BoolType:
		value, _ := ConvertBool(obj)
		switch dataType {
		case StringType:
			return strconv.FormatBool(value), nil
		case NumberType:
			// NOTE(review): returns an int (0/1) rather than float64;
			// kept for backward compatibility with existing callers.
			if value {
				return 1, nil
			}
			return 0, nil
		case BoolType:
			return value, nil
		}
	}
	return nil, nil
}
// ConvertStructToMap flattens the fields of a struct value into a map
// keyed by the lower-cased field name.
// NOTE(review): reflect panics if obj is not a struct or if a field is
// unexported — callers are expected to pass plain exported structs.
func ConvertStructToMap(obj interface{}) map[string]interface{} {
	typ := reflect.TypeOf(obj)
	val := reflect.ValueOf(obj)
	result := make(map[string]interface{}, typ.NumField())
	for i := 0; i < typ.NumField(); i++ {
		key := strings.ToLower(typ.Field(i).Name)
		result[key] = val.Field(i).Interface()
	}
	return result
}
// ConvertArrToMap builds a membership set from a slice of strings: every
// element of list maps to true.
func ConvertArrToMap(list []string) map[string]bool {
	set := make(map[string]bool, len(list))
	for _, s := range list {
		set[s] = true
	}
	return set
}
// ConvertStringArrToInterfaceArr boxes each string of list into an
// interface{} slice. A nil or empty input yields nil, matching the
// original append-based behavior.
func ConvertStringArrToInterfaceArr(list []string) []interface{} {
	if len(list) == 0 {
		return nil
	}
	// Pre-size the result instead of growing it with repeated appends.
	arr := make([]interface{}, len(list))
	for i, item := range list {
		arr[i] = item
	}
	return arr
}
// ConvertString extracts a string from obj. It accepts string and []byte
// values; the second result reports whether the conversion succeeded.
func ConvertString(obj interface{}) (string, bool) {
	if s, ok := obj.(string); ok {
		return s, true
	}
	if b, ok := obj.([]byte); ok {
		return string(b), true
	}
	return "", false
}

// ConvertStringArr converts a []interface{} into a []string, silently
// dropping elements that are not string-convertible. The second result
// reports whether obj was a []interface{} at all.
func ConvertStringArr(obj interface{}) ([]string, bool) {
	arr, ok := obj.([]interface{})
	if !ok {
		return nil, false
	}
	var result []string
	for _, item := range arr {
		if s, converted := ConvertString(item); converted {
			result = append(result, s)
		}
	}
	return result, true
}
// ConvertBool converts obj to a bool. Booleans pass through; numeric
// values map to false when zero and true otherwise. The second result
// reports whether obj had a convertible type.
//
// BUG FIX: the numeric kinds were previously grouped into a single type
// switch case, which leaves val typed as interface{}; "val != 0" then
// compared dynamic types, so any non-int numeric zero (int8(0),
// float64(0), ...) wrongly converted to true. Each kind now gets its own
// case so the comparison is a real numeric comparison.
func ConvertBool(obj interface{}) (bool, bool) {
	switch val := obj.(type) {
	case bool:
		return val, true
	case int:
		return val != 0, true
	case int8:
		return val != 0, true
	case int16:
		return val != 0, true
	case int32:
		return val != 0, true
	case int64:
		return val != 0, true
	case uint:
		return val != 0, true
	case uint8:
		return val != 0, true
	case uint16:
		return val != 0, true
	case uint32:
		return val != 0, true
	case uint64:
		return val != 0, true
	case float32:
		return val != 0, true
	case float64:
		return val != 0, true
	}
	return false, false
}
// ConvertInt64 converts any built-in numeric value to int64. Floats are
// truncated toward zero and large unsigned values wrap, exactly as a
// direct Go conversion would. The second result reports whether obj was
// numeric.
func ConvertInt64(obj interface{}) (int64, bool) {
	switch obj.(type) {
	case int, int8, int16, int32, int64:
		return reflect.ValueOf(obj).Int(), true
	case uint, uint8, uint16, uint32, uint64:
		return int64(reflect.ValueOf(obj).Uint()), true
	case float32, float64:
		return int64(reflect.ValueOf(obj).Float()), true
	}
	return 0, false
}
// ConvertUint64 converts any built-in numeric value to uint64. Negative
// signed values wrap and floats are truncated, exactly as a direct Go
// conversion would. The second result reports whether obj was numeric.
func ConvertUint64(obj interface{}) (uint64, bool) {
	switch obj.(type) {
	case int, int8, int16, int32, int64:
		return uint64(reflect.ValueOf(obj).Int()), true
	case uint, uint8, uint16, uint32, uint64:
		return reflect.ValueOf(obj).Uint(), true
	case float32, float64:
		return uint64(reflect.ValueOf(obj).Float()), true
	}
	return 0, false
}
// ConvertFloat64 converts any built-in numeric value to float64. The
// second result reports whether obj was numeric; non-numeric values
// yield (0, false). Note that very large int64/uint64 values may lose
// precision in the float64 conversion.
func ConvertFloat64(obj interface{}) (float64, bool) {
	switch val := obj.(type) {
	case int:
		return float64(val), true
	case int8:
		return float64(val), true
	case int16:
		return float64(val), true
	case int32:
		return float64(val), true
	case int64:
		return float64(val), true
	case uint:
		return float64(val), true
	case uint8:
		return float64(val), true
	case uint16:
		return float64(val), true
	case uint32:
		return float64(val), true
	case uint64:
		return float64(val), true
	case float32:
		return float64(val), true
	case float64:
		return float64(val), true
	}
	return 0, false
} | modules/monitor/utils/convert.go | 0.519034 | 0.437884 | convert.go | starcoder |
package iso20022
// Chain of parties involved in the settlement of a transaction, including receipts and deliveries, book transfers, treasury deals, or other activities, resulting in the movement of a security or amount of money from one account to another.
// The struct tags give the ISO 20022 XML element names; parties tagged ",omitempty" are optional in the message.
type DeliveringPartiesAndAccount8 struct {
	// Party that sells goods or services, or a financial instrument.
	DelivererDetails *InvestmentAccount24 `xml:"DlvrrDtls,omitempty"`
	// Party that acts on behalf of the seller of securities when the seller does not have a direct relationship with the delivering agent.
	DeliverersCustodianDetails *PartyIdentificationAndAccount5 `xml:"DlvrrsCtdnDtls,omitempty"`
	// Party that the deliverer's custodian uses to effect the delivery of a security, when the deliverer's custodian does not have a direct relationship with the delivering agent.
	DeliverersIntermediaryDetails *PartyIdentificationAndAccount5 `xml:"DlvrrsIntrmyDtls,omitempty"`
	// Party that delivers securities to the receiving agent at the place of settlement, eg, central securities depository.
	DeliveringAgentDetails *PartyIdentificationAndAccount4 `xml:"DlvrgAgtDtls"`
	// Identifies the securities settlement system to be used.
	SecuritiesSettlementSystem *Max35Text `xml:"SctiesSttlmSys,omitempty"`
	// Place where settlement of the securities takes place.
	PlaceOfSettlementDetails *PartyIdentification21 `xml:"PlcOfSttlmDtls,omitempty"`
}
// AddDelivererDetails initializes the DelivererDetails block and returns
// it so the caller can populate it.
func (d *DeliveringPartiesAndAccount8) AddDelivererDetails() *InvestmentAccount24 {
	details := &InvestmentAccount24{}
	d.DelivererDetails = details
	return details
}
// AddDeliverersCustodianDetails initializes the DeliverersCustodianDetails
// block and returns it so the caller can populate it.
func (d *DeliveringPartiesAndAccount8) AddDeliverersCustodianDetails() *PartyIdentificationAndAccount5 {
	details := &PartyIdentificationAndAccount5{}
	d.DeliverersCustodianDetails = details
	return details
}
// AddDeliverersIntermediaryDetails initializes the
// DeliverersIntermediaryDetails block and returns it so the caller can
// populate it.
func (d *DeliveringPartiesAndAccount8) AddDeliverersIntermediaryDetails() *PartyIdentificationAndAccount5 {
	details := &PartyIdentificationAndAccount5{}
	d.DeliverersIntermediaryDetails = details
	return details
}
// AddDeliveringAgentDetails initializes the DeliveringAgentDetails block
// and returns it so the caller can populate it.
func (d *DeliveringPartiesAndAccount8) AddDeliveringAgentDetails() *PartyIdentificationAndAccount4 {
	details := &PartyIdentificationAndAccount4{}
	d.DeliveringAgentDetails = details
	return details
}
// SetSecuritiesSettlementSystem records the securities settlement system
// identifier, stored as a Max35Text.
func (d *DeliveringPartiesAndAccount8) SetSecuritiesSettlementSystem(value string) {
	text := Max35Text(value)
	d.SecuritiesSettlementSystem = &text
}
// AddPlaceOfSettlementDetails initializes the PlaceOfSettlementDetails block and returns it so the caller can populate it.
func (d *DeliveringPartiesAndAccount8) AddPlaceOfSettlementDetails() *PartyIdentification21 {
	d.PlaceOfSettlementDetails = new(PartyIdentification21)
	return d.PlaceOfSettlementDetails
} | DeliveringPartiesAndAccount8.go | 0.624523 | 0.462898 | DeliveringPartiesAndAccount8.go | starcoder |
package funcs
// Func is a general function, f:T -> R.
type Func[T, R any] func(T) R

// Predict is a function (f:T -> bool) reporting whether a value of T
// satisfies a predicate.
type Predict[T any] func(T) bool

// Unit is a function (f:empty -> R) producing an R from no input.
type Unit[R any] func() R

// Condition is a function (f:empty -> bool) producing true or false from
// no input.
type Condition func() bool

// Partial is a function (f:T -> (R, bool)) converting a partial value in
// T to R. Mirroring Scala's PartialFunction, not every value in the
// domain T needs to be applicable; the boolean result reports whether
// the argument was inside the function's domain.
type Partial[T, R any] func(T) (R, bool)

// ApplyOrElse applies p to x when x lies in p's domain, and otherwise
// returns the default z.
func (p Partial[T, R]) ApplyOrElse(x T, z R) R {
	result, defined := p(x)
	if !defined {
		return z
	}
	return result
}

// Try is a function (f:T -> (R, error)) transforming T to R, possibly
// returning an error.
type Try[T, R any] func(T) (r R, err error)

// Transform is a function (f:(T, bool) -> R) producing an R even when
// the given value v is not fine.
type Transform[T, R any] func(v T, ok bool) R

// Recover is a function (f:(T, error) -> R) recovering to an R even when
// producing v failed.
type Recover[T, R any] func(v T, err error) R

// Self always returns the given value v (the identity function).
func Self[T any](v T) T {
	return v
}

// Id returns a constant function that always yields v.
func Id[T any](v T) Unit[T] {
	return func() T {
		return v
	}
}

// -----------------------------------------------------------------------------
// TODO: refactor following functions to methods when go 1.19 releases.

// AndThen returns a new function (f:T -> R) that applies g to the result
// of f.
func AndThen[T, U, R any](f Func[T, U], g Func[U, R]) Func[T, R] {
	return func(v T) R {
		intermediate := f(v)
		return g(intermediate)
	}
}

// UnitAndThen returns a new function (f:empty -> R) that applies g to
// the result of f.
func UnitAndThen[T, R any](f Unit[T], g Func[T, R]) Unit[R] {
	return func() R {
		intermediate := f()
		return g(intermediate)
	}
}

// Compose returns a new function (f:T -> R) that applies f to the result
// of g.
func Compose[T, U, R any](f Func[U, R], g Func[T, U]) Func[T, R] {
	return func(v T) R {
		intermediate := g(v)
		return f(intermediate)
	}
}

// ComposeUnit returns a new function (f:empty -> R) that applies f to
// the result of g.
func ComposeUnit[T, R any](f Func[T, R], g Unit[T]) Unit[R] {
	return func() R {
		intermediate := g()
		return f(intermediate)
	}
}

// PartialTransform returns a new function (f:T -> R) that feeds the
// (value, ok) pair produced by f1 into the Transform f2.
func PartialTransform[T, U, R any](f1 Partial[T, U], f2 Transform[U, R]) Func[T, R] {
	return func(v T) R {
		value, ok := f1(v)
		return f2(value, ok)
	}
}

// TryRecover returns a new function (f:T -> R) that feeds the
// (value, error) pair produced by f1 into the Recover f2.
func TryRecover[T, U, R any](f1 Try[T, U], f2 Recover[U, R]) Func[T, R] {
	return func(v T) R {
		value, err := f1(v)
		return f2(value, err)
	}
}

// Cond is a ternary: it returns succ when ok is true and fail otherwise.
func Cond[T any](ok bool, succ T, fail T) T {
	if !ok {
		return fail
	}
	return succ
}
/*
func ConfFunc[T any](p Condition, succ Unit[T], fail Unit[T]) T {
if p() {
return succ()
}
return fail()
}
*/ | funcs/funcs.go | 0.681091 | 0.826677 | funcs.go | starcoder |
package version220
// https://raw.githubusercontent.com/devfile/api/main/schemas/latest/devfile.json
const JsonSchema220 = `{
"description": "Devfile describes the structure of a cloud-native devworkspace and development environment.",
"type": "object",
"title": "Devfile schema - Version 2.2.0-alpha",
"required": [
"schemaVersion"
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"commands": {
"description": "Predefined, ready-to-use, devworkspace-related commands",
"type": "array",
"items": {
"type": "object",
"required": [
"id"
],
"oneOf": [
{
"required": [
"exec"
]
},
{
"required": [
"apply"
]
},
{
"required": [
"composite"
]
}
],
"properties": {
"apply": {
"description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default.",
"type": "object",
"required": [
"component"
],
"properties": {
"component": {
"description": "Describes component that will be applied",
"type": "string"
},
"group": {
"description": "Defines the group this command is part of",
"type": "object",
"required": [
"kind"
],
"properties": {
"isDefault": {
"description": "Identifies the default command for a given group kind",
"type": "boolean"
},
"kind": {
"description": "Kind of group the command is part of",
"type": "string",
"enum": [
"build",
"run",
"test",
"debug"
]
}
},
"additionalProperties": false
},
"label": {
"description": "Optional label that provides a label for this command to be used in Editor UI menus for example",
"type": "string"
}
},
"additionalProperties": false
},
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"composite": {
"description": "Composite command that allows executing several sub-commands either sequentially or concurrently",
"type": "object",
"properties": {
"commands": {
"description": "The commands that comprise this composite command",
"type": "array",
"items": {
"type": "string"
}
},
"group": {
"description": "Defines the group this command is part of",
"type": "object",
"required": [
"kind"
],
"properties": {
"isDefault": {
"description": "Identifies the default command for a given group kind",
"type": "boolean"
},
"kind": {
"description": "Kind of group the command is part of",
"type": "string",
"enum": [
"build",
"run",
"test",
"debug"
]
}
},
"additionalProperties": false
},
"label": {
"description": "Optional label that provides a label for this command to be used in Editor UI menus for example",
"type": "string"
},
"parallel": {
"description": "Indicates if the sub-commands should be executed concurrently",
"type": "boolean"
}
},
"additionalProperties": false
},
"exec": {
"description": "CLI Command executed in an existing component container",
"type": "object",
"required": [
"commandLine",
"component"
],
"properties": {
"commandLine": {
"description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.",
"type": "string"
},
"component": {
"description": "Describes component to which given action relates",
"type": "string"
},
"env": {
"description": "Optional list of environment variables that have to be set before running the command",
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"value"
],
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
}
},
"additionalProperties": false
}
},
"group": {
"description": "Defines the group this command is part of",
"type": "object",
"required": [
"kind"
],
"properties": {
"isDefault": {
"description": "Identifies the default command for a given group kind",
"type": "boolean"
},
"kind": {
"description": "Kind of group the command is part of",
"type": "string",
"enum": [
"build",
"run",
"test",
"debug"
]
}
},
"additionalProperties": false
},
"hotReloadCapable": {
"description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'",
"type": "boolean"
},
"label": {
"description": "Optional label that provides a label for this command to be used in Editor UI menus for example",
"type": "string"
},
"workingDir": {
"description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.",
"type": "string"
}
},
"additionalProperties": false
},
"id": {
"description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
}
},
"additionalProperties": false
}
},
"components": {
"description": "List of the devworkspace components, such as editor and plugins, user-provided containers, or other types of components",
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"oneOf": [
{
"required": [
"container"
]
},
{
"required": [
"kubernetes"
]
},
{
"required": [
"openshift"
]
},
{
"required": [
"volume"
]
}
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"container": {
"description": "Allows adding and configuring devworkspace-related containers",
"type": "object",
"required": [
"image"
],
"properties": {
"args": {
"description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.",
"type": "array",
"items": {
"type": "string"
}
},
"command": {
"description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.",
"type": "array",
"items": {
"type": "string"
}
},
"cpuLimit": {
"type": "string"
},
"cpuRequest": {
"type": "string"
},
"dedicatedPod": {
"description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'",
"type": "boolean"
},
"endpoints": {
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"targetPort"
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",",
"type": "object",
"additionalProperties": true
},
"exposure": {
"description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'",
"type": "string",
"default": "public",
"enum": [
"public",
"internal",
"none"
]
},
"name": {
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"path": {
"description": "Path of the endpoint URL",
"type": "string"
},
"protocol": {
"description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'",
"type": "string",
"default": "http",
"enum": [
"http",
"https",
"ws",
"wss",
"tcp",
"udp"
]
},
"secure": {
"description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.",
"type": "boolean"
},
"targetPort": {
"type": "integer"
}
},
"additionalProperties": false
}
},
"env": {
"description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'",
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"value"
],
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
}
},
"additionalProperties": false
}
},
"image": {
"type": "string"
},
"memoryLimit": {
"type": "string"
},
"memoryRequest": {
"type": "string"
},
"mountSources": {
"description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.",
"type": "boolean"
},
"sourceMapping": {
"description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.",
"type": "string",
"default": "/projects"
},
"volumeMounts": {
"description": "List of volumes mounts that should be mounted is this container.",
"type": "array",
"items": {
"description": "Volume that should be mounted to a component container",
"type": "object",
"required": [
"name"
],
"properties": {
"name": {
"description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"path": {
"description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.",
"type": "string"
}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"kubernetes": {
"description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.",
"type": "object",
"oneOf": [
{
"required": [
"uri"
]
},
{
"required": [
"inlined"
]
}
],
"properties": {
"endpoints": {
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"targetPort"
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",",
"type": "object",
"additionalProperties": true
},
"exposure": {
"description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'",
"type": "string",
"default": "public",
"enum": [
"public",
"internal",
"none"
]
},
"name": {
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"path": {
"description": "Path of the endpoint URL",
"type": "string"
},
"protocol": {
"description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'",
"type": "string",
"default": "http",
"enum": [
"http",
"https",
"ws",
"wss",
"tcp",
"udp"
]
},
"secure": {
"description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.",
"type": "boolean"
},
"targetPort": {
"type": "integer"
}
},
"additionalProperties": false
}
},
"inlined": {
"description": "Inlined manifest",
"type": "string"
},
"uri": {
"description": "Location in a file fetched from a uri.",
"type": "string"
}
},
"additionalProperties": false
},
"name": {
"description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"openshift": {
"description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.",
"type": "object",
"oneOf": [
{
"required": [
"uri"
]
},
{
"required": [
"inlined"
]
}
],
"properties": {
"endpoints": {
"type": "array",
"items": {
"type": "object",
"required": [
"name",
"targetPort"
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",",
"type": "object",
"additionalProperties": true
},
"exposure": {
"description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'",
"type": "string",
"default": "public",
"enum": [
"public",
"internal",
"none"
]
},
"name": {
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"path": {
"description": "Path of the endpoint URL",
"type": "string"
},
"protocol": {
"description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'",
"type": "string",
"default": "http",
"enum": [
"http",
"https",
"ws",
"wss",
"tcp",
"udp"
]
},
"secure": {
"description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.",
"type": "boolean"
},
"targetPort": {
"type": "integer"
}
},
"additionalProperties": false
}
},
"inlined": {
"description": "Inlined manifest",
"type": "string"
},
"uri": {
"description": "Location in a file fetched from a uri.",
"type": "string"
}
},
"additionalProperties": false
},
"volume": {
"description": "Allows specifying the definition of a volume shared by several other components",
"type": "object",
"properties": {
"ephemeral": {
"description": "Ephemeral volumes are not stored persistently across restarts. Defaults to false",
"type": "boolean"
},
"size": {
"description": "Size of the volume",
"type": "string"
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"events": {
"description": "Bindings of commands to events. Each command is referred-to by its name.",
"type": "object",
"properties": {
"postStart": {
"description": "IDs of commands that should be executed after the devworkspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.",
"type": "array",
"items": {
"type": "string"
}
},
"postStop": {
"description": "IDs of commands that should be executed after stopping the devworkspace.",
"type": "array",
"items": {
"type": "string"
}
},
"preStart": {
"description": "IDs of commands that should be executed before the devworkspace start. Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD.",
"type": "array",
"items": {
"type": "string"
}
},
"preStop": {
"description": "IDs of commands that should be executed before stopping the devworkspace.",
"type": "array",
"items": {
"type": "string"
}
}
},
"additionalProperties": false
},
"metadata": {
"description": "Optional metadata",
"type": "object",
"properties": {
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes. Deprecated, use the top-level attributes field instead.",
"type": "object",
"additionalProperties": true
},
"description": {
"description": "Optional devfile description",
"type": "string"
},
"displayName": {
"description": "Optional devfile display name",
"type": "string"
},
"globalMemoryLimit": {
"description": "Optional devfile global memory limit",
"type": "string"
},
"icon": {
"description": "Optional devfile icon, can be a URI or a relative path in the project",
"type": "string"
},
"language": {
"description": "Optional devfile language",
"type": "string"
},
"name": {
"description": "Optional devfile name",
"type": "string"
},
"projectType": {
"description": "Optional devfile project type",
"type": "string"
},
"tags": {
"description": "Optional devfile tags",
"type": "array",
"items": {
"type": "string"
}
},
"version": {
"description": "Optional semver-compatible version",
"type": "string",
"pattern": "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
},
"website": {
"description": "Optional devfile website",
"type": "string"
}
},
"additionalProperties": true
},
"parent": {
"description": "Parent devworkspace template",
"type": "object",
"oneOf": [
{
"required": [
"uri"
]
},
{
"required": [
"id"
]
},
{
"required": [
"kubernetes"
]
}
],
"properties": {
"attributes": {
"description": "Overrides of attributes encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.",
"type": "object",
"additionalProperties": true
},
"commands": {
"description": "Overrides of commands encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.",
"type": "array",
"items": {
"type": "object",
"required": [
"id"
],
"oneOf": [
{
"required": [
"exec"
]
},
{
"required": [
"apply"
]
},
{
"required": [
"composite"
]
}
],
"properties": {
"apply": {
"description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default.",
"type": "object",
"properties": {
"component": {
"description": "Describes component that will be applied",
"type": "string"
},
"group": {
"description": "Defines the group this command is part of",
"type": "object",
"properties": {
"isDefault": {
"description": "Identifies the default command for a given group kind",
"type": "boolean"
},
"kind": {
"description": "Kind of group the command is part of",
"type": "string",
"enum": [
"build",
"run",
"test",
"debug"
]
}
},
"additionalProperties": false
},
"label": {
"description": "Optional label that provides a label for this command to be used in Editor UI menus for example",
"type": "string"
}
},
"additionalProperties": false
},
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"composite": {
"description": "Composite command that allows executing several sub-commands either sequentially or concurrently",
"type": "object",
"properties": {
"commands": {
"description": "The commands that comprise this composite command",
"type": "array",
"items": {
"type": "string"
}
},
"group": {
"description": "Defines the group this command is part of",
"type": "object",
"properties": {
"isDefault": {
"description": "Identifies the default command for a given group kind",
"type": "boolean"
},
"kind": {
"description": "Kind of group the command is part of",
"type": "string",
"enum": [
"build",
"run",
"test",
"debug"
]
}
},
"additionalProperties": false
},
"label": {
"description": "Optional label that provides a label for this command to be used in Editor UI menus for example",
"type": "string"
},
"parallel": {
"description": "Indicates if the sub-commands should be executed concurrently",
"type": "boolean"
}
},
"additionalProperties": false
},
"exec": {
"description": "CLI Command executed in an existing component container",
"type": "object",
"properties": {
"commandLine": {
"description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.",
"type": "string"
},
"component": {
"description": "Describes component to which given action relates",
"type": "string"
},
"env": {
"description": "Optional list of environment variables that have to be set before running the command",
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
}
},
"additionalProperties": false
}
},
"group": {
"description": "Defines the group this command is part of",
"type": "object",
"properties": {
"isDefault": {
"description": "Identifies the default command for a given group kind",
"type": "boolean"
},
"kind": {
"description": "Kind of group the command is part of",
"type": "string",
"enum": [
"build",
"run",
"test",
"debug"
]
}
},
"additionalProperties": false
},
"hotReloadCapable": {
"description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'",
"type": "boolean"
},
"label": {
"description": "Optional label that provides a label for this command to be used in Editor UI menus for example",
"type": "string"
},
"workingDir": {
"description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.",
"type": "string"
}
},
"additionalProperties": false
},
"id": {
"description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
}
},
"additionalProperties": false
}
},
"components": {
"description": "Overrides of components encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.",
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"oneOf": [
{
"required": [
"container"
]
},
{
"required": [
"kubernetes"
]
},
{
"required": [
"openshift"
]
},
{
"required": [
"volume"
]
}
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"container": {
"description": "Allows adding and configuring devworkspace-related containers",
"type": "object",
"properties": {
"args": {
"description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.",
"type": "array",
"items": {
"type": "string"
}
},
"command": {
"description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.",
"type": "array",
"items": {
"type": "string"
}
},
"cpuLimit": {
"type": "string"
},
"cpuRequest": {
"type": "string"
},
"dedicatedPod": {
"description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'",
"type": "boolean"
},
"endpoints": {
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",",
"type": "object",
"additionalProperties": true
},
"exposure": {
"description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'",
"type": "string",
"enum": [
"public",
"internal",
"none"
]
},
"name": {
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"path": {
"description": "Path of the endpoint URL",
"type": "string"
},
"protocol": {
"description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'",
"type": "string",
"enum": [
"http",
"https",
"ws",
"wss",
"tcp",
"udp"
]
},
"secure": {
"description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.",
"type": "boolean"
},
"targetPort": {
"type": "integer"
}
},
"additionalProperties": false
}
},
"env": {
"description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'",
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
}
},
"additionalProperties": false
}
},
"image": {
"type": "string"
},
"memoryLimit": {
"type": "string"
},
"memoryRequest": {
"type": "string"
},
"mountSources": {
"description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.",
"type": "boolean"
},
"sourceMapping": {
"description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.",
"type": "string"
},
"volumeMounts": {
"description": "List of volumes mounts that should be mounted is this container.",
"type": "array",
"items": {
"description": "Volume that should be mounted to a component container",
"type": "object",
"required": [
"name"
],
"properties": {
"name": {
"description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"path": {
"description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.",
"type": "string"
}
},
"additionalProperties": false
}
}
},
"additionalProperties": false
},
"kubernetes": {
"description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.",
"type": "object",
"oneOf": [
{
"required": [
"uri"
]
},
{
"required": [
"inlined"
]
}
],
"properties": {
"endpoints": {
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",",
"type": "object",
"additionalProperties": true
},
"exposure": {
"description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'",
"type": "string",
"enum": [
"public",
"internal",
"none"
]
},
"name": {
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"path": {
"description": "Path of the endpoint URL",
"type": "string"
},
"protocol": {
"description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'",
"type": "string",
"enum": [
"http",
"https",
"ws",
"wss",
"tcp",
"udp"
]
},
"secure": {
"description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.",
"type": "boolean"
},
"targetPort": {
"type": "integer"
}
},
"additionalProperties": false
}
},
"inlined": {
"description": "Inlined manifest",
"type": "string"
},
"uri": {
"description": "Location in a file fetched from a uri.",
"type": "string"
}
},
"additionalProperties": false
},
"name": {
"description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"openshift": {
"description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.",
"type": "object",
"oneOf": [
{
"required": [
"uri"
]
},
{
"required": [
"inlined"
]
}
],
"properties": {
"endpoints": {
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",",
"type": "object",
"additionalProperties": true
},
"exposure": {
"description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'",
"type": "string",
"enum": [
"public",
"internal",
"none"
]
},
"name": {
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"path": {
"description": "Path of the endpoint URL",
"type": "string"
},
"protocol": {
"description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'",
"type": "string",
"enum": [
"http",
"https",
"ws",
"wss",
"tcp",
"udp"
]
},
"secure": {
"description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.",
"type": "boolean"
},
"targetPort": {
"type": "integer"
}
},
"additionalProperties": false
}
},
"inlined": {
"description": "Inlined manifest",
"type": "string"
},
"uri": {
"description": "Location in a file fetched from a uri.",
"type": "string"
}
},
"additionalProperties": false
},
"volume": {
"description": "Allows specifying the definition of a volume shared by several other components",
"type": "object",
"properties": {
"ephemeral": {
"description": "Ephemeral volumes are not stored persistently across restarts. Defaults to false",
"type": "boolean"
},
"size": {
"description": "Size of the volume",
"type": "string"
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"id": {
"description": "Id in a registry that contains a Devfile yaml file",
"type": "string"
},
"kubernetes": {
"description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate",
"type": "object",
"required": [
"name"
],
"properties": {
"name": {
"type": "string"
},
"namespace": {
"type": "string"
}
},
"additionalProperties": false
},
"projects": {
"description": "Overrides of projects encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.",
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"oneOf": [
{
"required": [
"git"
]
},
{
"required": [
"zip"
]
}
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"clonePath": {
"description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.",
"type": "string"
},
"git": {
"description": "Project's Git source",
"type": "object",
"properties": {
"checkoutFrom": {
"description": "Defines from what the project should be checked out. Required if there are more than one remote configured",
"type": "object",
"properties": {
"remote": {
"description": "The remote name should be used as init. Required if there are more than one remote configured",
"type": "string"
},
"revision": {
"description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.",
"type": "string"
}
},
"additionalProperties": false
},
"remotes": {
"description": "The remotes map which should be initialized in the git project. Must have at least one remote configured",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"additionalProperties": false
},
"name": {
"description": "Project name",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"zip": {
"description": "Project's Zip source",
"type": "object",
"properties": {
"location": {
"description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH",
"type": "string"
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"registryUrl": {
"description": "Registry URL to pull the parent devfile from when using id in the parent reference. To ensure the parent devfile gets resolved consistently in different environments, it is recommended to always specify the 'regsitryURL' when 'Id' is used.",
"type": "string"
},
"starterProjects": {
"description": "Overrides of starterProjects encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.",
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"oneOf": [
{
"required": [
"git"
]
},
{
"required": [
"zip"
]
}
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"description": {
"description": "Description of a starter project",
"type": "string"
},
"git": {
"description": "Project's Git source",
"type": "object",
"properties": {
"checkoutFrom": {
"description": "Defines from what the project should be checked out. Required if there are more than one remote configured",
"type": "object",
"properties": {
"remote": {
"description": "The remote name should be used as init. Required if there are more than one remote configured",
"type": "string"
},
"revision": {
"description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.",
"type": "string"
}
},
"additionalProperties": false
},
"remotes": {
"description": "The remotes map which should be initialized in the git project. Must have at least one remote configured",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"additionalProperties": false
},
"name": {
"description": "Project name",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"subDir": {
"description": "Sub-directory from a starter project to be used as root for starter project.",
"type": "string"
},
"zip": {
"description": "Project's Zip source",
"type": "object",
"properties": {
"location": {
"description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH",
"type": "string"
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"uri": {
"description": "URI Reference of a parent devfile YAML file. It can be a full URL or a relative URI with the current devfile as the base URI.",
"type": "string"
},
"variables": {
"description": "Overrides of variables encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"additionalProperties": false
},
"projects": {
"description": "Projects worked on in the devworkspace, containing names and sources locations",
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"oneOf": [
{
"required": [
"git"
]
},
{
"required": [
"zip"
]
}
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"clonePath": {
"description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.",
"type": "string"
},
"git": {
"description": "Project's Git source",
"type": "object",
"required": [
"remotes"
],
"properties": {
"checkoutFrom": {
"description": "Defines from what the project should be checked out. Required if there are more than one remote configured",
"type": "object",
"properties": {
"remote": {
"description": "The remote name should be used as init. Required if there are more than one remote configured",
"type": "string"
},
"revision": {
"description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.",
"type": "string"
}
},
"additionalProperties": false
},
"remotes": {
"description": "The remotes map which should be initialized in the git project. Must have at least one remote configured",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"additionalProperties": false
},
"name": {
"description": "Project name",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"zip": {
"description": "Project's Zip source",
"type": "object",
"properties": {
"location": {
"description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH",
"type": "string"
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"schemaVersion": {
"description": "Devfile schema version",
"type": "string",
"pattern": "^([2-9])\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
},
"starterProjects": {
"description": "StarterProjects is a project that can be used as a starting point when bootstrapping new projects",
"type": "array",
"items": {
"type": "object",
"required": [
"name"
],
"oneOf": [
{
"required": [
"git"
]
},
{
"required": [
"zip"
]
}
],
"properties": {
"attributes": {
"description": "Map of implementation-dependant free-form YAML attributes.",
"type": "object",
"additionalProperties": true
},
"description": {
"description": "Description of a starter project",
"type": "string"
},
"git": {
"description": "Project's Git source",
"type": "object",
"required": [
"remotes"
],
"properties": {
"checkoutFrom": {
"description": "Defines from what the project should be checked out. Required if there are more than one remote configured",
"type": "object",
"properties": {
"remote": {
"description": "The remote name should be used as init. Required if there are more than one remote configured",
"type": "string"
},
"revision": {
"description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.",
"type": "string"
}
},
"additionalProperties": false
},
"remotes": {
"description": "The remotes map which should be initialized in the git project. Must have at least one remote configured",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"additionalProperties": false
},
"name": {
"description": "Project name",
"type": "string",
"maxLength": 63,
"pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
},
"subDir": {
"description": "Sub-directory from a starter project to be used as root for starter project.",
"type": "string"
},
"zip": {
"description": "Project's Zip source",
"type": "object",
"properties": {
"location": {
"description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH",
"type": "string"
}
},
"additionalProperties": false
}
},
"additionalProperties": false
}
},
"variables": {
"description": "Map of key-value variables used for string replacement in the devfile. Values can can be referenced via {{variable-key}} to replace the corresponding value in string fields in the devfile. Replacement cannot be used for\n\n - schemaVersion, metadata, parent source - element identifiers, e.g. command id, component name, endpoint name, project name - references to identifiers, e.g. in events, a command's component, container's volume mount name - string enums, e.g. command group kind, endpoint exposure",
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"additionalProperties": false
}
` | pkg/devfile/parser/data/v2/2.2.0/devfileJsonSchema220.go | 0.836721 | 0.455986 | devfileJsonSchema220.go | starcoder |
package versionbundle
import (
"fmt"
"sort"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger"
)
/*
Core design behind Aggregate() implementation:
The Aggregate() function takes a list of bundles and builds all possible
combinations of them. The only restrictions are possibly conflicting Bundle
dependencies.
Aggregate() implementation works in three phases:
Assume input [simplified] bundles:
[]Bundle{ (A, 1), (A, 2), (B, 1), (B, 2), (B, 3), (C, 1), (C, 2)}
1. Group all bundles by name and remove duplicate versions.
This produces following map:
{
"A": [(A, 1), (A, 2)]
"B": [(B, 1), (B, 2), (B, 3)]
"C": [(C, 1), (C, 2)]
}
2. Build a layered tree from Bundles
(root)
|
+---------------------+---------------------+
| |
| |
(A,1) (A,2)
| |
+-------------+-------------+ +-------------+-------------+
| | | | | |
(B,1) (B,2) (B,3) (B,1) (B,2) (B,3)
| | | | | |
+---+---+ +---+---+ +---+---+ +---+---+ +---+---+ +---+---+
| | | | | | | | | | | |
(C,1) (C,2) (C,1) (C,2) (C,1) (C,2) (C,1) (C,2) (C,1) (C,2) (C,1) (C,2)
3. Walk the tree and create aggregated bundles where there are no dependency
   conflicts.
*/
// AggregatorConfig represents the configuration used to create a new
// Aggregator with NewAggregator.
type AggregatorConfig struct {
	// Logger is used for debug output on dependency conflicts. It must not be
	// nil; NewAggregator rejects a nil logger.
	Logger micrologger.Logger
}
// Aggregator merges lists of version bundles into all conflict-free
// combinations. Create instances with NewAggregator.
type Aggregator struct {
	logger micrologger.Logger
}
// node is a data structure for the bundle aggregation tree. The zero value is
// the empty root node. Each layer of the tree corresponds to one bundle name;
// leaves holds one child node per version of the next layer's bundle name
// (see the design comment at the top of this file).
type node struct {
	bundle Bundle
	leaves []*node
}
// NewAggregator constructs a new Aggregator from the given AggregatorConfig.
// It returns an invalidConfigError when the config has no logger set.
func NewAggregator(config AggregatorConfig) (*Aggregator, error) {
	if config.Logger == nil {
		return nil, microerror.Maskf(invalidConfigError, "%T.Logger must not be empty", config)
	}

	return &Aggregator{
		logger: config.Logger,
	}, nil
}
// Aggregate merges version bundles based on dependencies each version bundle
// within the given version bundles define for their own components. Duplicate
// versions for same bundle name (e.g. in case of different Provider
// implementations) are removed. To control bundles selected for aggregation,
// filter them before aggregation.
//
// The result contains one []Bundle per conflict-free combination of bundle
// versions, built in a deterministic order. It returns nil instead of an
// empty slice when no combination exists.
func (a *Aggregator) Aggregate(bundles []Bundle) ([][]Bundle, error) {
	if len(bundles) == 0 {
		return nil, nil
	}
	var aggregatedBundles [][]Bundle
	// A single bundle trivially forms exactly one aggregation.
	if len(bundles) == 1 {
		aggregatedBundles = append(aggregatedBundles, bundles)
		return aggregatedBundles, nil
	}
	bundleMap := make(map[string][]Bundle)
	// First group all bundles by name.
	for _, v := range bundles {
		bundleMap[v.Name] = append(bundleMap[v.Name], v)
	}
	// Gather keys for sorting to guarantee always the same order for [][]Bundles
	var keys []string
	// Ensure that there are no duplicate bundles.
	for k, v := range bundleMap {
		sort.Sort(SortBundlesByVersion(v))
		// With versions sorted, duplicates sit next to each other; drop each
		// repeated version in place. The index is decremented after a removal
		// so the element shifted into slot i is compared as well.
		for i := 0; i < len(v)-1; i++ {
			if v[i].Version == v[i+1].Version {
				v = append(v[:i], v[i+1:]...)
				i--
			}
		}
		bundleMap[k] = v
		keys = append(keys, k)
	}
	// Sort the keys so the tree layers, and therefore the output order, are
	// deterministic across runs (map iteration order is random).
	sort.Strings(keys)
	// Tree root is an empty node.
	tree := &node{}
	// Build the tree, one layer per bundle name (see the design comment at the
	// top of this file).
	for _, k := range keys {
		a.walkTreeAndAddLeaves(tree, bundleMap[k])
	}
	// Walk the tree and aggregate.
	aggregatedBundles = a.walkTreeAndAggregate(tree, []Bundle{})
	// Instead of returning empty slice, return explicit nil to be backwards
	// compatible with API.
	if len(aggregatedBundles) == 0 {
		aggregatedBundles = nil
	}
	err := AggregatedBundles(aggregatedBundles).Validate()
	if err != nil {
		return nil, microerror.Mask(err)
	}
	return aggregatedBundles, nil
}
// walkTreeAndAddLeaves descends to the current bottom layer of the tree and
// attaches one child node per given bundle to every leaf found there, thereby
// adding one new layer to the tree.
func (a *Aggregator) walkTreeAndAddLeaves(n *node, bundles []Bundle) {
	if len(n.leaves) > 0 {
		// Not at the bottom layer yet; keep descending.
		for _, leaf := range n.leaves {
			a.walkTreeAndAddLeaves(leaf, bundles)
		}
		return
	}

	// n is a leaf, so fan out one new child per bundle.
	for _, b := range bundles {
		n.leaves = append(n.leaves, &node{bundle: b})
	}
}
// walkTreeAndAggregate recursively walks the tree, accumulating the bundles
// along the current root-to-leaf path in bundles. Each complete path forms one
// candidate aggregation, which is kept only when no pair of its bundles has
// conflicting dependencies.
func (a *Aggregator) walkTreeAndAggregate(n *node, bundles []Bundle) [][]Bundle {
	// If current node is leaf, then return bundle if there are no conflicts.
	if len(n.leaves) == 0 {
		// Only aggregate bundle groups that don't have conflicting dependencies.
		for i, b1 := range bundles {
			for j, b2 := range bundles {
				// No need to self-verify.
				if i == j {
					continue
				}
				if a.bundlesConflictWithDependencies(b1, b2) {
					return [][]Bundle{}
				}
			}
		}
		// Sort by version first, then stable-sort by name, so bundles sharing
		// a name keep their version order within the group.
		sort.Sort(SortBundlesByVersion(bundles))
		sort.Stable(SortBundlesByName(bundles))
		return [][]Bundle{bundles}
	}
	// In the middle of the tree -> continue walking.
	aggregates := make([][]Bundle, 0)
	for _, leaf := range n.leaves {
		// Copy the path so sibling branches do not share backing storage; one
		// extra capacity slot is reserved for the leaf's own bundle.
		bundlesCopy := make([]Bundle, len(bundles), len(bundles)+1)
		copy(bundlesCopy, bundles)
		bundlesCopy = append(bundlesCopy, leaf.bundle)
		aggregates = append(aggregates, a.walkTreeAndAggregate(leaf, bundlesCopy)...)
	}
	return aggregates
}
func (a *Aggregator) bundlesConflictWithDependencies(b1, b2 Bundle) bool {
for _, d := range b1.Dependencies {
for _, c := range b2.Components {
if d.Name != c.Name {
continue
}
if !d.Matches(c) {
a.logger.Log("component", fmt.Sprintf("%#v", c), "dependency", fmt.Sprintf("%#v", d), "level", "debug", "message", "dependency conflicts with component")
return true
}
}
}
for _, d := range b2.Dependencies {
for _, c := range b1.Components {
if d.Name != c.Name {
continue
}
if !d.Matches(c) {
a.logger.Log("component", fmt.Sprintf("%#v", c), "dependency", fmt.Sprintf("%#v", d), "level", "debug", "message", "dependency conflicts with component")
return true
}
}
}
return false
} | vendor/github.com/giantswarm/versionbundle/aggregate.go | 0.666605 | 0.413773 | aggregate.go | starcoder |
package positionallist
import (
"fmt"
)
// PositionalList is a doubly linked list that exposes its nodes as positions,
// allowing O(1) insertion and removal relative to a known Node. Two sentinel
// nodes bracket the elements so insertions never need nil checks.
type PositionalList[T any] struct {
	header *Node[T] // header is a sentinel node. header.Next is the first element in the list.
	trailer *Node[T] // trailer is a sentinel node. trailer.Prev is the last element in the list.
	Size int // Size is the number of stored elements, sentinels excluded.
}
// New constructs and returns an empty positional list whose two sentinel
// nodes are linked to each other.
func New[T any]() *PositionalList[T] {
	head := &Node[T]{}
	tail := &Node[T]{Prev: head}
	head.Next = tail
	return &PositionalList[T]{header: head, trailer: tail}
}
// IsEmpty returns true if the list doesn't have any elements.
func (p *PositionalList[T]) IsEmpty() bool {
	return p.Size == 0
}

// First returns the first Node in the list.
// NOTE(review): when the list is empty this returns the trailer sentinel
// rather than nil (no validate call, unlike After) — confirm this is intended.
func (p *PositionalList[T]) First() *Node[T] {
	return p.header.Next
}

// Last returns the last Node in the list.
// NOTE(review): when the list is empty this returns the header sentinel
// rather than nil — confirm this is intended.
func (p *PositionalList[T]) Last() *Node[T] {
	return p.trailer.Prev
}
// Before returns the Node before the given Node, or nil when the predecessor
// is a sentinel (i.e. n is the first element). The result is passed through
// validate to mirror After; previously Before could leak the header sentinel
// to callers.
func (p *PositionalList[T]) Before(n *Node[T]) *Node[T] {
	return p.validate(n.Prev)
}
// After returns the Node after the given Node.
// The result is passed through validate, so nil is returned when the
// successor is the trailer sentinel (i.e. n is the last element).
func (p *PositionalList[T]) After(n *Node[T]) *Node[T] {
	return p.validate(n.Next)
}
// AddFirst gets data, constructs a Node out of it, adds it to the first of the list, and returns it.
// O(1): splices between the header sentinel and the current first element.
func (p *PositionalList[T]) AddFirst(data T) *Node[T] {
	return p.addBetween(data, p.header, p.header.Next)
}

// AddLast gets data, constructs a Node out of it, adds it to the end of the list, and returns it.
// O(1): splices between the current last element and the trailer sentinel.
func (p *PositionalList[T]) AddLast(data T) *Node[T] {
	return p.addBetween(data, p.trailer.Prev, p.trailer)
}

// AddBefore gets data, constructs a Node out of it, adds it before the given Node, and returns it.
func (p *PositionalList[T]) AddBefore(n *Node[T], data T) *Node[T] {
	return p.addBetween(data, n.Prev, n)
}

// AddAfter gets data, constructs a Node out of it, adds it after the given Node, and returns it.
func (p *PositionalList[T]) AddAfter(n *Node[T], data T) *Node[T] {
	return p.addBetween(data, n, n.Next)
}
// Set changes the given Node's value to the given data. It returns the previous data.
func (p *PositionalList[T]) Set(n *Node[T], data T) T {
	previous := n.Data
	n.Data = data
	return previous
}
// Remove unlinks the given Node from the list and returns the data it held.
// The removed node's links and payload are cleared so it can no longer reach
// the list and the value can be garbage collected.
func (p *PositionalList[T]) Remove(n *Node[T]) T {
	before, after := n.Prev, n.Next
	before.Next = after
	after.Prev = before
	p.Size--

	data := n.Data
	var zero T
	n.Data = zero
	n.Prev = nil
	n.Next = nil
	return data
}
// String returns the string representation of the list, e.g. "[ 1 2 3 ]".
// Uses strings.Builder to avoid quadratic string concatenation, and the
// receiver is named p for consistency with the other methods.
func (p *PositionalList[T]) String() string {
	var b strings.Builder
	b.WriteString("[ ")
	for current := p.header.Next; current != p.trailer; current = current.Next {
		fmt.Fprint(&b, current.Data)
		b.WriteString(" ")
	}
	b.WriteString("]")
	return b.String()
}
// addBetween constructs a new Node out of the given data, splices it between
// predecessor and successor, increments Size, and returns it. The receiver is
// renamed from d to p for consistency with the rest of the type's methods.
func (p *PositionalList[T]) addBetween(data T, predecessor *Node[T], successor *Node[T]) *Node[T] {
	newNode := &Node[T]{Data: data, Next: successor, Prev: predecessor}
	predecessor.Next = newNode
	successor.Prev = newNode
	p.Size++
	return newNode
}
// validate returns nil if the given Node is a sentinel Node (header or
// trailer); otherwise it returns the node unchanged. It hides the sentinels
// from callers navigating past either end of the list.
func (p *PositionalList[T]) validate(n *Node[T]) *Node[T] {
	if n == p.header || n == p.trailer {
		return nil
	}
	return n
}
package processor
import (
"fmt"
"time"
"github.com/Jeffail/benthos/lib/log"
"github.com/Jeffail/benthos/lib/metrics"
"github.com/Jeffail/benthos/lib/types"
jmespath "github.com/jmespath/go-jmespath"
)
//------------------------------------------------------------------------------
// init registers the jmespath processor constructor and its user-facing
// documentation in the package-level Constructors registry under TypeJMESPath.
func init() {
	Constructors[TypeJMESPath] = TypeSpec{
		constructor: NewJMESPath,
		description: `
Parses a message as a JSON document and attempts to apply a JMESPath expression
to it, replacing the contents of the part with the result. Please refer to the
[JMESPath website](http://jmespath.org/) for information and tutorials regarding
the syntax of expressions.
For example, with the following config:
` + "``` yaml" + `
jmespath:
query: locations[?state == 'WA'].name | sort(@) | {Cities: join(', ', @)}
` + "```" + `
If the initial contents of a message were:
` + "``` json" + `
{
"locations": [
{"name": "Seattle", "state": "WA"},
{"name": "New York", "state": "NY"},
{"name": "Bellevue", "state": "WA"},
{"name": "Olympia", "state": "WA"}
]
}
` + "```" + `
Then the resulting contents would be:
` + "``` json" + `
{"Cities": "Bellevue, Olympia, Seattle"}
` + "```" + `
It is possible to create boolean queries with JMESPath, in order to filter
messages with boolean queries please instead use the
` + "[`jmespath`](../conditions/README.md#jmespath)" + ` condition.`,
	}
}
//------------------------------------------------------------------------------
// JMESPathConfig contains configuration fields for the JMESPath processor.
type JMESPathConfig struct {
	Parts []int  `json:"parts" yaml:"parts"` // indexes of message parts to process; empty means all parts
	Query string `json:"query" yaml:"query"` // the JMESPath expression applied to each part
}
// NewJMESPathConfig returns a JMESPathConfig with default values.
func NewJMESPathConfig() JMESPathConfig {
	var conf JMESPathConfig
	conf.Parts = []int{}
	conf.Query = ""
	return conf
}
//------------------------------------------------------------------------------
// JMESPath is a processor that executes JMESPath queries on a message part and
// replaces the contents with the result.
type JMESPath struct {
	parts []int              // indexes of the message parts to process; empty means all parts
	query *jmespath.JMESPath // the pre-compiled query expression
	conf  Config
	log   log.Modular
	stats metrics.Type

	// Metric counters, resolved once at construction.
	mCount     metrics.StatCounter
	mErrJSONP  metrics.StatCounter
	mErrJMES   metrics.StatCounter
	mErrJSONS  metrics.StatCounter
	mErr       metrics.StatCounter
	mSent      metrics.StatCounter
	mBatchSent metrics.StatCounter
}
// NewJMESPath returns a JMESPath processor. The query expression is compiled
// once up front; a compilation failure is returned as an error.
func NewJMESPath(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	compiled, err := jmespath.Compile(conf.JMESPath.Query)
	if err != nil {
		return nil, fmt.Errorf("failed to compile JMESPath query: %v", err)
	}
	proc := &JMESPath{
		parts: conf.JMESPath.Parts,
		query: compiled,
		conf:  conf,
		log:   log,
		stats: stats,

		mCount:     stats.GetCounter("count"),
		mErrJSONP:  stats.GetCounter("error.json_parse"),
		mErrJMES:   stats.GetCounter("error.jmespath_search"),
		mErrJSONS:  stats.GetCounter("error.json_set"),
		mErr:       stats.GetCounter("error"),
		mSent:      stats.GetCounter("sent"),
		mBatchSent: stats.GetCounter("batch.sent"),
	}
	return proc, nil
}
//------------------------------------------------------------------------------
// safeSearch runs the compiled JMESPath query against part, converting any
// panic raised inside the jmespath library into an ordinary error.
func safeSearch(part interface{}, j *jmespath.JMESPath) (res interface{}, err error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		err = fmt.Errorf("jmespath panic: %v", r)
	}()
	res, err = j.Search(part)
	return
}
// ProcessMessage applies the processor to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
// Each targeted part is parsed as JSON, searched with the compiled query and
// replaced by the result; failures flag the part and bump error metrics.
func (p *JMESPath) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
	p.mCount.Incr(1)
	newMsg := msg.Copy()

	proc := func(index int) {
		jsonPart, err := newMsg.Get(index).JSON()
		if err != nil {
			p.mErrJSONP.Incr(1)
			p.mErr.Incr(1)
			p.log.Debugf("Failed to parse part into json: %v\n", err)
			FlagFail(newMsg.Get(index))
			return
		}
		result, err := safeSearch(jsonPart, p.query)
		if err != nil {
			p.mErrJMES.Incr(1)
			p.mErr.Incr(1)
			p.log.Debugf("Failed to search json: %v\n", err)
			FlagFail(newMsg.Get(index))
			return
		}
		if err := newMsg.Get(index).SetJSON(result); err != nil {
			p.mErrJSONS.Incr(1)
			p.mErr.Incr(1)
			p.log.Debugf("Failed to convert jmespath result into part: %v\n", err)
			FlagFail(newMsg.Get(index))
		}
	}

	// An empty parts list means "process every part".
	if len(p.parts) == 0 {
		for i := 0; i < msg.Len(); i++ {
			proc(i)
		}
	} else {
		for _, i := range p.parts {
			proc(i)
		}
	}

	p.mBatchSent.Incr(1)
	p.mSent.Incr(int64(newMsg.Len()))
	return []types.Message{newMsg}, nil
}
// CloseAsync shuts down the processor and stops processing requests.
// The JMESPath processor holds no background resources, so this is a no-op.
func (p *JMESPath) CloseAsync() {
}

// WaitForClose blocks until the processor has closed down.
// Always returns nil immediately, since CloseAsync has nothing to wait for.
func (p *JMESPath) WaitForClose(timeout time.Duration) error {
	return nil
}
//------------------------------------------------------------------------------ | lib/processor/jmespath.go | 0.677581 | 0.815085 | jmespath.go | starcoder |
package main
import (
"fmt"
"io/ioutil"
"strings"
"strconv"
)
// coord is one puzzle input coordinate. size accumulates how many grid points
// are closest to it; invalid marks coordinates disqualified from part 1
// (their area touches the grid border, or they are dummy tie markers).
type coord struct {
	x, y, size int
	invalid    bool
}
// dist returns the Manhattan distance between c and other.
func (c *coord) dist(other coord) int {
	dx := abs(c.x - other.x)
	dy := abs(c.y - other.y)
	return dx + dy
}
// point is one cell of the grid: the coordinate currently nearest to it and
// the Manhattan distance to that coordinate.
type point struct {
	nearest *coord
	dist    int
}
// main solves Advent of Code 2018 day 6 from the input file "6.in":
// part 1 finds the size of the largest finite region of grid points closest
// (Manhattan distance) to a single coordinate; part 2 counts grid points
// whose summed distance to all coordinates is below 10000.
func main() {
	// Input parsing
	file, _ := ioutil.ReadFile("6.in") // best-effort read; errors yield an empty coordinate list
	in := strings.Split(string(file), "\n")
	coords := make([]coord, 0)
	for _, l := range in {
		if len(l) == 0 {
			continue;
		}
		s := strings.Split(l, ", ")
		x, _ := strconv.Atoi(s[0])
		y, _ := strconv.Atoi(s[1])
		coords = append(coords, coord{x, y, 0, false})
	}
	// Part 1
	// Get the minimum and maximum x's and y's
	min := findminmax(coords, func(a int, b int) bool { return a < b })
	max := findminmax(coords, func(a int, b int) bool { return a > b })
	// Widen the upper bounds by one so the extreme coordinates sit inside the grid.
	max.x += 1
	max.y += 1
	grid := make([][]point, max.x)
	// Fill the grid with dummy data
	for x := 0; x < max.x; x++ {
		grid[x] = make([]point, max.y)
		for y := 0; y < max.y; y++ {
			// Sentinel distance larger than any real Manhattan distance on this grid.
			grid[x][y].dist = (max.x + max.y) * 10
		}
	}
	// Assign each point its nearest coordinate or a dummy one if two or more are competing
	for i, c := range coords {
		for x := 0; x < max.x; x++ {
			for y := 0; y < max.y; y++ {
				d := c.dist(coord{x, y, 0, false})
				if grid[x][y].dist > d {
					grid[x][y].nearest = &coords[i]
					grid[x][y].dist = d
				} else if grid[x][y].dist == d {
					// Tie: point at a throwaway coord so no real coordinate claims this cell.
					grid[x][y].nearest = &coord{0, 0, 0, true}
				}
			}
		}
	}
	// Filter out invalids and count the size of each area
	for x := 0; x < max.x; x++ {
		for y := 0; y < max.y; y++ {
			// Areas touching the border extend to infinity and are disqualified.
			if x < min.x || y < min.y || x > (max.x-2) || (y > max.y-2) {
				grid[x][y].nearest.invalid = true
			} else {
				grid[x][y].nearest.size++
			}
		}
	}
	// Select the largest area
	sol1 := coords[0]
	for _, c := range coords {
		if sol1.size < c.size && !c.invalid {
			sol1 = c
		}
	}
	fmt.Println("Solution to part 1:")
	fmt.Println(sol1.size)
	// Part 2 - Just sum up the distances and count the valid points
	sol2 := 0;
	for x := min.x; x < max.x-1; x++ {
		for y := min.y; y < max.y-1; y++ {
			sum := 0;
			for _, c := range coords {
				sum += c.dist(coord{x, y, 0, false})
			}
			if sum < 10000 {
				sol2++
			}
		}
	}
	fmt.Println("Solution to part 2:")
	fmt.Println(sol2)
}
// findminmax folds all coordinates into a single coord whose x and y are the
// component-wise extremes selected by compare (e.g. "<" yields the minimum of
// each component). Assumes coords is non-empty.
func findminmax(coords []coord, compare func(int, int) bool) coord {
	best := coords[0]
	for _, c := range coords[1:] {
		if compare(c.x, best.x) {
			best.x = c.x
		}
		if compare(c.y, best.y) {
			best.y = c.y
		}
	}
	return best
}
// abs returns the absolute value of n. The previous branch-free bit trick
// is replaced by the idiomatic conditional; behavior is identical, including
// the unavoidable overflow abs(math.MinInt) == math.MinInt.
func abs(n int) int {
	if n < 0 {
		return -n
	}
	return n
}
// Solution part 1: 4342
// Solution part 2: 42966 | 2018/Day6.go | 0.620507 | 0.497131 | Day6.go | starcoder |
package experimental
import (
"encoding/json"
"fmt"
"math"
"sort"
"github.com/heustis/tsp-solver-go/model"
)
// ConvexConcaveWeightedEdges is significantly worse than the other greedy algorithms, see `results_2d_comp_greedy_3.tsv`.
// I tested it with 8, 4, and 1 points in the weighting array (see below).
// With 1 point this produced the same results as `circuitgreedy_impl.go`, which was expected since weighing only one point is the same as only considering which point is closest to its closest edge.
// ConvexConcaveWeightedEdges greedily grows a circuit from the convex-hull
// perimeter, repeatedly attaching the closest vertex of the edge whose
// weighted average distance to its closest unattached vertices is smallest.
type ConvexConcaveWeightedEdges struct {
	Vertices           []model.CircuitVertex
	circuitEdges       []model.CircuitEdge
	closestVertices    map[model.CircuitEdge]*weightedEdge // per-edge summary of the closest unattached vertices
	unattachedVertices map[model.CircuitVertex]bool        // interior vertices not yet added to the circuit
	length             float64                             // current total length of the circuit
	weights            []float64                           // weights applied to the 1st..nth closest vertex of each edge
}
// NewConvexConcaveWeightedEdges creates a circuit using the default of 8
// weighted closest vertices per edge.
func NewConvexConcaveWeightedEdges(vertices []model.CircuitVertex, perimeterBuilder model.PerimeterBuilder) model.Circuit {
	return NewConvexConcaveWeightedEdgesWithNumWeights(vertices, perimeterBuilder, 8)
}
// NewConvexConcaveWeightedEdgesWithNumWeights creates a circuit that weighs
// the numWeights closest vertices of each edge with geometrically decreasing
// weights 1/2, 1/4, ..., 1/2^numWeights. Non-positive numWeights is treated
// as 1.
func NewConvexConcaveWeightedEdgesWithNumWeights(vertices []model.CircuitVertex, perimeterBuilder model.PerimeterBuilder, numWeights int) model.Circuit {
	if numWeights <= 0 {
		numWeights = 1
	}
	weights := make([]float64, numWeights)
	denominator := 2.0
	for i := range weights {
		weights[i] = 1.0 / denominator
		denominator *= 2.0
	}
	return NewConvexConcaveWeightedEdgesWithWeights(vertices, perimeterBuilder, weights)
}
// NewConvexConcaveWeightedEdgesWithWeights builds the perimeter, then creates
// the weighted closest-vertex summary for every perimeter edge. weights[0]
// applies to the closest vertex of an edge, weights[1] to the next, etc.
// NOTE(review): when len(weights) <= 1 the slice is replaced with
// []float64{1.0}, discarding a caller-supplied single weight value — confirm
// this normalization is intended.
func NewConvexConcaveWeightedEdgesWithWeights(vertices []model.CircuitVertex, perimeterBuilder model.PerimeterBuilder, weights []float64) model.Circuit {
	if len(weights) <= 1 {
		weights = []float64{1.0}
	}
	circuitEdges, unattachedVertices := perimeterBuilder(vertices)
	// Find the closest edge for all interior points, based on distance increase; store them in a heap for retrieval from closest to farthest.
	// NOTE(review): despite the comment above, the summaries are kept in a map, not a heap.
	length := 0.0
	closestVertices := make(map[model.CircuitEdge]*weightedEdge)
	for _, edge := range circuitEdges {
		closestVertices[edge] = newWeightedEdge(edge, unattachedVertices, weights)
		length += edge.GetLength()
	}
	return &ConvexConcaveWeightedEdges{
		Vertices:           vertices,
		circuitEdges:       circuitEdges,
		closestVertices:    closestVertices,
		unattachedVertices: unattachedVertices,
		length:             length,
		weights:            weights,
	}
}
// FindNextVertexAndEdge returns the next vertex to attach and the edge to
// split: the closest tracked vertex of the edge with the smallest weighted
// distance. Returns nil, nil when no unattached vertices remain.
func (c *ConvexConcaveWeightedEdges) FindNextVertexAndEdge() (model.CircuitVertex, model.CircuitEdge) {
	bestDistance := math.MaxFloat64
	var bestEdge model.CircuitEdge
	var bestInfo *weightedEdge
	for edge, info := range c.closestVertices {
		if info.weightedDistance < bestDistance {
			bestDistance = info.weightedDistance
			bestInfo = info
			bestEdge = edge
		}
	}
	if bestInfo == nil || len(bestInfo.closestVertices) == 0 {
		return nil, nil
	}
	return bestInfo.closestVertices[0].vertex, bestEdge
}
// GetAttachedEdges returns the circuit's current edges in traversal order.
func (c *ConvexConcaveWeightedEdges) GetAttachedEdges() []model.CircuitEdge {
	return c.circuitEdges
}

// GetAttachedVertices returns the start vertex of each edge, i.e. the
// circuit's vertices in traversal order.
func (c *ConvexConcaveWeightedEdges) GetAttachedVertices() []model.CircuitVertex {
	vertices := make([]model.CircuitVertex, len(c.circuitEdges))
	for i, edge := range c.circuitEdges {
		vertices[i] = edge.GetStart()
	}
	return vertices
}

// GetClosestVertices exposes the per-edge weighted closest-vertex cache.
func (c *ConvexConcaveWeightedEdges) GetClosestVertices() map[model.CircuitEdge]*weightedEdge {
	return c.closestVertices
}

// GetLength returns the current total length of the circuit.
func (c *ConvexConcaveWeightedEdges) GetLength() float64 {
	return c.length
}

// GetUnattachedVertices returns the set of vertices not yet in the circuit.
func (c *ConvexConcaveWeightedEdges) GetUnattachedVertices() map[model.CircuitVertex]bool {
	return c.unattachedVertices
}
// Update attaches vertexToAdd to the circuit by splitting edgeToSplit into
// two edges, then refreshes the cached per-edge closest-vertex summaries and
// the total circuit length. A nil vertexToAdd is a no-op. Panics with debug
// context if edgeToSplit is no longer part of the circuit.
func (c *ConvexConcaveWeightedEdges) Update(vertexToAdd model.CircuitVertex, edgeToSplit model.CircuitEdge) {
	if vertexToAdd != nil {
		var edgeIndex int
		c.circuitEdges, edgeIndex = model.SplitEdge(c.circuitEdges, edgeToSplit, vertexToAdd)
		if edgeIndex < 0 {
			expectedEdgeJson, _ := json.Marshal(edgeToSplit)
			actualCircuitJson, _ := json.Marshal(c.circuitEdges)
			initialVertices, _ := json.Marshal(c.Vertices)
			panic(fmt.Errorf("edge not found in circuit=%p, expected=%s, \ncircuit=%s \nvertices=%s", c, string(expectedEdgeJson), string(actualCircuitJson), string(initialVertices)))
		}
		delete(c.unattachedVertices, vertexToAdd)
		// The split edge no longer exists, so drop its cached summary.
		delete(c.closestVertices, edgeToSplit)
		// The newly attached vertex may no longer appear as a "closest vertex" of any edge.
		for e, w := range c.closestVertices {
			w.removeVertex(vertexToAdd, e, c.unattachedVertices, c.weights)
		}
		// Build fresh summaries for the two edges created by the split.
		edgeA, edgeB := c.circuitEdges[edgeIndex], c.circuitEdges[edgeIndex+1]
		c.closestVertices[edgeA] = newWeightedEdge(edgeA, c.unattachedVertices, c.weights)
		c.closestVertices[edgeB] = newWeightedEdge(edgeB, c.unattachedVertices, c.weights)
		c.length += edgeA.GetLength() + edgeB.GetLength() - edgeToSplit.GetLength()
	}
}
// weightedVertex pairs an unattached vertex with its distance increase for a
// particular edge.
type weightedVertex struct {
	distance float64
	vertex   model.CircuitVertex
}

// weightedEdge summarizes, for one circuit edge, its closest unattached
// vertices (sorted ascending by distance) and their weighted distance.
type weightedEdge struct {
	weightedDistance float64
	closestVertices  []*weightedVertex
}

// GetClosestVertices returns the tracked closest vertices, closest first.
func (w *weightedEdge) GetClosestVertices() []*weightedVertex {
	return w.closestVertices
}

// GetDistance returns the weighted distance of the tracked vertices.
func (w *weightedEdge) GetDistance() float64 {
	return w.weightedDistance
}
// newWeightedEdge builds the weighted summary for edge: it selects up to
// len(weights) unattached vertices closest to the edge (by DistanceIncrease),
// keeps them sorted ascending by distance, and computes the weighted distance.
func newWeightedEdge(edge model.CircuitEdge, unattachedVertices map[model.CircuitVertex]bool, weights []float64) *weightedEdge {
	// Track at most len(weights) vertices — fewer if not enough remain unattached.
	lenClosest := int(math.Min(float64(len(weights)), float64(len(unattachedVertices))))
	lastIndex := lenClosest - 1
	w := &weightedEdge{
		weightedDistance: 0.0,
		closestVertices:  make([]*weightedVertex, lenClosest),
	}
	nextIndex := 0
	for v := range unattachedVertices {
		vertexDistance := edge.DistanceIncrease(v)
		// The first n vertices can be added directly to the array, then sorted once all are added.
		if nextIndex < lenClosest {
			w.closestVertices[nextIndex] = &weightedVertex{
				vertex:   v,
				distance: vertexDistance,
			}
			if nextIndex == lastIndex {
				sort.Slice(w.closestVertices, func(i, j int) bool {
					return w.closestVertices[i].distance < w.closestVertices[j].distance
				})
			}
			nextIndex++
		} else if vertexDistance < w.closestVertices[lastIndex].distance {
			// Closer than the farthest tracked vertex: replace it, then restore order.
			w.closestVertices[lastIndex] = &weightedVertex{
				vertex:   v,
				distance: vertexDistance,
			}
			// Bubbling is normally too inefficient for sorting, but this array has a max of 8 entries so it isn't too impactful.
			for i, j := lastIndex, lastIndex-1; j >= 0; i, j = j, j-1 {
				// Stop bubbling once this vertex is farther than the next vertex in the array ("next" meaning closer to index 0).
				if vertexDistance > w.closestVertices[j].distance {
					break
				}
				w.closestVertices[i], w.closestVertices[j] = w.closestVertices[j], w.closestVertices[i]
			}
		}
	}
	w.updateDistance(weights)
	return w
}
// removeVertex drops vertex from this edge's tracked closest vertices (it has
// just been attached to the circuit). If untracked unattached vertices still
// exist, the closest remaining one is pulled in to keep the list full; the
// weighted distance is then recomputed. No-op when vertex was not tracked.
func (w *weightedEdge) removeVertex(vertex model.CircuitVertex, edge model.CircuitEdge, unattachedVertices map[model.CircuitVertex]bool, weights []float64) {
	numVertices := len(w.closestVertices)
	// Locate the vertex in the tracked list, scanning from the end.
	vertexIndex := numVertices - 1
	for ; vertexIndex >= 0 && w.closestVertices[vertexIndex].vertex != vertex; vertexIndex-- {
	}
	if vertexIndex < 0 {
		return
	}
	// If there are unattached vertices that are not included in the weighted average, add the next closest vertex into the average.
	if len(unattachedVertices) >= numVertices {
		nextClosest := &weightedVertex{
			distance: math.MaxFloat64,
		}
		for v := range unattachedVertices {
			if dist := edge.DistanceIncrease(v); dist < nextClosest.distance {
				nextClosest.vertex = v
				nextClosest.distance = dist
			}
		}
		// NOTE(review): this scan minimizes over all unattached vertices,
		// including ones already tracked in closestVertices, so it may insert
		// a duplicate entry — confirm this is impossible or harmless.
		w.closestVertices[vertexIndex] = nextClosest
		// Bubble the newly added vertex to the last position in the array, because
		// a newly added vertex will be farther away than any vertex already in the list (due to newWeightedEdge selecting the 8 closest vertices).
		for i, j := vertexIndex, vertexIndex+1; j < numVertices; i, j = j, j+1 {
			w.closestVertices[i], w.closestVertices[j] = w.closestVertices[j], w.closestVertices[i]
		}
	} else if vertexIndex == 0 {
		w.closestVertices = w.closestVertices[1:]
	} else if vertexIndex == numVertices-1 {
		w.closestVertices = w.closestVertices[:vertexIndex]
	} else {
		w.closestVertices = append(w.closestVertices[:vertexIndex], w.closestVertices[vertexIndex+1:]...)
	}
	w.updateDistance(weights)
}
// updateDistance recomputes the weighted distance as the weight-scaled sum of
// the tracked closest vertices' distances (weights[0] scales the closest).
func (w *weightedEdge) updateDistance(weights []float64) {
	total := 0.0
	for i, v := range w.closestVertices {
		total += v.distance * weights[i]
	}
	w.weightedDistance = total
}
var _ model.Circuit = (*ConvexConcaveWeightedEdges)(nil) | circuit/experimental/convexconcave_weighted_edges_impl.go | 0.812496 | 0.558357 | convexconcave_weighted_edges_impl.go | starcoder |
package iterator
import (
"sync/atomic"
"github.com/apache/arrow/go/arrow"
"github.com/apache/arrow/go/arrow/array"
"github.com/gomem/gomem/internal/debug"
)
// BooleanValueIterator is an iterator for reading an Arrow Column value by value.
// It is reference counted; use Retain/Release to manage its lifetime.
type BooleanValueIterator struct {
	refCount      int64
	chunkIterator *ChunkIterator

	// Things we need to maintain for the iterator
	index int            // current value index
	ref   *array.Boolean // the chunk reference
	done  bool           // there are no more elements for this iterator

	dataType arrow.DataType // data type of the source column, cached at construction
}
// NewBooleanValueIterator creates a new BooleanValueIterator for reading an Arrow Column.
func NewBooleanValueIterator(col *array.Column) *BooleanValueIterator {
	return &BooleanValueIterator{
		refCount: 1,
		// The chunk-level iterator that feeds this value-level iterator.
		chunkIterator: NewChunkIterator(col),

		index:    0,
		ref:      nil,
		dataType: col.DataType(),
	}
}
// Value will return the current value that the iterator is on and boolean value indicating if the value is actually null.
func (vr *BooleanValueIterator) Value() (bool, bool) {
	return vr.ref.Value(vr.index), vr.ref.IsNull(vr.index)
}

// ValuePointer will return a pointer to the current value that the iterator is on. It will return nil if the value is actually null.
func (vr *BooleanValueIterator) ValuePointer() *bool {
	if vr.ref.IsNull(vr.index) {
		return nil
	}
	value := vr.ref.Value(vr.index)
	return &value
}

// ValueInterface returns the value as an interface{}.
// A null value is returned as nil.
func (vr *BooleanValueIterator) ValueInterface() interface{} {
	if vr.ref.IsNull(vr.index) {
		return nil
	}
	return vr.ref.Value(vr.index)
}

// ValueAsJSON returns the current value as an interface{} in it's JSON representation.
// A null value is returned as nil with no error.
func (vr *BooleanValueIterator) ValueAsJSON() (interface{}, error) {
	if vr.ref.IsNull(vr.index) {
		return nil, nil
	}
	return booleanAsJSON(vr.ref.Value(vr.index))
}

// DataType returns the Arrow data type of the column being iterated.
func (vr *BooleanValueIterator) DataType() arrow.DataType {
	return vr.dataType
}
// Next moves the iterator to the next value. This will return false
// when there are no more values.
func (vr *BooleanValueIterator) Next() bool {
	if vr.done {
		return false
	}
	vr.index++

	// Advance chunks until the index lands inside a chunk with data.
	for vr.ref == nil || vr.index >= vr.ref.Len() {
		if vr.nextChunk() {
			continue
		}
		// No chunks remain; the iterator is exhausted.
		vr.done = true
		return false
	}
	return true
}
// nextChunk advances to the next chunk (which may be empty; Next loops until
// a chunk has data), swapping the retained chunk reference and resetting the
// value index. It returns false when no chunks remain.
func (vr *BooleanValueIterator) nextChunk() bool {
	// Advance the chunk until we get one with data in it or we are done
	if !vr.chunkIterator.Next() {
		// No more chunks
		return false
	}
	// There was another chunk.
	// We maintain the ref and the values because the ref is going to allow us to retain the memory.
	ref := vr.chunkIterator.Chunk()
	// Retain the new chunk before releasing the old one.
	ref.Retain()
	if vr.ref != nil {
		vr.ref.Release()
	}
	vr.ref = ref.(*array.Boolean)
	vr.index = 0
	return true
}
// Retain keeps a reference to the BooleanValueIterator
func (vr *BooleanValueIterator) Retain() {
	atomic.AddInt64(&vr.refCount, 1)
}

// Release removes a reference to the BooleanValueIterator
// When the count reaches zero the chunk iterator and the retained chunk are
// released and cleared.
func (vr *BooleanValueIterator) Release() {
	debug.Assert(atomic.LoadInt64(&vr.refCount) > 0, "too many releases")
	if atomic.AddInt64(&vr.refCount, -1) == 0 {
		if vr.chunkIterator != nil {
			vr.chunkIterator.Release()
			vr.chunkIterator = nil
		}
		if vr.ref != nil {
			vr.ref.Release()
			vr.ref = nil
		}
	}
}
package model
import (
"reflect"
"strconv"
"time"
"github.com/emmettwoo/EMM-MoneyBox/util"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// DayFlowEntity is the MongoDB document grouping all cash flows recorded on a
// single calendar day.
type DayFlowEntity struct {
	Id        primitive.ObjectID   `bson:"_id,omitempty"`
	CashFlows []primitive.ObjectID `json:"cashFlows" bson:"cashFlows"` // ids of the day's cash-flow documents
	Day       int                  `json:"day" bson:"day"`
	Month     int                  `json:"month" bson:"month"`
	Year      int                  `json:"year" bson:"year"`
}
// IsEmpty reports whether the entity equals the zero value, i.e. it was never
// populated (e.g. the lookup that produced it found no document).
func (entity DayFlowEntity) IsEmpty() bool {
	var zero DayFlowEntity
	return reflect.DeepEqual(entity, zero)
}
// GetDayFlowByObjectId fetches the dayFlow document with the given id,
// returning the zero value (check with IsEmpty) when none exists.
func GetDayFlowByObjectId(objectId primitive.ObjectID) DayFlowEntity {
	filter := bson.D{
		primitive.E{Key: "_id", Value: objectId},
	}
	// Open the connection to the dayFlow collection.
	util.OpenConnection("dayFlow")
	return convertBsonM2DayFlowEntity(util.GetOne(filter))
}

// GetDayFlowByDate fetches the dayFlow document for the given calendar date,
// returning the zero value (check with IsEmpty) when none exists.
func GetDayFlowByDate(date time.Time) DayFlowEntity {
	// Go's date formatting returns a string, but the database stores an int,
	// so convert the month to an integer.
	monthInInt, _ := strconv.Atoi(date.Format("01"))
	// Filter on the given year/month/day; by design a day maps to at most one
	// dayFlow document.
	filter := bson.D{
		primitive.E{Key: "day", Value: date.Day()},
		primitive.E{Key: "month", Value: monthInInt},
		primitive.E{Key: "year", Value: date.Year()},
	}
	util.OpenConnection("dayFlow")
	return convertBsonM2DayFlowEntity(util.GetOne(filter))
}
// InsertDayFlowByEntity inserts the given entity into the dayFlow collection
// and returns the id of the new document.
func InsertDayFlowByEntity(entity DayFlowEntity) primitive.ObjectID {
	util.OpenConnection("dayFlow")
	return util.InsertOne(convertDayFlowEntity2BsonD(entity))
}

// InsertDayFlowByDate inserts a dayFlow document for the given date with no
// cash flows yet, and returns the id of the new document.
func InsertDayFlowByDate(date time.Time) primitive.ObjectID {
	// Month is formatted as a string by Go, but stored as an int.
	monthInInt, _ := strconv.Atoi(date.Format("01"))
	entity := DayFlowEntity{
		Day:   date.Day(),
		Month: monthInInt,
		Year:  date.Year(),
	}
	util.OpenConnection("dayFlow")
	return util.InsertOne(convertDayFlowEntity2BsonD(entity))
}
// UpdateDayFlowByEntity overwrites the stored document matching entity.Id and
// reports whether exactly one document was updated. It panics when the id is
// the nil ObjectID, since that cannot address an existing document.
func UpdateDayFlowByEntity(entity DayFlowEntity) bool {
	if entity.Id == primitive.NilObjectID {
		panic("DayFlow's id can not be nil.")
	}
	filter := bson.D{
		primitive.E{Key: "_id", Value: entity.Id},
	}
	util.OpenConnection("dayFlow")
	return util.UpdateMany(filter, convertDayFlowEntity2BsonD(entity)) == 1
}
// DeleteDayFlowByObjectId removes the dayFlow document with the given id and
// returns the deleted entity. It panics when no such document exists.
func DeleteDayFlowByObjectId(objectId primitive.ObjectID) DayFlowEntity {
	entity := GetDayFlowByObjectId(objectId)
	if entity.IsEmpty() {
		panic("DayFlow does not exist!")
	}
	filter := bson.D{
		primitive.E{Key: "_id", Value: objectId},
	}
	util.OpenConnection("dayFlow")
	util.DeleteMany(filter)
	return entity
}

// DeleteDayFlowByDate removes the dayFlow document for the given date and
// returns the deleted entity. It panics when no such document exists.
func DeleteDayFlowByDate(date time.Time) DayFlowEntity {
	entity := GetDayFlowByDate(date)
	if entity.IsEmpty() {
		panic("DayFlow does not exist!")
	}
	// Month is formatted as a string by Go, but stored as an int.
	monthInInt, _ := strconv.Atoi(date.Format("01"))
	filter := bson.D{
		primitive.E{Key: "day", Value: date.Day()},
		primitive.E{Key: "month", Value: monthInInt},
		primitive.E{Key: "year", Value: date.Year()},
	}
	util.OpenConnection("dayFlow")
	util.DeleteMany(filter)
	return entity
}
// convertDayFlowEntity2BsonD converts the entity into a bson.D document for
// insertion or replacement.
func convertDayFlowEntity2BsonD(entity DayFlowEntity) bson.D {
	// Generate a new id automatically when none is set.
	if entity.Id == primitive.NilObjectID {
		entity.Id = primitive.NewObjectIDFromTimestamp(time.Now())
	}
	return bson.D{
		primitive.E{Key: "_id", Value: entity.Id},
		primitive.E{Key: "cashFlows", Value: entity.CashFlows},
		primitive.E{Key: "day", Value: entity.Day},
		primitive.E{Key: "month", Value: entity.Month},
		primitive.E{Key: "year", Value: entity.Year},
	}
}

// convertBsonM2DayFlowEntity decodes a bson.M document into a DayFlowEntity.
// NOTE(review): Marshal/Unmarshal errors are silently ignored, so a malformed
// document decodes to the zero value — confirm this is acceptable (callers
// rely on IsEmpty to detect "not found").
func convertBsonM2DayFlowEntity(bsonM bson.M) DayFlowEntity {
	var entity DayFlowEntity
	bsonBytes, _ := bson.Marshal(bsonM)
	bson.Unmarshal(bsonBytes, &entity)
	return entity
}
package vectormath
const g_PI_OVER_2 = 1.570796327
// M3Copy copies mat into result, column by column.
func M3Copy(result *Matrix3, mat *Matrix3) {
	V3Copy(&result.col0, &mat.col0)
	V3Copy(&result.col1, &mat.col1)
	V3Copy(&result.col2, &mat.col2)
}

// M3MakeFromScalar sets every element of result to scalar.
func M3MakeFromScalar(result *Matrix3, scalar float32) {
	V3MakeFromScalar(&result.col0, scalar)
	V3MakeFromScalar(&result.col1, scalar)
	V3MakeFromScalar(&result.col2, scalar)
}
// M3MakeFromQ converts the unit-length quaternion unitQuat into the
// equivalent 3x3 rotation matrix, written into result column by column.
//
// Fix: the original read unitQuat.X for all four components (qy, qz and qw
// were copy-paste duplicates of qx), producing an incorrect matrix for any
// quaternion with non-zero Y, Z or W differing from X.
func M3MakeFromQ(result *Matrix3, unitQuat *Quat) {
	qx := unitQuat.X
	qy := unitQuat.Y
	qz := unitQuat.Z
	qw := unitQuat.W
	qx2 := qx + qx
	qy2 := qy + qy
	qz2 := qz + qz
	qxqx2 := qx * qx2
	qxqy2 := qx * qy2
	qxqz2 := qx * qz2
	qxqw2 := qw * qx2
	qyqy2 := qy * qy2
	qyqz2 := qy * qz2
	qyqw2 := qw * qy2
	qzqz2 := qz * qz2
	qzqw2 := qw * qz2
	V3MakeFromElems(&result.col0, ((1.0-qyqy2)-qzqz2), (qxqy2+qzqw2), (qxqz2-qyqw2))
	V3MakeFromElems(&result.col1, (qxqy2-qzqw2), ((1.0-qxqx2)-qzqz2), (qyqz2+qxqw2))
	V3MakeFromElems(&result.col2, (qxqz2+qyqw2), (qyqz2-qxqw2), ((1.0-qxqx2)-qyqy2))
}
// M3MakeFromCols builds result from three column vectors.
func M3MakeFromCols(result *Matrix3, col0, col1, col2 *Vector3) {
	V3Copy(&result.col0, col0)
	V3Copy(&result.col1, col1)
	V3Copy(&result.col2, col2)
}

// SetCol0 replaces column 0 of the matrix.
func (m *Matrix3) SetCol0(col0 *Vector3) {
	V3Copy(&m.col0, col0)
}

// SetCol1 replaces column 1 of the matrix.
func (m *Matrix3) SetCol1(col1 *Vector3) {
	V3Copy(&m.col1, col1)
}

// SetCol2 replaces column 2 of the matrix.
func (m *Matrix3) SetCol2(col2 *Vector3) {
	V3Copy(&m.col2, col2)
}

// SetCol replaces the column at index col (0-2); other indexes are ignored.
func (m *Matrix3) SetCol(col int, vec *Vector3) {
	switch col {
	case 0:
		V3Copy(&m.col0, vec)
	case 1:
		V3Copy(&m.col1, vec)
	case 2:
		V3Copy(&m.col2, vec)
	}
}

// SetRow replaces the row at index row with the components of vec, writing
// one element into each column.
func (m *Matrix3) SetRow(row int, vec *Vector3) {
	m.col0.SetElem(row, vec.GetElem(0))
	m.col1.SetElem(row, vec.GetElem(1))
	m.col2.SetElem(row, vec.GetElem(2))
}
// SetElem sets the single element at (col, row) by copying the column out,
// updating it and writing it back.
func (m *Matrix3) SetElem(col, row int, val float32) {
	var tmpV3_0 Vector3
	M3GetCol(&tmpV3_0, m, col)
	tmpV3_0.SetElem(row, val)
	m.SetCol(col, &tmpV3_0)
}

// GetElem returns the element at (col, row).
func (m *Matrix3) GetElem(col, row int) float32 {
	var tmpV3_0 Vector3
	M3GetCol(&tmpV3_0, m, col)
	return tmpV3_0.GetElem(row)
}

// M3GetCol0 copies column 0 of mat into result.
func M3GetCol0(result *Vector3, mat *Matrix3) {
	V3Copy(result, &mat.col0)
}

// M3GetCol1 copies column 1 of mat into result.
func M3GetCol1(result *Vector3, mat *Matrix3) {
	V3Copy(result, &mat.col1)
}

// M3GetCol2 copies column 2 of mat into result.
func M3GetCol2(result *Vector3, mat *Matrix3) {
	V3Copy(result, &mat.col2)
}

// M3GetCol copies the column at index col (0-2) into result; out-of-range
// indexes leave result untouched.
func M3GetCol(result *Vector3, mat *Matrix3, col int) {
	switch col {
	case 0:
		V3Copy(result, &mat.col0)
	case 1:
		V3Copy(result, &mat.col1)
	case 2:
		V3Copy(result, &mat.col2)
	}
}

// M3GetRow gathers the row at index row (one element from each column) into
// result.
func M3GetRow(result *Vector3, mat *Matrix3, row int) {
	x := mat.col0.GetElem(row)
	y := mat.col1.GetElem(row)
	z := mat.col2.GetElem(row)
	V3MakeFromElems(result, x, y, z)
}
// M3Transpose writes the transpose of mat into result. A temporary is used so
// the call is safe when result aliases mat.
func M3Transpose(result, mat *Matrix3) {
	var tmpResult Matrix3
	V3MakeFromElems(&tmpResult.col0, mat.col0.X, mat.col1.X, mat.col2.X)
	V3MakeFromElems(&tmpResult.col1, mat.col0.Y, mat.col1.Y, mat.col2.Y)
	V3MakeFromElems(&tmpResult.col2, mat.col0.Z, mat.col1.Z, mat.col2.Z)
	M3Copy(result, &tmpResult)
}

// M3Inverse writes the inverse of mat into result using cross products of the
// columns (the adjugate) divided by the determinant. Behavior is undefined
// for singular matrices (division by a zero determinant).
func M3Inverse(result, mat *Matrix3) {
	var tmp0, tmp1, tmp2 Vector3
	V3Cross(&tmp0, &mat.col1, &mat.col2)
	V3Cross(&tmp1, &mat.col2, &mat.col0)
	V3Cross(&tmp2, &mat.col0, &mat.col1)
	// det = col2 . (col0 x col1), the scalar triple product.
	detinv := 1.0 / V3Dot(&mat.col2, &tmp2)
	V3MakeFromElems(&result.col0, tmp0.X*detinv, tmp1.X*detinv, tmp2.X*detinv)
	V3MakeFromElems(&result.col1, tmp0.Y*detinv, tmp1.Y*detinv, tmp2.Y*detinv)
	V3MakeFromElems(&result.col2, tmp0.Z*detinv, tmp1.Z*detinv, tmp2.Z*detinv)
}

// Determinant returns the determinant of the matrix via the scalar triple
// product of its columns.
func (m *Matrix3) Determinant() float32 {
	var tmpV3_0 Vector3
	V3Cross(&tmpV3_0, &m.col0, &m.col1)
	return V3Dot(&m.col2, &tmpV3_0)
}
// M3Add writes the element-wise sum mat0 + mat1 into result.
func M3Add(result, mat0, mat1 *Matrix3) {
	V3Add(&result.col0, &mat0.col0, &mat1.col0)
	V3Add(&result.col1, &mat0.col1, &mat1.col1)
	V3Add(&result.col2, &mat0.col2, &mat1.col2)
}

// M3Sub writes the element-wise difference mat0 - mat1 into result.
func M3Sub(result, mat0, mat1 *Matrix3) {
	V3Sub(&result.col0, &mat0.col0, &mat1.col0)
	V3Sub(&result.col1, &mat0.col1, &mat1.col1)
	V3Sub(&result.col2, &mat0.col2, &mat1.col2)
}

// M3Neg writes the element-wise negation of mat into result.
func M3Neg(result, mat *Matrix3) {
	V3Neg(&result.col0, &mat.col0)
	V3Neg(&result.col1, &mat.col1)
	V3Neg(&result.col2, &mat.col2)
}

// M3AbsPerElem writes the element-wise absolute value of mat into result.
func M3AbsPerElem(result, mat *Matrix3) {
	V3AbsPerElem(&result.col0, &mat.col0)
	V3AbsPerElem(&result.col1, &mat.col1)
	V3AbsPerElem(&result.col2, &mat.col2)
}

// M3ScalarMul writes mat scaled by scalar into result.
func M3ScalarMul(result, mat *Matrix3, scalar float32) {
	V3ScalarMul(&result.col0, &mat.col0, scalar)
	V3ScalarMul(&result.col1, &mat.col1, scalar)
	V3ScalarMul(&result.col2, &mat.col2, scalar)
}

// M3MulV3 writes the matrix-vector product mat * vec into result.
func M3MulV3(result *Vector3, mat *Matrix3, vec *Vector3) {
	tmpX := ((mat.col0.X * vec.X) + (mat.col1.X * vec.Y)) + (mat.col2.X * vec.Z)
	tmpY := ((mat.col0.Y * vec.X) + (mat.col1.Y * vec.Y)) + (mat.col2.Y * vec.Z)
	tmpZ := ((mat.col0.Z * vec.X) + (mat.col1.Z * vec.Y)) + (mat.col2.Z * vec.Z)
	V3MakeFromElems(result, tmpX, tmpY, tmpZ)
}

// M3Mul writes the matrix product mat0 * mat1 into result. A temporary makes
// the call safe when result aliases either operand.
func M3Mul(result, mat0, mat1 *Matrix3) {
	var tmpResult Matrix3
	M3MulV3(&tmpResult.col0, mat0, &mat1.col0)
	M3MulV3(&tmpResult.col1, mat0, &mat1.col1)
	M3MulV3(&tmpResult.col2, mat0, &mat1.col2)
	M3Copy(result, &tmpResult)
}

// M3MulPerElem writes the element-wise (Hadamard) product into result.
func M3MulPerElem(result, mat0, mat1 *Matrix3) {
	V3MulPerElem(&result.col0, &mat0.col0, &mat1.col0)
	V3MulPerElem(&result.col1, &mat0.col1, &mat1.col1)
	V3MulPerElem(&result.col2, &mat0.col2, &mat1.col2)
}
// M3MakeIdentity writes the identity matrix into result.
func M3MakeIdentity(result *Matrix3) {
	V3MakeXAxis(&result.col0)
	V3MakeYAxis(&result.col1)
	V3MakeZAxis(&result.col2)
}

// M3MakeRotationX writes a rotation of radians about the X axis into result.
func M3MakeRotationX(result *Matrix3, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V3MakeXAxis(&result.col0)
	V3MakeFromElems(&result.col1, 0.0, c, s)
	V3MakeFromElems(&result.col2, 0.0, -s, c)
}

// M3MakeRotationY writes a rotation of radians about the Y axis into result.
func M3MakeRotationY(result *Matrix3, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V3MakeFromElems(&result.col0, c, 0.0, -s)
	V3MakeYAxis(&result.col1)
	V3MakeFromElems(&result.col2, s, 0.0, c)
}

// M3MakeRotationZ writes a rotation of radians about the Z axis into result.
func M3MakeRotationZ(result *Matrix3, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V3MakeFromElems(&result.col0, c, s, 0.0)
	V3MakeFromElems(&result.col1, -s, c, 0.0)
	V3MakeZAxis(&result.col2)
}

// M3MakeRotationZYX writes a combined rotation into result built from the
// per-axis angles in radiansXYZ (X, Y and Z components), composed per the
// library's ZYX convention.
func M3MakeRotationZYX(result *Matrix3, radiansXYZ *Vector3) {
	sX := sin(radiansXYZ.X)
	cX := cos(radiansXYZ.X)
	sY := sin(radiansXYZ.Y)
	cY := cos(radiansXYZ.Y)
	sZ := sin(radiansXYZ.Z)
	cZ := cos(radiansXYZ.Z)
	tmp0 := cZ * sY
	tmp1 := sZ * sY
	V3MakeFromElems(&result.col0, (cZ * cY), (sZ * cY), -sY)
	V3MakeFromElems(&result.col1, ((tmp0 * sX) - (sZ * cX)), ((tmp1 * sX) + (cZ * cX)), (cY * sX))
	V3MakeFromElems(&result.col2, ((tmp0 * cX) + (sZ * sX)), ((tmp1 * cX) - (cZ * sX)), (cY * cX))
}
// M3MakeRotationAxis writes a rotation of radians about the given unit-length
// axis into result (expanded axis-angle / Rodrigues form). unitVec must be
// normalized.
func M3MakeRotationAxis(result *Matrix3, radians float32, unitVec *Vector3) {
	s := sin(radians)
	c := cos(radians)
	x := unitVec.X
	y := unitVec.Y
	z := unitVec.Z
	xy := x * y
	yz := y * z
	zx := z * x
	oneMinusC := 1.0 - c
	V3MakeFromElems(&result.col0, (((x * x) * oneMinusC) + c), ((xy * oneMinusC) + (z * s)), ((zx * oneMinusC) - (y * s)))
	V3MakeFromElems(&result.col1, ((xy * oneMinusC) - (z * s)), (((y * y) * oneMinusC) + c), ((yz * oneMinusC) + (x * s)))
	V3MakeFromElems(&result.col2, ((zx * oneMinusC) + (y * s)), ((yz * oneMinusC) - (x * s)), (((z * z) * oneMinusC) + c))
}

// M3MakeRotationQ writes the rotation matrix of a unit quaternion into
// result; it delegates to M3MakeFromQ.
func M3MakeRotationQ(result *Matrix3, unitQuat *Quat) {
	M3MakeFromQ(result, unitQuat)
}

// M3MakeScale writes a diagonal scaling matrix built from scaleVec into result.
func M3MakeScale(result *Matrix3, scaleVec *Vector3) {
	V3MakeFromElems(&result.col0, scaleVec.X, 0.0, 0.0)
	V3MakeFromElems(&result.col1, 0.0, scaleVec.Y, 0.0)
	V3MakeFromElems(&result.col2, 0.0, 0.0, scaleVec.Z)
}

// M3AppendScale writes mat with a scale applied on the right (each column
// scaled by the corresponding component of scaleVec) into result.
func M3AppendScale(result, mat *Matrix3, scaleVec *Vector3) {
	V3ScalarMul(&result.col0, &mat.col0, scaleVec.X)
	V3ScalarMul(&result.col1, &mat.col1, scaleVec.Y)
	V3ScalarMul(&result.col2, &mat.col2, scaleVec.Z)
}

// M3PrependScale writes mat with a scale applied on the left (each column
// multiplied element-wise by scaleVec) into result.
func M3PrependScale(result *Matrix3, scaleVec *Vector3, mat *Matrix3) {
	V3MulPerElem(&result.col0, &mat.col0, scaleVec)
	V3MulPerElem(&result.col1, &mat.col1, scaleVec)
	V3MulPerElem(&result.col2, &mat.col2, scaleVec)
}

// M3Select chooses between mat0 and mat1 column by column based on select1
// (see V3Select for the selection convention) and writes the result.
func M3Select(result, mat0, mat1 *Matrix3, select1 int) {
	V3Select(&result.col0, &mat0.col0, &mat1.col0, select1)
	V3Select(&result.col1, &mat0.col1, &mat1.col1, select1)
	V3Select(&result.col2, &mat0.col2, &mat1.col2, select1)
}
// String implements fmt.Stringer. The matrix is transposed first so each
// printed Vector3 corresponds to a row of m rather than a stored column.
func (m *Matrix3) String() string {
	var rowMajor Matrix3
	M3Transpose(&rowMajor, m)
	s := rowMajor.col0.String()
	s += rowMajor.col1.String()
	s += rowMajor.col2.String()
	return s
}
/*******/
// M4Copy copies all four columns of mat into result.
func M4Copy(result, mat *Matrix4) {
	V4Copy(&result.col0, &mat.col0)
	V4Copy(&result.col1, &mat.col1)
	V4Copy(&result.col2, &mat.col2)
	V4Copy(&result.col3, &mat.col3)
}
// M4MakeFromScalar fills every element of result with scalar.
func M4MakeFromScalar(result *Matrix4, scalar float32) {
	V4MakeFromScalar(&result.col0, scalar)
	V4MakeFromScalar(&result.col1, scalar)
	V4MakeFromScalar(&result.col2, scalar)
	V4MakeFromScalar(&result.col3, scalar)
}
// M4MakeFromT3 promotes the 3x4 affine transform mat to a full 4x4 matrix:
// the rotation columns get w=0 and the translation column gets w=1.
func M4MakeFromT3(result *Matrix4, mat *Transform3) {
	V4MakeFromV3Scalar(&result.col0, &mat.col0, 0.0)
	V4MakeFromV3Scalar(&result.col1, &mat.col1, 0.0)
	V4MakeFromV3Scalar(&result.col2, &mat.col2, 0.0)
	V4MakeFromV3Scalar(&result.col3, &mat.col3, 1.0)
}
// M4MakeFromCols assembles result from four explicit column vectors.
func M4MakeFromCols(result *Matrix4, col0, col1, col2, col3 *Vector4) {
	V4Copy(&result.col0, col0)
	V4Copy(&result.col1, col1)
	V4Copy(&result.col2, col2)
	V4Copy(&result.col3, col3)
}
// M4MakeFromM3V3 builds a 4x4 transform from a 3x3 upper block (w=0 per
// column) and a translation vector (w=1).
func M4MakeFromM3V3(result *Matrix4, mat *Matrix3, translateVec *Vector3) {
	V4MakeFromV3Scalar(&result.col0, &mat.col0, 0.0)
	V4MakeFromV3Scalar(&result.col1, &mat.col1, 0.0)
	V4MakeFromV3Scalar(&result.col2, &mat.col2, 0.0)
	V4MakeFromV3Scalar(&result.col3, translateVec, 1.0)
}
// M4MakeFromQV3 builds a 4x4 transform from a unit quaternion (rotation)
// and a translation vector: the rotation columns get w=0 and the
// translation column gets w=1.
//
// Bug fix: the original declared `var mat *Matrix3` (a nil pointer) and
// passed it to M3MakeFromQ, which dereferences it — a guaranteed nil
// pointer panic. Use a stack value and take its address instead, matching
// the sibling constructors (e.g. T3MakeFromQV3, M4MakeRotationQ).
func M4MakeFromQV3(result *Matrix4, unitQuat *Quat, translateVec *Vector3) {
	var mat Matrix3
	M3MakeFromQ(&mat, unitQuat)
	V4MakeFromV3Scalar(&result.col0, &mat.col0, 0.0)
	V4MakeFromV3Scalar(&result.col1, &mat.col1, 0.0)
	V4MakeFromV3Scalar(&result.col2, &mat.col2, 0.0)
	V4MakeFromV3Scalar(&result.col3, translateVec, 1.0)
}
// SetCol0 replaces column 0 of the matrix.
func (m *Matrix4) SetCol0(col0 *Vector4) {
	V4Copy(&m.col0, col0)
}
// SetCol1 replaces column 1 of the matrix.
func (m *Matrix4) SetCol1(col1 *Vector4) {
	V4Copy(&m.col1, col1)
}
// SetCol2 replaces column 2 of the matrix.
func (m *Matrix4) SetCol2(col2 *Vector4) {
	V4Copy(&m.col2, col2)
}
// SetCol3 replaces column 3 of the matrix.
func (m *Matrix4) SetCol3(col3 *Vector4) {
	V4Copy(&m.col3, col3)
}
// SetCol replaces column col (0-3) with vec; out-of-range indices are
// silently ignored.
func (m *Matrix4) SetCol(col int, vec *Vector4) {
	switch col {
	case 0:
		V4Copy(&m.col0, vec)
	case 1:
		V4Copy(&m.col1, vec)
	case 2:
		V4Copy(&m.col2, vec)
	case 3:
		V4Copy(&m.col3, vec)
	}
}
// SetRow writes vec across row `row`: one component into each column.
func (m *Matrix4) SetRow(row int, vec *Vector4) {
	m.col0.SetElem(row, vec.X)
	m.col1.SetElem(row, vec.Y)
	m.col2.SetElem(row, vec.Z)
	m.col3.SetElem(row, vec.W)
}
// SetElem sets the single element at (col, row) to val, via a fetch-
// modify-store of the whole column.
//
// Consistency fix: the scratch variable was named tmpV3_0 although it is a
// Vector4; renamed to tmpV4_0 to match the sibling GetElem.
func (m *Matrix4) SetElem(col, row int, val float32) {
	var tmpV4_0 Vector4
	M4GetCol(&tmpV4_0, m, col)
	tmpV4_0.SetElem(row, val)
	m.SetCol(col, &tmpV4_0)
}
// GetElem returns the single element at (col, row).
func (m *Matrix4) GetElem(col, row int) float32 {
	var tmpV4_0 Vector4
	M4GetCol(&tmpV4_0, m, col)
	return tmpV4_0.GetElem(row)
}
// M4GetCol0 copies column 0 of mat into result.
func M4GetCol0(result *Vector4, mat *Matrix4) {
	V4Copy(result, &mat.col0)
}
// M4GetCol1 copies column 1 of mat into result.
func M4GetCol1(result *Vector4, mat *Matrix4) {
	V4Copy(result, &mat.col1)
}
// M4GetCol2 copies column 2 of mat into result.
func M4GetCol2(result *Vector4, mat *Matrix4) {
	V4Copy(result, &mat.col2)
}
// M4GetCol3 copies column 3 of mat into result.
func M4GetCol3(result *Vector4, mat *Matrix4) {
	V4Copy(result, &mat.col3)
}
// M4GetCol copies column col (0-3) of mat into result; out-of-range
// indices leave result untouched.
func M4GetCol(result *Vector4, mat *Matrix4, col int) {
	switch col {
	case 0:
		V4Copy(result, &mat.col0)
	case 1:
		V4Copy(result, &mat.col1)
	case 2:
		V4Copy(result, &mat.col2)
	case 3:
		V4Copy(result, &mat.col3)
	}
}
// M4GetRow gathers row `row` of mat (one element from each column) into
// result.
func M4GetRow(result *Vector4, mat *Matrix4, row int) {
	V4MakeFromElems(result, mat.col0.GetElem(row), mat.col1.GetElem(row), mat.col2.GetElem(row), mat.col3.GetElem(row))
}
// M4Transpose writes the transpose of mat into result. The transpose is
// assembled in a scratch matrix first so the call stays correct even when
// result and mat alias the same matrix.
func M4Transpose(result, mat *Matrix4) {
	var transposed Matrix4
	V4MakeFromElems(&transposed.col0, mat.col0.X, mat.col1.X, mat.col2.X, mat.col3.X)
	V4MakeFromElems(&transposed.col1, mat.col0.Y, mat.col1.Y, mat.col2.Y, mat.col3.Y)
	V4MakeFromElems(&transposed.col2, mat.col0.Z, mat.col1.Z, mat.col2.Z, mat.col3.Z)
	V4MakeFromElems(&transposed.col3, mat.col0.W, mat.col1.W, mat.col2.W, mat.col3.W)
	M4Copy(result, &transposed)
}
// M4Inverse writes the full inverse of mat into result using cofactor
// expansion: the sixteen elements are flattened into locals mA..mP (column
// by column), 2x2 sub-determinants are shared via tmp0..tmp5, and each
// res column is the corresponding cofactor column scaled by 1/det.
// Behavior is undefined for a singular matrix (detInv divides by zero).
// NOTE(review): the exact order of the res* updates matters (tmp0..tmp5
// are reused with new values halfway through) — do not reorder.
func M4Inverse(result, mat *Matrix4) {
	var res0, res1, res2, res3 Vector4
	// Flatten the matrix into scalars: col0 = (mA,mB,mC,mD), col1 =
	// (mE,mF,mG,mH), col2 = (mI,mJ,mK,mL), col3 = (mM,mN,mO,mP).
	mA := mat.col0.X
	mB := mat.col0.Y
	mC := mat.col0.Z
	mD := mat.col0.W
	mE := mat.col1.X
	mF := mat.col1.Y
	mG := mat.col1.Z
	mH := mat.col1.W
	mI := mat.col2.X
	mJ := mat.col2.Y
	mK := mat.col2.Z
	mL := mat.col2.W
	mM := mat.col3.X
	mN := mat.col3.Y
	mO := mat.col3.Z
	mP := mat.col3.W
	// First batch of shared 2x2 sub-determinants.
	tmp0 := ((mK * mD) - (mC * mL))
	tmp1 := ((mO * mH) - (mG * mP))
	tmp2 := ((mB * mK) - (mJ * mC))
	tmp3 := ((mF * mO) - (mN * mG))
	tmp4 := ((mJ * mD) - (mB * mL))
	tmp5 := ((mN * mH) - (mF * mP))
	res0.SetX(((mJ * tmp1) - (mL * tmp3)) - (mK * tmp5))
	res0.SetY(((mN * tmp0) - (mP * tmp2)) - (mO * tmp4))
	res0.SetZ(((mD * tmp3) + (mC * tmp5)) - (mB * tmp1))
	res0.SetW(((mH * tmp2) + (mG * tmp4)) - (mF * tmp0))
	// Determinant is the dot product of column 0 with its cofactors.
	detInv := (1.0 / ((((mA * res0.X) + (mE * res0.Y)) + (mI * res0.Z)) + (mM * res0.W)))
	res1.SetX(mI * tmp1)
	res1.SetY(mM * tmp0)
	res1.SetZ(mA * tmp1)
	res1.SetW(mE * tmp0)
	res3.SetX(mI * tmp3)
	res3.SetY(mM * tmp2)
	res3.SetZ(mA * tmp3)
	res3.SetW(mE * tmp2)
	res2.SetX(mI * tmp5)
	res2.SetY(mM * tmp4)
	res2.SetZ(mA * tmp5)
	res2.SetW(mE * tmp4)
	// Second batch of sub-determinants reuses tmp0..tmp5 with new values;
	// the partial res1..res3 built above are folded in below.
	tmp0 = ((mI * mB) - (mA * mJ))
	tmp1 = ((mM * mF) - (mE * mN))
	tmp2 = ((mI * mD) - (mA * mL))
	tmp3 = ((mM * mH) - (mE * mP))
	tmp4 = ((mI * mC) - (mA * mK))
	tmp5 = ((mM * mG) - (mE * mO))
	res2.SetX(((mL * tmp1) - (mJ * tmp3)) + res2.X)
	res2.SetY(((mP * tmp0) - (mN * tmp2)) + res2.Y)
	res2.SetZ(((mB * tmp3) - (mD * tmp1)) - res2.Z)
	res2.SetW(((mF * tmp2) - (mH * tmp0)) - res2.W)
	res3.SetX(((mJ * tmp5) - (mK * tmp1)) + res3.X)
	res3.SetY(((mN * tmp4) - (mO * tmp0)) + res3.Y)
	res3.SetZ(((mC * tmp1) - (mB * tmp5)) - res3.Z)
	res3.SetW(((mG * tmp0) - (mF * tmp4)) - res3.W)
	res1.SetX(((mK * tmp3) - (mL * tmp5)) - res1.X)
	res1.SetY(((mO * tmp2) - (mP * tmp4)) - res1.Y)
	res1.SetZ(((mD * tmp5) - (mC * tmp3)) + res1.Z)
	res1.SetW(((mH * tmp4) - (mG * tmp2)) + res1.W)
	// Scale the cofactor columns by 1/det to produce the inverse.
	V4ScalarMul(&result.col0, &res0, detInv)
	V4ScalarMul(&result.col1, &res1, detInv)
	V4ScalarMul(&result.col2, &res2, detInv)
	V4ScalarMul(&result.col3, &res3, detInv)
}
// M4AffineInverse inverts mat assuming it is an affine transform (last row
// 0,0,0,1): the upper 3x4 part is extracted into a Transform3, inverted
// with T3Inverse, and promoted back to a Matrix4. Cheaper than M4Inverse
// but only valid for affine matrices — the bottom row of mat is discarded.
func M4AffineInverse(result, mat *Matrix4) {
	var affineMat, tmpT3_0 Transform3
	var tmpV3_0, tmpV3_1, tmpV3_2, tmpV3_3 Vector3
	// Drop the w components to form the 3x4 affine part.
	V4GetXYZ(&tmpV3_0, &mat.col0)
	V4GetXYZ(&tmpV3_1, &mat.col1)
	V4GetXYZ(&tmpV3_2, &mat.col2)
	V4GetXYZ(&tmpV3_3, &mat.col3)
	affineMat.SetCol0(&tmpV3_0)
	affineMat.SetCol1(&tmpV3_1)
	affineMat.SetCol2(&tmpV3_2)
	affineMat.SetCol3(&tmpV3_3)
	T3Inverse(&tmpT3_0, &affineMat)
	M4MakeFromT3(result, &tmpT3_0)
}
// M4OrthoInverse inverts mat assuming it is an orthogonal (rigid) affine
// transform; same extraction as M4AffineInverse but uses the cheaper
// transpose-based T3OrthoInverse. Only valid when the upper 3x3 is
// orthonormal.
func M4OrthoInverse(result, mat *Matrix4) {
	var affineMat, tmpT3_0 Transform3
	var tmpV3_0, tmpV3_1, tmpV3_2, tmpV3_3 Vector3
	V4GetXYZ(&tmpV3_0, &mat.col0)
	V4GetXYZ(&tmpV3_1, &mat.col1)
	V4GetXYZ(&tmpV3_2, &mat.col2)
	V4GetXYZ(&tmpV3_3, &mat.col3)
	affineMat.SetCol0(&tmpV3_0)
	affineMat.SetCol1(&tmpV3_1)
	affineMat.SetCol2(&tmpV3_2)
	affineMat.SetCol3(&tmpV3_3)
	T3OrthoInverse(&tmpT3_0, &affineMat)
	M4MakeFromT3(result, &tmpT3_0)
}
// Determinant returns the determinant of the matrix via cofactor
// expansion along column 0. The tmp0..tmp5 sub-determinants and dx..dw
// cofactors mirror the first half of M4Inverse.
func (m *Matrix4) Determinant() float32 {
	// Flatten: col0 = (mA,mB,mC,mD), col1 = (mE,mF,mG,mH),
	// col2 = (mI,mJ,mK,mL), col3 = (mM,mN,mO,mP).
	mA := m.col0.X
	mB := m.col0.Y
	mC := m.col0.Z
	mD := m.col0.W
	mE := m.col1.X
	mF := m.col1.Y
	mG := m.col1.Z
	mH := m.col1.W
	mI := m.col2.X
	mJ := m.col2.Y
	mK := m.col2.Z
	mL := m.col2.W
	mM := m.col3.X
	mN := m.col3.Y
	mO := m.col3.Z
	mP := m.col3.W
	// Shared 2x2 sub-determinants.
	tmp0 := ((mK * mD) - (mC * mL))
	tmp1 := ((mO * mH) - (mG * mP))
	tmp2 := ((mB * mK) - (mJ * mC))
	tmp3 := ((mF * mO) - (mN * mG))
	tmp4 := ((mJ * mD) - (mB * mL))
	tmp5 := ((mN * mH) - (mF * mP))
	// Cofactors of column 0.
	dx := (((mJ * tmp1) - (mL * tmp3)) - (mK * tmp5))
	dy := (((mN * tmp0) - (mP * tmp2)) - (mO * tmp4))
	dz := (((mD * tmp3) + (mC * tmp5)) - (mB * tmp1))
	dw := (((mH * tmp2) + (mG * tmp4)) - (mF * tmp0))
	return ((((mA * dx) + (mE * dy)) + (mI * dz)) + (mM * dw))
}
// M4Add writes the element-wise sum mat0 + mat1 into result.
func M4Add(result, mat0, mat1 *Matrix4) {
	V4Add(&result.col0, &mat0.col0, &mat1.col0)
	V4Add(&result.col1, &mat0.col1, &mat1.col1)
	V4Add(&result.col2, &mat0.col2, &mat1.col2)
	V4Add(&result.col3, &mat0.col3, &mat1.col3)
}
// M4Sub writes the element-wise difference mat0 - mat1 into result.
func M4Sub(result, mat0, mat1 *Matrix4) {
	V4Sub(&result.col0, &mat0.col0, &mat1.col0)
	V4Sub(&result.col1, &mat0.col1, &mat1.col1)
	V4Sub(&result.col2, &mat0.col2, &mat1.col2)
	V4Sub(&result.col3, &mat0.col3, &mat1.col3)
}
// M4Neg writes the element-wise negation of mat into result.
func M4Neg(result, mat *Matrix4) {
	V4Neg(&result.col0, &mat.col0)
	V4Neg(&result.col1, &mat.col1)
	V4Neg(&result.col2, &mat.col2)
	V4Neg(&result.col3, &mat.col3)
}
// M4AbsPerElem writes the element-wise absolute value of mat into result.
func M4AbsPerElem(result, mat *Matrix4) {
	V4AbsPerElem(&result.col0, &mat.col0)
	V4AbsPerElem(&result.col1, &mat.col1)
	V4AbsPerElem(&result.col2, &mat.col2)
	V4AbsPerElem(&result.col3, &mat.col3)
}
// M4ScalarMul writes mat scaled element-wise by scalar into result.
func M4ScalarMul(result, mat *Matrix4, scalar float32) {
	V4ScalarMul(&result.col0, &mat.col0, scalar)
	V4ScalarMul(&result.col1, &mat.col1, scalar)
	V4ScalarMul(&result.col2, &mat.col2, scalar)
	V4ScalarMul(&result.col3, &mat.col3, scalar)
}
// M4MulV4 writes mat * vec into result. Temporaries are computed first so
// the call is safe when result aliases vec.
func M4MulV4(result *Vector4, mat *Matrix4, vec *Vector4) {
	tmpX := (((mat.col0.X * vec.X) + (mat.col1.X * vec.Y)) + (mat.col2.X * vec.Z)) + (mat.col3.X * vec.W)
	tmpY := (((mat.col0.Y * vec.X) + (mat.col1.Y * vec.Y)) + (mat.col2.Y * vec.Z)) + (mat.col3.Y * vec.W)
	tmpZ := (((mat.col0.Z * vec.X) + (mat.col1.Z * vec.Y)) + (mat.col2.Z * vec.Z)) + (mat.col3.Z * vec.W)
	tmpW := (((mat.col0.W * vec.X) + (mat.col1.W * vec.Y)) + (mat.col2.W * vec.Z)) + (mat.col3.W * vec.W)
	V4MakeFromElems(result, tmpX, tmpY, tmpZ, tmpW)
}
// M4MulV3 writes mat * (vec, w=0) into result — a direction transform:
// the translation column does not contribute.
func M4MulV3(result *Vector4, mat *Matrix4, vec *Vector3) {
	result.X = ((mat.col0.X * vec.X) + (mat.col1.X * vec.Y)) + (mat.col2.X * vec.Z)
	result.Y = ((mat.col0.Y * vec.X) + (mat.col1.Y * vec.Y)) + (mat.col2.Y * vec.Z)
	result.Z = ((mat.col0.Z * vec.X) + (mat.col1.Z * vec.Y)) + (mat.col2.Z * vec.Z)
	result.W = ((mat.col0.W * vec.X) + (mat.col1.W * vec.Y)) + (mat.col2.W * vec.Z)
}
// M4MulP3 writes mat * (pnt, w=1) into result — a point transform: the
// translation column is added with implicit weight 1.
func M4MulP3(result *Vector4, mat *Matrix4, pnt *Point3) {
	result.X = (((mat.col0.X * pnt.X) + (mat.col1.X * pnt.Y)) + (mat.col2.X * pnt.Z)) + mat.col3.X
	result.Y = (((mat.col0.Y * pnt.X) + (mat.col1.Y * pnt.Y)) + (mat.col2.Y * pnt.Z)) + mat.col3.Y
	result.Z = (((mat.col0.Z * pnt.X) + (mat.col1.Z * pnt.Y)) + (mat.col2.Z * pnt.Z)) + mat.col3.Z
	result.W = (((mat.col0.W * pnt.X) + (mat.col1.W * pnt.Y)) + (mat.col2.W * pnt.Z)) + mat.col3.W
}
// M4Mul writes the matrix product mat0 * mat1 into result, one column at a
// time; a scratch matrix keeps the call safe when result aliases an input.
func M4Mul(result, mat0, mat1 *Matrix4) {
	var tmpResult Matrix4
	M4MulV4(&tmpResult.col0, mat0, &mat1.col0)
	M4MulV4(&tmpResult.col1, mat0, &mat1.col1)
	M4MulV4(&tmpResult.col2, mat0, &mat1.col2)
	M4MulV4(&tmpResult.col3, mat0, &mat1.col3)
	M4Copy(result, &tmpResult)
}
// M4MulT3 writes mat * tfrm1 into result, treating tfrm1's rotation
// columns as directions (w=0) and its translation column as a point (w=1).
func M4MulT3(result, mat *Matrix4, tfrm1 *Transform3) {
	var tmpResult Matrix4
	var tmpP3_0 Point3
	M4MulV3(&tmpResult.col0, mat, &tfrm1.col0)
	M4MulV3(&tmpResult.col1, mat, &tfrm1.col1)
	M4MulV3(&tmpResult.col2, mat, &tfrm1.col2)
	P3MakeFromV3(&tmpP3_0, &tfrm1.col3)
	M4MulP3(&tmpResult.col3, mat, &tmpP3_0)
	M4Copy(result, &tmpResult)
}
// M4MulPerElem writes the element-wise (Hadamard) product of mat0 and
// mat1 into result.
func M4MulPerElem(result, mat0, mat1 *Matrix4) {
	V4MulPerElem(&result.col0, &mat0.col0, &mat1.col0)
	V4MulPerElem(&result.col1, &mat0.col1, &mat1.col1)
	V4MulPerElem(&result.col2, &mat0.col2, &mat1.col2)
	V4MulPerElem(&result.col3, &mat0.col3, &mat1.col3)
}
// M4MakeIdentity writes the 4x4 identity matrix into result.
func M4MakeIdentity(result *Matrix4) {
	V4MakeXAxis(&result.col0)
	V4MakeYAxis(&result.col1)
	V4MakeZAxis(&result.col2)
	V4MakeWAxis(&result.col3)
}
// SetUpper3x3 overwrites the xyz parts of columns 0-2 with mat3; the w
// components and column 3 are left untouched.
func (m *Matrix4) SetUpper3x3(mat3 *Matrix3) {
	m.col0.SetXYZ(&mat3.col0)
	m.col1.SetXYZ(&mat3.col1)
	m.col2.SetXYZ(&mat3.col2)
}
// M4GetUpper3x3 extracts the xyz parts of columns 0-2 of mat into result.
func M4GetUpper3x3(result *Matrix3, mat *Matrix4) {
	V4GetXYZ(&result.col0, &mat.col0)
	V4GetXYZ(&result.col1, &mat.col1)
	V4GetXYZ(&result.col2, &mat.col2)
}
// SetTranslation overwrites the xyz part of column 3 (the translation);
// the w component is left untouched.
func (m *Matrix4) SetTranslation(translateVec *Vector3) {
	m.col3.SetXYZ(translateVec)
}
// M4GetTranslation extracts the xyz part of column 3 of mat into result.
func M4GetTranslation(result *Vector3, mat *Matrix4) {
	V4GetXYZ(result, &mat.col3)
}
// M4MakeRotationX writes into result the 4x4 matrix rotating by radians
// about the X axis (translation column set to the W axis).
func M4MakeRotationX(result *Matrix4, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V4MakeXAxis(&result.col0)
	V4MakeFromElems(&result.col1, 0.0, c, s, 0.0)
	V4MakeFromElems(&result.col2, 0.0, -s, c, 0.0)
	V4MakeWAxis(&result.col3)
}
// M4MakeRotationY writes into result the 4x4 matrix rotating by radians
// about the Y axis.
func M4MakeRotationY(result *Matrix4, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V4MakeFromElems(&result.col0, c, 0.0, -s, 0.0)
	V4MakeYAxis(&result.col1)
	V4MakeFromElems(&result.col2, s, 0.0, c, 0.0)
	V4MakeWAxis(&result.col3)
}
// M4MakeRotationZ writes into result the 4x4 matrix rotating by radians
// about the Z axis.
func M4MakeRotationZ(result *Matrix4, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V4MakeFromElems(&result.col0, c, s, 0.0, 0.0)
	V4MakeFromElems(&result.col1, -s, c, 0.0, 0.0)
	V4MakeZAxis(&result.col2)
	V4MakeWAxis(&result.col3)
}
// M4MakeRotationZYX writes into result the composed rotation Z*Y*X from
// the per-axis angles in radiansXYZ; same expansion as M3MakeRotationZYX
// with w components zeroed and col3 = W axis.
func M4MakeRotationZYX(result *Matrix4, radiansXYZ *Vector3) {
	sX := sin(radiansXYZ.X)
	cX := cos(radiansXYZ.X)
	sY := sin(radiansXYZ.Y)
	cY := cos(radiansXYZ.Y)
	sZ := sin(radiansXYZ.Z)
	cZ := cos(radiansXYZ.Z)
	// Shared sub-products of the expanded Z*Y*X matrix.
	tmp0 := (cZ * sY)
	tmp1 := (sZ * sY)
	V4MakeFromElems(&result.col0, (cZ * cY), (sZ * cY), -sY, 0.0)
	V4MakeFromElems(&result.col1, ((tmp0 * sX) - (sZ * cX)), ((tmp1 * sX) + (cZ * cX)), (cY * sX), 0.0)
	V4MakeFromElems(&result.col2, ((tmp0 * cX) + (sZ * sX)), ((tmp1 * cX) - (cZ * sX)), (cY * cX), 0.0)
	V4MakeWAxis(&result.col3)
}
// M4MakeRotationAxis writes into result the rotation of radians about
// unitVec (assumed normalized), via the axis-angle expansion; same as
// M3MakeRotationAxis with an identity fourth column.
func M4MakeRotationAxis(result *Matrix4, radians float32, unitVec *Vector3) {
	s := sin(radians)
	c := cos(radians)
	x := unitVec.X
	y := unitVec.Y
	z := unitVec.Z
	xy := x * y
	yz := y * z
	zx := z * x
	oneMinusC := 1.0 - c
	V4MakeFromElems(&result.col0, (((x * x) * oneMinusC) + c), ((xy * oneMinusC) + (z * s)), ((zx * oneMinusC) - (y * s)), 0.0)
	V4MakeFromElems(&result.col1, ((xy * oneMinusC) - (z * s)), (((y * y) * oneMinusC) + c), ((yz * oneMinusC) + (x * s)), 0.0)
	V4MakeFromElems(&result.col2, ((zx * oneMinusC) + (y * s)), ((yz * oneMinusC) - (x * s)), (((z * z) * oneMinusC) + c), 0.0)
	V4MakeWAxis(&result.col3)
}
// M4MakeRotationQ writes into result the 4x4 rotation equivalent to the
// unit quaternion unitQuat, built via a Transform3 intermediate.
func M4MakeRotationQ(result *Matrix4, unitQuat *Quat) {
	var tmpT3_0 Transform3
	T3MakeRotationQ(&tmpT3_0, unitQuat)
	M4MakeFromT3(result, &tmpT3_0)
}
// M4MakeScale writes into result a 4x4 scale matrix with scaleVec on the
// diagonal and an identity fourth column.
func M4MakeScale(result *Matrix4, scaleVec *Vector3) {
	V4MakeFromElems(&result.col0, scaleVec.X, 0.0, 0.0, 0.0)
	V4MakeFromElems(&result.col1, 0.0, scaleVec.Y, 0.0, 0.0)
	V4MakeFromElems(&result.col2, 0.0, 0.0, scaleVec.Z, 0.0)
	V4MakeWAxis(&result.col3)
}
// M4AppendScale writes mat * diag(scaleVec, 1) into result: columns 0-2
// are scaled, the translation column is copied unchanged.
func M4AppendScale(result, mat *Matrix4, scaleVec *Vector3) {
	V4ScalarMul(&result.col0, &mat.col0, scaleVec.X)
	V4ScalarMul(&result.col1, &mat.col1, scaleVec.Y)
	V4ScalarMul(&result.col2, &mat.col2, scaleVec.Z)
	V4Copy(&result.col3, &mat.col3)
}
// M4PrependScale writes diag(scaleVec, 1) * mat into result: every column
// of mat is multiplied per-element by (scaleVec, 1).
func M4PrependScale(result *Matrix4, scaleVec *Vector3, mat *Matrix4) {
	var scale4 Vector4
	V4MakeFromV3Scalar(&scale4, scaleVec, 1.0)
	V4MulPerElem(&result.col0, &mat.col0, &scale4)
	V4MulPerElem(&result.col1, &mat.col1, &scale4)
	V4MulPerElem(&result.col2, &mat.col2, &scale4)
	V4MulPerElem(&result.col3, &mat.col3, &scale4)
}
// M4MakeTranslation writes into result the identity rotation with
// translateVec (w=1) as the fourth column.
func M4MakeTranslation(result *Matrix4, translateVec *Vector3) {
	V4MakeXAxis(&result.col0)
	V4MakeYAxis(&result.col1)
	V4MakeZAxis(&result.col2)
	V4MakeFromV3Scalar(&result.col3, translateVec, 1.0)
}
// M4MakeLookAt writes into result a view matrix looking from eyePos toward
// lookAtPos with upVec as the approximate up direction. It builds an
// orthonormal eye frame (z points from lookAtPos toward eyePos, x = up
// cross z, y recomputed as z cross x) and returns its orthogonal inverse,
// i.e. the world-to-eye transform.
func M4MakeLookAt(result *Matrix4, eyePos, lookAtPos *Point3, upVec *Vector3) {
	var m4EyeFrame Matrix4
	var v3X, v3Y, v3Z, tmpV3_0, tmpV3_1 Vector3
	var tmpV4_0, tmpV4_1, tmpV4_2, tmpV4_3 Vector4
	V3Normalize(&v3Y, upVec)
	// z axis points backwards: from the target toward the eye.
	P3Sub(&tmpV3_0, eyePos, lookAtPos)
	V3Normalize(&v3Z, &tmpV3_0)
	V3Cross(&tmpV3_1, &v3Y, &v3Z)
	V3Normalize(&v3X, &tmpV3_1)
	// Recompute y so the frame is exactly orthonormal even if upVec was
	// not perpendicular to z.
	V3Cross(&v3Y, &v3Z, &v3X)
	V4MakeFromV3(&tmpV4_0, &v3X)
	V4MakeFromV3(&tmpV4_1, &v3Y)
	V4MakeFromV3(&tmpV4_2, &v3Z)
	V4MakeFromP3(&tmpV4_3, eyePos)
	M4MakeFromCols(&m4EyeFrame, &tmpV4_0, &tmpV4_1, &tmpV4_2, &tmpV4_3)
	M4OrthoInverse(result, &m4EyeFrame)
}
// M4MakePerspective writes into result a perspective projection matrix
// from a vertical field of view (radians), aspect ratio, and near/far
// plane distances. f = cot(fovy/2) is computed via tan(pi/2 - fovy/2).
// Column layout matches the classic gluPerspective matrix (w = -z).
func M4MakePerspective(result *Matrix4, fovyRadians, aspect, zNear, zFar float32) {
	f := tan(g_PI_OVER_2 - (0.5 * fovyRadians))
	rangeInv := 1.0 / (zNear - zFar)
	V4MakeFromElems(&result.col0, (f / aspect), 0.0, 0.0, 0.0)
	V4MakeFromElems(&result.col1, 0.0, f, 0.0, 0.0)
	V4MakeFromElems(&result.col2, 0.0, 0.0, ((zNear + zFar) * rangeInv), -1.0)
	V4MakeFromElems(&result.col3, 0.0, 0.0, (((zNear * zFar) * rangeInv) * 2.0), 0.0)
}
// M4MakeFrustum writes into result a perspective projection defined by an
// explicit near-plane rectangle (left/right/bottom/top at zNear) and the
// far plane; column layout matches the classic glFrustum matrix.
func M4MakeFrustum(result *Matrix4, left, right, bottom, top, zNear, zFar float32) {
	sum_rl := (right + left)
	sum_tb := (top + bottom)
	sum_nf := (zNear + zFar)
	inv_rl := (1.0 / (right - left))
	inv_tb := (1.0 / (top - bottom))
	inv_nf := (1.0 / (zNear - zFar))
	n2 := (zNear + zNear)
	V4MakeFromElems(&result.col0, (n2 * inv_rl), 0.0, 0.0, 0.0)
	V4MakeFromElems(&result.col1, 0.0, (n2 * inv_tb), 0.0, 0.0)
	V4MakeFromElems(&result.col2, (sum_rl * inv_rl), (sum_tb * inv_tb), (sum_nf * inv_nf), -1.0)
	V4MakeFromElems(&result.col3, 0.0, 0.0, ((n2 * inv_nf) * zFar), 0.0)
}
// M4MakeOrthographic writes into result an orthographic projection mapping
// the box [left,right]x[bottom,top]x[zNear,zFar] to the unit cube; column
// layout matches the classic glOrtho matrix.
func M4MakeOrthographic(result *Matrix4, left, right, bottom, top, zNear, zFar float32) {
	sum_rl := (right + left)
	sum_tb := (top + bottom)
	sum_nf := (zNear + zFar)
	inv_rl := (1.0 / (right - left))
	inv_tb := (1.0 / (top - bottom))
	inv_nf := (1.0 / (zNear - zFar))
	V4MakeFromElems(&result.col0, (inv_rl + inv_rl), 0.0, 0.0, 0.0)
	V4MakeFromElems(&result.col1, 0.0, (inv_tb + inv_tb), 0.0, 0.0)
	V4MakeFromElems(&result.col2, 0.0, 0.0, (inv_nf + inv_nf), 0.0)
	V4MakeFromElems(&result.col3, (-sum_rl * inv_rl), (-sum_tb * inv_tb), (sum_nf * inv_nf), 1.0)
}
// M4Select writes mat0 or mat1 into result, choosing per the select1 flag
// (the per-column choice is delegated to V4Select).
func M4Select(result, mat0, mat1 *Matrix4, select1 int) {
	V4Select(&result.col0, &mat0.col0, &mat1.col0, select1)
	V4Select(&result.col1, &mat0.col1, &mat1.col1, select1)
	V4Select(&result.col2, &mat0.col2, &mat1.col2, select1)
	V4Select(&result.col3, &mat0.col3, &mat1.col3, select1)
}
// String implements fmt.Stringer; the matrix is transposed so each printed
// Vector4 is a row rather than a stored column.
func (m *Matrix4) String() string {
	var tmp Matrix4
	M4Transpose(&tmp, m)
	return tmp.col0.String() + tmp.col1.String() + tmp.col2.String() + tmp.col3.String()
}
/*******/
// T3Copy copies all four columns of tfrm into result.
func T3Copy(result, tfrm *Transform3) {
	V3Copy(&result.col0, &tfrm.col0)
	V3Copy(&result.col1, &tfrm.col1)
	V3Copy(&result.col2, &tfrm.col2)
	V3Copy(&result.col3, &tfrm.col3)
}
// T3MakeFromScalar fills every element of result with scalar.
func T3MakeFromScalar(result *Transform3, scalar float32) {
	V3MakeFromScalar(&result.col0, scalar)
	V3MakeFromScalar(&result.col1, scalar)
	V3MakeFromScalar(&result.col2, scalar)
	V3MakeFromScalar(&result.col3, scalar)
}
// T3MakeFromCols assembles result from four explicit column vectors
// (col3 being the translation).
func T3MakeFromCols(result *Transform3, col0, col1, col2, col3 *Vector3) {
	V3Copy(&result.col0, col0)
	V3Copy(&result.col1, col1)
	V3Copy(&result.col2, col2)
	V3Copy(&result.col3, col3)
}
// T3MakeFromM3V3 builds result from a 3x3 upper block and a translation.
func T3MakeFromM3V3(result *Transform3, tfrm *Matrix3, translateVec *Vector3) {
	result.SetUpper3x3(tfrm)
	result.SetTranslation(translateVec)
}
// T3MakeFromQV3 builds result from a unit quaternion (rotation) and a
// translation vector.
func T3MakeFromQV3(result *Transform3, unitQuat *Quat, translateVec *Vector3) {
	var tmpM3_0 Matrix3
	M3MakeFromQ(&tmpM3_0, unitQuat)
	result.SetUpper3x3(&tmpM3_0)
	result.SetTranslation(translateVec)
}
// SetCol0 replaces column 0 of the transform.
func (t *Transform3) SetCol0(col0 *Vector3) {
	V3Copy(&t.col0, col0)
}
// SetCol1 replaces column 1 of the transform.
func (t *Transform3) SetCol1(col1 *Vector3) {
	V3Copy(&t.col1, col1)
}
// SetCol2 replaces column 2 of the transform.
func (t *Transform3) SetCol2(col2 *Vector3) {
	V3Copy(&t.col2, col2)
}
// SetCol3 replaces column 3 (the translation) of the transform.
func (t *Transform3) SetCol3(col3 *Vector3) {
	V3Copy(&t.col3, col3)
}
// SetCol replaces column col (0-3) with vec; out-of-range indices are
// silently ignored.
func (t *Transform3) SetCol(col int, vec *Vector3) {
	switch col {
	case 0:
		V3Copy(&t.col0, vec)
	case 1:
		V3Copy(&t.col1, vec)
	case 2:
		V3Copy(&t.col2, vec)
	case 3:
		V3Copy(&t.col3, vec)
	}
}
// SetRow writes the four components of vec across row `row`, one into
// each column.
func (t *Transform3) SetRow(row int, vec *Vector4) {
	t.col0.SetElem(row, vec.GetElem(0))
	t.col1.SetElem(row, vec.GetElem(1))
	t.col2.SetElem(row, vec.GetElem(2))
	t.col3.SetElem(row, vec.GetElem(3))
}
// SetElem sets the single element at (col, row) to val, via a fetch-
// modify-store of the whole column.
func (t *Transform3) SetElem(col, row int, val float32) {
	var tmpV3_0 Vector3
	T3GetCol(&tmpV3_0, t, col)
	tmpV3_0.SetElem(row, val)
	t.SetCol(col, &tmpV3_0)
}
// GetElem returns the single element at (col, row).
func (t *Transform3) GetElem(col, row int) float32 {
	var tmpV3_0 Vector3
	T3GetCol(&tmpV3_0, t, col)
	return tmpV3_0.GetElem(row)
}
// T3GetCol0 copies column 0 of tfrm into result.
func T3GetCol0(result *Vector3, tfrm *Transform3) {
	V3Copy(result, &tfrm.col0)
}
// T3GetCol1 copies column 1 of tfrm into result.
func T3GetCol1(result *Vector3, tfrm *Transform3) {
	V3Copy(result, &tfrm.col1)
}
// T3GetCol2 copies column 2 of tfrm into result.
func T3GetCol2(result *Vector3, tfrm *Transform3) {
	V3Copy(result, &tfrm.col2)
}
// T3GetCol3 copies column 3 (the translation) of tfrm into result.
func T3GetCol3(result *Vector3, tfrm *Transform3) {
	V3Copy(result, &tfrm.col3)
}
// T3GetCol copies column col (0-3) of tfrm into result; out-of-range
// indices leave result untouched.
func T3GetCol(result *Vector3, tfrm *Transform3, col int) {
	switch col {
	case 0:
		V3Copy(result, &tfrm.col0)
	case 1:
		V3Copy(result, &tfrm.col1)
	case 2:
		V3Copy(result, &tfrm.col2)
	case 3:
		V3Copy(result, &tfrm.col3)
	}
}
// T3GetRow gathers row `row` of tfrm (one element from each of the four
// columns) into a Vector4.
func T3GetRow(result *Vector4, tfrm *Transform3, row int) {
	V4MakeFromElems(result, tfrm.col0.GetElem(row), tfrm.col1.GetElem(row), tfrm.col2.GetElem(row), tfrm.col3.GetElem(row))
}
// T3Inverse writes the inverse of the affine transform tfrm into result:
// the 3x3 part is inverted via the adjugate (cross products of column
// pairs scaled by 1/det), and the new translation is -(inv3x3 * old
// translation). Behavior is undefined for a singular 3x3 part (detinv
// divides by zero).
func T3Inverse(result, tfrm *Transform3) {
	var tmp0, tmp1, tmp2, inv0, inv1, inv2, tmpV3_0, tmpV3_1, tmpV3_2, tmpV3_3, tmpV3_4, tmpV3_5 Vector3
	// tmp0..tmp2 are the rows of the adjugate (cofactor columns).
	V3Cross(&tmp0, &tfrm.col1, &tfrm.col2)
	V3Cross(&tmp1, &tfrm.col2, &tfrm.col0)
	V3Cross(&tmp2, &tfrm.col0, &tfrm.col1)
	// Scalar triple product col0 . (col1 x col2) = determinant.
	detinv := (1.0 / V3Dot(&tfrm.col2, &tmp2))
	// Transpose the scaled adjugate into the inverse's columns.
	V3MakeFromElems(&inv0, (tmp0.X * detinv), (tmp1.X * detinv), (tmp2.X * detinv))
	V3MakeFromElems(&inv1, (tmp0.Y * detinv), (tmp1.Y * detinv), (tmp2.Y * detinv))
	V3MakeFromElems(&inv2, (tmp0.Z * detinv), (tmp1.Z * detinv), (tmp2.Z * detinv))
	V3Copy(&result.col0, &inv0)
	V3Copy(&result.col1, &inv1)
	V3Copy(&result.col2, &inv2)
	// result.col3 = -(inv3x3 * tfrm.col3).
	V3ScalarMul(&tmpV3_0, &inv0, tfrm.col3.X)
	V3ScalarMul(&tmpV3_1, &inv1, tfrm.col3.Y)
	V3ScalarMul(&tmpV3_2, &inv2, tfrm.col3.Z)
	V3Add(&tmpV3_3, &tmpV3_1, &tmpV3_2)
	V3Add(&tmpV3_4, &tmpV3_0, &tmpV3_3)
	V3Neg(&tmpV3_5, &tmpV3_4)
	V3Copy(&result.col3, &tmpV3_5)
}
// T3OrthoInverse writes the inverse of tfrm into result assuming its 3x3
// part is orthonormal: the rotation inverse is just the transpose, and
// the new translation is -(transpose * old translation). Cheaper than
// T3Inverse but only valid for rigid transforms.
func T3OrthoInverse(result, tfrm *Transform3) {
	var inv0, inv1, inv2, tmpV3_0, tmpV3_1, tmpV3_2, tmpV3_3, tmpV3_4, tmpV3_5 Vector3
	// Transpose of the 3x3 part.
	V3MakeFromElems(&inv0, tfrm.col0.X, tfrm.col1.X, tfrm.col2.X)
	V3MakeFromElems(&inv1, tfrm.col0.Y, tfrm.col1.Y, tfrm.col2.Y)
	V3MakeFromElems(&inv2, tfrm.col0.Z, tfrm.col1.Z, tfrm.col2.Z)
	V3Copy(&result.col0, &inv0)
	V3Copy(&result.col1, &inv1)
	V3Copy(&result.col2, &inv2)
	// result.col3 = -(transpose * tfrm.col3).
	V3ScalarMul(&tmpV3_0, &inv0, tfrm.col3.X)
	V3ScalarMul(&tmpV3_1, &inv1, tfrm.col3.Y)
	V3ScalarMul(&tmpV3_2, &inv2, tfrm.col3.Z)
	V3Add(&tmpV3_3, &tmpV3_1, &tmpV3_2)
	V3Add(&tmpV3_4, &tmpV3_0, &tmpV3_3)
	V3Neg(&tmpV3_5, &tmpV3_4)
	V3Copy(&result.col3, &tmpV3_5)
}
// T3AbsPerElem writes the element-wise absolute value of tfrm into result.
func T3AbsPerElem(result, tfrm *Transform3) {
	V3AbsPerElem(&result.col0, &tfrm.col0)
	V3AbsPerElem(&result.col1, &tfrm.col1)
	V3AbsPerElem(&result.col2, &tfrm.col2)
	V3AbsPerElem(&result.col3, &tfrm.col3)
}
// T3MulV3 transforms the direction vec by the 3x3 part of tfrm (the
// translation column does not contribute). Temporaries keep the call
// safe when result aliases vec.
func T3MulV3(result *Vector3, tfrm *Transform3, vec *Vector3) {
	tmpX := ((tfrm.col0.X * vec.X) + (tfrm.col1.X * vec.Y)) + (tfrm.col2.X * vec.Z)
	tmpY := ((tfrm.col0.Y * vec.X) + (tfrm.col1.Y * vec.Y)) + (tfrm.col2.Y * vec.Z)
	tmpZ := ((tfrm.col0.Z * vec.X) + (tfrm.col1.Z * vec.Y)) + (tfrm.col2.Z * vec.Z)
	V3MakeFromElems(result, tmpX, tmpY, tmpZ)
}
// T3MulP3 transforms the point pnt by tfrm: 3x3 part applied, then the
// translation column added.
func T3MulP3(result *Point3, tfrm *Transform3, pnt *Point3) {
	tmpX := ((((tfrm.col0.X * pnt.X) + (tfrm.col1.X * pnt.Y)) + (tfrm.col2.X * pnt.Z)) + tfrm.col3.X)
	tmpY := ((((tfrm.col0.Y * pnt.X) + (tfrm.col1.Y * pnt.Y)) + (tfrm.col2.Y * pnt.Z)) + tfrm.col3.Y)
	tmpZ := ((((tfrm.col0.Z * pnt.X) + (tfrm.col1.Z * pnt.Y)) + (tfrm.col2.Z * pnt.Z)) + tfrm.col3.Z)
	P3MakeFromElems(result, tmpX, tmpY, tmpZ)
}
// T3Mul writes the composition tfrm0 * tfrm1 into result: rotation
// columns transformed as directions, translation column as a point. A
// scratch transform keeps the call safe when result aliases an input.
func T3Mul(result, tfrm0, tfrm1 *Transform3) {
	var tmpResult Transform3
	var tmpP3_0, tmpP3_1 Point3
	T3MulV3(&tmpResult.col0, tfrm0, &tfrm1.col0)
	T3MulV3(&tmpResult.col1, tfrm0, &tfrm1.col1)
	T3MulV3(&tmpResult.col2, tfrm0, &tfrm1.col2)
	P3MakeFromV3(&tmpP3_0, &tfrm1.col3)
	T3MulP3(&tmpP3_1, tfrm0, &tmpP3_0)
	V3MakeFromP3(&tmpResult.col3, &tmpP3_1)
	T3Copy(result, &tmpResult)
}
// T3MulPerElem writes the element-wise product of tfrm0 and tfrm1 into
// result.
func T3MulPerElem(result, tfrm0, tfrm1 *Transform3) {
	V3MulPerElem(&result.col0, &tfrm0.col0, &tfrm1.col0)
	V3MulPerElem(&result.col1, &tfrm0.col1, &tfrm1.col1)
	V3MulPerElem(&result.col2, &tfrm0.col2, &tfrm1.col2)
	V3MulPerElem(&result.col3, &tfrm0.col3, &tfrm1.col3)
}
// T3MakeIdentity writes the identity transform into result: unit axes in
// the rotation columns and a zero translation.
func T3MakeIdentity(result *Transform3) {
	V3MakeFromScalar(&result.col3, 0.0)
	V3MakeXAxis(&result.col0)
	V3MakeYAxis(&result.col1)
	V3MakeZAxis(&result.col2)
}
// SetUpper3x3 copies the three columns of tfrm into the rotation/scale
// part of the transform; the translation column (col3) is left untouched.
//
// Consistency fix: receiver renamed from m to t — every other Transform3
// method in this file uses t as the receiver name.
func (t *Transform3) SetUpper3x3(tfrm *Matrix3) {
	V3Copy(&t.col0, &tfrm.col0)
	V3Copy(&t.col1, &tfrm.col1)
	V3Copy(&t.col2, &tfrm.col2)
}
// T3GetUpper3x3 extracts the rotation/scale columns of tfrm into a
// Matrix3.
func T3GetUpper3x3(result *Matrix3, tfrm *Transform3) {
	M3MakeFromCols(result, &tfrm.col0, &tfrm.col1, &tfrm.col2)
}
// SetTranslation replaces the translation column (col3).
func (t *Transform3) SetTranslation(translateVec *Vector3) {
	V3Copy(&t.col3, translateVec)
}
// T3GetTranslation copies the translation column (col3) of tfrm into
// result.
func T3GetTranslation(result *Vector3, tfrm *Transform3) {
	V3Copy(result, &tfrm.col3)
}
// T3MakeRotationX writes into result a rotation of radians about the X
// axis with zero translation.
func T3MakeRotationX(result *Transform3, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V3MakeXAxis(&result.col0)
	V3MakeFromElems(&result.col1, 0.0, c, s)
	V3MakeFromElems(&result.col2, 0.0, -s, c)
	V3MakeFromScalar(&result.col3, 0.0)
}
// T3MakeRotationY writes into result a rotation of radians about the Y
// axis with zero translation.
func T3MakeRotationY(result *Transform3, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V3MakeFromElems(&result.col0, c, 0.0, -s)
	V3MakeYAxis(&result.col1)
	V3MakeFromElems(&result.col2, s, 0.0, c)
	V3MakeFromScalar(&result.col3, 0.0)
}
// T3MakeRotationZ writes into result a rotation of radians about the Z
// axis with zero translation.
func T3MakeRotationZ(result *Transform3, radians float32) {
	s := sin(radians)
	c := cos(radians)
	V3MakeFromElems(&result.col0, c, s, 0.0)
	V3MakeFromElems(&result.col1, -s, c, 0.0)
	V3MakeZAxis(&result.col2)
	V3MakeFromScalar(&result.col3, 0.0)
}
// T3MakeRotationZYX writes into result the composed rotation Z*Y*X from
// the per-axis angles in radiansXYZ, with zero translation; same
// expansion as M3MakeRotationZYX.
func T3MakeRotationZYX(result *Transform3, radiansXYZ *Vector3) {
	sX := sin(radiansXYZ.X)
	cX := cos(radiansXYZ.X)
	sY := sin(radiansXYZ.Y)
	cY := cos(radiansXYZ.Y)
	sZ := sin(radiansXYZ.Z)
	cZ := cos(radiansXYZ.Z)
	// Shared sub-products of the expanded Z*Y*X matrix.
	tmp0 := (cZ * sY)
	tmp1 := (sZ * sY)
	V3MakeFromElems(&result.col0, (cZ * cY), (sZ * cY), -sY)
	V3MakeFromElems(&result.col1, ((tmp0 * sX) - (sZ * cX)), ((tmp1 * sX) + (cZ * cX)), (cY * sX))
	V3MakeFromElems(&result.col2, ((tmp0 * cX) + (sZ * sX)), ((tmp1 * cX) - (cZ * sX)), (cY * cX))
	V3MakeFromScalar(&result.col3, 0.0)
}
// T3MakeRotationAxis writes into result a rotation of radians about
// unitVec (assumed normalized) with zero translation, delegating the 3x3
// part to M3MakeRotationAxis.
func T3MakeRotationAxis(result *Transform3, radians float32, unitVec *Vector3) {
	var tmpM3_0 Matrix3
	var tmpV3_0 Vector3
	M3MakeRotationAxis(&tmpM3_0, radians, unitVec)
	V3MakeFromScalar(&tmpV3_0, 0.0)
	T3MakeFromM3V3(result, &tmpM3_0, &tmpV3_0)
}
// T3MakeRotationQ writes into result the rotation equivalent to the unit
// quaternion unitQuat with zero translation.
func T3MakeRotationQ(result *Transform3, unitQuat *Quat) {
	var tmpM3_0 Matrix3
	var tmpV3_0 Vector3
	M3MakeFromQ(&tmpM3_0, unitQuat)
	V3MakeFromScalar(&tmpV3_0, 0.0)
	T3MakeFromM3V3(result, &tmpM3_0, &tmpV3_0)
}
// T3MakeScale writes into result a diagonal scale transform (scaleVec on
// the diagonal, zero translation).
func T3MakeScale(result *Transform3, scaleVec *Vector3) {
	V3MakeFromElems(&result.col0, scaleVec.X, 0.0, 0.0)
	V3MakeFromElems(&result.col1, 0.0, scaleVec.Y, 0.0)
	V3MakeFromElems(&result.col2, 0.0, 0.0, scaleVec.Z)
	V3MakeFromScalar(&result.col3, 0.0)
}
// T3AppendScale writes tfrm * diag(scaleVec) into result: the rotation
// columns are scaled, the translation is copied unchanged.
func T3AppendScale(result, tfrm *Transform3, scaleVec *Vector3) {
	V3ScalarMul(&result.col0, &tfrm.col0, scaleVec.X)
	V3ScalarMul(&result.col1, &tfrm.col1, scaleVec.Y)
	V3ScalarMul(&result.col2, &tfrm.col2, scaleVec.Z)
	V3Copy(&result.col3, &tfrm.col3)
}
// T3PrependScale writes diag(scaleVec) * tfrm into result: every column,
// including the translation, is multiplied per-element by scaleVec.
func T3PrependScale(result *Transform3, scaleVec *Vector3, tfrm *Transform3) {
	V3MulPerElem(&result.col0, &tfrm.col0, scaleVec)
	V3MulPerElem(&result.col1, &tfrm.col1, scaleVec)
	V3MulPerElem(&result.col2, &tfrm.col2, scaleVec)
	V3MulPerElem(&result.col3, &tfrm.col3, scaleVec)
}
// T3MakeTranslation writes into result an identity rotation with
// translateVec as the translation column.
func T3MakeTranslation(result *Transform3, translateVec *Vector3) {
	V3MakeXAxis(&result.col0)
	V3MakeYAxis(&result.col1)
	V3MakeZAxis(&result.col2)
	V3Copy(&result.col3, translateVec)
}
// T3Select writes tfrm0 or tfrm1 into result, choosing per the select1
// flag (delegated per column to V3Select).
func T3Select(result, tfrm0, tfrm1 *Transform3, select1 int) {
	V3Select(&result.col0, &tfrm0.col0, &tfrm1.col0, select1)
	V3Select(&result.col1, &tfrm0.col1, &tfrm1.col1, select1)
	V3Select(&result.col2, &tfrm0.col2, &tfrm1.col2, select1)
	V3Select(&result.col3, &tfrm0.col3, &tfrm1.col3, select1)
}
// String implements fmt.Stringer, printing the three rows of the 3x4
// transform (each row gathered across the four columns).
func (t *Transform3) String() string {
	var tmpV4_0, tmpV4_1, tmpV4_2 Vector4
	T3GetRow(&tmpV4_0, t, 0)
	T3GetRow(&tmpV4_1, t, 1)
	T3GetRow(&tmpV4_2, t, 2)
	return tmpV4_0.String() + tmpV4_1.String() + tmpV4_2.String()
}
/*******/
// QMakeFromM3 writes into result the quaternion equivalent to the
// rotation matrix tfrm. The branch flags and sign flips appear to select
// the numerically largest quaternion component so the sqrt radicand stays
// well away from zero (the standard trace-based extraction, ported from a
// branch-minimizing SIMD original) — NOTE(review): verify against the
// reference implementation before relying on this description.
func QMakeFromM3(result *Quat, tfrm *Matrix3) {
	// Flatten the matrix: first letter = row axis, second = column.
	xx := tfrm.col0.X
	yx := tfrm.col0.Y
	zx := tfrm.col0.Z
	xy := tfrm.col1.X
	yy := tfrm.col1.Y
	zy := tfrm.col1.Z
	xz := tfrm.col2.X
	yz := tfrm.col2.Y
	zz := tfrm.col2.Z
	trace := ((xx + yy) + zz)
	negTrace := (trace < 0.0)
	ZgtX := zz > xx
	ZgtY := zz > yy
	YgtX := yy > xx
	// Which diagonal element dominates when the trace is negative.
	largestXorY := (!ZgtX || !ZgtY) && negTrace
	largestYorZ := (YgtX || ZgtX) && negTrace
	largestZorX := (ZgtY || !YgtX) && negTrace
	// Flip signs so the radicand below is computed for the dominant case.
	if largestXorY {
		zz = -zz
		xy = -xy
	}
	if largestYorZ {
		xx = -xx
		yz = -yz
	}
	if largestZorX {
		yy = -yy
		zx = -zx
	}
	radicand := (((xx + yy) + zz) + 1.0)
	scale := (0.5 * (1.0 / sqrt(radicand)))
	tmpx := ((zy - yz) * scale)
	tmpy := ((xz - zx) * scale)
	tmpz := ((yx - xy) * scale)
	tmpw := (radicand * scale)
	qx := tmpx
	qy := tmpy
	qz := tmpz
	qw := tmpw
	// Undo the case selection: permute the components back into x,y,z,w
	// order. The order of these two swaps matters.
	if largestXorY {
		qx = tmpw
		qy = tmpz
		qz = tmpy
		qw = tmpx
	}
	if largestYorZ {
		tmpx = qx
		tmpz = qz
		qx = qy
		qy = tmpx
		qz = qw
		qw = tmpz
	}
	result.X = qx
	result.Y = qy
	result.Z = qz
	result.W = qw
}
// V3Outer writes the outer product tfrm0 * tfrm1^T into result: column i
// is tfrm0 scaled by the i-th component of tfrm1.
func V3Outer(result *Matrix3, tfrm0, tfrm1 *Vector3) {
	V3ScalarMul(&result.col0, tfrm0, tfrm1.X)
	V3ScalarMul(&result.col1, tfrm0, tfrm1.Y)
	V3ScalarMul(&result.col2, tfrm0, tfrm1.Z)
}
// V4Outer writes the 4x4 outer product tfrm0 * tfrm1^T into result.
func V4Outer(result *Matrix4, tfrm0, tfrm1 *Vector4) {
	V4ScalarMul(&result.col0, tfrm0, tfrm1.X)
	V4ScalarMul(&result.col1, tfrm0, tfrm1.Y)
	V4ScalarMul(&result.col2, tfrm0, tfrm1.Z)
	V4ScalarMul(&result.col3, tfrm0, tfrm1.W)
}
// V3RowMul writes the row-vector product vec^T * mat into result (each
// component is the dot product of vec with a column of mat). Temporaries
// keep the call safe when result aliases vec.
func V3RowMul(result *Vector3, vec *Vector3, mat *Matrix3) {
	tmpX := (((vec.X * mat.col0.X) + (vec.Y * mat.col0.Y)) + (vec.Z * mat.col0.Z))
	tmpY := (((vec.X * mat.col1.X) + (vec.Y * mat.col1.Y)) + (vec.Z * mat.col1.Z))
	tmpZ := (((vec.X * mat.col2.X) + (vec.Y * mat.col2.Y)) + (vec.Z * mat.col2.Z))
	V3MakeFromElems(result, tmpX, tmpY, tmpZ)
}
// V3CrossMatrix writes into result the skew-symmetric matrix M such that
// M * v == vec x v for any v.
func V3CrossMatrix(result *Matrix3, vec *Vector3) {
	V3MakeFromElems(&result.col0, 0.0, vec.Z, -vec.Y)
	V3MakeFromElems(&result.col1, -vec.Z, 0.0, vec.X)
	V3MakeFromElems(&result.col2, vec.Y, -vec.X, 0.0)
}
// V3CrossMatrixMul writes into result the matrix whose columns are
// vec x (each column of mat) — equivalent to V3CrossMatrix(vec) * mat.
func V3CrossMatrixMul(result *Matrix3, vec *Vector3, mat *Matrix3) {
	var tmpV3_0, tmpV3_1, tmpV3_2 Vector3
	V3Cross(&tmpV3_0, vec, &mat.col0)
	V3Cross(&tmpV3_1, vec, &mat.col1)
	V3Cross(&tmpV3_2, vec, &mat.col2)
	M3MakeFromCols(result, &tmpV3_0, &tmpV3_1, &tmpV3_2)
}
package elements
const defaultElementsJSON string = `
[
{
"Symbol": "H",
"Name": "hydrogen",
"Number": 1,
"Isotope": [
{
"Mass": 1.00782503223,
"Abundance": 0.999885
},
{
"Mass": 2.01410177812,
"Abundance": 0.000115
}
]
},
{
"Symbol": "He",
"Name": "helium",
"Number": 2,
"Isotope": [
{
"Mass": 3.0160293201,
"Abundance": 0.00000134
},
{
"Mass": 4.00260325413,
"Abundance": 0.99999866
}
]
},
{
"Symbol": "Li",
"Name": "lithium",
"Number": 3,
"Isotope": [
{
"Mass": 6.0151228874,
"Abundance": 0.0759
},
{
"Mass": 7.0160034366,
"Abundance": 0.9241
}
]
},
{
"Symbol": "Be",
"Name": "beryllium",
"Number": 4,
"Isotope": [
{
"Mass": 9.012183065,
"Abundance": 1
}
]
},
{
"Symbol": "B",
"Name": "boron",
"Number": 5,
"Isotope": [
{
"Mass": 10.01293695,
"Abundance": 0.199
},
{
"Mass": 11.00930536,
"Abundance": 0.801
}
]
},
{
"Symbol": "C",
"Name": "carbon",
"Number": 6,
"Isotope": [
{
"Mass": 12,
"Abundance": 0.9893
},
{
"Mass": 13.00335483507,
"Abundance": 0.0107
}
]
},
{
"Symbol": "N",
"Name": "nitrogen",
"Number": 7,
"Isotope": [
{
"Mass": 14.00307400443,
"Abundance": 0.99636
},
{
"Mass": 15.00010889888,
"Abundance": 0.00364
}
]
},
{
"Symbol": "O",
"Name": "oxygen",
"Number": 8,
"Isotope": [
{
"Mass": 15.99491461957,
"Abundance": 0.99757
},
{
"Mass": 16.9991317565,
"Abundance": 0.00038
},
{
"Mass": 17.99915961286,
"Abundance": 0.00205
}
]
},
{
"Symbol": "F",
"Name": "fluorine",
"Number": 9,
"Isotope": [
{
"Mass": 18.99840316273,
"Abundance": 1
}
]
},
{
"Symbol": "Ne",
"Name": "neon",
"Number": 10,
"Isotope": [
{
"Mass": 19.9924401762,
"Abundance": 0.9048
},
{
"Mass": 20.993846685,
"Abundance": 0.0027
},
{
"Mass": 21.991385114,
"Abundance": 0.0925
}
]
},
{
"Symbol": "Na",
"Name": "sodium",
"Number": 11,
"Isotope": [
{
"Mass": 22.989769282,
"Abundance": 1
}
]
},
{
"Symbol": "Mg",
"Name": "magnesium",
"Number": 12,
"Isotope": [
{
"Mass": 23.985041697,
"Abundance": 0.7899
},
{
"Mass": 24.985836976,
"Abundance": 0.1
},
{
"Mass": 25.982592968,
"Abundance": 0.1101
}
]
},
{
"Symbol": "Al",
"Name": "aluminium",
"Number": 13,
"Isotope": [
{
"Mass": 26.98153853,
"Abundance": 1
}
]
},
{
"Symbol": "Si",
"Name": "silicon",
"Number": 14,
"Isotope": [
{
"Mass": 27.97692653465,
"Abundance": 0.92223
},
{
"Mass": 28.9764946649,
"Abundance": 0.04685
},
{
"Mass": 29.973770136,
"Abundance": 0.03092
}
]
},
{
"Symbol": "P",
"Name": "phosphorus",
"Number": 15,
"Isotope": [
{
"Mass": 30.97376199842,
"Abundance": 1
}
]
},
{
"Symbol": "S",
"Name": "sulfur",
"Number": 16,
"Isotope": [
{
"Mass": 31.9720711744,
"Abundance": 0.9499
},
{
"Mass": 32.9714589098,
"Abundance": 0.0075
},
{
"Mass": 33.967867004,
"Abundance": 0.0425
},
{
"Mass": 35.96708071,
"Abundance": 0.0001
}
]
},
{
"Symbol": "Cl",
"Name": "chlorine",
"Number": 17,
"Isotope": [
{
"Mass": 34.968852682,
"Abundance": 0.7576
},
{
"Mass": 36.965902602,
"Abundance": 0.2424
}
]
},
{
"Symbol": "Ar",
"Name": "argon",
"Number": 18,
"Isotope": [
{
"Mass": 35.967545105,
"Abundance": 0.003336
},
{
"Mass": 37.96273211,
"Abundance": 0.000629
},
{
"Mass": 39.9623831237,
"Abundance": 0.996035
}
]
},
{
"Symbol": "K",
"Name": "potassium",
"Number": 19,
"Isotope": [
{
"Mass": 38.9637064864,
"Abundance": 0.932581
},
{
"Mass": 39.963998166,
"Abundance": 0.000117
},
{
"Mass": 40.9618252579,
"Abundance": 0.067302
}
]
},
{
"Symbol": "Ca",
"Name": "calcium",
"Number": 20,
"Isotope": [
{
"Mass": 39.962590863,
"Abundance": 0.96941
},
{
"Mass": 41.95861783,
"Abundance": 0.00647
},
{
"Mass": 42.95876644,
"Abundance": 0.00135
},
{
"Mass": 43.95548156,
"Abundance": 0.02086
},
{
"Mass": 45.953689,
"Abundance": 0.00004
},
{
"Mass": 47.95252276,
"Abundance": 0.00187
}
]
},
{
"Symbol": "Sc",
"Name": "scandium",
"Number": 21,
"Isotope": [
{
"Mass": 44.95590828,
"Abundance": 1
}
]
},
{
"Symbol": "Ti",
"Name": "titanium",
"Number": 22,
"Isotope": [
{
"Mass": 45.95262772,
"Abundance": 0.0825
},
{
"Mass": 46.95175879,
"Abundance": 0.0744
},
{
"Mass": 47.94794198,
"Abundance": 0.7372
},
{
"Mass": 48.94786568,
"Abundance": 0.0541
},
{
"Mass": 49.94478689,
"Abundance": 0.0518
}
]
},
{
"Symbol": "V",
"Name": "vanadium",
"Number": 23,
"Isotope": [
{
"Mass": 49.94715601,
"Abundance": 0.0025
},
{
"Mass": 50.94395704,
"Abundance": 0.9975
}
]
},
{
"Symbol": "Cr",
"Name": "chromium",
"Number": 24,
"Isotope": [
{
"Mass": 49.94604183,
"Abundance": 0.04345
},
{
"Mass": 51.94050623,
"Abundance": 0.83789
},
{
"Mass": 52.94064815,
"Abundance": 0.09501
},
{
"Mass": 53.93887916,
"Abundance": 0.02365
}
]
},
{
"Symbol": "Mn",
"Name": "manganese",
"Number": 25,
"Isotope": [
{
"Mass": 54.93804391,
"Abundance": 1
}
]
},
{
"Symbol": "Fe",
"Name": "iron",
"Number": 26,
"Isotope": [
{
"Mass": 53.93960899,
"Abundance": 0.05845
},
{
"Mass": 55.93493633,
"Abundance": 0.91754
},
{
"Mass": 56.93539284,
"Abundance": 0.02119
},
{
"Mass": 57.93327443,
"Abundance": 0.00282
}
]
},
{
"Symbol": "Co",
"Name": "cobalt",
"Number": 27,
"Isotope": [
{
"Mass": 58.93319429,
"Abundance": 1
}
]
},
{
"Symbol": "Ni",
"Name": "nickel",
"Number": 28,
"Isotope": [
{
"Mass": 57.93534241,
"Abundance": 0.68077
},
{
"Mass": 59.93078588,
"Abundance": 0.26223
},
{
"Mass": 60.93105557,
"Abundance": 0.011399
},
{
"Mass": 61.92834537,
"Abundance": 0.036346
},
{
"Mass": 63.92796682,
"Abundance": 0.009255
}
]
},
{
"Symbol": "Cu",
"Name": "copper",
"Number": 29,
"Isotope": [
{
"Mass": 62.92959772,
"Abundance": 0.6915
},
{
"Mass": 64.9277897,
"Abundance": 0.3085
}
]
},
{
"Symbol": "Zn",
"Name": "zinc",
"Number": 30,
"Isotope": [
{
"Mass": 63.92914201,
"Abundance": 0.4917
},
{
"Mass": 65.92603381,
"Abundance": 0.2773
},
{
"Mass": 66.92712775,
"Abundance": 0.0404
},
{
"Mass": 67.92484455,
"Abundance": 0.1845
},
{
"Mass": 69.9253192,
"Abundance": 0.0061
}
]
},
{
"Symbol": "Ga",
"Name": "gallium",
"Number": 31,
"Isotope": [
{
"Mass": 68.9255735,
"Abundance": 0.60108
},
{
"Mass": 70.92470258,
"Abundance": 0.39892
}
]
},
{
"Symbol": "Ge",
"Name": "germanium",
"Number": 32,
"Isotope": [
{
"Mass": 69.92424875,
"Abundance": 0.2057
},
{
"Mass": 71.922075826,
"Abundance": 0.2745
},
{
"Mass": 72.923458956,
"Abundance": 0.0775
},
{
"Mass": 73.921177761,
"Abundance": 0.365
},
{
"Mass": 75.921402726,
"Abundance": 0.0773
}
]
},
{
"Symbol": "As",
"Name": "arsenic",
"Number": 33,
"Isotope": [
{
"Mass": 74.92159457,
"Abundance": 1
}
]
},
{
"Symbol": "Se",
"Name": "selenium",
"Number": 34,
"Isotope": [
{
"Mass": 73.922475934,
"Abundance": 0.0089
},
{
"Mass": 75.919213704,
"Abundance": 0.0937
},
{
"Mass": 76.919914154,
"Abundance": 0.0763
},
{
"Mass": 77.91730928,
"Abundance": 0.2377
},
{
"Mass": 79.9165218,
"Abundance": 0.4961
},
{
"Mass": 81.9166995,
"Abundance": 0.0873
}
]
},
{
"Symbol": "Br",
"Name": "bromine",
"Number": 35,
"Isotope": [
{
"Mass": 78.9183376,
"Abundance": 0.5069
},
{
"Mass": 80.9162897,
"Abundance": 0.4931
}
]
},
{
"Symbol": "Kr",
"Name": "krypton",
"Number": 36,
"Isotope": [
{
"Mass": 77.92036494,
"Abundance": 0.00355
},
{
"Mass": 79.91637808,
"Abundance": 0.02286
},
{
"Mass": 81.91348273,
"Abundance": 0.11593
},
{
"Mass": 82.91412716,
"Abundance": 0.115
},
{
"Mass": 83.9114977282,
"Abundance": 0.56987
},
{
"Mass": 85.9106106269,
"Abundance": 0.17279
}
]
},
{
"Symbol": "Rb",
"Name": "rubidium",
"Number": 37,
"Isotope": [
{
"Mass": 84.9117897379,
"Abundance": 0.7217
},
{
"Mass": 86.909180531,
"Abundance": 0.2783
}
]
},
{
"Symbol": "Sr",
"Name": "strontium",
"Number": 38,
"Isotope": [
{
"Mass": 83.9134191,
"Abundance": 0.0056
},
{
"Mass": 85.9092606,
"Abundance": 0.0986
},
{
"Mass": 86.9088775,
"Abundance": 0.07
},
{
"Mass": 87.9056125,
"Abundance": 0.8258
}
]
},
{
"Symbol": "Y",
"Name": "yttrium",
"Number": 39,
"Isotope": [
{
"Mass": 88.9058403,
"Abundance": 1
}
]
},
{
"Symbol": "Zr",
"Name": "zirconium",
"Number": 40,
"Isotope": [
{
"Mass": 89.9046977,
"Abundance": 0.5145
},
{
"Mass": 90.9056396,
"Abundance": 0.1122
},
{
"Mass": 91.9050347,
"Abundance": 0.1715
},
{
"Mass": 93.9063108,
"Abundance": 0.1738
},
{
"Mass": 95.9082714,
"Abundance": 0.028
}
]
},
{
"Symbol": "Nb",
"Name": "niobium",
"Number": 41,
"Isotope": [
{
"Mass": 92.906373,
"Abundance": 1
}
]
},
{
"Symbol": "Mo",
"Name": "molybdenum",
"Number": 42,
"Isotope": [
{
"Mass": 91.90680796,
"Abundance": 0.1453
},
{
"Mass": 93.9050849,
"Abundance": 0.0915
},
{
"Mass": 94.90583877,
"Abundance": 0.1584
},
{
"Mass": 95.90467612,
"Abundance": 0.1667
},
{
"Mass": 96.90601812,
"Abundance": 0.096
},
{
"Mass": 97.90540482,
"Abundance": 0.2439
},
{
"Mass": 99.9074718,
"Abundance": 0.0982
}
]
},
{
"Symbol": "Tc",
"Name": "technetium",
"Number": 43,
"Isotope": null
},
{
"Symbol": "Ru",
"Name": "ruthenium",
"Number": 44,
"Isotope": [
{
"Mass": 95.90759025,
"Abundance": 0.0554
},
{
"Mass": 97.9052868,
"Abundance": 0.0187
},
{
"Mass": 98.9059341,
"Abundance": 0.1276
},
{
"Mass": 99.9042143,
"Abundance": 0.126
},
{
"Mass": 100.9055769,
"Abundance": 0.1706
},
{
"Mass": 101.9043441,
"Abundance": 0.3155
},
{
"Mass": 103.9054275,
"Abundance": 0.1862
}
]
},
{
"Symbol": "Rh",
"Name": "rhodium",
"Number": 45,
"Isotope": [
{
"Mass": 102.905498,
"Abundance": 1
}
]
},
{
"Symbol": "Pd",
"Name": "palladium",
"Number": 46,
"Isotope": [
{
"Mass": 101.9056022,
"Abundance": 0.0102
},
{
"Mass": 103.9040305,
"Abundance": 0.1114
},
{
"Mass": 104.9050796,
"Abundance": 0.2233
},
{
"Mass": 105.9034804,
"Abundance": 0.2733
},
{
"Mass": 107.9038916,
"Abundance": 0.2646
},
{
"Mass": 109.9051722,
"Abundance": 0.1172
}
]
},
{
"Symbol": "Ag",
"Name": "silver",
"Number": 47,
"Isotope": [
{
"Mass": 106.9050916,
"Abundance": 0.51839
},
{
"Mass": 108.9047553,
"Abundance": 0.48161
}
]
},
{
"Symbol": "Cd",
"Name": "cadmium",
"Number": 48,
"Isotope": [
{
"Mass": 105.9064599,
"Abundance": 0.0125
},
{
"Mass": 107.9041834,
"Abundance": 0.0089
},
{
"Mass": 109.90300661,
"Abundance": 0.1249
},
{
"Mass": 110.90418287,
"Abundance": 0.128
},
{
"Mass": 111.90276287,
"Abundance": 0.2413
},
{
"Mass": 112.90440813,
"Abundance": 0.1222
},
{
"Mass": 113.90336509,
"Abundance": 0.2873
},
{
"Mass": 115.90476315,
"Abundance": 0.0749
}
]
},
{
"Symbol": "In",
"Name": "indium",
"Number": 49,
"Isotope": [
{
"Mass": 112.90406184,
"Abundance": 0.0429
},
{
"Mass": 114.903878776,
"Abundance": 0.9571
}
]
},
{
"Symbol": "Sn",
"Name": "tin",
"Number": 50,
"Isotope": [
{
"Mass": 111.90482387,
"Abundance": 0.0097
},
{
"Mass": 113.9027827,
"Abundance": 0.0066
},
{
"Mass": 114.903344699,
"Abundance": 0.0034
},
{
"Mass": 115.9017428,
"Abundance": 0.1454
},
{
"Mass": 116.90295398,
"Abundance": 0.0768
},
{
"Mass": 117.90160657,
"Abundance": 0.2422
},
{
"Mass": 118.90331117,
"Abundance": 0.0859
},
{
"Mass": 119.90220163,
"Abundance": 0.3258
},
{
"Mass": 121.9034438,
"Abundance": 0.0463
},
{
"Mass": 123.9052766,
"Abundance": 0.0579
}
]
},
{
"Symbol": "Sb",
"Name": "antimony",
"Number": 51,
"Isotope": [
{
"Mass": 120.903812,
"Abundance": 0.5721
},
{
"Mass": 122.9042132,
"Abundance": 0.4279
}
]
},
{
"Symbol": "Te",
"Name": "tellurium",
"Number": 52,
"Isotope": [
{
"Mass": 119.9040593,
"Abundance": 0.0009
},
{
"Mass": 121.9030435,
"Abundance": 0.0255
},
{
"Mass": 122.9042698,
"Abundance": 0.0089
},
{
"Mass": 123.9028171,
"Abundance": 0.0474
},
{
"Mass": 124.9044299,
"Abundance": 0.0707
},
{
"Mass": 125.9033109,
"Abundance": 0.1884
},
{
"Mass": 127.90446128,
"Abundance": 0.3174
},
{
"Mass": 129.906222748,
"Abundance": 0.3408
}
]
},
{
"Symbol": "I",
"Name": "iodine",
"Number": 53,
"Isotope": [
{
"Mass": 126.9044719,
"Abundance": 1
}
]
},
{
"Symbol": "Xe",
"Name": "xenon",
"Number": 54,
"Isotope": [
{
"Mass": 123.905892,
"Abundance": 0.000952
},
{
"Mass": 125.9042983,
"Abundance": 0.00089
},
{
"Mass": 127.903531,
"Abundance": 0.019102
},
{
"Mass": 128.9047808611,
"Abundance": 0.264006
},
{
"Mass": 129.903509349,
"Abundance": 0.04071
},
{
"Mass": 130.90508406,
"Abundance": 0.212324
},
{
"Mass": 131.9041550856,
"Abundance": 0.269086
},
{
"Mass": 133.90539466,
"Abundance": 0.104357
},
{
"Mass": 135.907214484,
"Abundance": 0.088573
}
]
},
{
"Symbol": "Cs",
"Name": "caesium",
"Number": 55,
"Isotope": [
{
"Mass": 132.905451961,
"Abundance": 1
}
]
},
{
"Symbol": "Ba",
"Name": "barium",
"Number": 56,
"Isotope": [
{
"Mass": 129.9063207,
"Abundance": 0.00106
},
{
"Mass": 131.9050611,
"Abundance": 0.00101
},
{
"Mass": 133.90450818,
"Abundance": 0.02417
},
{
"Mass": 134.90568838,
"Abundance": 0.06592
},
{
"Mass": 135.90457573,
"Abundance": 0.07854
},
{
"Mass": 136.90582714,
"Abundance": 0.11232
},
{
"Mass": 137.905247,
"Abundance": 0.71698
}
]
},
{
"Symbol": "La",
"Name": "lanthanum",
"Number": 57,
"Isotope": [
{
"Mass": 137.9071149,
"Abundance": 0.0008881
},
{
"Mass": 138.9063563,
"Abundance": 0.9991119
}
]
},
{
"Symbol": "Ce",
"Name": "cerium",
"Number": 58,
"Isotope": [
{
"Mass": 135.90712921,
"Abundance": 0.00185
},
{
"Mass": 137.905991,
"Abundance": 0.00251
},
{
"Mass": 139.9054431,
"Abundance": 0.8845
},
{
"Mass": 141.9092504,
"Abundance": 0.11114
}
]
},
{
"Symbol": "Pr",
"Name": "praseodymium",
"Number": 59,
"Isotope": [
{
"Mass": 140.9076576,
"Abundance": 1
}
]
},
{
"Symbol": "Nd",
"Name": "neodymium",
"Number": 60,
"Isotope": [
{
"Mass": 141.907729,
"Abundance": 0.27152
},
{
"Mass": 142.90982,
"Abundance": 0.12174
},
{
"Mass": 143.910093,
"Abundance": 0.23798
},
{
"Mass": 144.9125793,
"Abundance": 0.08293
},
{
"Mass": 145.9131226,
"Abundance": 0.17189
},
{
"Mass": 147.9168993,
"Abundance": 0.05756
},
{
"Mass": 149.9209022,
"Abundance": 0.05638
}
]
},
{
"Symbol": "Pm",
"Name": "promethium",
"Number": 61,
"Isotope": null
},
{
"Symbol": "Sm",
"Name": "samarium",
"Number": 62,
"Isotope": [
{
"Mass": 143.9120065,
"Abundance": 0.0307
},
{
"Mass": 146.9149044,
"Abundance": 0.1499
},
{
"Mass": 147.9148292,
"Abundance": 0.1124
},
{
"Mass": 148.9171921,
"Abundance": 0.1382
},
{
"Mass": 149.9172829,
"Abundance": 0.0738
},
{
"Mass": 151.9197397,
"Abundance": 0.2675
},
{
"Mass": 153.9222169,
"Abundance": 0.2275
}
]
},
{
"Symbol": "Eu",
"Name": "europium",
"Number": 63,
"Isotope": [
{
"Mass": 150.9198578,
"Abundance": 0.4781
},
{
"Mass": 152.921238,
"Abundance": 0.5219
}
]
},
{
"Symbol": "Gd",
"Name": "gadolinium",
"Number": 64,
"Isotope": [
{
"Mass": 151.9197995,
"Abundance": 0.002
},
{
"Mass": 153.9208741,
"Abundance": 0.0218
},
{
"Mass": 154.9226305,
"Abundance": 0.148
},
{
"Mass": 155.9221312,
"Abundance": 0.2047
},
{
"Mass": 156.9239686,
"Abundance": 0.1565
},
{
"Mass": 157.9241123,
"Abundance": 0.2484
},
{
"Mass": 159.9270624,
"Abundance": 0.2186
}
]
},
{
"Symbol": "Tb",
"Name": "terbium",
"Number": 65,
"Isotope": [
{
"Mass": 158.9253547,
"Abundance": 1
}
]
},
{
"Symbol": "Dy",
"Name": "dysprosium",
"Number": 66,
"Isotope": [
{
"Mass": 155.9242847,
"Abundance": 0.00056
},
{
"Mass": 157.9244159,
"Abundance": 0.00095
},
{
"Mass": 159.9252046,
"Abundance": 0.02329
},
{
"Mass": 160.9269405,
"Abundance": 0.18889
},
{
"Mass": 161.9268056,
"Abundance": 0.25475
},
{
"Mass": 162.9287383,
"Abundance": 0.24896
},
{
"Mass": 163.9291819,
"Abundance": 0.2826
}
]
},
{
"Symbol": "Ho",
"Name": "holmium",
"Number": 67,
"Isotope": [
{
"Mass": 164.9303288,
"Abundance": 1
}
]
},
{
"Symbol": "Er",
"Name": "erbium",
"Number": 68,
"Isotope": [
{
"Mass": 161.9287884,
"Abundance": 0.00139
},
{
"Mass": 163.9292088,
"Abundance": 0.01601
},
{
"Mass": 165.9302995,
"Abundance": 0.33503
},
{
"Mass": 166.9320546,
"Abundance": 0.22869
},
{
"Mass": 167.9323767,
"Abundance": 0.26978
},
{
"Mass": 169.9354702,
"Abundance": 0.1491
}
]
},
{
"Symbol": "Tm",
"Name": "thulium",
"Number": 69,
"Isotope": [
{
"Mass": 168.9342179,
"Abundance": 1
}
]
},
{
"Symbol": "Yb",
"Name": "ytterbium",
"Number": 70,
"Isotope": [
{
"Mass": 167.9338896,
"Abundance": 0.00123
},
{
"Mass": 169.9347664,
"Abundance": 0.02982
},
{
"Mass": 170.9363302,
"Abundance": 0.1409
},
{
"Mass": 171.9363859,
"Abundance": 0.2168
},
{
"Mass": 172.9382151,
"Abundance": 0.16103
},
{
"Mass": 173.9388664,
"Abundance": 0.32026
},
{
"Mass": 175.9425764,
"Abundance": 0.12996
}
]
},
{
"Symbol": "Lu",
"Name": "lutetium",
"Number": 71,
"Isotope": [
{
"Mass": 174.9407752,
"Abundance": 0.97401
},
{
"Mass": 175.9426897,
"Abundance": 0.02599
}
]
},
{
"Symbol": "Hf",
"Name": "hafnium",
"Number": 72,
"Isotope": [
{
"Mass": 173.9400461,
"Abundance": 0.0016
},
{
"Mass": 175.9414076,
"Abundance": 0.0526
},
{
"Mass": 176.9432277,
"Abundance": 0.186
},
{
"Mass": 177.9437058,
"Abundance": 0.2728
},
{
"Mass": 178.9458232,
"Abundance": 0.1362
},
{
"Mass": 179.946557,
"Abundance": 0.3508
}
]
},
{
"Symbol": "Ta",
"Name": "tantalum",
"Number": 73,
"Isotope": [
{
"Mass": 179.9474648,
"Abundance": 0.0001201
},
{
"Mass": 180.9479958,
"Abundance": 0.9998799
}
]
},
{
"Symbol": "W",
"Name": "tungsten",
"Number": 74,
"Isotope": [
{
"Mass": 179.9467108,
"Abundance": 0.0012
},
{
"Mass": 181.94820394,
"Abundance": 0.265
},
{
"Mass": 182.95022275,
"Abundance": 0.1431
},
{
"Mass": 183.95093092,
"Abundance": 0.3064
},
{
"Mass": 185.9543628,
"Abundance": 0.2843
}
]
},
{
"Symbol": "Re",
"Name": "rhenium",
"Number": 75,
"Isotope": [
{
"Mass": 184.9529545,
"Abundance": 0.374
},
{
"Mass": 186.9557501,
"Abundance": 0.626
}
]
},
{
"Symbol": "Os",
"Name": "osmium",
"Number": 76,
"Isotope": [
{
"Mass": 183.9524885,
"Abundance": 0.0002
},
{
"Mass": 185.953835,
"Abundance": 0.0159
},
{
"Mass": 186.9557474,
"Abundance": 0.0196
},
{
"Mass": 187.9558352,
"Abundance": 0.1324
},
{
"Mass": 188.9581442,
"Abundance": 0.1615
},
{
"Mass": 189.9584437,
"Abundance": 0.2626
},
{
"Mass": 191.961477,
"Abundance": 0.4078
}
]
},
{
"Symbol": "Ir",
"Name": "iridium",
"Number": 77,
"Isotope": [
{
"Mass": 190.9605893,
"Abundance": 0.373
},
{
"Mass": 192.9629216,
"Abundance": 0.627
}
]
},
{
"Symbol": "Pt",
"Name": "platinum",
"Number": 78,
"Isotope": [
{
"Mass": 189.9599297,
"Abundance": 0.00012
},
{
"Mass": 191.9610387,
"Abundance": 0.00782
},
{
"Mass": 193.9626809,
"Abundance": 0.3286
},
{
"Mass": 194.9647917,
"Abundance": 0.3378
},
{
"Mass": 195.96495209,
"Abundance": 0.2521
},
{
"Mass": 197.9678949,
"Abundance": 0.07356
}
]
},
{
"Symbol": "Au",
"Name": "gold",
"Number": 79,
"Isotope": [
{
"Mass": 196.96656879,
"Abundance": 1
}
]
},
{
"Symbol": "Hg",
"Name": "mercury",
"Number": 80,
"Isotope": [
{
"Mass": 195.9658326,
"Abundance": 0.0015
},
{
"Mass": 197.9667686,
"Abundance": 0.0997
},
{
"Mass": 198.96828064,
"Abundance": 0.1687
},
{
"Mass": 199.96832659,
"Abundance": 0.231
},
{
"Mass": 200.97030284,
"Abundance": 0.1318
},
{
"Mass": 201.9706434,
"Abundance": 0.2986
},
{
"Mass": 203.97349398,
"Abundance": 0.0687
}
]
},
{
"Symbol": "Tl",
"Name": "thallium",
"Number": 81,
"Isotope": [
{
"Mass": 202.9723446,
"Abundance": 0.2952
},
{
"Mass": 204.9744278,
"Abundance": 0.7048
}
]
},
{
"Symbol": "Pb",
"Name": "lead",
"Number": 82,
"Isotope": [
{
"Mass": 203.973044,
"Abundance": 0.014
},
{
"Mass": 205.9744657,
"Abundance": 0.241
},
{
"Mass": 206.9758973,
"Abundance": 0.221
},
{
"Mass": 207.9766525,
"Abundance": 0.524
}
]
},
{
"Symbol": "Bi",
"Name": "bismuth",
"Number": 83,
"Isotope": [
{
"Mass": 208.9803991,
"Abundance": 1
}
]
},
{
"Symbol": "Th",
"Name": "thorium",
"Number": 90,
"Isotope": [
{
"Mass": 232.0380558,
"Abundance": 1
}
]
},
{
"Symbol": "Pa",
"Name": "protactinium",
"Number": 91,
"Isotope": [
{
"Mass": 231.0358842,
"Abundance": 1
}
]
},
{
"Symbol": "U",
"Name": "uranium",
"Number": 92,
"Isotope": [
{
"Mass": 234.0409523,
"Abundance": 0.000054
},
{
"Mass": 235.0439301,
"Abundance": 0.007204
},
{
"Mass": 238.0507884,
"Abundance": 0.992742
}
]
}
]
` | elements/defaults.go | 0.623721 | 0.574037 | defaults.go | starcoder |
package filters
import "math"
/*
Filters used for grid-based interpolation.
Filter code adapted from https://github.com/disintegration/imaging/
MIT License - https://github.com/disintegration/imaging/blob/master/LICENSE
*/
var (
	// Box is a box (pixel-averaging) filter: every sample within half a
	// unit of the target contributes with equal weight.
	Box = GridFilter{
		Size: 0.5,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x > 0.5 {
				return 0
			}
			return 1.0
		},
	}

	// Linear is a triangle (linear interpolation) filter.
	Linear = GridFilter{
		Size: 1.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 1.0 {
				return 0
			}
			return 1.0 - x
		},
	}

	// Hermite is the Hermite cubic spline filter (BC-spline; B=0; C=0).
	Hermite = GridFilter{
		Size: 1.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 1.0 {
				return 0
			}
			return bcspline(x, 0.0, 0.0)
		},
	}

	// MitchellNetravali is the Mitchell-Netravali cubic filter
	// (BC-spline; B=1/3; C=1/3).
	MitchellNetravali = GridFilter{
		Size: 2.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 2.0 {
				return 0
			}
			return bcspline(x, 1.0/3.0, 1.0/3.0)
		},
	}

	// CatmullRom is the Catmull-Rom sharp cubic filter (BC-spline; B=0; C=0.5).
	CatmullRom = GridFilter{
		Size: 2.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 2.0 {
				return 0
			}
			return bcspline(x, 0.0, 0.5)
		},
	}

	// BSpline is a smooth cubic filter (BC-spline; B=1; C=0).
	BSpline = GridFilter{
		Size: 2.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 2.0 {
				return 0
			}
			return bcspline(x, 1.0, 0.0)
		},
	}

	// Gaussian is a Gaussian blurring filter.
	Gaussian = GridFilter{
		Size: 2.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 2.0 {
				return 0
			}
			return math.Exp(-2 * x * x)
		},
	}

	// Bartlett is a Bartlett-windowed sinc filter (3 lobes).
	Bartlett = GridFilter{
		Size: 3.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 3.0 {
				return 0
			}
			return sinc(x) * (3.0 - x) / 3.0
		},
	}

	// Lanczos is a Lanczos filter (3 lobes).
	Lanczos = GridFilter{
		Size: 3.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 3.0 {
				return 0
			}
			return sinc(x) * sinc(x/3.0)
		},
	}

	// Hann is a Hann-windowed sinc filter (3 lobes).
	Hann = GridFilter{
		Size: 3.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 3.0 {
				return 0
			}
			return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0))
		},
	}

	// Hamming is a Hamming-windowed sinc filter (3 lobes).
	Hamming = GridFilter{
		Size: 3.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 3.0 {
				return 0
			}
			return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0))
		},
	}

	// Blackman is a Blackman-windowed sinc filter (3 lobes).
	Blackman = GridFilter{
		Size: 3.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 3.0 {
				return 0
			}
			return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0))
		},
	}

	// Welch is a Welch-windowed sinc filter (parabolic window, 3 lobes).
	Welch = GridFilter{
		Size: 3.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 3.0 {
				return 0
			}
			return sinc(x) * (1.0 - (x * x / 9.0))
		},
	}

	// Cosine is a cosine-windowed sinc filter (3 lobes).
	Cosine = GridFilter{
		Size: 3.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x >= 3.0 {
				return 0
			}
			return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0))
		},
	}
)
// FilterKernel maps a signed distance from the sample point (in grid
// units) to an interpolation weight. Kernels in this package treat the
// distance symmetrically by taking its absolute value first.
type FilterKernel func(float64) float64
// GridFilter defines the filter size and kernel
// used for interpolating over grid values.
type GridFilter struct {
	// Size is the kernel's support radius in grid units; samples farther
	// than Size from the target point receive zero weight.
	Size float64
	// Kernel computes the weight for a sample at a given distance.
	Kernel FilterKernel
}
// NewGridFilter constructs a GridFilter from a support radius and a
// kernel function.
func NewGridFilter(support float64, kernel FilterKernel) GridFilter {
	return GridFilter{Size: support, Kernel: kernel}
}
// bcspline evaluates the Mitchell-Netravali BC-spline family at |x|
// for shape parameters b and c. The spline is piecewise cubic with
// support [0, 2); outside that range the value is 0.
func bcspline(x, b, c float64) float64 {
	x = math.Abs(x)
	switch {
	case x < 1.0:
		return ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6
	case x < 2.0:
		return ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6
	default:
		return 0
	}
}
// sinc evaluates the normalized sinc function sin(πx)/(πx), with the
// removable singularity at x = 0 defined as 1.
func sinc(x float64) float64 {
	if x != 0 {
		return math.Sin(math.Pi*x) / (math.Pi * x)
	}
	return 1
}
package models
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// AssignmentFilterEvaluationSummary represents the result summary of an
// assignment filter evaluation (Microsoft Graph model; generated code).
type AssignmentFilterEvaluationSummary struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // The admin defined name for assignment filter.
    assignmentFilterDisplayName *string
    // Unique identifier for the assignment filter object
    assignmentFilterId *string
    // The time the assignment filter was last modified.
    assignmentFilterLastModifiedDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // The platform for which this assignment filter is created. Possible values are: android, androidForWork, iOS, macOS, windowsPhone81, windows81AndLater, windows10AndLater, androidWorkProfile, unknown.
    assignmentFilterPlatform *DevicePlatformType
    // Indicate filter type either include or exclude. Possible values are: none, include, exclude.
    assignmentFilterType *DeviceAndAppManagementAssignmentFilterType
    // A collection of filter types and their corresponding evaluation results.
    assignmentFilterTypeAndEvaluationResults []AssignmentFilterTypeAndEvaluationResultable
    // The time assignment filter was evaluated.
    evaluationDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Assignment filter evaluation result. Possible values are: unknown, match, notMatch, inconclusive, failure, notEvaluated.
    evaluationResult *AssignmentFilterEvaluationResult
}
// NewAssignmentFilterEvaluationSummary instantiates a new
// assignmentFilterEvaluationSummary and sets the default values.
func NewAssignmentFilterEvaluationSummary()(*AssignmentFilterEvaluationSummary) {
    summary := &AssignmentFilterEvaluationSummary{}
    summary.SetAdditionalData(map[string]interface{}{})
    return summary
}
// CreateAssignmentFilterEvaluationSummaryFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateAssignmentFilterEvaluationSummaryFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    // The model has no subtypes, so the discriminator is never inspected.
    result := NewAssignmentFilterEvaluationSummary()
    return result, nil
}
// GetAdditionalData gets the additionalData property value: extra data
// not described in the OpenAPI description, captured at deserialization.
// Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetAssignmentFilterDisplayName gets the assignmentFilterDisplayName
// property value: the admin-defined name for the assignment filter.
// Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetAssignmentFilterDisplayName()(*string) {
    if m == nil {
        return nil
    }
    return m.assignmentFilterDisplayName
}
// GetAssignmentFilterId gets the assignmentFilterId property value: the
// unique identifier for the assignment filter object.
// Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetAssignmentFilterId()(*string) {
    if m == nil {
        return nil
    }
    return m.assignmentFilterId
}
// GetAssignmentFilterLastModifiedDateTime gets the
// assignmentFilterLastModifiedDateTime property value: the time the
// assignment filter was last modified. Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetAssignmentFilterLastModifiedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.assignmentFilterLastModifiedDateTime
}
// GetAssignmentFilterPlatform gets the assignmentFilterPlatform property
// value: the platform the assignment filter was created for. Possible
// values are: android, androidForWork, iOS, macOS, windowsPhone81,
// windows81AndLater, windows10AndLater, androidWorkProfile, unknown.
// Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetAssignmentFilterPlatform()(*DevicePlatformType) {
    if m == nil {
        return nil
    }
    return m.assignmentFilterPlatform
}
// GetAssignmentFilterType gets the assignmentFilterType property value:
// whether the filter is an include or exclude filter. Possible values
// are: none, include, exclude. Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetAssignmentFilterType()(*DeviceAndAppManagementAssignmentFilterType) {
    if m == nil {
        return nil
    }
    return m.assignmentFilterType
}
// GetAssignmentFilterTypeAndEvaluationResults gets the
// assignmentFilterTypeAndEvaluationResults property value: a collection
// of filter types and their corresponding evaluation results.
// Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetAssignmentFilterTypeAndEvaluationResults()([]AssignmentFilterTypeAndEvaluationResultable) {
    if m == nil {
        return nil
    }
    return m.assignmentFilterTypeAndEvaluationResults
}
// GetEvaluationDateTime gets the evaluationDateTime property value: the
// time the assignment filter was evaluated. Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetEvaluationDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.evaluationDateTime
}
// GetEvaluationResult gets the evaluationResult property value: the
// assignment filter evaluation outcome. Possible values are: unknown,
// match, notMatch, inconclusive, failure, notEvaluated.
// Returns nil for a nil receiver.
func (m *AssignmentFilterEvaluationSummary) GetEvaluationResult()(*AssignmentFilterEvaluationResult) {
    if m == nil {
        return nil
    }
    return m.evaluationResult
}
// GetFieldDeserializers returns the deserialization functions for the
// current model, keyed by JSON property name. Each closure parses one
// property from the supplied parse node and stores it via the matching
// setter; nil values leave the field untouched. (Generated code.)
func (m *AssignmentFilterEvaluationSummary) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["assignmentFilterDisplayName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAssignmentFilterDisplayName(val)
        }
        return nil
    }
    res["assignmentFilterId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAssignmentFilterId(val)
        }
        return nil
    }
    res["assignmentFilterLastModifiedDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetTimeValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAssignmentFilterLastModifiedDateTime(val)
        }
        return nil
    }
    // Enum-valued properties are parsed through their Parse* helpers and
    // type-asserted back to the concrete enum pointer type.
    res["assignmentFilterPlatform"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetEnumValue(ParseDevicePlatformType)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAssignmentFilterPlatform(val.(*DevicePlatformType))
        }
        return nil
    }
    res["assignmentFilterType"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetEnumValue(ParseDeviceAndAppManagementAssignmentFilterType)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAssignmentFilterType(val.(*DeviceAndAppManagementAssignmentFilterType))
        }
        return nil
    }
    // Collection property: each element is converted to the Parsable-backed
    // interface type before the slice is stored.
    res["assignmentFilterTypeAndEvaluationResults"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateAssignmentFilterTypeAndEvaluationResultFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]AssignmentFilterTypeAndEvaluationResultable, len(val))
            for i, v := range val {
                res[i] = v.(AssignmentFilterTypeAndEvaluationResultable)
            }
            m.SetAssignmentFilterTypeAndEvaluationResults(res)
        }
        return nil
    }
    res["evaluationDateTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetTimeValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEvaluationDateTime(val)
        }
        return nil
    }
    res["evaluationResult"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetEnumValue(ParseAssignmentFilterEvaluationResult)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEvaluationResult(val.(*AssignmentFilterEvaluationResult))
        }
        return nil
    }
    return res
}
// Serialize writes the current object's properties to the given
// serialization writer, in a fixed property order, followed by any
// additional data. It returns the first write error encountered, or nil.
// Enum values are written as their string representations; nil enum and
// collection properties are skipped. (Generated code.)
func (m *AssignmentFilterEvaluationSummary) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    {
        err := writer.WriteStringValue("assignmentFilterDisplayName", m.GetAssignmentFilterDisplayName())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("assignmentFilterId", m.GetAssignmentFilterId())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteTimeValue("assignmentFilterLastModifiedDateTime", m.GetAssignmentFilterLastModifiedDateTime())
        if err != nil {
            return err
        }
    }
    if m.GetAssignmentFilterPlatform() != nil {
        cast := (*m.GetAssignmentFilterPlatform()).String()
        err := writer.WriteStringValue("assignmentFilterPlatform", &cast)
        if err != nil {
            return err
        }
    }
    if m.GetAssignmentFilterType() != nil {
        cast := (*m.GetAssignmentFilterType()).String()
        err := writer.WriteStringValue("assignmentFilterType", &cast)
        if err != nil {
            return err
        }
    }
    if m.GetAssignmentFilterTypeAndEvaluationResults() != nil {
        // Re-box each element as a plain Parsable for the writer.
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetAssignmentFilterTypeAndEvaluationResults()))
        for i, v := range m.GetAssignmentFilterTypeAndEvaluationResults() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err := writer.WriteCollectionOfObjectValues("assignmentFilterTypeAndEvaluationResults", cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteTimeValue("evaluationDateTime", m.GetEvaluationDateTime())
        if err != nil {
            return err
        }
    }
    if m.GetEvaluationResult() != nil {
        cast := (*m.GetEvaluationResult()).String()
        err := writer.WriteStringValue("evaluationResult", &cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value: extra data
// not described in the OpenAPI description. No-op on a nil receiver.
func (m *AssignmentFilterEvaluationSummary) SetAdditionalData(value map[string]interface{})() {
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetAssignmentFilterDisplayName sets the assignmentFilterDisplayName
// property value: the admin-defined filter name. No-op on a nil receiver.
func (m *AssignmentFilterEvaluationSummary) SetAssignmentFilterDisplayName(value *string)() {
    if m == nil {
        return
    }
    m.assignmentFilterDisplayName = value
}
// SetAssignmentFilterId sets the assignmentFilterId property value: the
// unique identifier of the assignment filter. No-op on a nil receiver.
func (m *AssignmentFilterEvaluationSummary) SetAssignmentFilterId(value *string)() {
    if m == nil {
        return
    }
    m.assignmentFilterId = value
}
// SetAssignmentFilterLastModifiedDateTime sets the
// assignmentFilterLastModifiedDateTime property value: the time the
// filter was last modified. No-op on a nil receiver.
func (m *AssignmentFilterEvaluationSummary) SetAssignmentFilterLastModifiedDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.assignmentFilterLastModifiedDateTime = value
}
// SetAssignmentFilterPlatform sets the assignmentFilterPlatform property
// value: the platform the filter targets. Possible values are: android,
// androidForWork, iOS, macOS, windowsPhone81, windows81AndLater,
// windows10AndLater, androidWorkProfile, unknown. No-op on a nil receiver.
func (m *AssignmentFilterEvaluationSummary) SetAssignmentFilterPlatform(value *DevicePlatformType)() {
    if m == nil {
        return
    }
    m.assignmentFilterPlatform = value
}
// SetAssignmentFilterType sets the assignmentFilterType property value. Indicate filter type either include or exclude. Possible values are: none, include, exclude.
func (m *AssignmentFilterEvaluationSummary) SetAssignmentFilterType(value *DeviceAndAppManagementAssignmentFilterType)() {
if m != nil {
m.assignmentFilterType = value
}
}
// SetAssignmentFilterTypeAndEvaluationResults sets the assignmentFilterTypeAndEvaluationResults property value. A collection of filter types and their corresponding evaluation results.
func (m *AssignmentFilterEvaluationSummary) SetAssignmentFilterTypeAndEvaluationResults(value []AssignmentFilterTypeAndEvaluationResultable)() {
if m != nil {
m.assignmentFilterTypeAndEvaluationResults = value
}
}
// SetEvaluationDateTime sets the evaluationDateTime property value. The time assignment filter was evaluated.
func (m *AssignmentFilterEvaluationSummary) SetEvaluationDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.evaluationDateTime = value
}
}
// SetEvaluationResult sets the evaluationResult property value. Assignment filter evaluation result. Possible values are: unknown, match, notMatch, inconclusive, failure, notEvaluated.
func (m *AssignmentFilterEvaluationSummary) SetEvaluationResult(value *AssignmentFilterEvaluationResult)() {
if m != nil {
m.evaluationResult = value
}
} | models/assignment_filter_evaluation_summary.go | 0.673406 | 0.41185 | assignment_filter_evaluation_summary.go | starcoder |
package describe
import (
"errors"
"fmt"
"time"
"github.com/theothertomelliott/meetingtime"
)
// Schedule generates an English description of an instance of meetingtime.Schedule.
// It returns an error when the schedule's Type is not one of the known kinds.
func Schedule(schedule meetingtime.Schedule) (string, error) {
	var describe func(meetingtime.Schedule) string
	switch schedule.Type {
	case meetingtime.Daily:
		describe = daily
	case meetingtime.Weekly:
		describe = weekly
	case meetingtime.Monthly:
		describe = monthly
	case meetingtime.MonthlyByWeekday:
		describe = monthlyByWeekday
	case meetingtime.Yearly:
		describe = yearly
	default:
		return "", errors.New("unknown schedule type")
	}
	return describe(schedule), nil
}
// describeEvery builds the shared "Every <unit> starting <date>" phrase used by
// the fixed-frequency schedule descriptions. unit is the singular noun ("day",
// "week", "month", "year"); the plural is formed by appending "s", which holds
// for every unit used here. Output is byte-identical to the previous
// per-function implementations.
func describeEvery(schedule meetingtime.Schedule, unit string) string {
	if schedule.Frequency == 1 {
		return fmt.Sprintf("Every %s starting %v", unit, formatDate(schedule.First))
	}
	return fmt.Sprintf("Every %d %ss starting %v", schedule.Frequency, unit, formatDate(schedule.First))
}

// daily describes a schedule that repeats every Frequency days.
func daily(schedule meetingtime.Schedule) string {
	return describeEvery(schedule, "day")
}

// weekly describes a schedule that repeats every Frequency weeks.
func weekly(schedule meetingtime.Schedule) string {
	return describeEvery(schedule, "week")
}

// monthly describes a schedule that repeats every Frequency months.
func monthly(schedule meetingtime.Schedule) string {
	return describeEvery(schedule, "month")
}

// monthlyByWeekday describes a schedule that repeats on the n-th weekday of
// each month (e.g. "Every 2nd Tuesday"), derived from the first occurrence.
func monthlyByWeekday(schedule meetingtime.Schedule) string {
	weekday, n := meetingtime.GetWeekdayAndIndex(schedule.First)
	return fmt.Sprintf("Every %v%v %v, starting %v", n, ordSuffix(n), weekday.String(), formatDateNoDay(schedule.First))
}

// yearly describes a schedule that repeats every Frequency years.
func yearly(schedule meetingtime.Schedule) string {
	return describeEvery(schedule, "year")
}
func formatDate(d time.Time) string {
return d.Format("Mon Jan 02 2006 at 3:04PM")
}
func formatDateNoDay(d time.Time) string {
return d.Format("Jan 02 2006 at 3:04PM")
}
// ordSuffix returns the English ordinal suffix for x: "st", "nd", "rd" or
// "th". The teens (11th, 12th, 13th — and 111th, 112th, ...) are handled
// first, since they take "th" despite ending in 1, 2 or 3.
func ordSuffix(x int) string {
	if v := x % 100; v >= 11 && v <= 13 {
		return "th"
	}
	switch x % 10 {
	case 1:
		return "st"
	case 2:
		return "nd"
	case 3:
		return "rd"
	}
	return "th"
}
package navmeshv2
import (
"github.com/g3n/engine/math32"
)
const (
	// The terrain is partitioned into a square grid of blocks, each mapped to
	// one RtNavmeshPlane.
	BlocksX = 6
	BlocksY = 6
	BlocksTotal = BlocksX * BlocksY
	// Tile grid resolution of one terrain region (row-major, square).
	TilesX = 96
	TilesY = 96
	TilesTotal = TilesX * TilesY
	// The height map has one vertex per tile corner, hence one extra
	// row/column compared to the tile grid.
	VerticesX = TilesX + 1
	VerticesY = TilesY + 1
	VerticesTotal = VerticesX * VerticesY
	// World-space extent of a region. TerrainWidthInt/HeightInt = 1920 implies
	// TileWidth == TileHeight == 20 world units — TODO confirm against the
	// TileWidth/TileHeight definitions (declared elsewhere in this package).
	TerrainWidth = TilesX * TileWidth
	TerrainHeight = TilesY * TileHeight
	TerrainWidthInt = 1920
	TerrainHeightInt = 1920
)
// RtNavmeshTerrain is a terrain-backed navmesh: a region's tile/plane grids,
// per-vertex height map, and the cells, edges and object instances placed on it.
type RtNavmeshTerrain struct {
	RtNavmeshBase
	// Region identifies which world region this terrain belongs to.
	Region Region
	// tileMap is the row-major TilesX x TilesY tile grid.
	tileMap [TilesTotal]RtNavmeshTile
	// planeMap is the row-major BlocksX x BlocksY plane grid.
	planeMap [BlocksTotal]RtNavmeshPlane
	// heightMap holds one height value per tile-corner vertex, row-major.
	heightMap [VerticesTotal]float32
	Objects []RtNavmeshInstObj
	Cells []RtNavmeshCellQuad
	GlobalEdges []RtNavmeshEdgeGlobal
	InternalEdges []RtNavmeshEdgeInternal
}

// GetNavmeshType identifies this navmesh as a terrain navmesh.
func (t RtNavmeshTerrain) GetNavmeshType() RtNavmeshType {
	return RtNavmeshTypeTerrain
}
// NewRtNavmeshTerrain builds an empty terrain navmesh for the given source
// file and region, with all collection fields initialized to empty
// (non-nil) slices.
func NewRtNavmeshTerrain(filename string, region Region) RtNavmeshTerrain {
	terrain := RtNavmeshTerrain{
		RtNavmeshBase: RtNavmeshBase{Filename: filename},
		Region:        region,
	}
	terrain.Objects = make([]RtNavmeshInstObj, 0)
	terrain.Cells = make([]RtNavmeshCellQuad, 0)
	terrain.GlobalEdges = make([]RtNavmeshEdgeGlobal, 0)
	terrain.InternalEdges = make([]RtNavmeshEdgeInternal, 0)
	return terrain
}
// GetCell returns a pointer to the cell at index. The receiver is a value,
// but Cells is a slice, so the returned pointer aliases the shared backing
// array. index is not bounds-checked; out-of-range values panic.
func (t RtNavmeshTerrain) GetCell(index int) RtNavmeshCell {
	return &t.Cells[index]
}
// GetTile returns the tile at tile coordinates (x, y).
// The tile map is row-major, so the row offset multiplies y by the row width
// TilesX. (The previous code multiplied by TilesY, which produced the same
// result only because the grid is square; this form is correct by
// construction and safe if the dimensions ever diverge.)
// Coordinates are not bounds-checked; out-of-range values panic.
func (t RtNavmeshTerrain) GetTile(x, y int) RtNavmeshTile {
	return t.tileMap[y*TilesX+x]
}
// GetHeight returns the height-map value at vertex coordinates (x, y).
// The height map is row-major with row width VerticesX. (The previous code
// multiplied by VerticesY, identical only because the grid is square.)
// Coordinates are not bounds-checked; out-of-range values panic.
func (t RtNavmeshTerrain) GetHeight(x, y int) float32 {
	return t.heightMap[y*VerticesX+x]
}
// GetPlane returns the plane at block coordinates (xBlock, zBlock).
// The plane map is row-major with row width BlocksX. (The previous code
// multiplied by BlocksY, identical only because the grid is square.)
// Coordinates are not bounds-checked; out-of-range values panic.
func (t RtNavmeshTerrain) GetPlane(xBlock, zBlock int) RtNavmeshPlane {
	return t.planeMap[zBlock*BlocksX+xBlock]
}
// ResolveCell returns the quad cell containing pos by looking up the tile
// under the position and following its CellIndex. The error result is
// currently always nil. pos is assumed to lie inside the terrain bounds —
// out-of-range positions index past tileMap/Cells and panic
// (NOTE(review): confirm callers guarantee this).
func (t RtNavmeshTerrain) ResolveCell(pos *math32.Vector3) (RtNavmeshCellQuad, error) {
	tile := t.GetTile(int(pos.X/TileWidth), int(pos.Z/TileHeight))
	return t.Cells[tile.CellIndex], nil
}
func (t RtNavmeshTerrain) ResolveHeight(pos *math32.Vector3) float32 {
tileX := int(pos.X / TileWidth)
tileZ := int(pos.Z / TileHeight)
if tileX < 0 {
tileX = 0
}
if tileZ < 0 {
tileZ = 0
}
tileX1 := tileX + 1
tileZ1 := tileZ + 1
if tileX1 >= tileX {
tileX1 = tileX
}
if tileZ1 >= tileZ {
tileZ1 = tileZ
}
h1 := t.GetHeight(tileX, tileZ)
h2 := t.GetHeight(tileX, tileZ1)
h3 := t.GetHeight(tileX1, tileZ)
h4 := t.GetHeight(tileX1, tileZ1)
// h1--------h3
// | | |
// | | |
// h5--+------h6
// | | |
// h2--------h4
tileOffsetX := pos.X - (TileWidth * float32(tileX))
tileOffsetXLength := tileOffsetX / TileWidth
tileOffsetZ := pos.Z - (TileHeight * float32(tileZ))
tileOffsetZLength := tileOffsetZ / TileHeight
h5 := h1 + (h2-h1)*tileOffsetZLength
h6 := h3 + (h4-h3)*tileOffsetZLength
yHeight := h5 + (h6-h5)*tileOffsetXLength
return yHeight
} | navmeshv2/rt_navmesh_terrain.go | 0.654895 | 0.423637 | rt_navmesh_terrain.go | starcoder |
package digit
// Standard link relation type names for use in the "rel" attribute of web
// links. The names appear to follow the IANA Link Relations registry
// (RFC 8288, formerly RFC 5988) — confirm against the registry before
// extending this list.
const (
	// LinkRelationAlternate designates a substitute for the link's context.
	LinkRelationAlternate = "alternate"
	// LinkRelationAppendix refers to an appendix.
	LinkRelationAppendix = "appendix"
	// LinkRelationBookmark refers to a bookmark or entry point.
	LinkRelationBookmark = "bookmark"
	// LinkRelationChapter refers to a chapter in a collection of resources.
	LinkRelationChapter = "chapter"
	// LinkRelationContents refers to a table of contents.
	LinkRelationContents = "contents"
	// LinkRelationCopyright refers to a copyright statement that applies to the link's context.
	LinkRelationCopyright = "copyright"
	// LinkRelationCurrent refers to a resource containing the most recent item(s) in a collection of resources.
	LinkRelationCurrent = "current"
	// LinkRelationDescribedBy refers to a resource providing information about the link's context.
	LinkRelationDescribedBy = "describedby"
	// LinkRelationEdit refers to a resource that can be used to edit the link's context.
	LinkRelationEdit = "edit"
	// LinkRelationEditMedia refers to a resource that can be used to edit media associated with the link's context.
	LinkRelationEditMedia = "edit-media"
	// LinkRelationEnclosure identifies a related resource that is potentially large and might require special handling.
	LinkRelationEnclosure = "enclosure"
	// LinkRelationFirst is an IRI that refers to the furthest preceding resource in a series of resources.
	LinkRelationFirst = "first"
	// LinkRelationGlossary refers to a glossary of terms.
	LinkRelationGlossary = "glossary"
	// LinkRelationHelp refers to a resource offering help (more information, links to other sources information, etc.)
	LinkRelationHelp = "help"
	// LinkRelationHub refers to a hub that enables registration for notification of updates to the context.
	LinkRelationHub = "hub"
	// LinkRelationIndex refers to an index.
	LinkRelationIndex = "index"
	// LinkRelationLast is an IRI that refers to the furthest following resource in a series of resources.
	LinkRelationLast = "last"
	// LinkRelationLatestVersion points to a resource containing the latest (e.g., current) version of the context.
	LinkRelationLatestVersion = "latest-version"
	// LinkRelationLicense refers to a license associated with the link's context.
	LinkRelationLicense = "license"
	// LinkRelationNext refers to the next resource in a ordered series of resources.
	LinkRelationNext = "next"
	// LinkRelationNextArchive refers to the immediately following archive resource.
	LinkRelationNextArchive = "next-archive"
	// LinkRelationPayment indicates a resource where payment is accepted.
	LinkRelationPayment = "payment"
	// LinkRelationPrev refers to the previous resource in an ordered series of resources. Synonym for "previous".
	LinkRelationPrev = "prev"
	// LinkRelationPredecessorVersion points to a resource containing the predecessor version in the version history.
	LinkRelationPredecessorVersion = "predecessor-version"
	// LinkRelationPrevious refers to the previous resource in an ordered series of resources. Synonym for "prev".
	LinkRelationPrevious = "previous"
	// LinkRelationPreviousArchive refers to the immediately preceding archive resource.
	LinkRelationPreviousArchive = "prev-archive"
	// LinkRelationRelated identifies a related resource.
	LinkRelationRelated = "related"
	// LinkRelationReplies identifies a resource that is a reply to the context of the link.
	LinkRelationReplies = "replies"
	// LinkRelationSection refers to a section in a collection of resources.
	LinkRelationSection = "section"
	// LinkRelationSelf conveys an identifier for the link's context.
	LinkRelationSelf = "self"
	// LinkRelationService indicates a URI that can be used to retrieve a service document.
	LinkRelationService = "service"
	// LinkRelationStart refers to the first resource in a collection of resources.
	LinkRelationStart = "start"
	// LinkRelationStylesheet refers to an external style sheet.
	LinkRelationStylesheet = "stylesheet"
	// LinkRelationSubsection refers to a resource serving as a subsection in a collection of resources.
	LinkRelationSubsection = "subsection"
	// LinkRelationSuccessorVersion points to a resource containing the successor version in the version history.
	LinkRelationSuccessorVersion = "successor-version"
	// LinkRelationUp refers to a parent document in a hierarchy of documents.
	LinkRelationUp = "up"
	// LinkRelationVersionHistory points to a resource containing the version history for the context.
	LinkRelationVersionHistory = "version-history"
	// LinkRelationVia identifies a resource that is the source of the information in the link's context.
	LinkRelationVia = "via"
	// LinkRelationWorkingCopy points to a working copy for this resource.
	LinkRelationWorkingCopy = "working-copy"
	// LinkRelationWorkingCopyOf points to the versioned resource from which this working copy was obtained.
	LinkRelationWorkingCopyOf = "working-copy-of"
)
package ui
// A Label is a static line of text used to mark other controls.
// Label text is drawn on a single line; text that does not fit is truncated.
// A Label can appear in one of two places: bound to a control or standalone.
// This determines the vertical alignment of the label.
type Label struct {
	// created reports whether the OS-level control has been made yet.
	created bool
	// sysData holds the platform-specific control state.
	sysData *sysData
	// initText buffers text assigned before the control is created;
	// make() pushes it to the OS control.
	initText string
	// standalone selects top-aligned (standalone) versus bound vertical placement.
	standalone bool
}
// NewLabel creates a new Label with the specified text.
// The label is set to be bound to a control, so its vertical position depends on its vertical cell size in an implementation-defined manner.
func NewLabel(text string) *Label {
	return newLabel(text, false)
}

// NewStandaloneLabel creates a new Label with the specified text.
// The label is set to be standalone, so its vertical position will always be at the top of the vertical space assigned to it.
func NewStandaloneLabel(text string) *Label {
	return newLabel(text, true)
}

// newLabel is the shared constructor backing NewLabel and NewStandaloneLabel;
// standalone selects the vertical-alignment mode.
func newLabel(text string, standalone bool) *Label {
	return &Label{
		sysData:    mksysdata(c_label),
		initText:   text,
		standalone: standalone,
	}
}
// SetText sets the Label's text. Before the OS control exists the text is
// buffered in initText; afterwards it is pushed straight to the control.
func (l *Label) SetText(text string) {
	if !l.created {
		l.initText = text
		return
	}
	l.sysData.setText(text)
}

// Text returns the Label's text, reading the buffered value until the OS
// control exists and the live control text afterwards.
func (l *Label) Text() string {
	if !l.created {
		return l.initText
	}
	return l.sysData.text()
}
// make creates the underlying OS control inside window, pushes the buffered
// initial text to it, and marks the Label as created.
func (l *Label) make(window *sysData) error {
	l.sysData.alternate = l.standalone
	if err := l.sysData.make(window); err != nil {
		return err
	}
	l.sysData.setText(l.initText)
	l.created = true
	return nil
}
// allocate reserves the given rectangle for this Label and returns it as a
// single-element allocation list.
func (l *Label) allocate(x int, y int, width int, height int, d *sysSizeData) []*allocation {
	a := &allocation{
		x:      x,
		y:      y,
		width:  width,
		height: height,
		this:   l,
	}
	return []*allocation{a}
}
// preferredSize delegates to the underlying OS control's preferred size.
func (l *Label) preferredSize(d *sysSizeData) (width int, height int) {
	return l.sysData.preferredSize(d)
}

// commitResize applies the computed allocation to the underlying OS control.
func (l *Label) commitResize(a *allocation, d *sysSizeData) {
	l.sysData.commitResize(a, d)
}

// getAuxResizeInfo delegates auxiliary resize bookkeeping to the underlying OS control.
func (l *Label) getAuxResizeInfo(d *sysSizeData) {
	l.sysData.getAuxResizeInfo(d)
}
package datatype
import (
"fmt"
"math"
"github.com/i-sevostyanov/NanoDB/internal/sql"
)
// Integer is a sql.Value backed by a 64-bit signed integer.
type Integer struct {
	value int64
}

// NewInteger wraps v in an Integer value.
func NewInteger(v int64) Integer {
	return Integer{value: v}
}

// Raw returns the underlying int64 as an interface{}.
func (i Integer) Raw() interface{} {
	return i.value
}

// DataType reports sql.Integer as this value's SQL type.
func (i Integer) DataType() sql.DataType {
	return sql.Integer
}
// Compare orders i against v. An int64 argument is compared numerically,
// SQL NULL (nil) sorts before any integer (so Greater is returned), and any
// other type yields an error.
// Fix: the error message previously misspelled "unexpected".
func (i Integer) Compare(v sql.Value) (sql.CompareType, error) {
	switch value := v.Raw().(type) {
	case int64:
		switch {
		case i.value < value:
			return sql.Less, nil
		case i.value > value:
			return sql.Greater, nil
		default:
			return sql.Equal, nil
		}
	case nil:
		return sql.Greater, nil
	default:
		return sql.Equal, fmt.Errorf("unexpected arg type: %T", value)
	}
}
// UnaryPlus returns the value unchanged.
func (i Integer) UnaryPlus() (sql.Value, error) {
	return i, nil
}

// UnaryMinus returns the arithmetic negation of the value.
func (i Integer) UnaryMinus() (sql.Value, error) {
	return Integer{value: -i.value}, nil
}
// Add returns i + v. An int64 argument yields an Integer, a float64 argument
// promotes the result to Float, and SQL NULL propagates as Null.
// Fix (applies to all six arithmetic methods below): the error message
// previously misspelled "unexpected".
func (i Integer) Add(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Integer{value: i.value + value}, nil
	case float64:
		return Float{value: float64(i.value) + value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// Sub returns i - v, with the same type-promotion and NULL rules as Add.
func (i Integer) Sub(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Integer{value: i.value - value}, nil
	case float64:
		return Float{value: float64(i.value) - value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// Mul returns i * v, with the same type-promotion and NULL rules as Add.
func (i Integer) Mul(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Integer{value: i.value * value}, nil
	case float64:
		return Float{value: float64(i.value) * value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// Div returns i / v (integer division for int64, float division for float64).
// Division by zero is rejected with an error for both numeric types.
func (i Integer) Div(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		if value == 0 {
			return nil, fmt.Errorf("division by zero")
		}
		return Integer{value: i.value / value}, nil
	case float64:
		if value == 0 {
			return nil, fmt.Errorf("division by zero")
		}
		return Float{value: float64(i.value) / value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// Pow returns i raised to the power v; the result is always a Float
// (math.Pow operates on float64).
func (i Integer) Pow(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Float{value: math.Pow(float64(i.value), float64(value))}, nil
	case float64:
		return Float{value: math.Pow(float64(i.value), value)}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// Mod returns i modulo v (Go's % for int64, math.Mod for float64).
// A zero divisor is rejected with an error for both numeric types.
func (i Integer) Mod(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		if value == 0 {
			return nil, fmt.Errorf("division by zero")
		}
		return Integer{value: i.value % value}, nil
	case float64:
		if value == 0 {
			return nil, fmt.Errorf("division by zero")
		}
		return Float{value: math.Mod(float64(i.value), value)}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}
// Equal returns a Boolean reporting i == v; float64 arguments are compared
// after promoting i, and SQL NULL propagates as Null.
// Fix (applies to all six comparison methods below): the error message
// previously misspelled "unexpected".
func (i Integer) Equal(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Boolean{value: i.value == value}, nil
	case float64:
		return Boolean{value: float64(i.value) == value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// NotEqual returns a Boolean reporting i != v, with the same rules as Equal.
func (i Integer) NotEqual(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Boolean{value: i.value != value}, nil
	case float64:
		return Boolean{value: float64(i.value) != value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// GreaterThan returns a Boolean reporting i > v, with the same rules as Equal.
func (i Integer) GreaterThan(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Boolean{value: i.value > value}, nil
	case float64:
		return Boolean{value: float64(i.value) > value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// LessThan returns a Boolean reporting i < v, with the same rules as Equal.
func (i Integer) LessThan(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Boolean{value: i.value < value}, nil
	case float64:
		return Boolean{value: float64(i.value) < value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// GreaterOrEqual returns a Boolean reporting i >= v, with the same rules as Equal.
func (i Integer) GreaterOrEqual(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Boolean{value: i.value >= value}, nil
	case float64:
		return Boolean{value: float64(i.value) >= value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}

// LessOrEqual returns a Boolean reporting i <= v, with the same rules as Equal.
func (i Integer) LessOrEqual(v sql.Value) (sql.Value, error) {
	switch value := v.Raw().(type) {
	case int64:
		return Boolean{value: i.value <= value}, nil
	case float64:
		return Boolean{value: float64(i.value) <= value}, nil
	case nil:
		return Null{}, nil
	default:
		return nil, fmt.Errorf("unexpected arg type: %T", value)
	}
}
// And is not defined for integers; it always returns an error.
func (i Integer) And(_ sql.Value) (sql.Value, error) {
	return nil, fmt.Errorf("unsupported operation")
}

// Or is not defined for integers; it always returns an error.
func (i Integer) Or(_ sql.Value) (sql.Value, error) {
	return nil, fmt.Errorf("unsupported operation")
}
package softwarebackend
import (
"image/color"
"math"
"github.com/gsvigruha/canvas/backend/backendbase"
)
// triangleLR computes the horizontal span of triangle tri at scanline y.
// It returns the left and right x coordinates where the scanline crosses the
// triangle's edges, with l <= r. outside is true when y is above or below the
// triangle entirely; in that case both extents collapse to a vertex x.
func triangleLR(tri []backendbase.Vec, y float64) (l, r float64, outside bool) {
	a, b, c := tri[0], tri[1], tri[2]
	// sort the three vertices by y so that a[1] <= b[1] <= c[1]
	if a[1] > b[1] {
		a, b = b, a
	}
	if b[1] > c[1] {
		b, c = c, b
		if a[1] > b[1] {
			a, b = b, a
		}
	}
	// check general bounds
	if y <= a[1] {
		return a[0], a[0], true
	}
	if y > c[1] {
		return c[0], c[0], true
	}
	// find left and right x at y: one extent lies on the long edge a-c, the
	// other on edge a-b (upper half) or edge b-c (lower half)
	if y >= a[1] && y <= b[1] && a[1] < b[1] {
		r0 := (y - a[1]) / (b[1] - a[1])
		l = (b[0]-a[0])*r0 + a[0]
		r1 := (y - a[1]) / (c[1] - a[1])
		r = (c[0]-a[0])*r1 + a[0]
	} else {
		r0 := (y - b[1]) / (c[1] - b[1])
		l = (c[0]-b[0])*r0 + b[0]
		r1 := (y - a[1]) / (c[1] - a[1])
		r = (c[0]-a[0])*r1 + a[0]
	}
	// the names l/r are provisional until this swap orders them
	if l > r {
		l, r = r, l
	}
	return
}
// fillTriangleNoAA rasterizes tri without anti-aliasing, calling fn once per
// covered pixel. Scanlines are clipped to [0, b.h) and each span to [0, b.w);
// a pixel counts as covered when its center (x+0.5, y+0.5) lies inside the
// [l, r) span returned by triangleLR.
func (b *SoftwareBackend) fillTriangleNoAA(tri []backendbase.Vec, fn func(x, y int)) {
	minY := int(math.Floor(math.Min(math.Min(tri[0][1], tri[1][1]), tri[2][1])))
	maxY := int(math.Ceil(math.Max(math.Max(tri[0][1], tri[1][1]), tri[2][1])))
	// clamp the vertical bounding box to the framebuffer; bail if disjoint
	if minY < 0 {
		minY = 0
	} else if minY >= b.h {
		return
	}
	if maxY < 0 {
		return
	} else if maxY >= b.h {
		maxY = b.h - 1
	}
	for y := minY; y <= maxY; y++ {
		l, r, out := triangleLR(tri, float64(y)+0.5)
		if out {
			continue
		}
		// clip the span horizontally to the framebuffer
		if l < 0 {
			l = 0
		} else if l > float64(b.w) {
			continue
		}
		if r < 0 {
			continue
		} else if r > float64(b.w) {
			r = float64(b.w)
		}
		if l >= r {
			continue
		}
		fl, cr := int(math.Floor(l)), int(math.Ceil(r))
		for x := fl; x <= cr; x++ {
			// fill only pixels whose center falls inside [l, r)
			fx := float64(x) + 0.5
			if fx < l || fx >= r {
				continue
			}
			fn(x, y)
		}
	}
}
// msaaPixel records one covered sub-sample of a partially covered pixel
// produced during MSAA rasterization, so coverage can be blended afterwards.
type msaaPixel struct {
	// ix, iy are the integer pixel coordinates (ix is set to -1 once consumed).
	ix, iy int
	// fx, fy are the sub-sample coordinates inside the pixel.
	fx, fy float64
	// tx, ty are the texture coordinates at the sub-sample (set by quad fills only).
	tx, ty float64
}
// fillTriangleMSAA rasterizes tri with (msaaLevel+1)^2 sub-samples per pixel.
// Pixels covered by every sub-sample are handed to fn immediately; partially
// covered pixels have each covered sub-sample appended to msaaPixels for the
// caller to blend later. The (possibly grown) msaaPixels slice is returned.
// Note: the per-scanline l/r arrays are sized [5], so msaaLevel must be <= 4.
func (b *SoftwareBackend) fillTriangleMSAA(tri []backendbase.Vec, msaaLevel int, msaaPixels []msaaPixel, fn func(x, y int)) []msaaPixel {
	msaaStep := 1.0 / float64(msaaLevel+1)
	minY := int(math.Floor(math.Min(math.Min(tri[0][1], tri[1][1]), tri[2][1])))
	maxY := int(math.Ceil(math.Max(math.Max(tri[0][1], tri[1][1]), tri[2][1])))
	// clamp the vertical bounding box to the framebuffer; bail if disjoint
	if minY < 0 {
		minY = 0
	} else if minY >= b.h {
		return msaaPixels
	}
	if maxY < 0 {
		return msaaPixels
	} else if maxY >= b.h {
		maxY = b.h - 1
	}
	for y := minY; y <= maxY; y++ {
		// compute the clipped span at each sub-scanline of this pixel row
		var l, r [5]float64
		allOut := true
		minL, maxR := math.MaxFloat64, 0.0
		sy := float64(y) + msaaStep*0.5
		for step := 0; step <= msaaLevel; step++ {
			var out bool
			l[step], r[step], out = triangleLR(tri, sy)
			if l[step] < 0 {
				l[step] = 0
			} else if l[step] > float64(b.w) {
				l[step] = float64(b.w)
				out = true
			}
			if r[step] < 0 {
				r[step] = 0
				out = true
			} else if r[step] > float64(b.w) {
				r[step] = float64(b.w)
			}
			if r[step] <= l[step] {
				out = true
			}
			if !out {
				allOut = false
				minL = math.Min(minL, l[step])
				maxR = math.Max(maxR, r[step])
			}
			sy += msaaStep
		}
		if allOut {
			continue
		}
		fl, cr := int(math.Floor(minL)), int(math.Ceil(maxR))
		for x := fl; x <= cr; x++ {
			// first pass: is every sub-sample of this pixel inside?
			sy = float64(y) + msaaStep*0.5
			allIn := true
		check:
			for stepy := 0; stepy <= msaaLevel; stepy++ {
				sx := float64(x) + msaaStep*0.5
				for stepx := 0; stepx <= msaaLevel; stepx++ {
					if sx < l[stepy] || sx >= r[stepy] {
						allIn = false
						break check
					}
					sx += msaaStep
				}
				sy += msaaStep
			}
			if allIn {
				fn(x, y)
				continue
			}
			// second pass: record the covered sub-samples of a partial pixel
			sy = float64(y) + msaaStep*0.5
			for stepy := 0; stepy <= msaaLevel; stepy++ {
				sx := float64(x) + msaaStep*0.5
				for stepx := 0; stepx <= msaaLevel; stepx++ {
					if sx >= l[stepy] && sx < r[stepy] {
						msaaPixels = addMSAAPixel(msaaPixels, msaaPixel{ix: x, iy: y, fx: sx, fy: sy})
					}
					sx += msaaStep
				}
				sy += msaaStep
			}
		}
	}
	return msaaPixels
}
// addMSAAPixel appends px to msaaPixels unless an identical sample is already
// present, returning the (possibly extended) slice. The scan is linear, which
// keeps the common small sample buffers cheap.
func addMSAAPixel(msaaPixels []msaaPixel, px msaaPixel) []msaaPixel {
	for i := range msaaPixels {
		if msaaPixels[i] == px {
			return msaaPixels
		}
	}
	return append(msaaPixels, px)
}
// quadArea returns the magnitude of the cross product of the quad's two edge
// vectors from vertex 0 (0→1 and 0→3). For a parallelogram this equals the
// quad's area; for a general quad it is only an approximation —
// NOTE(review): confirm callers only pass parallelograms before relying on
// this for exact areas.
func quadArea(quad [4]backendbase.Vec) float64 {
	leftv := backendbase.Vec{quad[1][0] - quad[0][0], quad[1][1] - quad[0][1]}
	topv := backendbase.Vec{quad[3][0] - quad[0][0], quad[3][1] - quad[0][1]}
	return math.Abs(leftv[0]*topv[1] - leftv[1]*topv[0])
}
// fillQuadNoAA rasterizes quad without anti-aliasing by splitting it into the
// two triangles (0,1,2) and (0,2,3) and scanning their combined span per row.
// For each covered pixel center it also inverts the quad's edge basis
// (leftv = 0→1, topv = 0→3) to recover normalized texture coordinates
// (tx, ty) in [0,1], which are passed to fn along with the pixel coordinates.
func (b *SoftwareBackend) fillQuadNoAA(quad [4]backendbase.Vec, fn func(x, y int, tx, ty float64)) {
	minY := int(math.Floor(math.Min(math.Min(quad[0][1], quad[1][1]), math.Min(quad[2][1], quad[3][1]))))
	maxY := int(math.Ceil(math.Max(math.Max(quad[0][1], quad[1][1]), math.Max(quad[2][1], quad[3][1]))))
	// clamp the vertical bounding box to the framebuffer; bail if disjoint
	if minY < 0 {
		minY = 0
	} else if minY >= b.h {
		return
	}
	if maxY < 0 {
		return
	} else if maxY >= b.h {
		maxY = b.h - 1
	}
	// normalized edge vectors of the quad's texture basis, with their lengths
	leftv := backendbase.Vec{quad[1][0] - quad[0][0], quad[1][1] - quad[0][1]}
	leftLen := math.Sqrt(leftv[0]*leftv[0] + leftv[1]*leftv[1])
	leftv[0] /= leftLen
	leftv[1] /= leftLen
	topv := backendbase.Vec{quad[3][0] - quad[0][0], quad[3][1] - quad[0][1]}
	topLen := math.Sqrt(topv[0]*topv[0] + topv[1]*topv[1])
	topv[0] /= topLen
	topv[1] /= topLen
	tri1 := [3]backendbase.Vec{quad[0], quad[1], quad[2]}
	tri2 := [3]backendbase.Vec{quad[0], quad[2], quad[3]}
	for y := minY; y <= maxY; y++ {
		lf1, rf1, out1 := triangleLR(tri1[:], float64(y)+0.5)
		lf2, rf2, out2 := triangleLR(tri2[:], float64(y)+0.5)
		if out1 && out2 {
			continue
		}
		// union of the two triangle spans, clipped to the framebuffer
		l := math.Min(lf1, lf2)
		r := math.Max(rf1, rf2)
		if l < 0 {
			l = 0
		} else if l > float64(b.w) {
			continue
		}
		if r < 0 {
			continue
		} else if r > float64(b.w) {
			r = float64(b.w)
		}
		if l >= r {
			continue
		}
		tfy := float64(y) + 0.5 - quad[0][1]
		fl, cr := int(math.Floor(l)), int(math.Ceil(r))
		for x := fl; x <= cr; x++ {
			fx := float64(x) + 0.5
			if fx < l || fx >= r {
				continue
			}
			tfx := fx - quad[0][0]
			// solve (tfx, tfy) = tx*topv + ty*leftv for tx, ty, choosing the
			// branch that divides by the larger leftv component for stability
			var tx, ty float64
			if math.Abs(leftv[0]) > math.Abs(leftv[1]) {
				tx = (tfy - tfx*(leftv[1]/leftv[0])) / (topv[1] - topv[0]*(leftv[1]/leftv[0]))
				ty = (tfx - topv[0]*tx) / leftv[0]
			} else {
				tx = (tfx - tfy*(leftv[0]/leftv[1])) / (topv[0] - topv[1]*(leftv[0]/leftv[1]))
				ty = (tfy - topv[1]*tx) / leftv[1]
			}
			fn(x, y, tx/topLen, ty/leftLen)
		}
	}
}
// fillQuadMSAA rasterizes quad with (msaaLevel+1)^2 sub-samples per pixel,
// combining fillQuadNoAA's texture-coordinate inversion with fillTriangleMSAA's
// coverage logic. Fully covered pixels go straight to fn (with tx, ty at the
// pixel center); partially covered pixels have each covered sub-sample,
// including its texture coordinates, appended to msaaPixels for later
// blending. The (possibly grown) msaaPixels slice is returned.
// Note: the per-scanline l/r arrays are sized [5], so msaaLevel must be <= 4.
func (b *SoftwareBackend) fillQuadMSAA(quad [4]backendbase.Vec, msaaLevel int, msaaPixels []msaaPixel, fn func(x, y int, tx, ty float64)) []msaaPixel {
	msaaStep := 1.0 / float64(msaaLevel+1)
	minY := int(math.Floor(math.Min(math.Min(quad[0][1], quad[1][1]), math.Min(quad[2][1], quad[3][1]))))
	maxY := int(math.Ceil(math.Max(math.Max(quad[0][1], quad[1][1]), math.Max(quad[2][1], quad[3][1]))))
	// clamp the vertical bounding box to the framebuffer; bail if disjoint
	if minY < 0 {
		minY = 0
	} else if minY >= b.h {
		return msaaPixels
	}
	if maxY < 0 {
		return msaaPixels
	} else if maxY >= b.h {
		maxY = b.h - 1
	}
	// normalized edge vectors of the quad's texture basis, with their lengths
	leftv := backendbase.Vec{quad[1][0] - quad[0][0], quad[1][1] - quad[0][1]}
	leftLen := math.Sqrt(leftv[0]*leftv[0] + leftv[1]*leftv[1])
	leftv[0] /= leftLen
	leftv[1] /= leftLen
	topv := backendbase.Vec{quad[3][0] - quad[0][0], quad[3][1] - quad[0][1]}
	topLen := math.Sqrt(topv[0]*topv[0] + topv[1]*topv[1])
	topv[0] /= topLen
	topv[1] /= topLen
	tri1 := [3]backendbase.Vec{quad[0], quad[1], quad[2]}
	tri2 := [3]backendbase.Vec{quad[0], quad[2], quad[3]}
	for y := minY; y <= maxY; y++ {
		// compute the clipped union span at each sub-scanline of this row
		var l, r [5]float64
		allOut := true
		minL, maxR := math.MaxFloat64, 0.0
		sy := float64(y) + msaaStep*0.5
		for step := 0; step <= msaaLevel; step++ {
			lf1, rf1, out1 := triangleLR(tri1[:], sy)
			lf2, rf2, out2 := triangleLR(tri2[:], sy)
			l[step] = math.Min(lf1, lf2)
			r[step] = math.Max(rf1, rf2)
			out := out1 || out2
			if l[step] < 0 {
				l[step] = 0
			} else if l[step] > float64(b.w) {
				l[step] = float64(b.w)
				out = true
			}
			if r[step] < 0 {
				r[step] = 0
				out = true
			} else if r[step] > float64(b.w) {
				r[step] = float64(b.w)
			}
			if r[step] <= l[step] {
				out = true
			}
			if !out {
				allOut = false
				minL = math.Min(minL, l[step])
				maxR = math.Max(maxR, r[step])
			}
			sy += msaaStep
		}
		if allOut {
			continue
		}
		fl, cr := int(math.Floor(minL)), int(math.Ceil(maxR))
		for x := fl; x <= cr; x++ {
			// first pass: is every sub-sample of this pixel inside?
			sy = float64(y) + msaaStep*0.5
			allIn := true
		check:
			for stepy := 0; stepy <= msaaLevel; stepy++ {
				sx := float64(x) + msaaStep*0.5
				for stepx := 0; stepx <= msaaLevel; stepx++ {
					if sx < l[stepy] || sx >= r[stepy] {
						allIn = false
						break check
					}
					sx += msaaStep
				}
				sy += msaaStep
			}
			if allIn {
				// invert the texture basis at the pixel center (see fillQuadNoAA)
				tfx := float64(x) + 0.5 - quad[0][0]
				tfy := float64(y) + 0.5 - quad[0][1]
				var tx, ty float64
				if math.Abs(leftv[0]) > math.Abs(leftv[1]) {
					tx = (tfy - tfx*(leftv[1]/leftv[0])) / (topv[1] - topv[0]*(leftv[1]/leftv[0]))
					ty = (tfx - topv[0]*tx) / leftv[0]
				} else {
					tx = (tfx - tfy*(leftv[0]/leftv[1])) / (topv[0] - topv[1]*(leftv[0]/leftv[1]))
					ty = (tfy - topv[1]*tx) / leftv[1]
				}
				fn(x, y, tx/topLen, ty/leftLen)
				continue
			}
			// second pass: record covered sub-samples with their texture coords
			sy = float64(y) + msaaStep*0.5
			for stepy := 0; stepy <= msaaLevel; stepy++ {
				sx := float64(x) + msaaStep*0.5
				for stepx := 0; stepx <= msaaLevel; stepx++ {
					if sx >= l[stepy] && sx < r[stepy] {
						tfx := sx - quad[0][0]
						tfy := sy - quad[0][1]
						var tx, ty float64
						if math.Abs(leftv[0]) > math.Abs(leftv[1]) {
							tx = (tfy - tfx*(leftv[1]/leftv[0])) / (topv[1] - topv[0]*(leftv[1]/leftv[0]))
							ty = (tfx - topv[0]*tx) / leftv[0]
						} else {
							tx = (tfx - tfy*(leftv[0]/leftv[1])) / (topv[0] - topv[1]*(leftv[0]/leftv[1]))
							ty = (tfy - topv[1]*tx) / leftv[1]
						}
						msaaPixels = addMSAAPixel(msaaPixels, msaaPixel{ix: x, iy: y, fx: sx, fy: sy, tx: tx / topLen, ty: ty / leftLen})
					}
					sx += msaaStep
				}
				sy += msaaStep
			}
		}
	}
	return msaaPixels
}
// fillQuad clears the stencil and fills pts with colors produced by fn,
// respecting the clip mask and using the stencil to touch each pixel at most
// once. With MSAA enabled, fully covered pixels are blended directly and
// partially covered pixels are resolved by averaging fn over their covered
// sub-samples (uncovered sub-samples contribute zero, giving edge
// transparency).
func (b *SoftwareBackend) fillQuad(pts [4]backendbase.Vec, fn func(x, y, tx, ty float64) color.RGBA) {
	b.clearStencil()
	if b.MSAA > 0 {
		var msaaPixelBuf [500]msaaPixel
		msaaPixels := msaaPixelBuf[:0]
		msaaPixels = b.fillQuadMSAA(pts, b.MSAA, msaaPixels, func(x, y int, tx, ty float64) {
			if b.clip.AlphaAt(x, y).A == 0 {
				return
			}
			if b.stencil.AlphaAt(x, y).A > 0 {
				return
			}
			b.stencil.SetAlpha(x, y, color.Alpha{A: 255})
			col := fn(float64(x)+0.5, float64(y)+0.5, tx, ty)
			if col.A > 0 {
				b.Image.SetRGBA(x, y, mix(col, b.Image.RGBAAt(x, y)))
			}
		})
		samples := (b.MSAA + 1) * (b.MSAA + 1)
		// resolve partially covered pixels: average fn over this pixel's
		// covered sub-samples; consumed entries are marked with ix = -1
		for i, px := range msaaPixels {
			if px.ix < 0 || b.clip.AlphaAt(px.ix, px.iy).A == 0 || b.stencil.AlphaAt(px.ix, px.iy).A > 0 {
				continue
			}
			b.stencil.SetAlpha(px.ix, px.iy, color.Alpha{A: 255})
			var mr, mg, mb, ma int
			for j, px2 := range msaaPixels[i:] {
				if px2.ix != px.ix || px2.iy != px.iy {
					continue
				}
				col := fn(px2.fx, px2.fy, px2.tx, px2.ty)
				mr += int(col.R)
				mg += int(col.G)
				mb += int(col.B)
				ma += int(col.A)
				msaaPixels[i+j].ix = -1
			}
			combined := color.RGBA{
				R: uint8(mr / samples),
				G: uint8(mg / samples),
				B: uint8(mb / samples),
				A: uint8(ma / samples),
			}
			b.Image.SetRGBA(px.ix, px.iy, mix(combined, b.Image.RGBAAt(px.ix, px.iy)))
		}
	} else {
		b.fillQuadNoAA(pts, func(x, y int, tx, ty float64) {
			if b.clip.AlphaAt(x, y).A == 0 {
				return
			}
			if b.stencil.AlphaAt(x, y).A > 0 {
				return
			}
			b.stencil.SetAlpha(x, y, color.Alpha{A: 255})
			col := fn(float64(x)+0.5, float64(y)+0.5, tx, ty)
			if col.A > 0 {
				b.Image.SetRGBA(x, y, mix(col, b.Image.RGBAAt(x, y)))
			}
		})
	}
}
// iterateTriangles invokes fn for each triangle described by pts. A length of
// exactly 4 is treated as a quad and fanned into the triangles (0,1,2) and
// (0,2,3), reusing one scratch buffer between the two calls (fn must not
// retain the slice); any other length is consumed as consecutive
// vertex triples, with a trailing remainder ignored.
func iterateTriangles(pts []backendbase.Vec, fn func(tri []backendbase.Vec)) {
	if len(pts) == 4 {
		buf := [3]backendbase.Vec{pts[0], pts[1], pts[2]}
		fn(buf[:])
		buf[1], buf[2] = pts[2], pts[3]
		fn(buf[:])
		return
	}
	for i := 0; i+3 <= len(pts); i += 3 {
		fn(pts[i : i+3])
	}
}
// fillTrianglesNoAA fills every triangle in pts without anti-aliasing,
// coloring each covered pixel with fn. The clip mask suppresses pixels and
// the stencil ensures each pixel is blended at most once across all
// triangles; fully transparent colors are skipped.
func (b *SoftwareBackend) fillTrianglesNoAA(pts []backendbase.Vec, fn func(x, y float64) color.RGBA) {
	iterateTriangles(pts[:], func(tri []backendbase.Vec) {
		b.fillTriangleNoAA(tri, func(x, y int) {
			if b.clip.AlphaAt(x, y).A == 0 {
				return
			}
			if b.stencil.AlphaAt(x, y).A > 0 {
				return
			}
			b.stencil.SetAlpha(x, y, color.Alpha{A: 255})
			col := fn(float64(x), float64(y))
			if col.A > 0 {
				b.Image.SetRGBA(x, y, mix(col, b.Image.RGBAAt(x, y)))
			}
		})
	})
}
// fillTrianglesMSAA fills every triangle in pts with multisampling. Fully
// covered pixels are blended directly; partially covered pixels are resolved
// afterwards by averaging fn over their covered sub-samples (uncovered
// sub-samples contribute zero, yielding edge transparency). The clip mask and
// stencil behave as in fillTrianglesNoAA; consumed sample entries are marked
// with ix = -1.
func (b *SoftwareBackend) fillTrianglesMSAA(pts []backendbase.Vec, msaaLevel int, fn func(x, y float64) color.RGBA) {
	var msaaPixelBuf [500]msaaPixel
	msaaPixels := msaaPixelBuf[:0]
	iterateTriangles(pts[:], func(tri []backendbase.Vec) {
		msaaPixels = b.fillTriangleMSAA(tri, msaaLevel, msaaPixels, func(x, y int) {
			if b.clip.AlphaAt(x, y).A == 0 {
				return
			}
			if b.stencil.AlphaAt(x, y).A > 0 {
				return
			}
			b.stencil.SetAlpha(x, y, color.Alpha{A: 255})
			col := fn(float64(x), float64(y))
			if col.A > 0 {
				b.Image.SetRGBA(x, y, mix(col, b.Image.RGBAAt(x, y)))
			}
		})
	})
	samples := (msaaLevel + 1) * (msaaLevel + 1)
	for i, px := range msaaPixels {
		if px.ix < 0 || b.clip.AlphaAt(px.ix, px.iy).A == 0 || b.stencil.AlphaAt(px.ix, px.iy).A > 0 {
			continue
		}
		b.stencil.SetAlpha(px.ix, px.iy, color.Alpha{A: 255})
		var mr, mg, mb, ma int
		for j, px2 := range msaaPixels[i:] {
			if px2.ix != px.ix || px2.iy != px.iy {
				continue
			}
			col := fn(px2.fx, px2.fy)
			mr += int(col.R)
			mg += int(col.G)
			mb += int(col.B)
			ma += int(col.A)
			msaaPixels[i+j].ix = -1
		}
		combined := color.RGBA{
			R: uint8(mr / samples),
			G: uint8(mg / samples),
			B: uint8(mb / samples),
			A: uint8(ma / samples),
		}
		b.Image.SetRGBA(px.ix, px.iy, mix(combined, b.Image.RGBAAt(px.ix, px.iy)))
	}
}
func (b *SoftwareBackend) fillTriangles(pts []backendbase.Vec, fn func(x, y float64) color.RGBA) {
b.clearStencil()
if b.MSAA > 0 {
b.fillTrianglesMSAA(pts, b.MSAA, fn)
} else {
b.fillTrianglesNoAA(pts, fn)
}
} | backend/softwarebackend/triangles.go | 0.598664 | 0.447279 | triangles.go | starcoder |
package accel3xdigital
// Mode The sensor has three power modes: Off Mode, Standby Mode, and Active Mode to offer the customer different power
// consumption options. The sensor is only capable of running in one of these modes at a time.
type Mode byte
var (
	// Orientation bit patterns. NOTE(review): none of these are referenced in
	// this file — presumably consumed by the driver's tilt/position decoding
	// elsewhere in the package; confirm before removing.
	downMask = [3]bool{true, false, true}
	upMask = [3]bool{false, true, true}
	leftMask = [3]bool{true, false, false}
	rightMask = [3]bool{false, true, false}
	// stateBuff is a package-level 4-byte buffer, presumably scratch space
	// for register reads — confirm against the driver code that uses it.
	stateBuff = make([]byte, 4)
)
const (
	// Standby Mode is ideal for battery operated products. When Standby Mode is active the device outputs are turned off
	// providing a significant reduction in operating current. When the device is in Standby Mode the current will be reduced to
	// approximately 3 µA. Standby Mode is entered as soon as both analog and digital power supplies are up.
	// In this mode, the device can read and write to the registers with I2C, but no new measurements can be taken.
	// Wraps the raw register value accelStandBy.
	StandBy = Mode(accelStandBy)
	// Active Mode, continuous measurement on all three axes is enabled. In addition, the user can choose to enable:
	// Shake Detection, Tap Detection, Orientation Detection, and/or Auto-Wake/Sleep Feature and in this mode the digital analysis for
	// any of these functions is done.
	// Wraps the raw register value accelActive.
	Active = Mode(accelActive)
)
// Register addresses and register values for the sensor, per its register map.
const (
	// addr is the i2c address of this sensor
	addr = 0x4c

	// Axis output and status registers.
	accelX     = 0x00
	accelY     = 0x01
	accelZ     = 0x02
	accelTilt  = 0x03
	accelSrst  = 0x04
	accelSpcnt = 0x05
	accelIntsu = 0x06

	// Mode register and the values written to it (see StandBy / Active).
	accelMode    = 0x07
	accelStandBy = 0x00
	accelActive  = 0x01

	// sample rate register and its auto-sleep sample-rate values
	accelSr           = 0x08
	accelAutoSleep120 = 0x00
	accelAutoSleep64  = 0x01
	// 32 samples per second (default)
	accelAutoSleep32 = 0x02
	accelAutoSleep16 = 0x03
	accelAutoSleep8  = 0x04
	accelAutoSleep4  = 0x05
	accelAutoSleep2  = 0x06
	accelAutoSleep1  = 0x07

	accelPdet = 0x09
	accelPd   = 0x0A
)
// Position indicates the physical orientation of the sensor/device.
type Position int

const (
	// Unknown condition of up or down or left or right
	Unknown Position = iota
	// Left is true if in landscape mode to the left
	Left
	// Right is true if in landscape mode to the right
	Right
	// Down is true if standing vertically in inverted orientation
	Down
	// Up is true if standing vertically in normal orientation
	Up
)
// String returns the lower-case human-readable name of the position.
func (p Position) String() string {
	switch p {
	case Right:
		return "right"
	case Left:
		return "left"
	case Down:
		return "down"
	case Up:
		return "up"
	default:
		// Fix: was misspelled "unkown".
		return "unknown"
	}
} | grove/accel3xdigital/protocol.go | 0.594787 | 0.46642 | protocol.go | starcoder
package processor
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/itchyny/gojq"
"github.com/benthosdev/benthos/v4/internal/component/metrics"
"github.com/benthosdev/benthos/v4/internal/component/processor"
"github.com/benthosdev/benthos/v4/internal/docs"
"github.com/benthosdev/benthos/v4/internal/interop"
"github.com/benthosdev/benthos/v4/internal/log"
"github.com/benthosdev/benthos/v4/internal/message"
)
// init registers the "jq" processor constructor and its user-facing
// documentation with the global Constructors table under TypeJQ.
func init() {
	Constructors[TypeJQ] = TypeSpec{
		// The constructor builds the V2 jq processor and adapts it to the V1
		// processor interface expected by the registry.
		constructor: func(conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type) (processor.V1, error) {
			p, err := newJQ(conf.JQ, mgr)
			if err != nil {
				return nil, err
			}
			return processor.NewV2ToV1Processor("jq", p, mgr.Metrics()), nil
		},
		Status: docs.StatusStable,
		Categories: []string{
			"Mapping",
		},
		Summary: `
Transforms and filters messages using jq queries.`,
		Description: `
:::note Try out Bloblang
For better performance and improved capabilities try out native Benthos mapping with the [bloblang processor](/docs/components/processors/bloblang).
:::
The provided query is executed on each message, targeting either the contents
as a structured JSON value or as a raw string using the field ` + "`raw`" + `,
and the message is replaced with the query result.
Message metadata is also accessible within the query from the variable
` + "`$metadata`" + `.
This processor uses the [gojq library][gojq], and therefore does not require
jq to be installed as a dependency. However, this also means there are some
differences in how these queries are executed versus the jq cli which you can
[read about here][gojq-difference].
If the query does not emit any value then the message is filtered, if the query
returns multiple values then the resulting message will be an array containing
all values.
The full query syntax is described in [jq's documentation][jq-docs].
## Error Handling
Queries can fail, in which case the message remains unchanged, errors are
logged, and the message is flagged as having failed, allowing you to use
[standard processor error handling patterns](/docs/configuration/error_handling).`,
		Footnotes: `
[gojq]: https://github.com/itchyny/gojq
[gojq-difference]: https://github.com/itchyny/gojq#difference-to-jq
[jq-docs]: https://stedolan.github.io/jq/manual/`,
		Examples: []docs.AnnotatedExample{
			{
				Title: "Mapping",
				Summary: `
When receiving JSON documents of the form:
` + "```json" + `
{
  "locations": [
    {"name": "Seattle", "state": "WA"},
    {"name": "New York", "state": "NY"},
    {"name": "Bellevue", "state": "WA"},
    {"name": "Olympia", "state": "WA"}
  ]
}
` + "```" + `
We could collapse the location names from the state of Washington into a field ` + "`Cities`" + `:
` + "```json" + `
{"Cities": "Bellevue, Olympia, Seattle"}
` + "```" + `
With the following config:`,
				Config: `
pipeline:
  processors:
    - jq:
        query: '{Cities: .locations | map(select(.state == "WA").name) | sort | join(", ") }'
`,
			},
		},
		Config: docs.FieldComponent().WithChildren(
			docs.FieldString("query", "The jq query to filter and transform messages with."),
			docs.FieldBool("raw", "Whether to process the input as a raw string instead of as JSON.").Advanced(),
			docs.FieldBool("output_raw", "Whether to output raw text (unquoted) instead of JSON strings when the emitted values are string types.").Advanced(),
		),
	}
}
//------------------------------------------------------------------------------
// JQConfig contains configuration fields for the JQ processor.
type JQConfig struct {
	// Query is the jq program executed against each message.
	Query string `json:"query" yaml:"query"`
	// Raw, when true, feeds the message to the query as a raw string.
	Raw bool `json:"raw" yaml:"raw"`
	// OutputRaw, when true, emits string results unquoted rather than as JSON.
	OutputRaw bool `json:"output_raw" yaml:"output_raw"`
}

// NewJQConfig returns a JQConfig with default values.
func NewJQConfig() JQConfig {
	var conf JQConfig
	return conf
}
//------------------------------------------------------------------------------

// jqCompileOptions declares the variables available to every compiled query;
// $metadata carries the message's metadata map (see Process).
var jqCompileOptions = []gojq.CompilerOption{
	gojq.WithVariables([]string{"$metadata"}),
}
// jqProc executes a compiled jq program against individual messages.
type jqProc struct {
	inRaw  bool // treat the input part as a raw string rather than JSON
	outRaw bool // write results as raw text instead of JSON
	log    log.Modular
	code   *gojq.Code // the compiled jq program
}
// newJQ parses and compiles the configured jq query, returning a processor
// ready to execute it, or an error if the query is invalid.
func newJQ(conf JQConfig, mgr interop.Manager) (*jqProc, error) {
	proc := &jqProc{
		inRaw:  conf.Raw,
		outRaw: conf.OutputRaw,
		log:    mgr.Logger(),
	}
	parsed, err := gojq.Parse(conf.Query)
	if err != nil {
		return nil, fmt.Errorf("error parsing jq query: %w", err)
	}
	compiled, err := gojq.Compile(parsed, jqCompileOptions...)
	if err != nil {
		return nil, fmt.Errorf("error compiling jq query: %w", err)
	}
	proc.code = compiled
	return proc, nil
}
// getPartMetadata copies the part's metadata key/value pairs into a plain map
// so they can be exposed to the jq program as $metadata.
func (j *jqProc) getPartMetadata(part *message.Part) map[string]interface{} {
	meta := make(map[string]interface{})
	_ = part.MetaIter(func(key, value string) error {
		meta[key] = value
		return nil
	})
	return meta
}
// getPartValue extracts the query input from a part: the raw bytes as a
// string when raw is set, otherwise a deep copy of the part's parsed JSON.
func (j *jqProc) getPartValue(part *message.Part, raw bool) (obj interface{}, err error) {
	if raw {
		return string(part.Get()), nil
	}
	if obj, err = part.JSON(); err == nil {
		// Copy so the query cannot mutate the part's cached structure.
		obj, err = message.CopyJSON(obj)
	}
	if err != nil {
		j.log.Debugf("Failed to parse part into json: %v\n", err)
		return nil, err
	}
	return obj, nil
}
// Process runs the compiled jq program against a single message part, with
// the part's metadata bound to $metadata. A query that emits no values
// filters the message (nil, nil); multiple emitted values are returned as a
// single JSON array; a query error is returned so standard processor error
// handling can flag the message.
func (j *jqProc) Process(ctx context.Context, msg *message.Part) ([]*message.Part, error) {
	part := msg.Copy()
	in, err := j.getPartValue(part, j.inRaw)
	if err != nil {
		return nil, err
	}
	metadata := j.getPartMetadata(part)

	// Drain the iterator, collecting every emitted value; an emitted error
	// aborts processing of this part.
	var emitted []interface{}
	iter := j.code.Run(in, metadata)
	for {
		out, ok := iter.Next()
		if !ok {
			break
		}
		if err, ok := out.(error); ok {
			j.log.Debugf(err.Error())
			return nil, err
		}
		emitted = append(emitted, out)
	}

	if j.outRaw {
		raw, err := j.marshalRaw(emitted)
		if err != nil {
			j.log.Debugf("Failed to marshal raw text: %s", err)
			return nil, err
		}

		// Sometimes the query result is an empty string. Example:
		//   echo '{ "foo": "" }' | jq .foo
		// In that case we want pass on the empty string instead of treating it as
		// an empty message and dropping it
		if len(raw) == 0 && len(emitted) == 0 {
			return nil, nil
		}

		part.Set(raw)
		return []*message.Part{part}, nil
	} else if len(emitted) > 1 {
		part.SetJSON(emitted)
	} else if len(emitted) == 1 {
		part.SetJSON(emitted[0])
	} else {
		// Nothing emitted: filter the message.
		return nil, nil
	}

	return []*message.Part{part}, nil
}
// Close is a no-op; the processor holds no resources that need releasing.
func (*jqProc) Close(ctx context.Context) error {
	return nil
}
// marshalRaw renders the emitted jq values as raw text: string values are
// written verbatim (unquoted) while everything else is JSON-encoded, all
// concatenated in order into one byte slice.
func (j *jqProc) marshalRaw(values []interface{}) ([]byte, error) {
	var buf bytes.Buffer
	for index, el := range values {
		var chunk []byte
		if s, isString := el.(string); isString {
			chunk = []byte(s)
		} else {
			encoded, err := json.Marshal(el)
			if err != nil {
				return nil, fmt.Errorf("failed marshal JQ result at index %d: %w", index, err)
			}
			chunk = encoded
		}
		if _, err := buf.Write(chunk); err != nil {
			return nil, fmt.Errorf("failed to write JQ result at index %d: %w", index, err)
		}
	}
	return buf.Bytes(), nil
} | internal/old/processor/jq.go | 0.765418 | 0.635166 | jq.go | starcoder
package encryptor
import (
"strings"
"github.com/jrapoport/chestnut/encryptor/crypto"
)
// ChainEncryptor is an encryptor that supports a chain of other Encryptors.
// Bytes are encrypted by applying the Encryptors in FIFO order and decrypted
// by applying them in the reverse (LIFO) order.
type ChainEncryptor struct {
	id         string             // space-joined IDs of the chained encryptors
	name       string             // space-joined cipher names of the chained encryptors
	ids        []string           // per-encryptor IDs, in encryption order
	names      []string           // per-encryptor names, in encryption order
	encryption []crypto.Encryptor // encryptors in FIFO (encrypt) order
	decryption []crypto.Encryptor // same encryptors in LIFO (decrypt) order
}
// Compile-time check that ChainEncryptor satisfies crypto.Encryptor.
var _ crypto.Encryptor = (*ChainEncryptor)(nil)

// chainSep separates the individual encryptor IDs/names in ID() and Name().
const chainSep = " "
// NewChainEncryptor creates a new ChainEncryptor consisting of a chain of
// the supplied Encryptors. It returns nil when no encryptors are supplied.
func NewChainEncryptor(encryptors ...crypto.Encryptor) *ChainEncryptor {
	count := len(encryptors)
	if count == 0 {
		return nil
	}
	chain := &ChainEncryptor{
		encryption: encryptors,
		decryption: make([]crypto.Encryptor, count),
		ids:        make([]string, count),
		names:      make([]string, count),
	}
	// Decryption must undo encryption, so store the same encryptors reversed
	// (FIFO -> LIFO) while also collecting their IDs and names.
	for i, enc := range encryptors {
		chain.decryption[count-1-i] = enc
		chain.ids[i] = enc.ID()
		chain.names[i] = enc.Name()
	}
	chain.id = strings.Join(chain.ids, chainSep)
	chain.name = strings.Join(chain.names, chainSep)
	return chain
}
// ID returns a space-separated concatenation of the ids of the chained
// encryptor(s) / secrets that were used to encrypt the data (for tracking).
func (e *ChainEncryptor) ID() string {
	return e.id
}

// Name returns a space-separated concatenation of the cipher names of the
// chained encryptor(s) that were used to encrypt the data.
func (e *ChainEncryptor) Name() string {
	return e.name
}
// Encrypt returns data encrypted by applying each Encryptor in the chain in
// FIFO order, stopping at (and returning) the first error encountered.
func (e *ChainEncryptor) Encrypt(plaintext []byte) ([]byte, error) {
	data := plaintext
	for _, enc := range e.encryption {
		next, err := enc.Encrypt(data)
		if err != nil {
			return next, err
		}
		data = next
	}
	return data, nil
}
// Decrypt returns data decrypted by applying each Encryptor in reverse
// (LIFO) order, undoing Encrypt; it stops at the first error encountered.
func (e *ChainEncryptor) Decrypt(ciphertext []byte) ([]byte, error) {
	data := ciphertext
	for _, dec := range e.decryption {
		next, err := dec.Decrypt(data)
		if err != nil {
			return next, err
		}
		data = next
	}
	return data, nil
} | encryptor/chain.go | 0.716318 | 0.40342 | chain.go | starcoder
package cmdargs
import (
"strconv"
)
// Generic provides a set of methods that can be used to convert the value
// into specific types. Each type has a checked form returning (value, ok)
// and a To-prefixed form that drops the ok flag, yielding the type's zero
// value when the conversion fails.
type Generic interface {
	String() (string, bool)
	ToString() string
	Bool() (bool, bool)
	ToBool() bool
	Int() (int64, bool)
	ToInt() int64
	Uint() (uint64, bool)
	ToUint() uint64
	Float() (float64, bool)
	ToFloat() float64
}
// String is the underlying type that implements the Generic interface. It
// wraps a raw argument string and offers best-effort conversions to Go's
// basic types. (Reformatted to gofmt-clean early-return style; behavior is
// unchanged.)
type String string

// String simply returns the unaltered string value of the String datatype.
// The second return value is always true.
func (t String) String() (ret string, ok bool) {
	return string(t), true
}

// ToString behaves just like String, but omits the second return value. Returns the zero value of type string in
// case of an error.
func (t String) ToString() string {
	ret, _ := t.String()
	return ret
}

// Bool returns true for strings "t", "T", "TRUE", "true", "True" and any non-zero numeric values.
// It returns false for "f", "F", "FALSE", "false", "False" and numeric zero.
// ok indicates whether the conversion was successful.
func (t String) Bool() (ret bool, ok bool) {
	if b, err := strconv.ParseBool(string(t)); err == nil {
		return b, true
	}
	// Fall back to integers (base auto-detected from prefix), then floats.
	if i, err := strconv.ParseInt(string(t), 0, 0); err == nil {
		return i != 0, true
	}
	if f, err := strconv.ParseFloat(string(t), 64); err == nil {
		return f != 0.0, true
	}
	return
}

// ToBool behaves just like Bool, but omits the second return value. Returns the zero value of type bool in case of
// an error.
func (t String) ToBool() bool {
	ret, _ := t.Bool()
	return ret
}

// Int attempts to interpret the string as a numeric value. It takes prefixes into account to determine
// the right numeric base. Floats are truncated toward zero. Boolean strings
// are converted to 0 for "false" and 1 for "true".
func (t String) Int() (ret int64, ok bool) {
	if i, err := strconv.ParseInt(string(t), 0, 64); err == nil {
		return i, true
	}
	if f, err := strconv.ParseFloat(string(t), 64); err == nil {
		return int64(f), true
	}
	if b, err := strconv.ParseBool(string(t)); err == nil {
		if b {
			ret = 1
		}
		return ret, true
	}
	return
}

// ToInt behaves just like Int, but omits the second return value. Returns the zero value of type int64 in case of
// an error.
func (t String) ToInt() int64 {
	ret, _ := t.Int()
	return ret
}

// Uint attempts to interpret the string as an unsigned numeric value. It takes prefixes into account to determine
// the right numeric base. Negative floats are rejected. Boolean strings are
// converted to 0 for "false" and 1 for "true".
func (t String) Uint() (ret uint64, ok bool) {
	if u, err := strconv.ParseUint(string(t), 0, 64); err == nil {
		return u, true
	}
	if f, err := strconv.ParseFloat(string(t), 64); err == nil && f >= 0.0 {
		return uint64(f), true
	}
	if b, err := strconv.ParseBool(string(t)); err == nil {
		if b {
			ret = 1
		}
		return ret, true
	}
	return
}

// ToUint behaves just like Uint, but omits the second return value. Returns the zero value of type uint64 in case of
// an error.
func (t String) ToUint() uint64 {
	ret, _ := t.Uint()
	return ret
}

// Float attempts to interpret the string as a floating point value.
// Boolean strings will be converted to 0 for "false" and 1 for "true".
func (t String) Float() (ret float64, ok bool) {
	if f, err := strconv.ParseFloat(string(t), 64); err == nil {
		return f, true
	}
	if i, err := strconv.ParseInt(string(t), 0, 64); err == nil {
		return float64(i), true
	}
	if b, err := strconv.ParseBool(string(t)); err == nil {
		if b {
			ret = 1.0
		}
		return ret, true
	}
	return
}
// ToFloat behaves just like Float, but omits the second return value.
// Returns the zero value of type float64 in case of an error.
func (t String) ToFloat() float64 {
	f, _ := t.Float()
	return f
} | datatypes.go | 0.800926 | 0.514949 | datatypes.go | starcoder
package mlpack
/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_perceptron
#include <capi/perceptron.h>
#include <stdlib.h>
*/
import "C"
import "gonum.org/v1/gonum/mat"
// PerceptronOptionalParam holds the optional parameters accepted by
// Perceptron(); see that function's documentation for the meaning of each.
type PerceptronOptionalParam struct {
	InputModel    *perceptronModel // pre-trained model to load (optional)
	Labels        *mat.Dense       // labels for the training set
	MaxIterations int              // maximum perceptron iterations (default 1000)
	Test          *mat.Dense       // test set to classify
	Training      *mat.Dense       // training set
	Verbose       bool             // enable informational output
}
// PerceptronOptions returns a PerceptronOptionalParam initialized with the
// documented default values (all matrices/models unset, 1000 iterations).
func PerceptronOptions() *PerceptronOptionalParam {
	param := new(PerceptronOptionalParam)
	param.MaxIterations = 1000
	return param
}
/*
This program implements a perceptron, which is a single level neural network.
The perceptron makes its predictions based on a linear predictor function
combining a set of weights with the feature vector. The perceptron learning
rule is able to converge, given enough iterations (specified using the
"MaxIterations" parameter), if the data supplied is linearly separable. The
perceptron is parameterized by a matrix of weight vectors that denote the
numerical weights of the neural network.
This program allows loading a perceptron from a model (via the "InputModel"
parameter) or training a perceptron given training data (via the "Training"
parameter), or both those things at once. In addition, this program allows
classification on a test dataset (via the "Test" parameter) and the
classification results on the test set may be saved with the "Predictions"
output parameter. The perceptron model may be saved with the "OutputModel"
output parameter.
Note: the following parameter is deprecated and will be removed in mlpack
4.0.0: "Output".
Use "Predictions" instead of "Output".
The training data given with the "Training" option may have class labels as
its last dimension (so, if the training data is in CSV format, labels should
be the last column). Alternately, the "Labels" parameter may be used to
specify a separate matrix of labels.
All these options make it easy to train a perceptron, and then re-use that
perceptron for later classification. The invocation below trains a perceptron
on training_data with labels training_labels, and saves the model to
perceptron_model.
// Initialize optional parameters for Perceptron().
param := mlpack.PerceptronOptions()
param.Training = training_data
param.Labels = training_labels
_, perceptron_model, _ := mlpack.Perceptron(param)
Then, this model can be re-used for classification on the test data test_data.
The example below does precisely that, saving the predicted classes to
predictions.
// Initialize optional parameters for Perceptron().
param := mlpack.PerceptronOptions()
param.InputModel = &perceptron_model
param.Test = test_data
_, _, predictions := mlpack.Perceptron(param)
Note that all of the options may be specified at once: predictions may be
calculated right after training a model, and model training can occur even if
an existing perceptron model is passed with the "InputModel" parameter.
However, note that the number of classes and the dimensionality of all data
must match. So you cannot pass a perceptron model trained on 2 classes and
then re-train with a 4-class dataset. Similarly, attempting classification on
a 3-dimensional dataset with a perceptron that has been trained on 8
dimensions will cause an error.
Input parameters:
- InputModel (perceptronModel): Input perceptron model.
- Labels (mat.Dense): A matrix containing labels for the training set.
- MaxIterations (int): The maximum number of iterations the perceptron
is to be run Default value 1000.
- Test (mat.Dense): A matrix containing the test set.
- Training (mat.Dense): A matrix containing the training set.
- Verbose (bool): Display informational messages and the full list of
parameters and timers at the end of execution.
Output parameters:
- output (mat.Dense): The matrix in which the predicted labels for the
test set will be written.
- outputModel (perceptronModel): Output for trained perceptron model.
- predictions (mat.Dense): The matrix in which the predicted labels for
the test set will be written.
*/
// Perceptron invokes the mlpack perceptron program with the given optional
// parameters (see the block comment above for full documentation) and
// returns the predicted labels ("output"), the trained model, and the
// predictions.
func Perceptron(param *PerceptronOptionalParam) (*mat.Dense, perceptronModel, *mat.Dense) {
	// Reset mlpack's global parameter/timer state before configuring this call.
	resetTimers()
	enableTimers()
	disableBacktrace()
	disableVerbose()
	restoreSettings("Perceptron")

	// Detect if the parameter was passed; set if so.
	if param.InputModel != nil {
		setPerceptronModel("input_model", param.InputModel)
		setPassed("input_model")
	}

	// Detect if the parameter was passed; set if so.
	if param.Labels != nil {
		gonumToArmaUrow("labels", param.Labels)
		setPassed("labels")
	}

	// Detect if the parameter was passed; set if so (1000 is the default).
	if param.MaxIterations != 1000 {
		setParamInt("max_iterations", param.MaxIterations)
		setPassed("max_iterations")
	}

	// Detect if the parameter was passed; set if so.
	if param.Test != nil {
		gonumToArmaMat("test", param.Test)
		setPassed("test")
	}

	// Detect if the parameter was passed; set if so.
	if param.Training != nil {
		gonumToArmaMat("training", param.Training)
		setPassed("training")
	}

	// Detect if the parameter was passed; set if so.
	if param.Verbose != false {
		setParamBool("verbose", param.Verbose)
		setPassed("verbose")
		enableVerbose()
	}

	// Mark all output options as passed.
	setPassed("output")
	setPassed("output_model")
	setPassed("predictions")

	// Call the mlpack program.
	C.mlpackPerceptron()

	// Initialize result variable and get output.
	var outputPtr mlpackArma
	output := outputPtr.armaToGonumUrow("output")
	var outputModel perceptronModel
	outputModel.getPerceptronModel("output_model")
	var predictionsPtr mlpackArma
	predictions := predictionsPtr.armaToGonumUrow("predictions")
	// Clear settings.
	clearSettings()
	// Return output(s).
	return output, outputModel, predictions
} | perceptron.go | 0.763572 | 0.518729 | perceptron.go | starcoder
package videosource
import (
"image"
"math"
)
// CorrectRectangle clamps rect so that it lies within the bounds of Image i.
// An unfilled image yields the zero rectangle.
func CorrectRectangle(i Image, rect image.Rectangle) (result image.Rectangle) {
	if !i.IsFilled() {
		return
	}
	w, h := i.Width(), i.Height()
	result = rect
	if result.Min.X < 0 {
		result.Min.X = 0
	}
	if result.Min.Y < 0 {
		result.Min.Y = 0
	}
	if result.Max.X > w {
		result.Max.X = w
	}
	if result.Max.Y > h {
		result.Max.Y = h
	}
	return
}
// RectAddWidth grows rect horizontally by width pixels, distributing the
// growth as evenly as the image bounds allow, then clamps the result to i.
func RectAddWidth(i Image, rect image.Rectangle, width int) (result image.Rectangle) {
	result = CorrectRectangle(i, rect)
	if width <= 0 {
		return
	}
	roomLeft := result.Min.X
	roomRight := i.Width() - result.Max.X
	half := width / 2
	switch {
	case roomLeft >= half && roomRight >= half:
		// Enough room on both sides: split evenly.
		result.Min.X -= half
		result.Max.X += half
	case roomLeft > roomRight:
		// Right side is tight: take what it has, rest from the left.
		result.Max.X += roomRight
		result.Min.X -= width - roomRight
	default:
		// Left side is tight: take what it has, rest from the right.
		result.Min.X -= roomLeft
		result.Max.X += width - roomLeft
	}
	return CorrectRectangle(i, result)
}
// RectAddHeight grows rect vertically by height pixels, distributing the
// growth as evenly as the image bounds allow, then clamps the result to i.
func RectAddHeight(i Image, rect image.Rectangle, height int) (result image.Rectangle) {
	result = CorrectRectangle(i, rect)
	if height <= 0 {
		return
	}
	roomTop := result.Min.Y
	roomBottom := i.Height() - result.Max.Y
	half := height / 2
	switch {
	case roomTop >= half && roomBottom >= half:
		// Enough room on both sides: split evenly.
		result.Min.Y -= half
		result.Max.Y += half
	case roomTop > roomBottom:
		// Bottom side is tight: take what it has, rest from the top.
		result.Max.Y += roomBottom
		result.Min.Y -= height - roomBottom
	default:
		// Top side is tight: take what it has, rest from the bottom.
		result.Min.Y -= roomTop
		result.Max.Y += height - roomTop
	}
	return CorrectRectangle(i, result)
}
// RectScale will scale the rect as evenly as possible.
// NOTE(review): the scale factor is rounded UP to a whole number via
// math.Ceil before both corners are multiplied, so e.g. scale=1.5 multiplies
// by 2, and any 0 < scale <= 1 multiplies by 1 (a no-op apart from clamping)
// — confirm this promotion of fractional scales is intended.
func RectScale(i Image, rect image.Rectangle, scale float64) (result image.Rectangle) {
	if !i.IsFilled() {
		return
	}
	if scale <= 0.0 {
		return
	}
	scaleInt := int(math.Ceil(scale))
	result = rect
	scaledMin := result.Min.Mul(scaleInt)
	scaledMax := result.Max.Mul(scaleInt)
	result.Min = scaledMin
	result.Max = scaledMax
	result = CorrectRectangle(i, result)
	return
}
// RectPadded returns rect grown by paddingPercent percent of its own width
// and height (split across the sides where the image bounds allow), clamped
// to the image.
func RectPadded(i Image, rect image.Rectangle, paddingPercent int) (result image.Rectangle) {
	if !i.IsFilled() {
		return
	}
	result = CorrectRectangle(i, rect)
	if paddingPercent <= 0 {
		return
	}
	extraW := result.Dx() * paddingPercent / 100
	extraH := result.Dy() * paddingPercent / 100
	result = RectAddWidth(i, result, extraW)
	result = RectAddHeight(i, result, extraH)
	return
}
// RectSquare expands the shorter dimension of rect so it becomes (as close
// as the image bounds allow) a square that fits within Image i.
func RectSquare(i Image, rect image.Rectangle) (result image.Rectangle) {
	if !i.IsFilled() {
		return
	}
	result = CorrectRectangle(i, rect)
	w, h := result.Dx(), result.Dy()
	switch {
	case w > h:
		result = RectAddHeight(i, result, w-h)
	case h > w:
		result = RectAddWidth(i, result, h-w)
	}
	return
}
// RectRect widens rect toward a 16:9 aspect ratio (when it is narrower than
// that) and returns the result clamped within Image i.
func RectRect(i Image, rect image.Rectangle) (result image.Rectangle) {
	if !i.IsFilled() {
		return
	}
	result = CorrectRectangle(i, rect)
	targetWidth := result.Dy() * 16 / 9
	if w := result.Dx(); w < targetWidth {
		result = RectAddWidth(i, result, targetWidth-w)
	}
	return
}
// RectRelative translates child (expressed relative to parent's origin) into
// absolute image coordinates and clamps it to the image bounds.
func RectRelative(i Image, child image.Rectangle, parent image.Rectangle) (result image.Rectangle) {
	result = CorrectRectangle(i, child.Add(parent.Min))
	return
}
// RectOverlap returns, for each rectangle, the percentage of its own area
// covered by the intersection with the other. A zero-area rectangle reports
// zero percent.
func RectOverlap(rect1 image.Rectangle, rect2 image.Rectangle) (percentage1 int, percentage2 int) {
	inter := rect1.Intersect(rect2)
	interArea := inter.Dx() * inter.Dy()
	if area1 := rect1.Dx() * rect1.Dy(); area1 > 0 {
		percentage1 = 100 * interArea / area1
	}
	if area2 := rect2.Dx() * rect2.Dy(); area2 > 0 {
		percentage2 = 100 * interArea / area2
	}
	return
} | videosource/rect.go | 0.764628 | 0.560373 | rect.go | starcoder
package xhuman
import (
"errors"
"math"
"strconv"
"strings"
"unicode"
)
// Byte-size units as powers of 1024.
const (
	B = 1 << (10 * iota)
	KB
	MB
	GB
	TB
	PB
	EB
)

// Version returns package version
func Version() string {
	return "0.1.0"
}

// Author returns package author
func Author() string {
	return "[<NAME>](https://www.likexian.com/)"
}

// License returns package license
func License() string {
	return "Licensed under the Apache License 2.0"
}

// FormatByteSize returns a human-readable string for n bytes, using the
// largest unit the value reaches and the given number of decimal places.
func FormatByteSize(n int64, precision int) string {
	value, unit := float64(n), "B"
	switch {
	case value >= EB:
		value, unit = value/EB, "EB"
	case value >= PB:
		value, unit = value/PB, "PB"
	case value >= TB:
		value, unit = value/TB, "TB"
	case value >= GB:
		value, unit = value/GB, "GB"
	case value >= MB:
		value, unit = value/MB, "MB"
	case value >= KB:
		value, unit = value/KB, "KB"
	}
	return strconv.FormatFloat(value, 'f', precision, 64) + unit
}

// ParseByteSize parses a human byte-size string (e.g. "10", "1.5KB", "2 mb")
// and returns the size in bytes. Unit matching is case-insensitive and a
// missing unit means bytes; negative sizes and unknown units are errors.
func ParseByteSize(s string) (int64, error) {
	s = strings.TrimSpace(strings.ToUpper(s))
	i := strings.IndexFunc(s, unicode.IsLetter)
	if i == -1 {
		// No unit supplied: treat the whole string as a count of bytes.
		i = len(s)
		s += "B"
	}
	value, unit := strings.TrimSpace(s[:i]), strings.TrimSpace(s[i:])
	bytes, err := strconv.ParseFloat(value, 64)
	if err != nil {
		return 0, err
	}
	if bytes < 0 {
		return 0, errors.New("byte size string invalid")
	}
	multipliers := map[string]float64{
		"B": B,
		"K": KB, "KB": KB,
		"M": MB, "MB": MB,
		"G": GB, "GB": GB,
		"T": TB, "TB": TB,
		"P": PB, "PB": PB,
		"E": EB, "EB": EB,
	}
	mult, ok := multipliers[unit]
	if !ok {
		return 0, errors.New("byte size string invalid")
	}
	return int64(bytes * mult), nil
}
// Round returns n rounded half-away-from-zero to the given number of decimal
// places.
func Round(n float64, precision int) (r float64) {
	pow := math.Pow(10, float64(precision))
	scaled := n * pow
	_, frac := math.Modf(scaled)
	switch {
	case n >= 0 && frac >= 0.5:
		r = math.Ceil(scaled)
	case n < 0 && frac > -0.5:
		// Small negative fraction rounds up toward zero.
		r = math.Ceil(scaled)
	default:
		r = math.Floor(scaled)
	}
	return r / pow
}
// Comma formats n with the given precision and inserts thousands separators
// into the integer part, preserving a leading minus sign and any fraction.
func Comma(n float64, precision int) string {
	s := strconv.FormatFloat(n, 'f', precision, 64)
	sign := ""
	if strings.HasPrefix(s, "-") {
		sign, s = "-", s[1:]
	}
	intPart, fracPart := s, ""
	if idx := strings.Index(s, "."); idx >= 0 {
		intPart, fracPart = s[:idx], s[idx+1:]
	}
	// Peel three-digit groups off the right of the integer part.
	var groups []string
	for len(intPart) > 0 {
		cut := len(intPart) - 3
		if cut < 0 {
			cut = 0
		}
		groups = append([]string{intPart[cut:]}, groups...)
		intPart = intPart[:cut]
	}
	out := sign + strings.Join(groups, ",")
	if fracPart != "" {
		out += "." + fracPart
	}
	return out
} | xhuman/xhuman.go | 0.787768 | 0.419172 | xhuman.go | starcoder
package note
import (
"hash/fnv"
"sort"
"strconv"
"strings"
"sync"
)
// Word wraps an index into the words2048 table; sorting a slice of Word
// values (see byWord) yields an alphabetical view of the table without
// copying the strings themselves.
type Word struct {
	WordIndex uint // index into words2048
}
// sortedWords holds indices into words2048 ordered alphabetically by word;
// it is built lazily on first use by WordToNumber.
var sortedWords []Word

// sortedWordsInitialized reports whether sortedWords has been built.
var sortedWordsInitialized = false

// sortedWordsInitLock guards the one-time construction of sortedWords.
var sortedWordsInitLock sync.RWMutex
// byWord implements sort.Interface over word indices, ordering them
// alphabetically by the word text in words2048.
type byWord []Word

func (a byWord) Len() int           { return len(a) }
func (a byWord) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byWord) Less(i, j int) bool { return words2048[a[i].WordIndex] < words2048[a[j].WordIndex] }
// WordToNumber converts a single word to a number
func WordToNumber(word string) (num uint, success bool) {
// Initialize sorted words array if necessary
if !sortedWordsInitialized {
sortedWordsInitLock.Lock()
if !sortedWordsInitialized {
// Init the index array
sortedWords = make([]Word, 2048)
for i := 0; i < 2048; i++ {
sortedWords[i].WordIndex = uint(i)
}
// Sort the array
sort.Sort(byWord(sortedWords))
// We're now initialized
sortedWordsInitialized = true
}
sortedWordsInitLock.Unlock()
}
// First normalize the word
word = strings.ToLower(word)
// Do a binary chop to find the word or its insertion slot
i := sort.Search(2048, func(i int) bool { return words2048[sortedWords[i].WordIndex] >= word })
// Exit if found. (If we failed to match the result, it's an insertion slot.)
if i < 2048 && words2048[sortedWords[i].WordIndex] == word {
return sortedWords[i].WordIndex, true
}
return 0, false
}
// WordsToNumber looks up a number from two or three simple hyphen-separated
// words, reversing WordsFromNumber: the word values are recombined as
// left<<22 | middle<<11 | right (two words fill only middle and right).
// A plain decimal number string is also accepted and returned directly.
func WordsToNumber(words string) (num uint32, found bool) {
	var left, middle, right uint
	var success bool

	// For convenience, if a number is supplied just return that number. I do this so
	// that you can use this same method to parse either a number or the words to get that number.
	word := strings.Split(words, "-")
	if len(word) == 1 {
		// See if this parses cleanly as a number
		i64, err := strconv.ParseUint(words, 10, 32)
		if err == nil {
			return uint32(i64), true
		}
		return 0, false
	}

	// Convert two or three words to numbers, msb to lsb
	if len(word) == 2 {
		middle, success = WordToNumber(word[0])
		if !success {
			return 0, false
		}
		right, success = WordToNumber(word[1])
		if !success {
			return 0, false
		}
	} else {
		left, success = WordToNumber(word[0])
		if !success {
			return 0, false
		}
		middle, success = WordToNumber(word[1])
		if !success {
			return 0, false
		}
		right, success = WordToNumber(word[2])
		if !success {
			return 0, false
		}
	}

	// Map back to bit fields
	result := uint32(left) << 22
	result |= uint32(middle) << 11
	result |= uint32(right)
	return result, true
}
// WordsFromString hashes a string with the 32-bit FNV-1a function and
// converts the hash into a two- or three-word mnemonic.
func WordsFromString(in string) (out string) {
	h := fnv.New32a()
	h.Write([]byte(in))
	return WordsFromNumber(h.Sum32())
}
// WordsFromNumber converts a 32-bit number to a mnemonic of simple words:
// the top 10 bits, middle 11 bits, and low 11 bits each index words2048.
// When the top field is zero (the frequent case) only two words are emitted.
func WordsFromNumber(number uint32) string {
	high := (number >> 22) & 0x000003ff
	mid := (number >> 11) & 0x000007ff
	low := number & 0x000007ff
	if high == 0 {
		return words2048[mid] + "-" + words2048[low]
	}
	return words2048[high] + "-" + words2048[mid] + "-" + words2048[low]
}
// 2048 words, ORDERED but alphabetically unsorted
var words2048 = []string{
"act",
"add",
"age",
"ago",
"point",
"big",
"all",
"and",
"any",
"arm",
"art",
"ash",
"ask",
"bad",
"bag",
"ban",
"bar",
"bat",
"bay",
"bed",
"bee",
"beg",
"bet",
"bid",
"air",
"bit",
"bow",
"box",
"boy",
"bug",
"bus",
"buy",
"cab",
"can",
"cap",
"car",
"cat",
"cop",
"cow",
"cry",
"cue",
"cup",
"cut",
"dad",
"day",
"die",
"dig",
"dip",
"dog",
"dot",
"dry",
"due",
"ear",
"eat",
"egg",
"ego",
"end",
"era",
"etc",
"eye",
"fan",
"far",
"fat",
"fee",
"few",
"fit",
"fix",
"fly",
"fog",
"for",
"fun",
"fur",
"gap",
"gas",
"get",
"gun",
"gut",
"guy",
"gym",
"hat",
"hay",
"her",
"hey",
"him",
"hip",
"his",
"hit",
"hot",
"how",
"hug",
"huh",
"ice",
"its",
"jar",
"jaw",
"jet",
"job",
"joy",
"key",
"kid",
"kit",
"lab",
"lap",
"law",
"leg",
"let",
"lid",
"lie",
"lip",
"log",
"lot",
"low",
"mars",
"mango",
"map",
"may",
"mix",
"mom",
"mud",
"net",
"new",
"nod",
"not",
"now",
"nut",
"oak",
"odd",
"off",
"oil",
"old",
"one",
"our",
"out",
"owe",
"own",
"pad",
"pan",
"pat",
"pay",
"pen",
"pet",
"pie",
"pig",
"pin",
"pit",
"pop",
"pot",
"put",
"rat",
"raw",
"red",
"rib",
"rid",
"rip",
"row",
"run",
"say",
"see",
"set",
"she",
"shy",
"sir",
"sit",
"six",
"ski",
"sky",
"son",
"spy",
"sum",
"sun",
"tag",
"tap",
"tax",
"tea",
"ten",
"the",
"tie",
"tip",
"toe",
"top",
"toy",
"try",
"two",
"use",
"van",
"war",
"way",
"web",
"who",
"why",
"win",
"wow",
"yes",
"yet",
"you",
"able",
"acid",
"aide",
"ally",
"also",
"amid",
"area",
"army",
"atop",
"aunt",
"auto",
"away",
"baby",
"back",
"bake",
"ball",
"band",
"bank",
"bare",
"barn",
"base",
"bath",
"beam",
"bean",
"bear",
"beat",
"beef",
"beer",
"bell",
"belt",
"bend",
"best",
"bias",
"bike",
"bill",
"bind",
"bird",
"bite",
"blue",
"boat",
"body",
"boil",
"bold",
"bolt",
"bomb",
"bond",
"bone",
"book",
"boom",
"boot",
"born",
"boss",
"both",
"bowl",
"buck",
"bulb",
"bulk",
"bull",
"burn",
"bury",
"bush",
"busy",
"cage",
"cake",
"call",
"calm",
"camp",
"card",
"care",
"cart",
"case",
"cash",
"cast",
"cave",
"cell",
"chef",
"chew",
"chin",
"chip",
"chop",
"cite",
"city",
"clay",
"clip",
"club",
"clue",
"coal",
"coat",
"code",
"coin",
"cold",
"come",
"cook",
"cool",
"cope",
"copy",
"cord",
"core",
"corn",
"cost",
"coup",
"crew",
"crop",
"cure",
"cute",
"dare",
"dark",
"data",
"date",
"dawn",
"dead",
"deal",
"dear",
"debt",
"deck",
"deem",
"deep",
"deer",
"deny",
"desk",
"diet",
"dirt",
"dish",
"dock",
"doll",
"door",
"dose",
"down",
"drag",
"draw",
"drop",
"drug",
"drum",
"duck",
"dumb",
"dump",
"dust",
"duty",
"each",
"earn",
"ease",
"east",
"easy",
"echo",
"edge",
"edit",
"else",
"even",
"ever",
"evil",
"exam",
"exit",
"face",
"fact",
"fade",
"fail",
"fair",
"fall",
"fame",
"fare",
"farm",
"fast",
"fate",
"feed",
"feel",
"file",
"fill",
"film",
"find",
"fine",
"fire",
"firm",
"fish",
"five",
"flag",
"flat",
"flee",
"flip",
"flow",
"fold",
"folk",
"food",
"foot",
"fork",
"form",
"four",
"free",
"from",
"fuel",
"full",
"fund",
"gain",
"game",
"gang",
"gate",
"gaze",
"gear",
"gene",
"gift",
"girl",
"give",
"glad",
"goal",
"goat",
"gold",
"golf",
"good",
"grab",
"gray",
"grin",
"grip",
"grow",
"half",
"hall",
"hand",
"hang",
"hard",
"harm",
"hate",
"haul",
"have",
"head",
"heal",
"hear",
"heat",
"heel",
"help",
"herb",
"here",
"hero",
"hers",
"hide",
"high",
"hike",
"hill",
"hint",
"hire",
"hold",
"home",
"hook",
"hope",
"horn",
"host",
"hour",
"huge",
"hunt",
"hurt",
"icon",
"idea",
"into",
"iron",
"item",
"jail",
"jazz",
"join",
"joke",
"jump",
"jury",
"just",
"keep",
"kick",
"kilt",
"kind",
"king",
"kiss",
"knee",
"know",
"lack",
"lake",
"lamp",
"land",
"lane",
"last",
"late",
"lawn",
"lead",
"leaf",
"lean",
"leap",
"left",
"lend",
"lens",
"less",
"life",
"lift",
"like",
"limb",
"line",
"link",
"lion",
"list",
"live",
"load",
"loan",
"lock",
"long",
"look",
"loop",
"loss",
"lost",
"lots",
"loud",
"love",
"luck",
"lung",
"mail",
"main",
"make",
"mall",
"many",
"mark",
"mask",
"mass",
"mate",
"math",
"meal",
"mean",
"meat",
"meet",
"melt",
"menu",
"mere",
"mild",
"milk",
"mill",
"mind",
"mine",
"miss",
"mode",
"mood",
"moon",
"more",
"most",
"move",
"much",
"must",
"myth",
"nail",
"name",
"near",
"neat",
"neck",
"need",
"nest",
"news",
"next",
"nice",
"nine",
"none",
"noon",
"norm",
"nose",
"note",
"odds",
"okay",
"once",
"only",
"onto",
"open",
"ours",
"oven",
"over",
"pace",
"pack",
"page",
"pain",
"pair",
"pale",
"palm",
"pant",
"park",
"part",
"pass",
"past",
"path",
"peak",
"peel",
"peer",
"pick",
"pile",
"pill",
"pine",
"pink",
"pipe",
"plan",
"play",
"plea",
"plot",
"plus",
"poem",
"poet",
"poke",
"pole",
"poll",
"pond",
"pool",
"poor",
"pork",
"port",
"pose",
"post",
"pour",
"pray",
"pull",
"pump",
"pure",
"push",
"quit",
"race",
"rack",
"rage",
"rail",
"rain",
"rank",
"rare",
"rate",
"read",
"real",
"rear",
"rely",
"rent",
"rest",
"rice",
"rich",
"ride",
"ring",
"riot",
"rise",
"risk",
"road",
"rock",
"role",
"roll",
"roof",
"room",
"root",
"rope",
"rose",
"ruin",
"rule",
"rush",
"sack",
"safe",
"sail",
"sake",
"sale",
"salt",
"same",
"sand",
"save",
"scan",
"seal",
"seat",
"seed",
"seek",
"seem",
"self",
"sell",
"send",
"sexy",
"shed",
"ship",
"shoe",
"shop",
"shot",
"show",
"shut",
"side",
"sign",
"silk",
"sing",
"sink",
"site",
"size",
"skip",
"slam",
"slip",
"slot",
"slow",
"snap",
"snow",
"soak",
"soap",
"soar",
"sock",
"sofa",
"soft",
"soil",
"sole",
"some",
"song",
"soon",
"sort",
"soul",
"soup",
"spin",
"spit",
"spot",
"star",
"stay",
"stem",
"step",
"stir",
"stop",
"such",
"suck",
"suit",
"sure",
"swim",
"tail",
"take",
"tale",
"talk",
"tall",
"tank",
"tape",
"task",
"team",
"tear",
"teen",
"tell",
"tend",
"tent",
"term",
"test",
"text",
"than",
"that",
"them",
"then",
"they",
"thin",
"this",
"thus",
"tide",
"tile",
"till",
"time",
"tiny",
"tire",
"toll",
"tone",
"tool",
"toss",
"tour",
"town",
"trap",
"tray",
"tree",
"trim",
"trip",
"tube",
"tuck",
"tune",
"turn",
"twin",
"type",
"unit",
"upon",
"urge",
"used",
"user",
"vary",
"vast",
"very",
"view",
"vote",
"wage",
"wait",
"wake",
"walk",
"wall",
"want",
"warn",
"wash",
"wave",
"weak",
"wear",
"weed",
"week",
"well",
"west",
"what",
"when",
"whip",
"whom",
"wide",
"wink",
"wild",
"will",
"wind",
"wine",
"wing",
"wipe",
"wire",
"wise",
"wish",
"with",
"wolf",
"word",
"work",
"wrap",
"yard",
"yeah",
"year",
"yell",
"your",
"zone",
"true",
"about",
"above",
"actor",
"adapt",
"added",
"admit",
"adopt",
"after",
"again",
"agent",
"agree",
"ahead",
"aisle",
"alarm",
"album",
"alien",
"alike",
"alive",
"alley",
"allow",
"alone",
"along",
"alter",
"among",
"angle",
"ankle",
"apart",
"apple",
"apply",
"arena",
"argue",
"arise",
"armed",
"array",
"arrow",
"aside",
"asset",
"avoid",
"await",
"awake",
"award",
"aware",
"basic",
"beach",
"beast",
"begin",
"being",
"belly",
"below",
"bench",
"birth",
"blare",
"blade",
"bling",
"blank",
"blast",
"blend",
"bless",
"blind",
"blink",
"block",
"blond",
"blotter",
"board",
"boast",
"bonus",
"boost",
"booth",
"brain",
"brake",
"brand",
"brave",
"bread",
"break",
"brick",
"bride",
"brief",
"bring",
"broad",
"brood",
"brush",
"buddy",
"build",
"bunch",
"burst",
"buyer",
"cabin",
"cable",
"candy",
"cargo",
"carry",
"carve",
"catch",
"cause",
"cease",
"chain",
"chair",
"chaos",
"charm",
"chart",
"chase",
"cheat",
"check",
"cheek",
"cheer",
"chest",
"chief",
"child",
"chill",
"chunk",
"claim",
"class",
"clean",
"clear",
"clerk",
"click",
"cliff",
"climb",
"cling",
"clock",
"close",
"cloth",
"cloud",
"coach",
"coast",
"color",
"couch",
"could",
"count",
"court",
"cover",
"crave",
"craft",
"crash",
"crawl",
"crater",
"creek",
"crime",
"cross",
"crowd",
"crown",
"crush",
"curve",
"cycle",
"daily",
"dance",
"death",
"debut",
"delay",
"dense",
"depth",
"diary",
"dirty",
"donor",
"doubt",
"dough",
"dozen",
"draft",
"drain",
"drama",
"dream",
"dress",
"dried",
"drift",
"drill",
"drink",
"drive",
"drown",
"drunk",
"dying",
"eager",
"early",
"earth",
"salty",
"elbow",
"elder",
"elect",
"elite",
"empty",
"enact",
"enemy",
"enjoy",
"enter",
"entry",
"equal",
"equip",
"erase",
"essay",
"event",
"every",
"exact",
"exist",
"extra",
"faint",
"faith",
"fatal",
"fault",
"favor",
"fence",
"fever",
"fewer",
"fiber",
"field",
"fifth",
"fifty",
"fight",
"final",
"first",
"fixed",
"flame",
"flash",
"fleet",
"flesh",
"float",
"flood",
"floor",
"flour",
"fluid",
"focus",
"force",
"forth",
"forty",
"forum",
"found",
"frame",
"fraud",
"fresh",
"front",
"frown",
"fruit",
"fully",
"funny",
"genre",
"ghost",
"giant",
"given",
"glass",
"globe",
"glory",
"glove",
"grace",
"grade",
"grain",
"grand",
"grant",
"grape",
"grasp",
"grass",
"gravel",
"great",
"green",
"greet",
"grief",
"gross",
"group",
"guard",
"guess",
"guest",
"guide",
"guilt",
"habit",
"happy",
"harsh",
"heart",
"heavy",
"hello",
"hence",
"honey",
"honor",
"horse",
"hotel",
"house",
"human",
"humor",
"hurry",
"ideal",
"image",
"imply",
"index",
"inner",
"input",
"irony",
"issue",
"jeans",
"joint",
"judge",
"juice",
"juror",
"kneel",
"kayak",
"knock",
"known",
"label",
"labor",
"large",
"laser",
"later",
"laugh",
"layer",
"learn",
"least",
"leave",
"legal",
"lemon",
"level",
"light",
"limit",
"liver",
"lobby",
"local",
"logic",
"loose",
"lover",
"lower",
"loyal",
"lucky",
"lunch",
"magic",
"major",
"maker",
"march",
"match",
"maybe",
"mayor",
"medal",
"media",
"merit",
"metal",
"meter",
"midst",
"might",
"minor",
"mixed",
"model",
"month",
"moral",
"motor",
"mount",
"mouse",
"mouth",
"movie",
"music",
"naked",
"olive",
"cricket",
"nerve",
"never",
"jade",
"night",
"noise",
"north",
"novel",
"nurse",
"occur",
"ocean",
"offer",
"often",
"onion",
"opera",
"orbit",
"order",
"other",
"ought",
"outer",
"owner",
"paint",
"panel",
"panic",
"paper",
"party",
"pasta",
"patch",
"pause",
"phase",
"phone",
"photo",
"piano",
"piece",
"pilot",
"pitch",
"pizza",
"place",
"plain",
"plant",
"plate",
"plead",
"aim",
"porch",
"pound",
"power",
"press",
"price",
"pride",
"prime",
"print",
"prior",
"prize",
"proof",
"proud",
"prove",
"pulse",
"punch",
"purse",
"quest",
"quick",
"quiet",
"quite",
"quote",
"radar",
"radio",
"raise",
"rally",
"ranch",
"range",
"rapid",
"ratio",
"reach",
"react",
"ready",
"realm",
"rebel",
"refer",
"relax",
"reply",
"rider",
"ridge",
"rifle",
"right",
"risky",
"rival",
"river",
"robot",
"round",
"route",
"royal",
"rumor",
"rural",
"salad",
"sales",
"sauce",
"scale",
"scare",
"scene",
"scent",
"scope",
"score",
"screw",
"seize",
"sense",
"serve",
"seven",
"shade",
"shake",
"shall",
"shame",
"shape",
"share",
"shark",
"sharp",
"sheep",
"sheer",
"sheet",
"shelf",
"shell",
"shift",
"shirt",
"shock",
"shoot",
"shore",
"short",
"shout",
"shove",
"shrug",
"sight",
"silly",
"since",
"sixth",
"skill",
"skirt",
"skull",
"slave",
"sleep",
"slice",
"slide",
"slope",
"small",
"smart",
"smell",
"smile",
"smoke",
"snake",
"sneak",
"solar",
"solid",
"solve",
"sorry",
"sound",
"south",
"space",
"spare",
"spark",
"speak",
"speed",
"spell",
"spend",
"spill",
"spine",
"spite",
"split",
"spoon",
"sport",
"spray",
"squad",
"stack",
"staff",
"stage",
"stair",
"stake",
"stand",
"stare",
"start",
"state",
"steak",
"steam",
"steel",
"steep",
"steer",
"stick",
"stiff",
"still",
"stock",
"stone",
"store",
"storm",
"story",
"stove",
"straw",
"strip",
"study",
"stuff",
"style",
"sugar",
"suite",
"sunny",
"super",
"sweat",
"sweep",
"sweet",
"swell",
"swing",
"sword",
"table",
"taste",
"teach",
"thank",
"their",
"theme",
"there",
"these",
"thick",
"thigh",
"thing",
"think",
"third",
"those",
"three",
"throw",
"thumb",
"tight",
"tired",
"title",
"today",
"tooth",
"topic",
"total",
"touch",
"tough",
"towel",
"tower",
"trace",
"track",
"trade",
"trail",
"train",
"trait",
"treat",
"trend",
"trial",
"tribe",
"trick",
"troop",
"truck",
"truly",
"trunk",
"trust",
"truth",
"tumor",
"twice",
"twist",
"uncle",
"under",
"union",
"unite",
"unity",
"until",
"upper",
"upset",
"urban",
"usual",
"valid",
"value",
"video",
"virus",
"visit",
"vital",
"vocal",
"voice",
"voter",
"wagon",
"waist",
"waste",
"watch",
"water",
"weave",
"weigh",
"weird",
"whale",
"wheat",
"wheel",
"where",
"which",
"while",
"whoop",
"whole",
"whose",
"wider",
"worm",
"works",
"world",
"worry",
"worth",
"would",
"wound",
"wrist",
"write",
"wrong",
"yield",
"young",
"yours",
"youth",
"false",
"abroad",
"absorb",
"accent",
"accept",
"access",
"accuse",
"across",
"action",
"active",
"actual",
"adjust",
"admire",
"affect",
"afford",
"agency",
"agenda",
"almost",
"always",
"amount",
"animal",
"annual",
"answer",
"anyone",
"anyway",
"appear",
"around",
"arrest",
"arrive",
"artist",
"aspect",
"assert",
"assess",
"assign",
"assist",
"assume",
"assure",
"attach",
"attack",
"attend",
"author",
"ballot",
"banana",
"banker",
"barrel",
"basket",
"battle",
"beauty",
"become",
"before",
"behalf",
"behave",
"behind",
"belief",
"belong",
"beside",
"better",
"beyond",
"bitter",
"bloody",
"border",
"borrow",
"bottle",
"bounce",
"branch",
"breath",
"breeze",
"bridge",
"bright",
"broken",
"broker",
"bronze",
"brutal",
"bubble",
"bucket",
"bullet",
"bureau",
"butter",
"button",
"camera",
"campus",
"candle",
"canvas",
"carbon",
"career",
"carpet",
"carrot",
"casino",
"casual",
"cattle",
"center",
"change",
"charge",
"cheese",
"choice",
"choose",
"circle",
"client",
"clinic",
"closed",
"closet",
"coffee",
"collar",
"combat",
"comedy",
"commit",
"comply",
"cookie",
"corner",
"cotton",
"county",
"cousin",
"create",
"credit",
"crisis",
"cruise",
"custom",
"dancer",
"danger",
"deadly",
"dealer",
"debate",
"debris",
"decade",
"deeply",
"defeat",
"defend",
"define",
"degree",
"depart",
"depend",
"depict",
"deploy",
"deputy",
"derive",
"desert",
"design",
"desire",
"detail",
"detect",
"device",
"devote",
"differ",
"dining",
"dinner",
"direct",
"divide",
"doctor",
"domain",
"donate",
"double",
"drawer",
"driver",
"during",
"easily",
"eating",
"editor",
"effect",
"effort",
"either",
"eleven",
"emerge",
"empire",
"employ",
"enable",
"endure",
"energy",
"engage",
"engine",
"enough",
"enroll",
"ensure",
"entire",
"entity",
"equity",
"escape",
"estate",
"evolve",
"exceed",
"except",
"expand",
"expect",
"expert",
"export",
"expose",
"extend",
"extent",
"fabric",
"factor",
"fairly",
"family",
"famous",
"farmer",
"faster",
"father",
"fellow",
"fierce",
"figure",
"filter",
"fishy",
"finish",
"firmly",
"fiscal",
"flavor",
"flight",
"flower",
"flying",
"follow",
"forest",
"forget",
"formal",
"format",
"former",
"foster",
"fourth",
"freely",
"freeze",
"friend",
"frozen",
"future",
"galaxy",
"garage",
"garden",
"garlic",
"gather",
"gender",
"genius",
"gifted",
"glance",
"global",
"golden",
"ground",
"growth",
"guitar",
"handle",
"happen",
"hardly",
"hazard",
"health",
"heaven",
"height",
"hidden",
"highly",
"hockey",
"honest",
"hunger",
"hungry",
"hunter",
"ignore",
"immune",
"impact",
"import",
"impose",
"income",
"indeed",
"infant",
"inform",
"injure",
"injury",
"inmate",
"insect",
"inside",
"insist",
"intact",
"intend",
"intent",
"invent",
"invest",
"invite",
"island",
"itself",
"jacket",
"jungle",
"junior",
"ladder",
"lately",
"latter",
"launch",
"lawyer",
"leader",
"league",
"legacy",
"legend",
"length",
"lesson",
"letter",
"likely",
"liquid",
"listen",
"little",
"living",
"locate",
"lovely",
"mainly",
"makeup",
"manage",
"manual",
"marble",
"margin",
"marine",
"market",
"master",
"matter",
"medium",
"member",
"memory",
"mentor",
"merely",
"method",
"middle",
"minute",
"mirror",
"mobile",
"modern",
"modest",
"modify",
"moment",
"monkey",
"mostly",
"mother",
"motion",
"motive",
"museum",
"mutter",
"mutual",
"myself",
"narrow",
"nation",
"native",
"nature",
"nearby",
"nearly",
"needle",
"nobody",
"normal",
"notice",
"notion",
"number",
"object",
"obtain",
"occupy",
"office",
"online",
"oppose",
"option",
"orange",
"origin",
"others",
"outfit",
"outlet",
"output",
"oxygen",
"palace",
"parade",
"parent",
"parish",
"partly",
"patent",
"patrol",
"patron",
"pencil",
"people",
"pepper",
"period",
"permit",
"person",
"phrase",
"pickup",
"pillow",
"planet",
"player",
"please",
"plenty",
"plunge",
"pocket",
"poetry",
"policy",
"poster",
"potato",
"powder",
"prefer",
"pretty",
"priest",
"profit",
"prompt",
"proper",
"public",
"purple",
"pursue",
"puzzle",
"rabbit",
"random",
"rarely",
"rather",
"rating",
"reader",
"really",
"reason",
"recall",
"recent",
"recipe",
"record",
"reduce",
"reform",
"refuse",
"regain",
"regard",
"regime",
"region",
"reject",
"relate",
"relief",
"remain",
"remark",
"remind",
"remote",
"remove",
"rental",
"repair",
"repeat",
"report",
"rescue",
"resign",
"resist",
"resort",
"result",
"resume",
"retail",
"retain",
"retire",
"return",
"reveal",
"review",
"reward",
"rhythm",
"ribbon",
"ritual",
"rocket",
"rubber",
"ruling",
"runner",
"safely",
"safety",
"salary",
"salmon",
"sample",
"saving",
"scared",
"scheme",
"school",
"scream",
"screen",
"script",
"search",
"season",
"second",
"secret",
"sector",
"secure",
"seldom",
"select",
"seller",
"senior",
"sensor",
"series",
"settle",
"severe",
"shadow",
"shorts",
"should",
"shrimp",
"signal",
"silent",
"silver",
"simple",
"simply",
"singer",
"single",
"sister",
"sleeve",
"slight",
"slowly",
"smooth",
"soccer",
"social",
"sodium",
"soften",
"softly",
"solely",
"source",
"speech",
"sphere",
"spirit",
"spread",
"spring",
"square",
"stable",
"stance",
"statue",
"status",
"steady",
"strain",
"streak",
"stream",
"street",
"stress",
"strict",
"strike",
"string",
"stroke",
"strong",
"studio",
"stupid",
"submit",
"subtle",
"suburb",
"sudden",
"suffer",
"summer",
"summit",
"supply",
"surely",
"survey",
"switch",
"symbol",
"system",
"tackle",
"tactic",
"talent",
"target",
"temple",
"tender",
"tennis",
"thanks",
"theory",
"thirty",
"though",
"thread",
"thrive",
"throat",
"ticket",
"timber",
"timing",
"tissue",
"toilet",
"tomato",
"tonic",
"toward",
"tragic",
"trauma",
"travel",
"treaty",
"tribal",
"tunnel",
"turkey",
"twelve",
"twenty",
"unfair",
"unfold",
"unique",
"unless",
"unlike",
"update",
"useful",
"vacuum",
"valley",
"vanish",
"vendor",
"verbal",
"versus",
"vessel",
"viewer",
"virtue",
"vision",
"visual",
"volume",
"voting",
"wander",
"warmth",
"wealth",
"weapon",
"weekly",
"weight",
"widely",
"window",
"winner",
"winter",
"wisdom",
"within",
"wonder",
"wooden",
"worker",
"writer",
"yellow"}
// end of words2048
package timetable
import (
"time"
"github.com/mtneug/pkg/ulid"
)
// Type represents some category of timetables.
type Type string

const (
	// TypeJSON is a hypochronos JSON timetable.
	TypeJSON Type = "json"
)

// Spec specifies a timetable.
type Spec struct {
	// Type of the timetable.
	Type Type
	// JSONSpec for a hypochronos JSON timetable.
	JSONSpec JSONSpec
	// DefaultState if none is given by an entry.
	DefaultState string
}

// JSONSpec specifies a hypochronos JSON timetable.
type JSONSpec struct {
	// URL of the hypochronos JSON timetable.
	URL string
}

// Entry of a timetable: the resource assumes State at StartsAt.
type Entry struct {
	StartsAt time.Time
	State    string
}

// SortedEntries of a timetable, ordered ascending by StartsAt
// (required by the binary searches in Since/Until/StateAt).
type SortedEntries []Entry
// Since returns the suffix of entries whose StartsAt is at or after the
// given time. The receiver must already be sorted by time.
func (e SortedEntries) Since(t time.Time) SortedEntries {
	idx := binarySearch(e, t, 0, len(e)-1)
	switch {
	case idx == -1:
		// t precedes every entry, so all of them qualify.
		return e
	case e[idx].StartsAt.Equal(t):
		return e[idx:]
	case idx+1 < len(e):
		return e[idx+1:]
	default:
		return SortedEntries{}
	}
}
// Until returns the prefix of entries whose StartsAt is at or before the
// given time. The receiver must already be sorted by time.
func (e SortedEntries) Until(t time.Time) SortedEntries {
	idx := binarySearch(e, t, 0, len(e)-1)
	if idx < 0 {
		// t precedes every entry: nothing qualifies.
		return SortedEntries{}
	}
	return e[:idx+1]
}
// byTime implements sort.Interface to order entries chronologically
// by their start time.
type byTime []Entry

func (e byTime) Len() int           { return len(e) }
func (e byTime) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
func (e byTime) Less(i, j int) bool { return e[i].StartsAt.Before(e[j].StartsAt) }
// Timetable for resources.
type Timetable struct {
	ID   string // unique identifier (a ULID string, assigned by New)
	Spec Spec   // specification the timetable was created from
	// FilledAt — presumably when entries were last populated; it is set
	// outside this file, so confirm against the filling code.
	FilledAt time.Time

	// idSortedEntriesMap maps a resource id to its entries, which must be
	// kept sorted ascending by StartsAt for StateAt's binary search.
	idSortedEntriesMap map[string][]Entry
}

var (
	// MaxTime that can be un/marshaled.
	MaxTime = time.Date(9999, time.December, 31, 23, 59, 59, 999999999, time.UTC)
)
// New creates a new timetable for the given spec with a freshly generated
// unique ID. The entry map is left nil; lookups on it still work because
// reading a nil map is safe in Go.
func New(spec Spec) Timetable {
	return Timetable{
		ID:   ulid.New().String(),
		Spec: spec,
	}
}
// Entries returns a copy of the internal entries for the resource with the
// given id, sorted by time. The caller may mutate the result freely without
// affecting the timetable.
func (tt *Timetable) Entries(id string) SortedEntries {
	src, ok := tt.idSortedEntriesMap[id]
	if !ok {
		return SortedEntries{}
	}
	out := make(SortedEntries, len(src))
	copy(out, src)
	return out
}
// StateAt of the resource at given time.
//
// It returns the state in effect at t for the resource identified by id,
// together with the instant that state is valid until. If the resource has
// no entries, or t falls before the first entry, the spec's DefaultState is
// returned. `until` is the start of the next entry, or MaxTime when there is
// no later entry.
func (tt *Timetable) StateAt(id string, t time.Time) (state string, until time.Time) {
	entries, ok := tt.idSortedEntriesMap[id]
	if !ok {
		return tt.Spec.DefaultState, MaxTime
	}
	l := len(entries)
	// Index of the last entry starting at or before t; -1 when t precedes all.
	i := binarySearch(entries, t, 0, l-1)
	if i == -1 {
		state = tt.Spec.DefaultState
	} else {
		state = entries[i].State
	}
	// Note: when i == -1, i+1 == 0, so `until` is the first entry's start —
	// the default state lasts exactly until the first entry takes effect.
	if i+1 < l {
		until = entries[i+1].StartsAt
	} else {
		until = MaxTime
	}
	return
}
func binarySearch(entries []Entry, t time.Time, sIdx, eIdx int) int {
if eIdx < sIdx {
// before first entry
return -1
}
mIdx := (sIdx + eIdx) / 2
if entries[mIdx].StartsAt.After(t) {
// left side
return binarySearch(entries, t, sIdx, mIdx-1)
}
if mIdx == eIdx || entries[mIdx+1].StartsAt.After(t) {
// found
return mIdx
}
// right side
return binarySearch(entries, t, mIdx+1, eIdx)
} | timetable/timetable.go | 0.709321 | 0.448849 | timetable.go | starcoder |
package iso20022
// Amount of money for which goods or services are offered, sold, or bought.
// Optional elements are pointers (with `omitempty`) so that absent values
// are omitted when the struct is marshaled to XML.
type UnitPrice15 struct {
	// Type and information about a price.
	Type *TypeOfPrice9Code `xml:"Tp"`
	// Type and information about a price.
	ExtendedType *Extended350Code `xml:"XtndedTp"`
	// Type of pricing calculation method.
	PriceMethod *PriceMethod1Code `xml:"PricMtd,omitempty"`
	// Value of the price, eg, as a currency and value.
	ValueInInvestmentCurrency []*PriceValue1 `xml:"ValInInvstmtCcy"`
	// Value of the price, eg, as a currency and value.
	ValueInAlternativeCurrency []*PriceValue1 `xml:"ValInAltrntvCcy,omitempty"`
	// Indicates whether the price information can be used for the execution of a transaction.
	ForExecutionIndicator *YesNoIndicator `xml:"ForExctnInd"`
	// Indicates whether the dividend is included, ie, cum-dividend, in the price. When the dividend is not included, the price will be ex-dividend.
	CumDividendIndicator *YesNoIndicator `xml:"CumDvddInd"`
	// Ratio applied on the non-adjusted price.
	CalculationBasis *PercentageRate `xml:"ClctnBsis,omitempty"`
	// Indicates whether the price is an estimated price.
	EstimatedPriceIndicator *YesNoIndicator `xml:"EstmtdPricInd"`
	// Specifies the number of days from trade date that the counterparty on the other side of the trade should "given up" or divulged.
	NumberOfDaysAccrued *Number `xml:"NbOfDaysAcrd,omitempty"`
	// Amount included in the NAV that corresponds to gains directly or indirectly derived from interest payment in the scope of the European Directive on taxation of savings income in the form of interest payments.
	TaxableIncomePerShare *ActiveOrHistoricCurrencyAnd13DecimalAmount `xml:"TaxblIncmPerShr,omitempty"`
	// Specifies whether the fund calculates a taxable interest per share (TIS).
	TaxableIncomePerShareCalculated *TaxableIncomePerShareCalculated2Code `xml:"TaxblIncmPerShrClctd,omitempty"`
	// Specifies whether the fund calculates a taxable interest per share (TIS).
	ExtendedTaxableIncomePerShareCalculated *Extended350Code `xml:"XtndedTaxblIncmPerShrClctd,omitempty"`
	// Amount included in the dividend that corresponds to gains directly or indirectly derived from interest payment in the scope of the European Directive on taxation of savings income in the form of interest payments.
	TaxableIncomePerDividend *ActiveOrHistoricCurrencyAnd13DecimalAmount `xml:"TaxblIncmPerDvdd,omitempty"`
	// Specifies whether dividend is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June), or an income realised upon sale, a refund or redemption of shares and units, etc.
	EUDividendStatus *EUDividendStatus1Code `xml:"EUDvddSts,omitempty"`
	// Specifies whether dividend is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June), or an income realised upon sale, a refund or redemption of shares and units, etc.
	ExtendedEUDividendStatus *Extended350Code `xml:"XtndedEUDvddSts,omitempty"`
	// Amount of money associated with a service.
	ChargeDetails []*Charge15 `xml:"ChrgDtls,omitempty"`
	// Information related to taxes that are due.
	TaxLiabilityDetails []*Tax17 `xml:"TaxLbltyDtls,omitempty"`
	// Information related to taxes that are paid back.
	TaxRefundDetails []*Tax17 `xml:"TaxRfndDtls,omitempty"`
}
// The Set* helpers below convert a plain string into the field's code type
// by taking the address of the parameter copy, so each call stores an
// independent value. The Add* helpers append a fresh element and return it
// for the caller to populate.

// SetType sets Type from the raw string value of a TypeOfPrice9Code.
func (u *UnitPrice15) SetType(value string) {
	u.Type = (*TypeOfPrice9Code)(&value)
}

// SetExtendedType sets ExtendedType from the raw string value of an Extended350Code.
func (u *UnitPrice15) SetExtendedType(value string) {
	u.ExtendedType = (*Extended350Code)(&value)
}

// SetPriceMethod sets PriceMethod from the raw string value of a PriceMethod1Code.
func (u *UnitPrice15) SetPriceMethod(value string) {
	u.PriceMethod = (*PriceMethod1Code)(&value)
}

// AddValueInInvestmentCurrency appends a new PriceValue1 to
// ValueInInvestmentCurrency and returns it.
func (u *UnitPrice15) AddValueInInvestmentCurrency() *PriceValue1 {
	newValue := new(PriceValue1)
	u.ValueInInvestmentCurrency = append(u.ValueInInvestmentCurrency, newValue)
	return newValue
}

// AddValueInAlternativeCurrency appends a new PriceValue1 to
// ValueInAlternativeCurrency and returns it.
func (u *UnitPrice15) AddValueInAlternativeCurrency() *PriceValue1 {
	newValue := new(PriceValue1)
	u.ValueInAlternativeCurrency = append(u.ValueInAlternativeCurrency, newValue)
	return newValue
}

// SetForExecutionIndicator sets ForExecutionIndicator from a YesNoIndicator string.
func (u *UnitPrice15) SetForExecutionIndicator(value string) {
	u.ForExecutionIndicator = (*YesNoIndicator)(&value)
}

// SetCumDividendIndicator sets CumDividendIndicator from a YesNoIndicator string.
func (u *UnitPrice15) SetCumDividendIndicator(value string) {
	u.CumDividendIndicator = (*YesNoIndicator)(&value)
}

// SetCalculationBasis sets CalculationBasis from a PercentageRate string.
func (u *UnitPrice15) SetCalculationBasis(value string) {
	u.CalculationBasis = (*PercentageRate)(&value)
}

// SetEstimatedPriceIndicator sets EstimatedPriceIndicator from a YesNoIndicator string.
func (u *UnitPrice15) SetEstimatedPriceIndicator(value string) {
	u.EstimatedPriceIndicator = (*YesNoIndicator)(&value)
}

// SetNumberOfDaysAccrued sets NumberOfDaysAccrued from a Number string.
func (u *UnitPrice15) SetNumberOfDaysAccrued(value string) {
	u.NumberOfDaysAccrued = (*Number)(&value)
}

// SetTaxableIncomePerShare sets TaxableIncomePerShare from an amount and currency.
func (u *UnitPrice15) SetTaxableIncomePerShare(value, currency string) {
	u.TaxableIncomePerShare = NewActiveOrHistoricCurrencyAnd13DecimalAmount(value, currency)
}

// SetTaxableIncomePerShareCalculated sets TaxableIncomePerShareCalculated
// from a TaxableIncomePerShareCalculated2Code string.
func (u *UnitPrice15) SetTaxableIncomePerShareCalculated(value string) {
	u.TaxableIncomePerShareCalculated = (*TaxableIncomePerShareCalculated2Code)(&value)
}

// SetExtendedTaxableIncomePerShareCalculated sets
// ExtendedTaxableIncomePerShareCalculated from an Extended350Code string.
func (u *UnitPrice15) SetExtendedTaxableIncomePerShareCalculated(value string) {
	u.ExtendedTaxableIncomePerShareCalculated = (*Extended350Code)(&value)
}

// SetTaxableIncomePerDividend sets TaxableIncomePerDividend from an amount and currency.
func (u *UnitPrice15) SetTaxableIncomePerDividend(value, currency string) {
	u.TaxableIncomePerDividend = NewActiveOrHistoricCurrencyAnd13DecimalAmount(value, currency)
}

// SetEUDividendStatus sets EUDividendStatus from an EUDividendStatus1Code string.
func (u *UnitPrice15) SetEUDividendStatus(value string) {
	u.EUDividendStatus = (*EUDividendStatus1Code)(&value)
}

// SetExtendedEUDividendStatus sets ExtendedEUDividendStatus from an Extended350Code string.
func (u *UnitPrice15) SetExtendedEUDividendStatus(value string) {
	u.ExtendedEUDividendStatus = (*Extended350Code)(&value)
}

// AddChargeDetails appends a new Charge15 to ChargeDetails and returns it.
func (u *UnitPrice15) AddChargeDetails() *Charge15 {
	newValue := new(Charge15)
	u.ChargeDetails = append(u.ChargeDetails, newValue)
	return newValue
}

// AddTaxLiabilityDetails appends a new Tax17 to TaxLiabilityDetails and returns it.
func (u *UnitPrice15) AddTaxLiabilityDetails() *Tax17 {
	newValue := new(Tax17)
	u.TaxLiabilityDetails = append(u.TaxLiabilityDetails, newValue)
	return newValue
}
func (u *UnitPrice15) AddTaxRefundDetails() *Tax17 {
newValue := new(Tax17)
u.TaxRefundDetails = append(u.TaxRefundDetails, newValue)
return newValue
} | UnitPrice15.go | 0.805364 | 0.510496 | UnitPrice15.go | starcoder |
package day11
import (
aoc "github.com/TipsyPixie/advent-of-code-2020"
)
// state is the content of a single grid cell.
type state string

const (
	EMPTY       = state("L") // an empty seat
	OCCUPIED    = state("#") // an occupied seat
	NONEXISTENT = state(".") // floor: no seat at this position
)
// board holds the seating grid and the bookkeeping for the simulation.
type board struct {
	// alternativeCounting selects the part-2 rules: count the first seat
	// visible along each of the eight directions (skipping floor) and use
	// an empty-out threshold of 5 instead of 4.
	alternativeCounting bool
	states              [][]state // current grid, row-major
	occupationCount     int       // running count of OCCUPIED cells
}
// parseLine converts one input row of seat characters into a row of states.
func parseLine(signs string) []state {
	runes := []rune(signs)
	row := make([]state, len(runes))
	for i, r := range runes {
		row[i] = state(r)
	}
	return row
}
// countAround counts occupied seats visible from (rowIndex, columnIndex).
// By default only the eight immediate neighbours are considered; with
// alternativeCounting it instead looks along each direction, skipping
// NONEXISTENT (floor) cells, until a seat or the board edge is reached.
func (thisBoard *board) countAround(rowIndex int, columnIndex int) int {
	maxRowIndex := len(thisBoard.states) - 1
	maxColumnIndex := len(thisBoard.states[0]) - 1

	// step moves from (r, c) by (dr, dc). In alternative mode it keeps
	// moving past floor cells, stopping at the first seat or when the next
	// move would leave the board.
	step := func(r, c, dr, dc int) (int, int) {
		if !thisBoard.alternativeCounting {
			return r + dr, c + dc
		}
		for {
			nr, nc := r+dr, c+dc
			if nr < 0 || nr > maxRowIndex || nc < 0 || nc > maxColumnIndex {
				return r, c
			}
			r, c = nr, nc
			if thisBoard.states[r][c] != NONEXISTENT {
				return r, c
			}
		}
	}

	directions := [8][2]int{
		{-1, 0}, {1, 0}, {0, -1}, {0, 1},
		{-1, -1}, {-1, 1}, {1, -1}, {1, 1},
	}
	occupied := 0
	for _, d := range directions {
		// Only follow directions whose first step stays on the board,
		// mirroring the original per-direction boundary guards.
		nr, nc := rowIndex+d[0], columnIndex+d[1]
		if nr < 0 || nr > maxRowIndex || nc < 0 || nc > maxColumnIndex {
			continue
		}
		if r, c := step(rowIndex, columnIndex, d[0], d[1]); thisBoard.states[r][c] == OCCUPIED {
			occupied++
		}
	}
	return occupied
}
// getNextState computes the next generation's state for a single cell:
// an EMPTY seat with no visible occupied neighbours becomes OCCUPIED, and
// an OCCUPIED seat with at least the threshold of visible occupied
// neighbours (4, or 5 under the alternative rules) becomes EMPTY.
// Floor cells and all other seats are unchanged.
func (thisBoard *board) getNextState(rowIndex int, columnIndex int) state {
	threshold := 4
	if thisBoard.alternativeCounting {
		threshold = 5
	}
	current := thisBoard.states[rowIndex][columnIndex]
	if current == EMPTY && thisBoard.countAround(rowIndex, columnIndex) == 0 {
		return OCCUPIED
	}
	if current == OCCUPIED && thisBoard.countAround(rowIndex, columnIndex) >= threshold {
		return EMPTY
	}
	return current
}
// proceed advances the board by one generation (all cells update
// simultaneously against the old grid), keeps occupationCount in sync,
// and reports whether any cell changed.
func (thisBoard *board) proceed() (changed bool) {
	next := make([][]state, len(thisBoard.states))
	for r := range thisBoard.states {
		next[r] = make([]state, len(thisBoard.states[r]))
		for c := range thisBoard.states[r] {
			updated := thisBoard.getNextState(r, c)
			next[r][c] = updated
			if updated == thisBoard.states[r][c] {
				continue
			}
			changed = true
			// Only EMPTY<->OCCUPIED transitions occur, so adjust the count.
			if updated == OCCUPIED {
				thisBoard.occupationCount++
			} else {
				thisBoard.occupationCount--
			}
		}
	}
	thisBoard.states = next
	return changed
}
// solve reads the seat layout from inputPath, runs the simulation until a
// generation produces no changes, and returns the occupied-seat count at
// that fixed point. alternativeCounting selects the part-2 visibility rules.
func solve(inputPath string, alternativeCounting bool) (int, error) {
	input, err := aoc.FromFile(inputPath)
	if err != nil {
		return 0, err
	}
	defer func() { _ = input.Close() }()
	states := make([][]state, 0, 64)
	// Loop while there is a line to consume or an error to surface;
	// ReadLine reports ok=false at end of input.
	for line, ok, err := input.ReadLine(); ok || err != nil; line, ok, err = input.ReadLine() {
		if err != nil {
			return 0, err
		}
		states = append(states, parseLine(line))
	}
	// occupationCount starts at 0 — assumes the initial grid contains no
	// OCCUPIED seats (the puzzle input is only 'L' and '.'); TODO confirm.
	tempBoard := board{
		alternativeCounting: alternativeCounting,
		states:              states,
		occupationCount:     0,
	}
	// Step generations until the board stabilises.
	for changed := tempBoard.proceed(); changed; changed = tempBoard.proceed() {
	}
	return tempBoard.occupationCount, nil
}
// solvePart1 solves part 1: immediate-neighbour counting with an
// empty-out threshold of 4.
func solvePart1(inputPath string) (int, error) {
	return solve(inputPath, false)
}
func solvePart2(inputPath string) (int, error) {
return solve(inputPath, true)
} | day11/day11.go | 0.510496 | 0.429549 | day11.go | starcoder |
package execution
import (
"reflect"
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
// Gt performs an elementwise greater-than comparison (a > b) between the
// data in a and b, both interpreted as element type t, storing boolean
// results in retVal. Operands reported as scalar by isScalar are
// broadcast against the other operand via the SV (scalar-vector) and VS
// (vector-scalar) kernel variants. Unsupported element types return an
// error. This function follows the mechanically generated dispatch
// pattern used throughout this package.
func (e E) Gt(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// Exactly one scalar operand yields a vector result, which cannot be
	// stored into a scalar retVal.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			GtI(at, bt, rt)
		case as && !bs:
			GtSVI(at[0], bt, rt)
		case !as && bs:
			GtVSI(at, bt[0], rt)
		default:
			GtI(at, bt, rt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			GtI8(at, bt, rt)
		case as && !bs:
			GtSVI8(at[0], bt, rt)
		case !as && bs:
			GtVSI8(at, bt[0], rt)
		default:
			GtI8(at, bt, rt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			GtI16(at, bt, rt)
		case as && !bs:
			GtSVI16(at[0], bt, rt)
		case !as && bs:
			GtVSI16(at, bt[0], rt)
		default:
			GtI16(at, bt, rt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			GtI32(at, bt, rt)
		case as && !bs:
			GtSVI32(at[0], bt, rt)
		case !as && bs:
			GtVSI32(at, bt[0], rt)
		default:
			GtI32(at, bt, rt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			GtI64(at, bt, rt)
		case as && !bs:
			GtSVI64(at[0], bt, rt)
		case !as && bs:
			GtVSI64(at, bt[0], rt)
		default:
			GtI64(at, bt, rt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			GtU(at, bt, rt)
		case as && !bs:
			GtSVU(at[0], bt, rt)
		case !as && bs:
			GtVSU(at, bt[0], rt)
		default:
			GtU(at, bt, rt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			GtU8(at, bt, rt)
		case as && !bs:
			GtSVU8(at[0], bt, rt)
		case !as && bs:
			GtVSU8(at, bt[0], rt)
		default:
			GtU8(at, bt, rt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			GtU16(at, bt, rt)
		case as && !bs:
			GtSVU16(at[0], bt, rt)
		case !as && bs:
			GtVSU16(at, bt[0], rt)
		default:
			GtU16(at, bt, rt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			GtU32(at, bt, rt)
		case as && !bs:
			GtSVU32(at[0], bt, rt)
		case !as && bs:
			GtVSU32(at, bt[0], rt)
		default:
			GtU32(at, bt, rt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			GtU64(at, bt, rt)
		case as && !bs:
			GtSVU64(at[0], bt, rt)
		case !as && bs:
			GtVSU64(at, bt[0], rt)
		default:
			GtU64(at, bt, rt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			GtF32(at, bt, rt)
		case as && !bs:
			GtSVF32(at[0], bt, rt)
		case !as && bs:
			GtVSF32(at, bt[0], rt)
		default:
			GtF32(at, bt, rt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			GtF64(at, bt, rt)
		case as && !bs:
			GtSVF64(at[0], bt, rt)
		case !as && bs:
			GtVSF64(at, bt[0], rt)
		default:
			GtF64(at, bt, rt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			GtStr(at, bt, rt)
		case as && !bs:
			GtSVStr(at[0], bt, rt)
		case !as && bs:
			GtVSStr(at, bt[0], rt)
		default:
			GtStr(at, bt, rt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Gt", t)
	}
}
// Gte performs an elementwise greater-than-or-equal comparison (a >= b)
// between the data in a and b, both interpreted as element type t,
// storing boolean results in retVal. Scalar operands (as reported by
// isScalar) are broadcast via the SV (scalar-vector) and VS
// (vector-scalar) kernel variants. Unsupported element types return an
// error.
func (e E) Gte(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// Exactly one scalar operand yields a vector result, which cannot be
	// stored into a scalar retVal.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			GteI(at, bt, rt)
		case as && !bs:
			GteSVI(at[0], bt, rt)
		case !as && bs:
			GteVSI(at, bt[0], rt)
		default:
			GteI(at, bt, rt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			GteI8(at, bt, rt)
		case as && !bs:
			GteSVI8(at[0], bt, rt)
		case !as && bs:
			GteVSI8(at, bt[0], rt)
		default:
			GteI8(at, bt, rt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			GteI16(at, bt, rt)
		case as && !bs:
			GteSVI16(at[0], bt, rt)
		case !as && bs:
			GteVSI16(at, bt[0], rt)
		default:
			GteI16(at, bt, rt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			GteI32(at, bt, rt)
		case as && !bs:
			GteSVI32(at[0], bt, rt)
		case !as && bs:
			GteVSI32(at, bt[0], rt)
		default:
			GteI32(at, bt, rt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			GteI64(at, bt, rt)
		case as && !bs:
			GteSVI64(at[0], bt, rt)
		case !as && bs:
			GteVSI64(at, bt[0], rt)
		default:
			GteI64(at, bt, rt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			GteU(at, bt, rt)
		case as && !bs:
			GteSVU(at[0], bt, rt)
		case !as && bs:
			GteVSU(at, bt[0], rt)
		default:
			GteU(at, bt, rt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			GteU8(at, bt, rt)
		case as && !bs:
			GteSVU8(at[0], bt, rt)
		case !as && bs:
			GteVSU8(at, bt[0], rt)
		default:
			GteU8(at, bt, rt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			GteU16(at, bt, rt)
		case as && !bs:
			GteSVU16(at[0], bt, rt)
		case !as && bs:
			GteVSU16(at, bt[0], rt)
		default:
			GteU16(at, bt, rt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			GteU32(at, bt, rt)
		case as && !bs:
			GteSVU32(at[0], bt, rt)
		case !as && bs:
			GteVSU32(at, bt[0], rt)
		default:
			GteU32(at, bt, rt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			GteU64(at, bt, rt)
		case as && !bs:
			GteSVU64(at[0], bt, rt)
		case !as && bs:
			GteVSU64(at, bt[0], rt)
		default:
			GteU64(at, bt, rt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			GteF32(at, bt, rt)
		case as && !bs:
			GteSVF32(at[0], bt, rt)
		case !as && bs:
			GteVSF32(at, bt[0], rt)
		default:
			GteF32(at, bt, rt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			GteF64(at, bt, rt)
		case as && !bs:
			GteSVF64(at[0], bt, rt)
		case !as && bs:
			GteVSF64(at, bt[0], rt)
		default:
			GteF64(at, bt, rt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			GteStr(at, bt, rt)
		case as && !bs:
			GteSVStr(at[0], bt, rt)
		case !as && bs:
			GteVSStr(at, bt[0], rt)
		default:
			GteStr(at, bt, rt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Gte", t)
	}
}
// Lt performs an elementwise less-than comparison (a < b) between the
// data in a and b, both interpreted as element type t, storing boolean
// results in retVal. Scalar operands (as reported by isScalar) are
// broadcast via the SV (scalar-vector) and VS (vector-scalar) kernel
// variants. Unsupported element types return an error.
func (e E) Lt(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// Exactly one scalar operand yields a vector result, which cannot be
	// stored into a scalar retVal.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			LtI(at, bt, rt)
		case as && !bs:
			LtSVI(at[0], bt, rt)
		case !as && bs:
			LtVSI(at, bt[0], rt)
		default:
			LtI(at, bt, rt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			LtI8(at, bt, rt)
		case as && !bs:
			LtSVI8(at[0], bt, rt)
		case !as && bs:
			LtVSI8(at, bt[0], rt)
		default:
			LtI8(at, bt, rt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			LtI16(at, bt, rt)
		case as && !bs:
			LtSVI16(at[0], bt, rt)
		case !as && bs:
			LtVSI16(at, bt[0], rt)
		default:
			LtI16(at, bt, rt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			LtI32(at, bt, rt)
		case as && !bs:
			LtSVI32(at[0], bt, rt)
		case !as && bs:
			LtVSI32(at, bt[0], rt)
		default:
			LtI32(at, bt, rt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			LtI64(at, bt, rt)
		case as && !bs:
			LtSVI64(at[0], bt, rt)
		case !as && bs:
			LtVSI64(at, bt[0], rt)
		default:
			LtI64(at, bt, rt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			LtU(at, bt, rt)
		case as && !bs:
			LtSVU(at[0], bt, rt)
		case !as && bs:
			LtVSU(at, bt[0], rt)
		default:
			LtU(at, bt, rt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			LtU8(at, bt, rt)
		case as && !bs:
			LtSVU8(at[0], bt, rt)
		case !as && bs:
			LtVSU8(at, bt[0], rt)
		default:
			LtU8(at, bt, rt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			LtU16(at, bt, rt)
		case as && !bs:
			LtSVU16(at[0], bt, rt)
		case !as && bs:
			LtVSU16(at, bt[0], rt)
		default:
			LtU16(at, bt, rt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			LtU32(at, bt, rt)
		case as && !bs:
			LtSVU32(at[0], bt, rt)
		case !as && bs:
			LtVSU32(at, bt[0], rt)
		default:
			LtU32(at, bt, rt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			LtU64(at, bt, rt)
		case as && !bs:
			LtSVU64(at[0], bt, rt)
		case !as && bs:
			LtVSU64(at, bt[0], rt)
		default:
			LtU64(at, bt, rt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			LtF32(at, bt, rt)
		case as && !bs:
			LtSVF32(at[0], bt, rt)
		case !as && bs:
			LtVSF32(at, bt[0], rt)
		default:
			LtF32(at, bt, rt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			LtF64(at, bt, rt)
		case as && !bs:
			LtSVF64(at[0], bt, rt)
		case !as && bs:
			LtVSF64(at, bt[0], rt)
		default:
			LtF64(at, bt, rt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			LtStr(at, bt, rt)
		case as && !bs:
			LtSVStr(at[0], bt, rt)
		case !as && bs:
			LtVSStr(at, bt[0], rt)
		default:
			LtStr(at, bt, rt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Lt", t)
	}
}
// Lte performs an elementwise less-than-or-equal comparison (a <= b)
// between the data in a and b, both interpreted as element type t,
// storing boolean results in retVal. Scalar operands (as reported by
// isScalar) are broadcast via the SV (scalar-vector) and VS
// (vector-scalar) kernel variants. Unsupported element types return an
// error.
func (e E) Lte(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// Exactly one scalar operand yields a vector result, which cannot be
	// stored into a scalar retVal.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			LteI(at, bt, rt)
		case as && !bs:
			LteSVI(at[0], bt, rt)
		case !as && bs:
			LteVSI(at, bt[0], rt)
		default:
			LteI(at, bt, rt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			LteI8(at, bt, rt)
		case as && !bs:
			LteSVI8(at[0], bt, rt)
		case !as && bs:
			LteVSI8(at, bt[0], rt)
		default:
			LteI8(at, bt, rt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			LteI16(at, bt, rt)
		case as && !bs:
			LteSVI16(at[0], bt, rt)
		case !as && bs:
			LteVSI16(at, bt[0], rt)
		default:
			LteI16(at, bt, rt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			LteI32(at, bt, rt)
		case as && !bs:
			LteSVI32(at[0], bt, rt)
		case !as && bs:
			LteVSI32(at, bt[0], rt)
		default:
			LteI32(at, bt, rt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			LteI64(at, bt, rt)
		case as && !bs:
			LteSVI64(at[0], bt, rt)
		case !as && bs:
			LteVSI64(at, bt[0], rt)
		default:
			LteI64(at, bt, rt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			LteU(at, bt, rt)
		case as && !bs:
			LteSVU(at[0], bt, rt)
		case !as && bs:
			LteVSU(at, bt[0], rt)
		default:
			LteU(at, bt, rt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			LteU8(at, bt, rt)
		case as && !bs:
			LteSVU8(at[0], bt, rt)
		case !as && bs:
			LteVSU8(at, bt[0], rt)
		default:
			LteU8(at, bt, rt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			LteU16(at, bt, rt)
		case as && !bs:
			LteSVU16(at[0], bt, rt)
		case !as && bs:
			LteVSU16(at, bt[0], rt)
		default:
			LteU16(at, bt, rt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			LteU32(at, bt, rt)
		case as && !bs:
			LteSVU32(at[0], bt, rt)
		case !as && bs:
			LteVSU32(at, bt[0], rt)
		default:
			LteU32(at, bt, rt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			LteU64(at, bt, rt)
		case as && !bs:
			LteSVU64(at[0], bt, rt)
		case !as && bs:
			LteVSU64(at, bt[0], rt)
		default:
			LteU64(at, bt, rt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			LteF32(at, bt, rt)
		case as && !bs:
			LteSVF32(at[0], bt, rt)
		case !as && bs:
			LteVSF32(at, bt[0], rt)
		default:
			LteF32(at, bt, rt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			LteF64(at, bt, rt)
		case as && !bs:
			LteSVF64(at[0], bt, rt)
		case !as && bs:
			LteVSF64(at, bt[0], rt)
		default:
			LteF64(at, bt, rt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			LteStr(at, bt, rt)
		case as && !bs:
			LteSVStr(at[0], bt, rt)
		case !as && bs:
			LteVSStr(at, bt[0], rt)
		default:
			LteStr(at, bt, rt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Lte", t)
	}
}
// Eq performs an elementwise equality comparison (a == b) between the
// data in a and b, both interpreted as element type t, storing boolean
// results in retVal. Unlike the ordering comparisons (Gt/Gte/Lt/Lte),
// equality also supports Bool, Uintptr, Complex64/128 and UnsafePointer
// element types. Scalar operands (as reported by isScalar) are broadcast
// via the SV (scalar-vector) and VS (vector-scalar) kernel variants.
// Unsupported element types return an error.
func (e E) Eq(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// Exactly one scalar operand yields a vector result, which cannot be
	// stored into a scalar retVal.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Bool:
		at := a.Bools()
		bt := b.Bools()
		switch {
		case as && bs:
			EqB(at, bt, rt)
		case as && !bs:
			EqSVB(at[0], bt, rt)
		case !as && bs:
			EqVSB(at, bt[0], rt)
		default:
			EqB(at, bt, rt)
		}
		return
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			EqI(at, bt, rt)
		case as && !bs:
			EqSVI(at[0], bt, rt)
		case !as && bs:
			EqVSI(at, bt[0], rt)
		default:
			EqI(at, bt, rt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			EqI8(at, bt, rt)
		case as && !bs:
			EqSVI8(at[0], bt, rt)
		case !as && bs:
			EqVSI8(at, bt[0], rt)
		default:
			EqI8(at, bt, rt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			EqI16(at, bt, rt)
		case as && !bs:
			EqSVI16(at[0], bt, rt)
		case !as && bs:
			EqVSI16(at, bt[0], rt)
		default:
			EqI16(at, bt, rt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			EqI32(at, bt, rt)
		case as && !bs:
			EqSVI32(at[0], bt, rt)
		case !as && bs:
			EqVSI32(at, bt[0], rt)
		default:
			EqI32(at, bt, rt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			EqI64(at, bt, rt)
		case as && !bs:
			EqSVI64(at[0], bt, rt)
		case !as && bs:
			EqVSI64(at, bt[0], rt)
		default:
			EqI64(at, bt, rt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			EqU(at, bt, rt)
		case as && !bs:
			EqSVU(at[0], bt, rt)
		case !as && bs:
			EqVSU(at, bt[0], rt)
		default:
			EqU(at, bt, rt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			EqU8(at, bt, rt)
		case as && !bs:
			EqSVU8(at[0], bt, rt)
		case !as && bs:
			EqVSU8(at, bt[0], rt)
		default:
			EqU8(at, bt, rt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			EqU16(at, bt, rt)
		case as && !bs:
			EqSVU16(at[0], bt, rt)
		case !as && bs:
			EqVSU16(at, bt[0], rt)
		default:
			EqU16(at, bt, rt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			EqU32(at, bt, rt)
		case as && !bs:
			EqSVU32(at[0], bt, rt)
		case !as && bs:
			EqVSU32(at, bt[0], rt)
		default:
			EqU32(at, bt, rt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			EqU64(at, bt, rt)
		case as && !bs:
			EqSVU64(at[0], bt, rt)
		case !as && bs:
			EqVSU64(at, bt[0], rt)
		default:
			EqU64(at, bt, rt)
		}
		return
	case Uintptr:
		at := a.Uintptrs()
		bt := b.Uintptrs()
		switch {
		case as && bs:
			EqUintptr(at, bt, rt)
		case as && !bs:
			EqSVUintptr(at[0], bt, rt)
		case !as && bs:
			EqVSUintptr(at, bt[0], rt)
		default:
			EqUintptr(at, bt, rt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			EqF32(at, bt, rt)
		case as && !bs:
			EqSVF32(at[0], bt, rt)
		case !as && bs:
			EqVSF32(at, bt[0], rt)
		default:
			EqF32(at, bt, rt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			EqF64(at, bt, rt)
		case as && !bs:
			EqSVF64(at[0], bt, rt)
		case !as && bs:
			EqVSF64(at, bt[0], rt)
		default:
			EqF64(at, bt, rt)
		}
		return
	case Complex64:
		at := a.Complex64s()
		bt := b.Complex64s()
		switch {
		case as && bs:
			EqC64(at, bt, rt)
		case as && !bs:
			EqSVC64(at[0], bt, rt)
		case !as && bs:
			EqVSC64(at, bt[0], rt)
		default:
			EqC64(at, bt, rt)
		}
		return
	case Complex128:
		at := a.Complex128s()
		bt := b.Complex128s()
		switch {
		case as && bs:
			EqC128(at, bt, rt)
		case as && !bs:
			EqSVC128(at[0], bt, rt)
		case !as && bs:
			EqVSC128(at, bt[0], rt)
		default:
			EqC128(at, bt, rt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			EqStr(at, bt, rt)
		case as && !bs:
			EqSVStr(at[0], bt, rt)
		case !as && bs:
			EqVSStr(at, bt[0], rt)
		default:
			EqStr(at, bt, rt)
		}
		return
	case UnsafePointer:
		at := a.UnsafePointers()
		bt := b.UnsafePointers()
		switch {
		case as && bs:
			EqUnsafePointer(at, bt, rt)
		case as && !bs:
			EqSVUnsafePointer(at[0], bt, rt)
		case !as && bs:
			EqVSUnsafePointer(at, bt[0], rt)
		default:
			EqUnsafePointer(at, bt, rt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Eq", t)
	}
}
// Ne performs an elementwise inequality comparison (a != b) between the
// data in a and b, both interpreted as element type t, storing boolean
// results in retVal. Like Eq, it supports Bool, Uintptr, Complex64/128
// and UnsafePointer in addition to the numeric and string types. Scalar
// operands (as reported by isScalar) are broadcast via the SV
// (scalar-vector) and VS (vector-scalar) kernel variants. Unsupported
// element types return an error.
func (e E) Ne(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// Exactly one scalar operand yields a vector result, which cannot be
	// stored into a scalar retVal.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Bool:
		at := a.Bools()
		bt := b.Bools()
		switch {
		case as && bs:
			NeB(at, bt, rt)
		case as && !bs:
			NeSVB(at[0], bt, rt)
		case !as && bs:
			NeVSB(at, bt[0], rt)
		default:
			NeB(at, bt, rt)
		}
		return
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			NeI(at, bt, rt)
		case as && !bs:
			NeSVI(at[0], bt, rt)
		case !as && bs:
			NeVSI(at, bt[0], rt)
		default:
			NeI(at, bt, rt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			NeI8(at, bt, rt)
		case as && !bs:
			NeSVI8(at[0], bt, rt)
		case !as && bs:
			NeVSI8(at, bt[0], rt)
		default:
			NeI8(at, bt, rt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			NeI16(at, bt, rt)
		case as && !bs:
			NeSVI16(at[0], bt, rt)
		case !as && bs:
			NeVSI16(at, bt[0], rt)
		default:
			NeI16(at, bt, rt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			NeI32(at, bt, rt)
		case as && !bs:
			NeSVI32(at[0], bt, rt)
		case !as && bs:
			NeVSI32(at, bt[0], rt)
		default:
			NeI32(at, bt, rt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			NeI64(at, bt, rt)
		case as && !bs:
			NeSVI64(at[0], bt, rt)
		case !as && bs:
			NeVSI64(at, bt[0], rt)
		default:
			NeI64(at, bt, rt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			NeU(at, bt, rt)
		case as && !bs:
			NeSVU(at[0], bt, rt)
		case !as && bs:
			NeVSU(at, bt[0], rt)
		default:
			NeU(at, bt, rt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			NeU8(at, bt, rt)
		case as && !bs:
			NeSVU8(at[0], bt, rt)
		case !as && bs:
			NeVSU8(at, bt[0], rt)
		default:
			NeU8(at, bt, rt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			NeU16(at, bt, rt)
		case as && !bs:
			NeSVU16(at[0], bt, rt)
		case !as && bs:
			NeVSU16(at, bt[0], rt)
		default:
			NeU16(at, bt, rt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			NeU32(at, bt, rt)
		case as && !bs:
			NeSVU32(at[0], bt, rt)
		case !as && bs:
			NeVSU32(at, bt[0], rt)
		default:
			NeU32(at, bt, rt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			NeU64(at, bt, rt)
		case as && !bs:
			NeSVU64(at[0], bt, rt)
		case !as && bs:
			NeVSU64(at, bt[0], rt)
		default:
			NeU64(at, bt, rt)
		}
		return
	case Uintptr:
		at := a.Uintptrs()
		bt := b.Uintptrs()
		switch {
		case as && bs:
			NeUintptr(at, bt, rt)
		case as && !bs:
			NeSVUintptr(at[0], bt, rt)
		case !as && bs:
			NeVSUintptr(at, bt[0], rt)
		default:
			NeUintptr(at, bt, rt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			NeF32(at, bt, rt)
		case as && !bs:
			NeSVF32(at[0], bt, rt)
		case !as && bs:
			NeVSF32(at, bt[0], rt)
		default:
			NeF32(at, bt, rt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			NeF64(at, bt, rt)
		case as && !bs:
			NeSVF64(at[0], bt, rt)
		case !as && bs:
			NeVSF64(at, bt[0], rt)
		default:
			NeF64(at, bt, rt)
		}
		return
	case Complex64:
		at := a.Complex64s()
		bt := b.Complex64s()
		switch {
		case as && bs:
			NeC64(at, bt, rt)
		case as && !bs:
			NeSVC64(at[0], bt, rt)
		case !as && bs:
			NeVSC64(at, bt[0], rt)
		default:
			NeC64(at, bt, rt)
		}
		return
	case Complex128:
		at := a.Complex128s()
		bt := b.Complex128s()
		switch {
		case as && bs:
			NeC128(at, bt, rt)
		case as && !bs:
			NeSVC128(at[0], bt, rt)
		case !as && bs:
			NeVSC128(at, bt[0], rt)
		default:
			NeC128(at, bt, rt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			NeStr(at, bt, rt)
		case as && !bs:
			NeSVStr(at[0], bt, rt)
		case !as && bs:
			NeVSStr(at, bt[0], rt)
		default:
			NeStr(at, bt, rt)
		}
		return
	case UnsafePointer:
		at := a.UnsafePointers()
		bt := b.UnsafePointers()
		switch {
		case as && bs:
			NeUnsafePointer(at, bt, rt)
		case as && !bs:
			NeSVUnsafePointer(at[0], bt, rt)
		case !as && bs:
			NeVSUnsafePointer(at, bt[0], rt)
		default:
			NeUnsafePointer(at, bt, rt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Ne", t)
	}
}
// GtSame performs an elementwise greater-than comparison (a > b) where
// the result is expressed in the operands' own element type instead of
// bools (hence "Same"). NOTE(review): the destination operand is
// determined by the GtSame* kernels, which are not visible here —
// confirm against the generated kernel definitions. Scalar operands (as
// reported by isScalar) dispatch to the SV (scalar-vector) / VS
// (vector-scalar) variants. Unsupported element types return an error.
//
// Fix: the default branch previously reported "Unsupported type %v for
// Gt", misattributing the failure to Gt; it now names GtSame.
func (e E) GtSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			GtSameI(at, bt)
		case as && !bs:
			GtSameSVI(at[0], bt)
		case !as && bs:
			GtSameVSI(at, bt[0])
		default:
			GtSameI(at, bt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			GtSameI8(at, bt)
		case as && !bs:
			GtSameSVI8(at[0], bt)
		case !as && bs:
			GtSameVSI8(at, bt[0])
		default:
			GtSameI8(at, bt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			GtSameI16(at, bt)
		case as && !bs:
			GtSameSVI16(at[0], bt)
		case !as && bs:
			GtSameVSI16(at, bt[0])
		default:
			GtSameI16(at, bt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			GtSameI32(at, bt)
		case as && !bs:
			GtSameSVI32(at[0], bt)
		case !as && bs:
			GtSameVSI32(at, bt[0])
		default:
			GtSameI32(at, bt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			GtSameI64(at, bt)
		case as && !bs:
			GtSameSVI64(at[0], bt)
		case !as && bs:
			GtSameVSI64(at, bt[0])
		default:
			GtSameI64(at, bt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			GtSameU(at, bt)
		case as && !bs:
			GtSameSVU(at[0], bt)
		case !as && bs:
			GtSameVSU(at, bt[0])
		default:
			GtSameU(at, bt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			GtSameU8(at, bt)
		case as && !bs:
			GtSameSVU8(at[0], bt)
		case !as && bs:
			GtSameVSU8(at, bt[0])
		default:
			GtSameU8(at, bt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			GtSameU16(at, bt)
		case as && !bs:
			GtSameSVU16(at[0], bt)
		case !as && bs:
			GtSameVSU16(at, bt[0])
		default:
			GtSameU16(at, bt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			GtSameU32(at, bt)
		case as && !bs:
			GtSameSVU32(at[0], bt)
		case !as && bs:
			GtSameVSU32(at, bt[0])
		default:
			GtSameU32(at, bt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			GtSameU64(at, bt)
		case as && !bs:
			GtSameSVU64(at[0], bt)
		case !as && bs:
			GtSameVSU64(at, bt[0])
		default:
			GtSameU64(at, bt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			GtSameF32(at, bt)
		case as && !bs:
			GtSameSVF32(at[0], bt)
		case !as && bs:
			GtSameVSF32(at, bt[0])
		default:
			GtSameF32(at, bt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			GtSameF64(at, bt)
		case as && !bs:
			GtSameSVF64(at[0], bt)
		case !as && bs:
			GtSameVSF64(at, bt[0])
		default:
			GtSameF64(at, bt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			GtSameStr(at, bt)
		case as && !bs:
			GtSameSVStr(at[0], bt)
		case !as && bs:
			GtSameVSStr(at, bt[0])
		default:
			GtSameStr(at, bt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for GtSame", t)
	}
}
// GteSame performs an elementwise greater-than-or-equal comparison
// (a >= b) where the result is expressed in the operands' own element
// type instead of bools (hence "Same"). NOTE(review): the destination
// operand is determined by the GteSame* kernels, which are not visible
// here — confirm against the generated kernel definitions. Scalar
// operands (as reported by isScalar) dispatch to the SV / VS variants.
// Unsupported element types return an error.
//
// Fix: the default branch previously reported "Unsupported type %v for
// Gte", misattributing the failure to Gte; it now names GteSame.
func (e E) GteSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			GteSameI(at, bt)
		case as && !bs:
			GteSameSVI(at[0], bt)
		case !as && bs:
			GteSameVSI(at, bt[0])
		default:
			GteSameI(at, bt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			GteSameI8(at, bt)
		case as && !bs:
			GteSameSVI8(at[0], bt)
		case !as && bs:
			GteSameVSI8(at, bt[0])
		default:
			GteSameI8(at, bt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			GteSameI16(at, bt)
		case as && !bs:
			GteSameSVI16(at[0], bt)
		case !as && bs:
			GteSameVSI16(at, bt[0])
		default:
			GteSameI16(at, bt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			GteSameI32(at, bt)
		case as && !bs:
			GteSameSVI32(at[0], bt)
		case !as && bs:
			GteSameVSI32(at, bt[0])
		default:
			GteSameI32(at, bt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			GteSameI64(at, bt)
		case as && !bs:
			GteSameSVI64(at[0], bt)
		case !as && bs:
			GteSameVSI64(at, bt[0])
		default:
			GteSameI64(at, bt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			GteSameU(at, bt)
		case as && !bs:
			GteSameSVU(at[0], bt)
		case !as && bs:
			GteSameVSU(at, bt[0])
		default:
			GteSameU(at, bt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			GteSameU8(at, bt)
		case as && !bs:
			GteSameSVU8(at[0], bt)
		case !as && bs:
			GteSameVSU8(at, bt[0])
		default:
			GteSameU8(at, bt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			GteSameU16(at, bt)
		case as && !bs:
			GteSameSVU16(at[0], bt)
		case !as && bs:
			GteSameVSU16(at, bt[0])
		default:
			GteSameU16(at, bt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			GteSameU32(at, bt)
		case as && !bs:
			GteSameSVU32(at[0], bt)
		case !as && bs:
			GteSameVSU32(at, bt[0])
		default:
			GteSameU32(at, bt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			GteSameU64(at, bt)
		case as && !bs:
			GteSameSVU64(at[0], bt)
		case !as && bs:
			GteSameVSU64(at, bt[0])
		default:
			GteSameU64(at, bt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			GteSameF32(at, bt)
		case as && !bs:
			GteSameSVF32(at[0], bt)
		case !as && bs:
			GteSameVSF32(at, bt[0])
		default:
			GteSameF32(at, bt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			GteSameF64(at, bt)
		case as && !bs:
			GteSameSVF64(at[0], bt)
		case !as && bs:
			GteSameVSF64(at, bt[0])
		default:
			GteSameF64(at, bt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			GteSameStr(at, bt)
		case as && !bs:
			GteSameSVStr(at[0], bt)
		case !as && bs:
			GteSameVSStr(at, bt[0])
		default:
			GteSameStr(at, bt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for GteSame", t)
	}
}
// LtSame performs an elementwise less-than comparison (a < b) where the
// result is expressed in the operands' own element type instead of bools
// (hence "Same"). NOTE(review): the destination operand is determined by
// the LtSame* kernels, which are not visible here — confirm against the
// generated kernel definitions. Scalar operands (as reported by
// isScalar) dispatch to the SV / VS variants. Unsupported element types
// return an error.
//
// Fix: the default branch previously reported "Unsupported type %v for
// Lt", misattributing the failure to Lt; it now names LtSame.
func (e E) LtSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			LtSameI(at, bt)
		case as && !bs:
			LtSameSVI(at[0], bt)
		case !as && bs:
			LtSameVSI(at, bt[0])
		default:
			LtSameI(at, bt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			LtSameI8(at, bt)
		case as && !bs:
			LtSameSVI8(at[0], bt)
		case !as && bs:
			LtSameVSI8(at, bt[0])
		default:
			LtSameI8(at, bt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			LtSameI16(at, bt)
		case as && !bs:
			LtSameSVI16(at[0], bt)
		case !as && bs:
			LtSameVSI16(at, bt[0])
		default:
			LtSameI16(at, bt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			LtSameI32(at, bt)
		case as && !bs:
			LtSameSVI32(at[0], bt)
		case !as && bs:
			LtSameVSI32(at, bt[0])
		default:
			LtSameI32(at, bt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			LtSameI64(at, bt)
		case as && !bs:
			LtSameSVI64(at[0], bt)
		case !as && bs:
			LtSameVSI64(at, bt[0])
		default:
			LtSameI64(at, bt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			LtSameU(at, bt)
		case as && !bs:
			LtSameSVU(at[0], bt)
		case !as && bs:
			LtSameVSU(at, bt[0])
		default:
			LtSameU(at, bt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			LtSameU8(at, bt)
		case as && !bs:
			LtSameSVU8(at[0], bt)
		case !as && bs:
			LtSameVSU8(at, bt[0])
		default:
			LtSameU8(at, bt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			LtSameU16(at, bt)
		case as && !bs:
			LtSameSVU16(at[0], bt)
		case !as && bs:
			LtSameVSU16(at, bt[0])
		default:
			LtSameU16(at, bt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			LtSameU32(at, bt)
		case as && !bs:
			LtSameSVU32(at[0], bt)
		case !as && bs:
			LtSameVSU32(at, bt[0])
		default:
			LtSameU32(at, bt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			LtSameU64(at, bt)
		case as && !bs:
			LtSameSVU64(at[0], bt)
		case !as && bs:
			LtSameVSU64(at, bt[0])
		default:
			LtSameU64(at, bt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			LtSameF32(at, bt)
		case as && !bs:
			LtSameSVF32(at[0], bt)
		case !as && bs:
			LtSameVSF32(at, bt[0])
		default:
			LtSameF32(at, bt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			LtSameF64(at, bt)
		case as && !bs:
			LtSameSVF64(at[0], bt)
		case !as && bs:
			LtSameVSF64(at, bt[0])
		default:
			LtSameF64(at, bt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			LtSameStr(at, bt)
		case as && !bs:
			LtSameSVStr(at[0], bt)
		case !as && bs:
			LtSameVSStr(at, bt[0])
		default:
			LtSameStr(at, bt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for LtSame", t)
	}
}
// LteSame performs an elementwise less-than-or-equal comparison (a <= b)
// where the result is expressed in the operands' own element type
// instead of bools (hence "Same"). NOTE(review): the destination operand
// is determined by the LteSame* kernels, which are not visible here —
// confirm against the generated kernel definitions. Scalar operands (as
// reported by isScalar) dispatch to the SV / VS variants. Unsupported
// element types return an error.
//
// Fix: the default branch previously reported "Unsupported type %v for
// Lte", misattributing the failure to Lte; it now names LteSame.
func (e E) LteSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) {
	as := isScalar(a, t)
	bs := isScalar(b, t)
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			LteSameI(at, bt)
		case as && !bs:
			LteSameSVI(at[0], bt)
		case !as && bs:
			LteSameVSI(at, bt[0])
		default:
			LteSameI(at, bt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			LteSameI8(at, bt)
		case as && !bs:
			LteSameSVI8(at[0], bt)
		case !as && bs:
			LteSameVSI8(at, bt[0])
		default:
			LteSameI8(at, bt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			LteSameI16(at, bt)
		case as && !bs:
			LteSameSVI16(at[0], bt)
		case !as && bs:
			LteSameVSI16(at, bt[0])
		default:
			LteSameI16(at, bt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			LteSameI32(at, bt)
		case as && !bs:
			LteSameSVI32(at[0], bt)
		case !as && bs:
			LteSameVSI32(at, bt[0])
		default:
			LteSameI32(at, bt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			LteSameI64(at, bt)
		case as && !bs:
			LteSameSVI64(at[0], bt)
		case !as && bs:
			LteSameVSI64(at, bt[0])
		default:
			LteSameI64(at, bt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			LteSameU(at, bt)
		case as && !bs:
			LteSameSVU(at[0], bt)
		case !as && bs:
			LteSameVSU(at, bt[0])
		default:
			LteSameU(at, bt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			LteSameU8(at, bt)
		case as && !bs:
			LteSameSVU8(at[0], bt)
		case !as && bs:
			LteSameVSU8(at, bt[0])
		default:
			LteSameU8(at, bt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			LteSameU16(at, bt)
		case as && !bs:
			LteSameSVU16(at[0], bt)
		case !as && bs:
			LteSameVSU16(at, bt[0])
		default:
			LteSameU16(at, bt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			LteSameU32(at, bt)
		case as && !bs:
			LteSameSVU32(at[0], bt)
		case !as && bs:
			LteSameVSU32(at, bt[0])
		default:
			LteSameU32(at, bt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			LteSameU64(at, bt)
		case as && !bs:
			LteSameSVU64(at[0], bt)
		case !as && bs:
			LteSameVSU64(at, bt[0])
		default:
			LteSameU64(at, bt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			LteSameF32(at, bt)
		case as && !bs:
			LteSameSVF32(at[0], bt)
		case !as && bs:
			LteSameVSF32(at, bt[0])
		default:
			LteSameF32(at, bt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			LteSameF64(at, bt)
		case as && !bs:
			LteSameSVF64(at[0], bt)
		case !as && bs:
			LteSameVSF64(at, bt[0])
		default:
			LteSameF64(at, bt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			LteSameStr(at, bt)
		case as && !bs:
			LteSameSVStr(at[0], bt)
		case !as && bs:
			LteSameVSStr(at, bt[0])
		default:
			LteSameStr(at, bt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for LteSame", t)
	}
}
// EqSame dispatches an in-place elementwise == comparison between a and b,
// which must both hold elements of type t. The *Same kernel family takes no
// separate result buffer, so the outcome is written back into an operand
// (presumably a — the kernels are generated elsewhere; confirm there).
// Returns an error only for an unsupported element type.
func (e E) EqSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) {
	as := isScalar(a, t) // a holds exactly one element of t
	bs := isScalar(b, t) // b holds exactly one element of t
	switch t {
	case Bool:
		at := a.Bools()
		bt := b.Bools()
		switch {
		// NOTE: the scalar-scalar branch uses the same vector-vector kernel
		// as the default branch — a length-1 slice is just a short vector.
		case as && bs:
			EqSameB(at, bt)
		case as && !bs:
			EqSameSVB(at[0], bt) // scalar-vector
		case !as && bs:
			EqSameVSB(at, bt[0]) // vector-scalar
		default:
			EqSameB(at, bt)
		}
		return
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			EqSameI(at, bt)
		case as && !bs:
			EqSameSVI(at[0], bt)
		case !as && bs:
			EqSameVSI(at, bt[0])
		default:
			EqSameI(at, bt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			EqSameI8(at, bt)
		case as && !bs:
			EqSameSVI8(at[0], bt)
		case !as && bs:
			EqSameVSI8(at, bt[0])
		default:
			EqSameI8(at, bt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			EqSameI16(at, bt)
		case as && !bs:
			EqSameSVI16(at[0], bt)
		case !as && bs:
			EqSameVSI16(at, bt[0])
		default:
			EqSameI16(at, bt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			EqSameI32(at, bt)
		case as && !bs:
			EqSameSVI32(at[0], bt)
		case !as && bs:
			EqSameVSI32(at, bt[0])
		default:
			EqSameI32(at, bt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			EqSameI64(at, bt)
		case as && !bs:
			EqSameSVI64(at[0], bt)
		case !as && bs:
			EqSameVSI64(at, bt[0])
		default:
			EqSameI64(at, bt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			EqSameU(at, bt)
		case as && !bs:
			EqSameSVU(at[0], bt)
		case !as && bs:
			EqSameVSU(at, bt[0])
		default:
			EqSameU(at, bt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			EqSameU8(at, bt)
		case as && !bs:
			EqSameSVU8(at[0], bt)
		case !as && bs:
			EqSameVSU8(at, bt[0])
		default:
			EqSameU8(at, bt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			EqSameU16(at, bt)
		case as && !bs:
			EqSameSVU16(at[0], bt)
		case !as && bs:
			EqSameVSU16(at, bt[0])
		default:
			EqSameU16(at, bt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			EqSameU32(at, bt)
		case as && !bs:
			EqSameSVU32(at[0], bt)
		case !as && bs:
			EqSameVSU32(at, bt[0])
		default:
			EqSameU32(at, bt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			EqSameU64(at, bt)
		case as && !bs:
			EqSameSVU64(at[0], bt)
		case !as && bs:
			EqSameVSU64(at, bt[0])
		default:
			EqSameU64(at, bt)
		}
		return
	case Uintptr:
		at := a.Uintptrs()
		bt := b.Uintptrs()
		switch {
		case as && bs:
			EqSameUintptr(at, bt)
		case as && !bs:
			EqSameSVUintptr(at[0], bt)
		case !as && bs:
			EqSameVSUintptr(at, bt[0])
		default:
			EqSameUintptr(at, bt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			EqSameF32(at, bt)
		case as && !bs:
			EqSameSVF32(at[0], bt)
		case !as && bs:
			EqSameVSF32(at, bt[0])
		default:
			EqSameF32(at, bt)
		}
		return
	case Complex64:
		at := a.Complex64s()
		bt := b.Complex64s()
		switch {
		case as && bs:
			EqSameC64(at, bt)
		case as && !bs:
			EqSameSVC64(at[0], bt)
		case !as && bs:
			EqSameVSC64(at, bt[0])
		default:
			EqSameC64(at, bt)
		}
		return
	case Complex128:
		at := a.Complex128s()
		bt := b.Complex128s()
		switch {
		case as && bs:
			EqSameC128(at, bt)
		case as && !bs:
			EqSameSVC128(at[0], bt)
		case !as && bs:
			EqSameVSC128(at, bt[0])
		default:
			EqSameC128(at, bt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			EqSameStr(at, bt)
		case as && !bs:
			EqSameSVStr(at[0], bt)
		case !as && bs:
			EqSameVSStr(at, bt[0])
		default:
			EqSameStr(at, bt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Eq", t)
	}
}
// NeSame dispatches an in-place elementwise != comparison between a and b,
// which must both hold elements of type t. The *Same kernel family takes no
// separate result buffer, so the outcome is written back into an operand
// (presumably a — the kernels are generated elsewhere; confirm there).
// Returns an error only for an unsupported element type.
func (e E) NeSame(t reflect.Type, a *storage.Header, b *storage.Header) (err error) {
	as := isScalar(a, t) // a holds exactly one element of t
	bs := isScalar(b, t) // b holds exactly one element of t
	switch t {
	case Bool:
		at := a.Bools()
		bt := b.Bools()
		switch {
		// NOTE: the scalar-scalar branch uses the same vector-vector kernel
		// as the default branch — a length-1 slice is just a short vector.
		case as && bs:
			NeSameB(at, bt)
		case as && !bs:
			NeSameSVB(at[0], bt) // scalar-vector
		case !as && bs:
			NeSameVSB(at, bt[0]) // vector-scalar
		default:
			NeSameB(at, bt)
		}
		return
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			NeSameI(at, bt)
		case as && !bs:
			NeSameSVI(at[0], bt)
		case !as && bs:
			NeSameVSI(at, bt[0])
		default:
			NeSameI(at, bt)
		}
		return
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			NeSameI8(at, bt)
		case as && !bs:
			NeSameSVI8(at[0], bt)
		case !as && bs:
			NeSameVSI8(at, bt[0])
		default:
			NeSameI8(at, bt)
		}
		return
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			NeSameI16(at, bt)
		case as && !bs:
			NeSameSVI16(at[0], bt)
		case !as && bs:
			NeSameVSI16(at, bt[0])
		default:
			NeSameI16(at, bt)
		}
		return
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			NeSameI32(at, bt)
		case as && !bs:
			NeSameSVI32(at[0], bt)
		case !as && bs:
			NeSameVSI32(at, bt[0])
		default:
			NeSameI32(at, bt)
		}
		return
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			NeSameI64(at, bt)
		case as && !bs:
			NeSameSVI64(at[0], bt)
		case !as && bs:
			NeSameVSI64(at, bt[0])
		default:
			NeSameI64(at, bt)
		}
		return
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			NeSameU(at, bt)
		case as && !bs:
			NeSameSVU(at[0], bt)
		case !as && bs:
			NeSameVSU(at, bt[0])
		default:
			NeSameU(at, bt)
		}
		return
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			NeSameU8(at, bt)
		case as && !bs:
			NeSameSVU8(at[0], bt)
		case !as && bs:
			NeSameVSU8(at, bt[0])
		default:
			NeSameU8(at, bt)
		}
		return
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			NeSameU16(at, bt)
		case as && !bs:
			NeSameSVU16(at[0], bt)
		case !as && bs:
			NeSameVSU16(at, bt[0])
		default:
			NeSameU16(at, bt)
		}
		return
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			NeSameU32(at, bt)
		case as && !bs:
			NeSameSVU32(at[0], bt)
		case !as && bs:
			NeSameVSU32(at, bt[0])
		default:
			NeSameU32(at, bt)
		}
		return
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			NeSameU64(at, bt)
		case as && !bs:
			NeSameSVU64(at[0], bt)
		case !as && bs:
			NeSameVSU64(at, bt[0])
		default:
			NeSameU64(at, bt)
		}
		return
	case Uintptr:
		at := a.Uintptrs()
		bt := b.Uintptrs()
		switch {
		case as && bs:
			NeSameUintptr(at, bt)
		case as && !bs:
			NeSameSVUintptr(at[0], bt)
		case !as && bs:
			NeSameVSUintptr(at, bt[0])
		default:
			NeSameUintptr(at, bt)
		}
		return
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			NeSameF32(at, bt)
		case as && !bs:
			NeSameSVF32(at[0], bt)
		case !as && bs:
			NeSameVSF32(at, bt[0])
		default:
			NeSameF32(at, bt)
		}
		return
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			NeSameF64(at, bt)
		case as && !bs:
			NeSameSVF64(at[0], bt)
		case !as && bs:
			NeSameVSF64(at, bt[0])
		default:
			NeSameF64(at, bt)
		}
		return
	case Complex64:
		at := a.Complex64s()
		bt := b.Complex64s()
		switch {
		case as && bs:
			NeSameC64(at, bt)
		case as && !bs:
			NeSameSVC64(at[0], bt)
		case !as && bs:
			NeSameVSC64(at, bt[0])
		default:
			NeSameC64(at, bt)
		}
		return
	case Complex128:
		at := a.Complex128s()
		bt := b.Complex128s()
		switch {
		case as && bs:
			NeSameC128(at, bt)
		case as && !bs:
			NeSameSVC128(at[0], bt)
		case !as && bs:
			NeSameVSC128(at, bt[0])
		default:
			NeSameC128(at, bt)
		}
		return
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			NeSameStr(at, bt)
		case as && !bs:
			NeSameSVStr(at[0], bt)
		case !as && bs:
			NeSameVSStr(at, bt[0])
		default:
			NeSameStr(at, bt)
		}
		return
	default:
		return errors.Errorf("Unsupported type %v for Ne", t)
	}
}
// GtIter dispatches an elementwise > comparison between a and b using the
// supplied iterators for strided/masked traversal, writing boolean results
// into retVal. Scalar operands skip their iterator (the SV/VS kernel
// variants); when both operands are scalar the plain non-iterator kernel is
// used. Returns an error for an unsupported element type, or when retVal is
// scalar while exactly one operand is a vector (no single bool can hold a
// vector of results).
func (e E) GtIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) {
	as := isScalar(a, t) // a holds exactly one element of t
	bs := isScalar(b, t) // b holds exactly one element of t
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// A scalar result cannot receive a vector's worth of comparisons.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			GtI(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVI(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSI(at, bt[0], rt, ait, rit)
		default:
			return GtIterI(at, bt, rt, ait, bit, rit)
		}
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			GtI8(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVI8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSI8(at, bt[0], rt, ait, rit)
		default:
			return GtIterI8(at, bt, rt, ait, bit, rit)
		}
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			GtI16(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVI16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSI16(at, bt[0], rt, ait, rit)
		default:
			return GtIterI16(at, bt, rt, ait, bit, rit)
		}
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			GtI32(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVI32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSI32(at, bt[0], rt, ait, rit)
		default:
			return GtIterI32(at, bt, rt, ait, bit, rit)
		}
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			GtI64(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVI64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSI64(at, bt[0], rt, ait, rit)
		default:
			return GtIterI64(at, bt, rt, ait, bit, rit)
		}
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			GtU(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVU(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSU(at, bt[0], rt, ait, rit)
		default:
			return GtIterU(at, bt, rt, ait, bit, rit)
		}
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			GtU8(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVU8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSU8(at, bt[0], rt, ait, rit)
		default:
			return GtIterU8(at, bt, rt, ait, bit, rit)
		}
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			GtU16(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVU16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSU16(at, bt[0], rt, ait, rit)
		default:
			return GtIterU16(at, bt, rt, ait, bit, rit)
		}
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			GtU32(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVU32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSU32(at, bt[0], rt, ait, rit)
		default:
			return GtIterU32(at, bt, rt, ait, bit, rit)
		}
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			GtU64(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVU64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSU64(at, bt[0], rt, ait, rit)
		default:
			return GtIterU64(at, bt, rt, ait, bit, rit)
		}
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			GtF32(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVF32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSF32(at, bt[0], rt, ait, rit)
		default:
			return GtIterF32(at, bt, rt, ait, bit, rit)
		}
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			GtF64(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVF64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSF64(at, bt[0], rt, ait, rit)
		default:
			return GtIterF64(at, bt, rt, ait, bit, rit)
		}
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			GtStr(at, bt, rt)
			return
		case as && !bs:
			return GtIterSVStr(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GtIterVSStr(at, bt[0], rt, ait, rit)
		default:
			return GtIterStr(at, bt, rt, ait, bit, rit)
		}
	default:
		return errors.Errorf("Unsupported type %v for Gt", t)
	}
}
// GteIter dispatches an elementwise >= comparison between a and b using the
// supplied iterators for strided/masked traversal, writing boolean results
// into retVal. Scalar operands skip their iterator (the SV/VS kernel
// variants); when both operands are scalar the plain non-iterator kernel is
// used. Returns an error for an unsupported element type, or when retVal is
// scalar while exactly one operand is a vector (no single bool can hold a
// vector of results).
func (e E) GteIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) {
	as := isScalar(a, t) // a holds exactly one element of t
	bs := isScalar(b, t) // b holds exactly one element of t
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// A scalar result cannot receive a vector's worth of comparisons.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			GteI(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVI(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSI(at, bt[0], rt, ait, rit)
		default:
			return GteIterI(at, bt, rt, ait, bit, rit)
		}
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			GteI8(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVI8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSI8(at, bt[0], rt, ait, rit)
		default:
			return GteIterI8(at, bt, rt, ait, bit, rit)
		}
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			GteI16(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVI16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSI16(at, bt[0], rt, ait, rit)
		default:
			return GteIterI16(at, bt, rt, ait, bit, rit)
		}
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			GteI32(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVI32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSI32(at, bt[0], rt, ait, rit)
		default:
			return GteIterI32(at, bt, rt, ait, bit, rit)
		}
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			GteI64(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVI64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSI64(at, bt[0], rt, ait, rit)
		default:
			return GteIterI64(at, bt, rt, ait, bit, rit)
		}
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			GteU(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVU(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSU(at, bt[0], rt, ait, rit)
		default:
			return GteIterU(at, bt, rt, ait, bit, rit)
		}
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			GteU8(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVU8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSU8(at, bt[0], rt, ait, rit)
		default:
			return GteIterU8(at, bt, rt, ait, bit, rit)
		}
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			GteU16(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVU16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSU16(at, bt[0], rt, ait, rit)
		default:
			return GteIterU16(at, bt, rt, ait, bit, rit)
		}
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			GteU32(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVU32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSU32(at, bt[0], rt, ait, rit)
		default:
			return GteIterU32(at, bt, rt, ait, bit, rit)
		}
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			GteU64(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVU64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSU64(at, bt[0], rt, ait, rit)
		default:
			return GteIterU64(at, bt, rt, ait, bit, rit)
		}
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			GteF32(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVF32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSF32(at, bt[0], rt, ait, rit)
		default:
			return GteIterF32(at, bt, rt, ait, bit, rit)
		}
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			GteF64(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVF64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSF64(at, bt[0], rt, ait, rit)
		default:
			return GteIterF64(at, bt, rt, ait, bit, rit)
		}
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			GteStr(at, bt, rt)
			return
		case as && !bs:
			return GteIterSVStr(at[0], bt, rt, bit, rit)
		case !as && bs:
			return GteIterVSStr(at, bt[0], rt, ait, rit)
		default:
			return GteIterStr(at, bt, rt, ait, bit, rit)
		}
	default:
		return errors.Errorf("Unsupported type %v for Gte", t)
	}
}
// LtIter dispatches an elementwise < comparison between a and b using the
// supplied iterators for strided/masked traversal, writing boolean results
// into retVal. Scalar operands skip their iterator (the SV/VS kernel
// variants); when both operands are scalar the plain non-iterator kernel is
// used. Returns an error for an unsupported element type, or when retVal is
// scalar while exactly one operand is a vector (no single bool can hold a
// vector of results).
func (e E) LtIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) {
	as := isScalar(a, t) // a holds exactly one element of t
	bs := isScalar(b, t) // b holds exactly one element of t
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// A scalar result cannot receive a vector's worth of comparisons.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			LtI(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVI(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSI(at, bt[0], rt, ait, rit)
		default:
			return LtIterI(at, bt, rt, ait, bit, rit)
		}
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			LtI8(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVI8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSI8(at, bt[0], rt, ait, rit)
		default:
			return LtIterI8(at, bt, rt, ait, bit, rit)
		}
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			LtI16(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVI16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSI16(at, bt[0], rt, ait, rit)
		default:
			return LtIterI16(at, bt, rt, ait, bit, rit)
		}
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			LtI32(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVI32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSI32(at, bt[0], rt, ait, rit)
		default:
			return LtIterI32(at, bt, rt, ait, bit, rit)
		}
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			LtI64(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVI64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSI64(at, bt[0], rt, ait, rit)
		default:
			return LtIterI64(at, bt, rt, ait, bit, rit)
		}
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			LtU(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVU(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSU(at, bt[0], rt, ait, rit)
		default:
			return LtIterU(at, bt, rt, ait, bit, rit)
		}
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			LtU8(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVU8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSU8(at, bt[0], rt, ait, rit)
		default:
			return LtIterU8(at, bt, rt, ait, bit, rit)
		}
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			LtU16(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVU16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSU16(at, bt[0], rt, ait, rit)
		default:
			return LtIterU16(at, bt, rt, ait, bit, rit)
		}
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			LtU32(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVU32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSU32(at, bt[0], rt, ait, rit)
		default:
			return LtIterU32(at, bt, rt, ait, bit, rit)
		}
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			LtU64(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVU64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSU64(at, bt[0], rt, ait, rit)
		default:
			return LtIterU64(at, bt, rt, ait, bit, rit)
		}
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			LtF32(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVF32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSF32(at, bt[0], rt, ait, rit)
		default:
			return LtIterF32(at, bt, rt, ait, bit, rit)
		}
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			LtF64(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVF64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSF64(at, bt[0], rt, ait, rit)
		default:
			return LtIterF64(at, bt, rt, ait, bit, rit)
		}
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			LtStr(at, bt, rt)
			return
		case as && !bs:
			return LtIterSVStr(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LtIterVSStr(at, bt[0], rt, ait, rit)
		default:
			return LtIterStr(at, bt, rt, ait, bit, rit)
		}
	default:
		return errors.Errorf("Unsupported type %v for Lt", t)
	}
}
// LteIter dispatches an elementwise <= comparison between a and b using the
// supplied iterators for strided/masked traversal, writing boolean results
// into retVal. Scalar operands skip their iterator (the SV/VS kernel
// variants); when both operands are scalar the plain non-iterator kernel is
// used. Returns an error for an unsupported element type, or when retVal is
// scalar while exactly one operand is a vector (no single bool can hold a
// vector of results).
func (e E) LteIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) {
	as := isScalar(a, t) // a holds exactly one element of t
	bs := isScalar(b, t) // b holds exactly one element of t
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// A scalar result cannot receive a vector's worth of comparisons.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			LteI(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVI(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSI(at, bt[0], rt, ait, rit)
		default:
			return LteIterI(at, bt, rt, ait, bit, rit)
		}
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			LteI8(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVI8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSI8(at, bt[0], rt, ait, rit)
		default:
			return LteIterI8(at, bt, rt, ait, bit, rit)
		}
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			LteI16(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVI16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSI16(at, bt[0], rt, ait, rit)
		default:
			return LteIterI16(at, bt, rt, ait, bit, rit)
		}
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			LteI32(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVI32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSI32(at, bt[0], rt, ait, rit)
		default:
			return LteIterI32(at, bt, rt, ait, bit, rit)
		}
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			LteI64(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVI64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSI64(at, bt[0], rt, ait, rit)
		default:
			return LteIterI64(at, bt, rt, ait, bit, rit)
		}
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			LteU(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVU(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSU(at, bt[0], rt, ait, rit)
		default:
			return LteIterU(at, bt, rt, ait, bit, rit)
		}
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			LteU8(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVU8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSU8(at, bt[0], rt, ait, rit)
		default:
			return LteIterU8(at, bt, rt, ait, bit, rit)
		}
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			LteU16(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVU16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSU16(at, bt[0], rt, ait, rit)
		default:
			return LteIterU16(at, bt, rt, ait, bit, rit)
		}
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			LteU32(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVU32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSU32(at, bt[0], rt, ait, rit)
		default:
			return LteIterU32(at, bt, rt, ait, bit, rit)
		}
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			LteU64(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVU64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSU64(at, bt[0], rt, ait, rit)
		default:
			return LteIterU64(at, bt, rt, ait, bit, rit)
		}
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			LteF32(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVF32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSF32(at, bt[0], rt, ait, rit)
		default:
			return LteIterF32(at, bt, rt, ait, bit, rit)
		}
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			LteF64(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVF64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSF64(at, bt[0], rt, ait, rit)
		default:
			return LteIterF64(at, bt, rt, ait, bit, rit)
		}
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			LteStr(at, bt, rt)
			return
		case as && !bs:
			return LteIterSVStr(at[0], bt, rt, bit, rit)
		case !as && bs:
			return LteIterVSStr(at, bt[0], rt, ait, rit)
		default:
			return LteIterStr(at, bt, rt, ait, bit, rit)
		}
	default:
		return errors.Errorf("Unsupported type %v for Lte", t)
	}
}
// EqIter dispatches an elementwise == comparison between a and b using the
// supplied iterators for strided/masked traversal, writing boolean results
// into retVal. Unlike the ordered comparisons (Gt/Lt/...), equality also
// supports Bool, Complex64/128, Uintptr and UnsafePointer elements. Scalar
// operands skip their iterator (the SV/VS kernel variants); when both
// operands are scalar the plain non-iterator kernel is used. Returns an
// error for an unsupported element type, or when retVal is scalar while
// exactly one operand is a vector.
func (e E) EqIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) {
	as := isScalar(a, t) // a holds exactly one element of t
	bs := isScalar(b, t) // b holds exactly one element of t
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// A scalar result cannot receive a vector's worth of comparisons.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Bool:
		at := a.Bools()
		bt := b.Bools()
		switch {
		case as && bs:
			EqB(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVB(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSB(at, bt[0], rt, ait, rit)
		default:
			return EqIterB(at, bt, rt, ait, bit, rit)
		}
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			EqI(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVI(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSI(at, bt[0], rt, ait, rit)
		default:
			return EqIterI(at, bt, rt, ait, bit, rit)
		}
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			EqI8(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVI8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSI8(at, bt[0], rt, ait, rit)
		default:
			return EqIterI8(at, bt, rt, ait, bit, rit)
		}
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			EqI16(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVI16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSI16(at, bt[0], rt, ait, rit)
		default:
			return EqIterI16(at, bt, rt, ait, bit, rit)
		}
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			EqI32(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVI32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSI32(at, bt[0], rt, ait, rit)
		default:
			return EqIterI32(at, bt, rt, ait, bit, rit)
		}
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			EqI64(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVI64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSI64(at, bt[0], rt, ait, rit)
		default:
			return EqIterI64(at, bt, rt, ait, bit, rit)
		}
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			EqU(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVU(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSU(at, bt[0], rt, ait, rit)
		default:
			return EqIterU(at, bt, rt, ait, bit, rit)
		}
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			EqU8(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVU8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSU8(at, bt[0], rt, ait, rit)
		default:
			return EqIterU8(at, bt, rt, ait, bit, rit)
		}
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			EqU16(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVU16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSU16(at, bt[0], rt, ait, rit)
		default:
			return EqIterU16(at, bt, rt, ait, bit, rit)
		}
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			EqU32(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVU32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSU32(at, bt[0], rt, ait, rit)
		default:
			return EqIterU32(at, bt, rt, ait, bit, rit)
		}
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			EqU64(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVU64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSU64(at, bt[0], rt, ait, rit)
		default:
			return EqIterU64(at, bt, rt, ait, bit, rit)
		}
	case Uintptr:
		at := a.Uintptrs()
		bt := b.Uintptrs()
		switch {
		case as && bs:
			EqUintptr(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVUintptr(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSUintptr(at, bt[0], rt, ait, rit)
		default:
			return EqIterUintptr(at, bt, rt, ait, bit, rit)
		}
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			EqF32(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVF32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSF32(at, bt[0], rt, ait, rit)
		default:
			return EqIterF32(at, bt, rt, ait, bit, rit)
		}
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			EqF64(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVF64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSF64(at, bt[0], rt, ait, rit)
		default:
			return EqIterF64(at, bt, rt, ait, bit, rit)
		}
	case Complex64:
		at := a.Complex64s()
		bt := b.Complex64s()
		switch {
		case as && bs:
			EqC64(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVC64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSC64(at, bt[0], rt, ait, rit)
		default:
			return EqIterC64(at, bt, rt, ait, bit, rit)
		}
	case Complex128:
		at := a.Complex128s()
		bt := b.Complex128s()
		switch {
		case as && bs:
			EqC128(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVC128(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSC128(at, bt[0], rt, ait, rit)
		default:
			return EqIterC128(at, bt, rt, ait, bit, rit)
		}
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			EqStr(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVStr(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSStr(at, bt[0], rt, ait, rit)
		default:
			return EqIterStr(at, bt, rt, ait, bit, rit)
		}
	case UnsafePointer:
		at := a.UnsafePointers()
		bt := b.UnsafePointers()
		switch {
		case as && bs:
			EqUnsafePointer(at, bt, rt)
			return
		case as && !bs:
			return EqIterSVUnsafePointer(at[0], bt, rt, bit, rit)
		case !as && bs:
			return EqIterVSUnsafePointer(at, bt[0], rt, ait, rit)
		default:
			return EqIterUnsafePointer(at, bt, rt, ait, bit, rit)
		}
	default:
		return errors.Errorf("Unsupported type %v for Eq", t)
	}
}
// NeIter dispatches an elementwise != comparison between a and b using the
// supplied iterators for strided/masked traversal, writing boolean results
// into retVal. Like EqIter, inequality also supports Bool, Complex64/128,
// Uintptr and UnsafePointer elements. Scalar operands skip their iterator
// (the SV/VS kernel variants); when both operands are scalar the plain
// non-iterator kernel is used. Returns an error for an unsupported element
// type, or when retVal is scalar while exactly one operand is a vector.
func (e E) NeIter(t reflect.Type, a *storage.Header, b *storage.Header, retVal *storage.Header, ait Iterator, bit Iterator, rit Iterator) (err error) {
	as := isScalar(a, t) // a holds exactly one element of t
	bs := isScalar(b, t) // b holds exactly one element of t
	rs := isScalar(retVal, Bool)
	rt := retVal.Bools()
	// A scalar result cannot receive a vector's worth of comparisons.
	if ((as && !bs) || (bs && !as)) && rs {
		return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
	}
	switch t {
	case Bool:
		at := a.Bools()
		bt := b.Bools()
		switch {
		case as && bs:
			NeB(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVB(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSB(at, bt[0], rt, ait, rit)
		default:
			return NeIterB(at, bt, rt, ait, bit, rit)
		}
	case Int:
		at := a.Ints()
		bt := b.Ints()
		switch {
		case as && bs:
			NeI(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVI(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSI(at, bt[0], rt, ait, rit)
		default:
			return NeIterI(at, bt, rt, ait, bit, rit)
		}
	case Int8:
		at := a.Int8s()
		bt := b.Int8s()
		switch {
		case as && bs:
			NeI8(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVI8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSI8(at, bt[0], rt, ait, rit)
		default:
			return NeIterI8(at, bt, rt, ait, bit, rit)
		}
	case Int16:
		at := a.Int16s()
		bt := b.Int16s()
		switch {
		case as && bs:
			NeI16(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVI16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSI16(at, bt[0], rt, ait, rit)
		default:
			return NeIterI16(at, bt, rt, ait, bit, rit)
		}
	case Int32:
		at := a.Int32s()
		bt := b.Int32s()
		switch {
		case as && bs:
			NeI32(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVI32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSI32(at, bt[0], rt, ait, rit)
		default:
			return NeIterI32(at, bt, rt, ait, bit, rit)
		}
	case Int64:
		at := a.Int64s()
		bt := b.Int64s()
		switch {
		case as && bs:
			NeI64(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVI64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSI64(at, bt[0], rt, ait, rit)
		default:
			return NeIterI64(at, bt, rt, ait, bit, rit)
		}
	case Uint:
		at := a.Uints()
		bt := b.Uints()
		switch {
		case as && bs:
			NeU(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVU(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSU(at, bt[0], rt, ait, rit)
		default:
			return NeIterU(at, bt, rt, ait, bit, rit)
		}
	case Uint8:
		at := a.Uint8s()
		bt := b.Uint8s()
		switch {
		case as && bs:
			NeU8(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVU8(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSU8(at, bt[0], rt, ait, rit)
		default:
			return NeIterU8(at, bt, rt, ait, bit, rit)
		}
	case Uint16:
		at := a.Uint16s()
		bt := b.Uint16s()
		switch {
		case as && bs:
			NeU16(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVU16(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSU16(at, bt[0], rt, ait, rit)
		default:
			return NeIterU16(at, bt, rt, ait, bit, rit)
		}
	case Uint32:
		at := a.Uint32s()
		bt := b.Uint32s()
		switch {
		case as && bs:
			NeU32(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVU32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSU32(at, bt[0], rt, ait, rit)
		default:
			return NeIterU32(at, bt, rt, ait, bit, rit)
		}
	case Uint64:
		at := a.Uint64s()
		bt := b.Uint64s()
		switch {
		case as && bs:
			NeU64(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVU64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSU64(at, bt[0], rt, ait, rit)
		default:
			return NeIterU64(at, bt, rt, ait, bit, rit)
		}
	case Uintptr:
		at := a.Uintptrs()
		bt := b.Uintptrs()
		switch {
		case as && bs:
			NeUintptr(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVUintptr(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSUintptr(at, bt[0], rt, ait, rit)
		default:
			return NeIterUintptr(at, bt, rt, ait, bit, rit)
		}
	case Float32:
		at := a.Float32s()
		bt := b.Float32s()
		switch {
		case as && bs:
			NeF32(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVF32(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSF32(at, bt[0], rt, ait, rit)
		default:
			return NeIterF32(at, bt, rt, ait, bit, rit)
		}
	case Float64:
		at := a.Float64s()
		bt := b.Float64s()
		switch {
		case as && bs:
			NeF64(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVF64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSF64(at, bt[0], rt, ait, rit)
		default:
			return NeIterF64(at, bt, rt, ait, bit, rit)
		}
	case Complex64:
		at := a.Complex64s()
		bt := b.Complex64s()
		switch {
		case as && bs:
			NeC64(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVC64(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSC64(at, bt[0], rt, ait, rit)
		default:
			return NeIterC64(at, bt, rt, ait, bit, rit)
		}
	case Complex128:
		at := a.Complex128s()
		bt := b.Complex128s()
		switch {
		case as && bs:
			NeC128(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVC128(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSC128(at, bt[0], rt, ait, rit)
		default:
			return NeIterC128(at, bt, rt, ait, bit, rit)
		}
	case String:
		at := a.Strings()
		bt := b.Strings()
		switch {
		case as && bs:
			NeStr(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVStr(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSStr(at, bt[0], rt, ait, rit)
		default:
			return NeIterStr(at, bt, rt, ait, bit, rit)
		}
	case UnsafePointer:
		at := a.UnsafePointers()
		bt := b.UnsafePointers()
		switch {
		case as && bs:
			NeUnsafePointer(at, bt, rt)
			return
		case as && !bs:
			return NeIterSVUnsafePointer(at[0], bt, rt, bit, rit)
		case !as && bs:
			return NeIterVSUnsafePointer(at, bt[0], rt, ait, rit)
		default:
			return NeIterUnsafePointer(at, bt, rt, ait, bit, rit)
		}
	default:
		return errors.Errorf("Unsupported type %v for Ne", t)
	}
}
func (e E) GtSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
as := isScalar(a, t)
bs := isScalar(b, t)
switch t {
case Int:
at := a.Ints()
bt := b.Ints()
switch {
case as && bs:
GtSameI(at, bt)
case as && !bs:
GtSameIterSVI(at[0], bt, bit)
case !as && bs:
GtSameIterVSI(at, bt[0], ait)
default:
GtSameIterI(at, bt, ait, bit)
}
return
case Int8:
at := a.Int8s()
bt := b.Int8s()
switch {
case as && bs:
GtSameI8(at, bt)
case as && !bs:
GtSameIterSVI8(at[0], bt, bit)
case !as && bs:
GtSameIterVSI8(at, bt[0], ait)
default:
GtSameIterI8(at, bt, ait, bit)
}
return
case Int16:
at := a.Int16s()
bt := b.Int16s()
switch {
case as && bs:
GtSameI16(at, bt)
case as && !bs:
GtSameIterSVI16(at[0], bt, bit)
case !as && bs:
GtSameIterVSI16(at, bt[0], ait)
default:
GtSameIterI16(at, bt, ait, bit)
}
return
case Int32:
at := a.Int32s()
bt := b.Int32s()
switch {
case as && bs:
GtSameI32(at, bt)
case as && !bs:
GtSameIterSVI32(at[0], bt, bit)
case !as && bs:
GtSameIterVSI32(at, bt[0], ait)
default:
GtSameIterI32(at, bt, ait, bit)
}
return
case Int64:
at := a.Int64s()
bt := b.Int64s()
switch {
case as && bs:
GtSameI64(at, bt)
case as && !bs:
GtSameIterSVI64(at[0], bt, bit)
case !as && bs:
GtSameIterVSI64(at, bt[0], ait)
default:
GtSameIterI64(at, bt, ait, bit)
}
return
case Uint:
at := a.Uints()
bt := b.Uints()
switch {
case as && bs:
GtSameU(at, bt)
case as && !bs:
GtSameIterSVU(at[0], bt, bit)
case !as && bs:
GtSameIterVSU(at, bt[0], ait)
default:
GtSameIterU(at, bt, ait, bit)
}
return
case Uint8:
at := a.Uint8s()
bt := b.Uint8s()
switch {
case as && bs:
GtSameU8(at, bt)
case as && !bs:
GtSameIterSVU8(at[0], bt, bit)
case !as && bs:
GtSameIterVSU8(at, bt[0], ait)
default:
GtSameIterU8(at, bt, ait, bit)
}
return
case Uint16:
at := a.Uint16s()
bt := b.Uint16s()
switch {
case as && bs:
GtSameU16(at, bt)
case as && !bs:
GtSameIterSVU16(at[0], bt, bit)
case !as && bs:
GtSameIterVSU16(at, bt[0], ait)
default:
GtSameIterU16(at, bt, ait, bit)
}
return
case Uint32:
at := a.Uint32s()
bt := b.Uint32s()
switch {
case as && bs:
GtSameU32(at, bt)
case as && !bs:
GtSameIterSVU32(at[0], bt, bit)
case !as && bs:
GtSameIterVSU32(at, bt[0], ait)
default:
GtSameIterU32(at, bt, ait, bit)
}
return
case Uint64:
at := a.Uint64s()
bt := b.Uint64s()
switch {
case as && bs:
GtSameU64(at, bt)
case as && !bs:
GtSameIterSVU64(at[0], bt, bit)
case !as && bs:
GtSameIterVSU64(at, bt[0], ait)
default:
GtSameIterU64(at, bt, ait, bit)
}
return
case Float32:
at := a.Float32s()
bt := b.Float32s()
switch {
case as && bs:
GtSameF32(at, bt)
case as && !bs:
GtSameIterSVF32(at[0], bt, bit)
case !as && bs:
GtSameIterVSF32(at, bt[0], ait)
default:
GtSameIterF32(at, bt, ait, bit)
}
return
case Float64:
at := a.Float64s()
bt := b.Float64s()
switch {
case as && bs:
GtSameF64(at, bt)
case as && !bs:
GtSameIterSVF64(at[0], bt, bit)
case !as && bs:
GtSameIterVSF64(at, bt[0], ait)
default:
GtSameIterF64(at, bt, ait, bit)
}
return
case String:
at := a.Strings()
bt := b.Strings()
switch {
case as && bs:
GtSameStr(at, bt)
case as && !bs:
GtSameIterSVStr(at[0], bt, bit)
case !as && bs:
GtSameIterVSStr(at, bt[0], ait)
default:
GtSameIterStr(at, bt, ait, bit)
}
return
default:
return errors.Errorf("Unsupported type %v for Gt", t)
}
}
func (e E) GteSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
as := isScalar(a, t)
bs := isScalar(b, t)
switch t {
case Int:
at := a.Ints()
bt := b.Ints()
switch {
case as && bs:
GteSameI(at, bt)
case as && !bs:
GteSameIterSVI(at[0], bt, bit)
case !as && bs:
GteSameIterVSI(at, bt[0], ait)
default:
GteSameIterI(at, bt, ait, bit)
}
return
case Int8:
at := a.Int8s()
bt := b.Int8s()
switch {
case as && bs:
GteSameI8(at, bt)
case as && !bs:
GteSameIterSVI8(at[0], bt, bit)
case !as && bs:
GteSameIterVSI8(at, bt[0], ait)
default:
GteSameIterI8(at, bt, ait, bit)
}
return
case Int16:
at := a.Int16s()
bt := b.Int16s()
switch {
case as && bs:
GteSameI16(at, bt)
case as && !bs:
GteSameIterSVI16(at[0], bt, bit)
case !as && bs:
GteSameIterVSI16(at, bt[0], ait)
default:
GteSameIterI16(at, bt, ait, bit)
}
return
case Int32:
at := a.Int32s()
bt := b.Int32s()
switch {
case as && bs:
GteSameI32(at, bt)
case as && !bs:
GteSameIterSVI32(at[0], bt, bit)
case !as && bs:
GteSameIterVSI32(at, bt[0], ait)
default:
GteSameIterI32(at, bt, ait, bit)
}
return
case Int64:
at := a.Int64s()
bt := b.Int64s()
switch {
case as && bs:
GteSameI64(at, bt)
case as && !bs:
GteSameIterSVI64(at[0], bt, bit)
case !as && bs:
GteSameIterVSI64(at, bt[0], ait)
default:
GteSameIterI64(at, bt, ait, bit)
}
return
case Uint:
at := a.Uints()
bt := b.Uints()
switch {
case as && bs:
GteSameU(at, bt)
case as && !bs:
GteSameIterSVU(at[0], bt, bit)
case !as && bs:
GteSameIterVSU(at, bt[0], ait)
default:
GteSameIterU(at, bt, ait, bit)
}
return
case Uint8:
at := a.Uint8s()
bt := b.Uint8s()
switch {
case as && bs:
GteSameU8(at, bt)
case as && !bs:
GteSameIterSVU8(at[0], bt, bit)
case !as && bs:
GteSameIterVSU8(at, bt[0], ait)
default:
GteSameIterU8(at, bt, ait, bit)
}
return
case Uint16:
at := a.Uint16s()
bt := b.Uint16s()
switch {
case as && bs:
GteSameU16(at, bt)
case as && !bs:
GteSameIterSVU16(at[0], bt, bit)
case !as && bs:
GteSameIterVSU16(at, bt[0], ait)
default:
GteSameIterU16(at, bt, ait, bit)
}
return
case Uint32:
at := a.Uint32s()
bt := b.Uint32s()
switch {
case as && bs:
GteSameU32(at, bt)
case as && !bs:
GteSameIterSVU32(at[0], bt, bit)
case !as && bs:
GteSameIterVSU32(at, bt[0], ait)
default:
GteSameIterU32(at, bt, ait, bit)
}
return
case Uint64:
at := a.Uint64s()
bt := b.Uint64s()
switch {
case as && bs:
GteSameU64(at, bt)
case as && !bs:
GteSameIterSVU64(at[0], bt, bit)
case !as && bs:
GteSameIterVSU64(at, bt[0], ait)
default:
GteSameIterU64(at, bt, ait, bit)
}
return
case Float32:
at := a.Float32s()
bt := b.Float32s()
switch {
case as && bs:
GteSameF32(at, bt)
case as && !bs:
GteSameIterSVF32(at[0], bt, bit)
case !as && bs:
GteSameIterVSF32(at, bt[0], ait)
default:
GteSameIterF32(at, bt, ait, bit)
}
return
case Float64:
at := a.Float64s()
bt := b.Float64s()
switch {
case as && bs:
GteSameF64(at, bt)
case as && !bs:
GteSameIterSVF64(at[0], bt, bit)
case !as && bs:
GteSameIterVSF64(at, bt[0], ait)
default:
GteSameIterF64(at, bt, ait, bit)
}
return
case String:
at := a.Strings()
bt := b.Strings()
switch {
case as && bs:
GteSameStr(at, bt)
case as && !bs:
GteSameIterSVStr(at[0], bt, bit)
case !as && bs:
GteSameIterVSStr(at, bt[0], ait)
default:
GteSameIterStr(at, bt, ait, bit)
}
return
default:
return errors.Errorf("Unsupported type %v for Gte", t)
}
}
func (e E) LtSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
as := isScalar(a, t)
bs := isScalar(b, t)
switch t {
case Int:
at := a.Ints()
bt := b.Ints()
switch {
case as && bs:
LtSameI(at, bt)
case as && !bs:
LtSameIterSVI(at[0], bt, bit)
case !as && bs:
LtSameIterVSI(at, bt[0], ait)
default:
LtSameIterI(at, bt, ait, bit)
}
return
case Int8:
at := a.Int8s()
bt := b.Int8s()
switch {
case as && bs:
LtSameI8(at, bt)
case as && !bs:
LtSameIterSVI8(at[0], bt, bit)
case !as && bs:
LtSameIterVSI8(at, bt[0], ait)
default:
LtSameIterI8(at, bt, ait, bit)
}
return
case Int16:
at := a.Int16s()
bt := b.Int16s()
switch {
case as && bs:
LtSameI16(at, bt)
case as && !bs:
LtSameIterSVI16(at[0], bt, bit)
case !as && bs:
LtSameIterVSI16(at, bt[0], ait)
default:
LtSameIterI16(at, bt, ait, bit)
}
return
case Int32:
at := a.Int32s()
bt := b.Int32s()
switch {
case as && bs:
LtSameI32(at, bt)
case as && !bs:
LtSameIterSVI32(at[0], bt, bit)
case !as && bs:
LtSameIterVSI32(at, bt[0], ait)
default:
LtSameIterI32(at, bt, ait, bit)
}
return
case Int64:
at := a.Int64s()
bt := b.Int64s()
switch {
case as && bs:
LtSameI64(at, bt)
case as && !bs:
LtSameIterSVI64(at[0], bt, bit)
case !as && bs:
LtSameIterVSI64(at, bt[0], ait)
default:
LtSameIterI64(at, bt, ait, bit)
}
return
case Uint:
at := a.Uints()
bt := b.Uints()
switch {
case as && bs:
LtSameU(at, bt)
case as && !bs:
LtSameIterSVU(at[0], bt, bit)
case !as && bs:
LtSameIterVSU(at, bt[0], ait)
default:
LtSameIterU(at, bt, ait, bit)
}
return
case Uint8:
at := a.Uint8s()
bt := b.Uint8s()
switch {
case as && bs:
LtSameU8(at, bt)
case as && !bs:
LtSameIterSVU8(at[0], bt, bit)
case !as && bs:
LtSameIterVSU8(at, bt[0], ait)
default:
LtSameIterU8(at, bt, ait, bit)
}
return
case Uint16:
at := a.Uint16s()
bt := b.Uint16s()
switch {
case as && bs:
LtSameU16(at, bt)
case as && !bs:
LtSameIterSVU16(at[0], bt, bit)
case !as && bs:
LtSameIterVSU16(at, bt[0], ait)
default:
LtSameIterU16(at, bt, ait, bit)
}
return
case Uint32:
at := a.Uint32s()
bt := b.Uint32s()
switch {
case as && bs:
LtSameU32(at, bt)
case as && !bs:
LtSameIterSVU32(at[0], bt, bit)
case !as && bs:
LtSameIterVSU32(at, bt[0], ait)
default:
LtSameIterU32(at, bt, ait, bit)
}
return
case Uint64:
at := a.Uint64s()
bt := b.Uint64s()
switch {
case as && bs:
LtSameU64(at, bt)
case as && !bs:
LtSameIterSVU64(at[0], bt, bit)
case !as && bs:
LtSameIterVSU64(at, bt[0], ait)
default:
LtSameIterU64(at, bt, ait, bit)
}
return
case Float32:
at := a.Float32s()
bt := b.Float32s()
switch {
case as && bs:
LtSameF32(at, bt)
case as && !bs:
LtSameIterSVF32(at[0], bt, bit)
case !as && bs:
LtSameIterVSF32(at, bt[0], ait)
default:
LtSameIterF32(at, bt, ait, bit)
}
return
case Float64:
at := a.Float64s()
bt := b.Float64s()
switch {
case as && bs:
LtSameF64(at, bt)
case as && !bs:
LtSameIterSVF64(at[0], bt, bit)
case !as && bs:
LtSameIterVSF64(at, bt[0], ait)
default:
LtSameIterF64(at, bt, ait, bit)
}
return
case String:
at := a.Strings()
bt := b.Strings()
switch {
case as && bs:
LtSameStr(at, bt)
case as && !bs:
LtSameIterSVStr(at[0], bt, bit)
case !as && bs:
LtSameIterVSStr(at, bt[0], ait)
default:
LtSameIterStr(at, bt, ait, bit)
}
return
default:
return errors.Errorf("Unsupported type %v for Lt", t)
}
}
func (e E) LteSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
as := isScalar(a, t)
bs := isScalar(b, t)
switch t {
case Int:
at := a.Ints()
bt := b.Ints()
switch {
case as && bs:
LteSameI(at, bt)
case as && !bs:
LteSameIterSVI(at[0], bt, bit)
case !as && bs:
LteSameIterVSI(at, bt[0], ait)
default:
LteSameIterI(at, bt, ait, bit)
}
return
case Int8:
at := a.Int8s()
bt := b.Int8s()
switch {
case as && bs:
LteSameI8(at, bt)
case as && !bs:
LteSameIterSVI8(at[0], bt, bit)
case !as && bs:
LteSameIterVSI8(at, bt[0], ait)
default:
LteSameIterI8(at, bt, ait, bit)
}
return
case Int16:
at := a.Int16s()
bt := b.Int16s()
switch {
case as && bs:
LteSameI16(at, bt)
case as && !bs:
LteSameIterSVI16(at[0], bt, bit)
case !as && bs:
LteSameIterVSI16(at, bt[0], ait)
default:
LteSameIterI16(at, bt, ait, bit)
}
return
case Int32:
at := a.Int32s()
bt := b.Int32s()
switch {
case as && bs:
LteSameI32(at, bt)
case as && !bs:
LteSameIterSVI32(at[0], bt, bit)
case !as && bs:
LteSameIterVSI32(at, bt[0], ait)
default:
LteSameIterI32(at, bt, ait, bit)
}
return
case Int64:
at := a.Int64s()
bt := b.Int64s()
switch {
case as && bs:
LteSameI64(at, bt)
case as && !bs:
LteSameIterSVI64(at[0], bt, bit)
case !as && bs:
LteSameIterVSI64(at, bt[0], ait)
default:
LteSameIterI64(at, bt, ait, bit)
}
return
case Uint:
at := a.Uints()
bt := b.Uints()
switch {
case as && bs:
LteSameU(at, bt)
case as && !bs:
LteSameIterSVU(at[0], bt, bit)
case !as && bs:
LteSameIterVSU(at, bt[0], ait)
default:
LteSameIterU(at, bt, ait, bit)
}
return
case Uint8:
at := a.Uint8s()
bt := b.Uint8s()
switch {
case as && bs:
LteSameU8(at, bt)
case as && !bs:
LteSameIterSVU8(at[0], bt, bit)
case !as && bs:
LteSameIterVSU8(at, bt[0], ait)
default:
LteSameIterU8(at, bt, ait, bit)
}
return
case Uint16:
at := a.Uint16s()
bt := b.Uint16s()
switch {
case as && bs:
LteSameU16(at, bt)
case as && !bs:
LteSameIterSVU16(at[0], bt, bit)
case !as && bs:
LteSameIterVSU16(at, bt[0], ait)
default:
LteSameIterU16(at, bt, ait, bit)
}
return
case Uint32:
at := a.Uint32s()
bt := b.Uint32s()
switch {
case as && bs:
LteSameU32(at, bt)
case as && !bs:
LteSameIterSVU32(at[0], bt, bit)
case !as && bs:
LteSameIterVSU32(at, bt[0], ait)
default:
LteSameIterU32(at, bt, ait, bit)
}
return
case Uint64:
at := a.Uint64s()
bt := b.Uint64s()
switch {
case as && bs:
LteSameU64(at, bt)
case as && !bs:
LteSameIterSVU64(at[0], bt, bit)
case !as && bs:
LteSameIterVSU64(at, bt[0], ait)
default:
LteSameIterU64(at, bt, ait, bit)
}
return
case Float32:
at := a.Float32s()
bt := b.Float32s()
switch {
case as && bs:
LteSameF32(at, bt)
case as && !bs:
LteSameIterSVF32(at[0], bt, bit)
case !as && bs:
LteSameIterVSF32(at, bt[0], ait)
default:
LteSameIterF32(at, bt, ait, bit)
}
return
case Float64:
at := a.Float64s()
bt := b.Float64s()
switch {
case as && bs:
LteSameF64(at, bt)
case as && !bs:
LteSameIterSVF64(at[0], bt, bit)
case !as && bs:
LteSameIterVSF64(at, bt[0], ait)
default:
LteSameIterF64(at, bt, ait, bit)
}
return
case String:
at := a.Strings()
bt := b.Strings()
switch {
case as && bs:
LteSameStr(at, bt)
case as && !bs:
LteSameIterSVStr(at[0], bt, bit)
case !as && bs:
LteSameIterVSStr(at, bt[0], ait)
default:
LteSameIterStr(at, bt, ait, bit)
}
return
default:
return errors.Errorf("Unsupported type %v for Lte", t)
}
}
func (e E) EqSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
as := isScalar(a, t)
bs := isScalar(b, t)
switch t {
case Bool:
at := a.Bools()
bt := b.Bools()
switch {
case as && bs:
EqSameB(at, bt)
case as && !bs:
EqSameIterSVB(at[0], bt, bit)
case !as && bs:
EqSameIterVSB(at, bt[0], ait)
default:
EqSameIterB(at, bt, ait, bit)
}
return
case Int:
at := a.Ints()
bt := b.Ints()
switch {
case as && bs:
EqSameI(at, bt)
case as && !bs:
EqSameIterSVI(at[0], bt, bit)
case !as && bs:
EqSameIterVSI(at, bt[0], ait)
default:
EqSameIterI(at, bt, ait, bit)
}
return
case Int8:
at := a.Int8s()
bt := b.Int8s()
switch {
case as && bs:
EqSameI8(at, bt)
case as && !bs:
EqSameIterSVI8(at[0], bt, bit)
case !as && bs:
EqSameIterVSI8(at, bt[0], ait)
default:
EqSameIterI8(at, bt, ait, bit)
}
return
case Int16:
at := a.Int16s()
bt := b.Int16s()
switch {
case as && bs:
EqSameI16(at, bt)
case as && !bs:
EqSameIterSVI16(at[0], bt, bit)
case !as && bs:
EqSameIterVSI16(at, bt[0], ait)
default:
EqSameIterI16(at, bt, ait, bit)
}
return
case Int32:
at := a.Int32s()
bt := b.Int32s()
switch {
case as && bs:
EqSameI32(at, bt)
case as && !bs:
EqSameIterSVI32(at[0], bt, bit)
case !as && bs:
EqSameIterVSI32(at, bt[0], ait)
default:
EqSameIterI32(at, bt, ait, bit)
}
return
case Int64:
at := a.Int64s()
bt := b.Int64s()
switch {
case as && bs:
EqSameI64(at, bt)
case as && !bs:
EqSameIterSVI64(at[0], bt, bit)
case !as && bs:
EqSameIterVSI64(at, bt[0], ait)
default:
EqSameIterI64(at, bt, ait, bit)
}
return
case Uint:
at := a.Uints()
bt := b.Uints()
switch {
case as && bs:
EqSameU(at, bt)
case as && !bs:
EqSameIterSVU(at[0], bt, bit)
case !as && bs:
EqSameIterVSU(at, bt[0], ait)
default:
EqSameIterU(at, bt, ait, bit)
}
return
case Uint8:
at := a.Uint8s()
bt := b.Uint8s()
switch {
case as && bs:
EqSameU8(at, bt)
case as && !bs:
EqSameIterSVU8(at[0], bt, bit)
case !as && bs:
EqSameIterVSU8(at, bt[0], ait)
default:
EqSameIterU8(at, bt, ait, bit)
}
return
case Uint16:
at := a.Uint16s()
bt := b.Uint16s()
switch {
case as && bs:
EqSameU16(at, bt)
case as && !bs:
EqSameIterSVU16(at[0], bt, bit)
case !as && bs:
EqSameIterVSU16(at, bt[0], ait)
default:
EqSameIterU16(at, bt, ait, bit)
}
return
case Uint32:
at := a.Uint32s()
bt := b.Uint32s()
switch {
case as && bs:
EqSameU32(at, bt)
case as && !bs:
EqSameIterSVU32(at[0], bt, bit)
case !as && bs:
EqSameIterVSU32(at, bt[0], ait)
default:
EqSameIterU32(at, bt, ait, bit)
}
return
case Uint64:
at := a.Uint64s()
bt := b.Uint64s()
switch {
case as && bs:
EqSameU64(at, bt)
case as && !bs:
EqSameIterSVU64(at[0], bt, bit)
case !as && bs:
EqSameIterVSU64(at, bt[0], ait)
default:
EqSameIterU64(at, bt, ait, bit)
}
return
case Uintptr:
at := a.Uintptrs()
bt := b.Uintptrs()
switch {
case as && bs:
EqSameUintptr(at, bt)
case as && !bs:
EqSameIterSVUintptr(at[0], bt, bit)
case !as && bs:
EqSameIterVSUintptr(at, bt[0], ait)
default:
EqSameIterUintptr(at, bt, ait, bit)
}
return
case Float32:
at := a.Float32s()
bt := b.Float32s()
switch {
case as && bs:
EqSameF32(at, bt)
case as && !bs:
EqSameIterSVF32(at[0], bt, bit)
case !as && bs:
EqSameIterVSF32(at, bt[0], ait)
default:
EqSameIterF32(at, bt, ait, bit)
}
return
case Float64:
at := a.Float64s()
bt := b.Float64s()
switch {
case as && bs:
EqSameF64(at, bt)
case as && !bs:
EqSameIterSVF64(at[0], bt, bit)
case !as && bs:
EqSameIterVSF64(at, bt[0], ait)
default:
EqSameIterF64(at, bt, ait, bit)
}
return
case Complex64:
at := a.Complex64s()
bt := b.Complex64s()
switch {
case as && bs:
EqSameC64(at, bt)
case as && !bs:
EqSameIterSVC64(at[0], bt, bit)
case !as && bs:
EqSameIterVSC64(at, bt[0], ait)
default:
EqSameIterC64(at, bt, ait, bit)
}
return
case Complex128:
at := a.Complex128s()
bt := b.Complex128s()
switch {
case as && bs:
EqSameC128(at, bt)
case as && !bs:
EqSameIterSVC128(at[0], bt, bit)
case !as && bs:
EqSameIterVSC128(at, bt[0], ait)
default:
EqSameIterC128(at, bt, ait, bit)
}
return
case String:
at := a.Strings()
bt := b.Strings()
switch {
case as && bs:
EqSameStr(at, bt)
case as && !bs:
EqSameIterSVStr(at[0], bt, bit)
case !as && bs:
EqSameIterVSStr(at, bt[0], ait)
default:
EqSameIterStr(at, bt, ait, bit)
}
return
default:
return errors.Errorf("Unsupported type %v for Eq", t)
}
}
func (e E) NeSameIter(t reflect.Type, a *storage.Header, b *storage.Header, ait Iterator, bit Iterator) (err error) {
as := isScalar(a, t)
bs := isScalar(b, t)
switch t {
case Bool:
at := a.Bools()
bt := b.Bools()
switch {
case as && bs:
NeSameB(at, bt)
case as && !bs:
NeSameIterSVB(at[0], bt, bit)
case !as && bs:
NeSameIterVSB(at, bt[0], ait)
default:
NeSameIterB(at, bt, ait, bit)
}
return
case Int:
at := a.Ints()
bt := b.Ints()
switch {
case as && bs:
NeSameI(at, bt)
case as && !bs:
NeSameIterSVI(at[0], bt, bit)
case !as && bs:
NeSameIterVSI(at, bt[0], ait)
default:
NeSameIterI(at, bt, ait, bit)
}
return
case Int8:
at := a.Int8s()
bt := b.Int8s()
switch {
case as && bs:
NeSameI8(at, bt)
case as && !bs:
NeSameIterSVI8(at[0], bt, bit)
case !as && bs:
NeSameIterVSI8(at, bt[0], ait)
default:
NeSameIterI8(at, bt, ait, bit)
}
return
case Int16:
at := a.Int16s()
bt := b.Int16s()
switch {
case as && bs:
NeSameI16(at, bt)
case as && !bs:
NeSameIterSVI16(at[0], bt, bit)
case !as && bs:
NeSameIterVSI16(at, bt[0], ait)
default:
NeSameIterI16(at, bt, ait, bit)
}
return
case Int32:
at := a.Int32s()
bt := b.Int32s()
switch {
case as && bs:
NeSameI32(at, bt)
case as && !bs:
NeSameIterSVI32(at[0], bt, bit)
case !as && bs:
NeSameIterVSI32(at, bt[0], ait)
default:
NeSameIterI32(at, bt, ait, bit)
}
return
case Int64:
at := a.Int64s()
bt := b.Int64s()
switch {
case as && bs:
NeSameI64(at, bt)
case as && !bs:
NeSameIterSVI64(at[0], bt, bit)
case !as && bs:
NeSameIterVSI64(at, bt[0], ait)
default:
NeSameIterI64(at, bt, ait, bit)
}
return
case Uint:
at := a.Uints()
bt := b.Uints()
switch {
case as && bs:
NeSameU(at, bt)
case as && !bs:
NeSameIterSVU(at[0], bt, bit)
case !as && bs:
NeSameIterVSU(at, bt[0], ait)
default:
NeSameIterU(at, bt, ait, bit)
}
return
case Uint8:
at := a.Uint8s()
bt := b.Uint8s()
switch {
case as && bs:
NeSameU8(at, bt)
case as && !bs:
NeSameIterSVU8(at[0], bt, bit)
case !as && bs:
NeSameIterVSU8(at, bt[0], ait)
default:
NeSameIterU8(at, bt, ait, bit)
}
return
case Uint16:
at := a.Uint16s()
bt := b.Uint16s()
switch {
case as && bs:
NeSameU16(at, bt)
case as && !bs:
NeSameIterSVU16(at[0], bt, bit)
case !as && bs:
NeSameIterVSU16(at, bt[0], ait)
default:
NeSameIterU16(at, bt, ait, bit)
}
return
case Uint32:
at := a.Uint32s()
bt := b.Uint32s()
switch {
case as && bs:
NeSameU32(at, bt)
case as && !bs:
NeSameIterSVU32(at[0], bt, bit)
case !as && bs:
NeSameIterVSU32(at, bt[0], ait)
default:
NeSameIterU32(at, bt, ait, bit)
}
return
case Uint64:
at := a.Uint64s()
bt := b.Uint64s()
switch {
case as && bs:
NeSameU64(at, bt)
case as && !bs:
NeSameIterSVU64(at[0], bt, bit)
case !as && bs:
NeSameIterVSU64(at, bt[0], ait)
default:
NeSameIterU64(at, bt, ait, bit)
}
return
case Uintptr:
at := a.Uintptrs()
bt := b.Uintptrs()
switch {
case as && bs:
NeSameUintptr(at, bt)
case as && !bs:
NeSameIterSVUintptr(at[0], bt, bit)
case !as && bs:
NeSameIterVSUintptr(at, bt[0], ait)
default:
NeSameIterUintptr(at, bt, ait, bit)
}
return
case Float32:
at := a.Float32s()
bt := b.Float32s()
switch {
case as && bs:
NeSameF32(at, bt)
case as && !bs:
NeSameIterSVF32(at[0], bt, bit)
case !as && bs:
NeSameIterVSF32(at, bt[0], ait)
default:
NeSameIterF32(at, bt, ait, bit)
}
return
case Float64:
at := a.Float64s()
bt := b.Float64s()
switch {
case as && bs:
NeSameF64(at, bt)
case as && !bs:
NeSameIterSVF64(at[0], bt, bit)
case !as && bs:
NeSameIterVSF64(at, bt[0], ait)
default:
NeSameIterF64(at, bt, ait, bit)
}
return
case Complex64:
at := a.Complex64s()
bt := b.Complex64s()
switch {
case as && bs:
NeSameC64(at, bt)
case as && !bs:
NeSameIterSVC64(at[0], bt, bit)
case !as && bs:
NeSameIterVSC64(at, bt[0], ait)
default:
NeSameIterC64(at, bt, ait, bit)
}
return
case Complex128:
at := a.Complex128s()
bt := b.Complex128s()
switch {
case as && bs:
NeSameC128(at, bt)
case as && !bs:
NeSameIterSVC128(at[0], bt, bit)
case !as && bs:
NeSameIterVSC128(at, bt[0], ait)
default:
NeSameIterC128(at, bt, ait, bit)
}
return
case String:
at := a.Strings()
bt := b.Strings()
switch {
case as && bs:
NeSameStr(at, bt)
case as && !bs:
NeSameIterSVStr(at[0], bt, bit)
case !as && bs:
NeSameIterVSStr(at, bt[0], ait)
default:
NeSameIterStr(at, bt, ait, bit)
}
return
default:
return errors.Errorf("Unsupported type %v for Ne", t)
}
} | internal/execution/eng_cmp.go | 0.550366 | 0.538559 | eng_cmp.go | starcoder |
package iso20022
// Cash movements from or to a fund as a result of investment funds transactions, eg, subscriptions or redemptions.
type EstimatedFundCashForecast2 struct {
// Date and, if required, the time, at which the price has been applied.
TradeDateTime *DateAndDateTimeChoice `xml:"TradDtTm"`
// Previous date and time at which a price was applied.
PreviousTradeDateTime *DateAndDateTimeChoice `xml:"PrvsTradDtTm"`
// Investment fund class to which a cash flow is related.
FinancialInstrumentDetails *FinancialInstrument5 `xml:"FinInstrmDtls"`
// Estimated total value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
EstimatedTotalNAV *ActiveOrHistoricCurrencyAndAmount `xml:"EstmtdTtlNAV,omitempty"`
// Previous estimated value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
PreviousEstimatedTotalNAV *ActiveOrHistoricCurrencyAndAmount `xml:"PrvsEstmtdTtlNAV,omitempty"`
// Estimated total number of investment fund class units that have been issued.
EstimatedTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"EstmtdTtlUnitsNb,omitempty"`
// Previous estimated value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
PreviousEstimatedTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"PrvsEstmtdTtlUnitsNb,omitempty"`
// Rate of change of the net asset value.
EstimatedTotalNAVChangeRate *PercentageRate `xml:"EstmtdTtlNAVChngRate,omitempty"`
// Currency of the investment fund class.
InvestmentCurrency []*ActiveOrHistoricCurrencyCode `xml:"InvstmtCcy,omitempty"`
// Indicates whether the estimated net cash flow is exceptional.
ExceptionalNetCashFlowIndicator *YesNoIndicator `xml:"XcptnlNetCshFlowInd"`
// Information related to the estimated cash movements reported by pre-defined or user defined criteria.
SortingCriteriaDetails []*CashSortingCriterion1 `xml:"SrtgCritDtls"`
// Net cash movements per financial instrument.
EstimatedNetCashForecastDetails []*NetCashForecast1 `xml:"EstmtdNetCshFcstDtls,omitempty"`
}
func (e *EstimatedFundCashForecast2) AddTradeDateTime() *DateAndDateTimeChoice {
e.TradeDateTime = new(DateAndDateTimeChoice)
return e.TradeDateTime
}
func (e *EstimatedFundCashForecast2) AddPreviousTradeDateTime() *DateAndDateTimeChoice {
e.PreviousTradeDateTime = new(DateAndDateTimeChoice)
return e.PreviousTradeDateTime
}
func (e *EstimatedFundCashForecast2) AddFinancialInstrumentDetails() *FinancialInstrument5 {
e.FinancialInstrumentDetails = new(FinancialInstrument5)
return e.FinancialInstrumentDetails
}
func (e *EstimatedFundCashForecast2) SetEstimatedTotalNAV(value, currency string) {
e.EstimatedTotalNAV = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (e *EstimatedFundCashForecast2) SetPreviousEstimatedTotalNAV(value, currency string) {
e.PreviousEstimatedTotalNAV = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (e *EstimatedFundCashForecast2) AddEstimatedTotalUnitsNumber() *FinancialInstrumentQuantity1 {
e.EstimatedTotalUnitsNumber = new(FinancialInstrumentQuantity1)
return e.EstimatedTotalUnitsNumber
}
func (e *EstimatedFundCashForecast2) AddPreviousEstimatedTotalUnitsNumber() *FinancialInstrumentQuantity1 {
e.PreviousEstimatedTotalUnitsNumber = new(FinancialInstrumentQuantity1)
return e.PreviousEstimatedTotalUnitsNumber
}
func (e *EstimatedFundCashForecast2) SetEstimatedTotalNAVChangeRate(value string) {
e.EstimatedTotalNAVChangeRate = (*PercentageRate)(&value)
}
func (e *EstimatedFundCashForecast2) AddInvestmentCurrency(value string) {
e.InvestmentCurrency = append(e.InvestmentCurrency, (*ActiveOrHistoricCurrencyCode)(&value))
}
func (e *EstimatedFundCashForecast2) SetExceptionalNetCashFlowIndicator(value string) {
e.ExceptionalNetCashFlowIndicator = (*YesNoIndicator)(&value)
}
func (e *EstimatedFundCashForecast2) AddSortingCriteriaDetails() *CashSortingCriterion1 {
newValue := new(CashSortingCriterion1)
e.SortingCriteriaDetails = append(e.SortingCriteriaDetails, newValue)
return newValue
}
func (e *EstimatedFundCashForecast2) AddEstimatedNetCashForecastDetails() *NetCashForecast1 {
newValue := new(NetCashForecast1)
e.EstimatedNetCashForecastDetails = append(e.EstimatedNetCashForecastDetails, newValue)
return newValue
} | data/train/go/43d5a80a0f2632885a098b490e1ac5438a95e284EstimatedFundCashForecast2.go | 0.849784 | 0.546496 | 43d5a80a0f2632885a098b490e1ac5438a95e284EstimatedFundCashForecast2.go | starcoder |
package common
const (
// MbInBytes is the number of bytes in one mebibyte.
MbInBytes = int64(1024 * 1024)
// GbInBytes is the number of bytes in one gibibyte.
GbInBytes = int64(1024 * 1024 * 1024)
// DefaultGbDiskSize is the default disk size in gibibytes.
DefaultGbDiskSize = int64(10)
// DiskTypeString is the value for the PersistentVolume's attribute "type"
DiskTypeString = "vSphere CNS Block Volume"
// AttributeDiskType is a PersistentVolume's attribute.
AttributeDiskType = "type"
// AttributeDatastoreURL represents URL of the datastore in the StorageClass
// For Example: DatastoreURL: "ds:///vmfs/volumes/5c9bb20e-009c1e46-4b85-0200483b2a97/"
AttributeDatastoreURL = "datastoreurl"
// AttributeStoragePolicyName represents name of the Storage Policy in the Storage Class
// For Example: StoragePolicy: "vSAN Default Storage Policy"
AttributeStoragePolicyName = "storagepolicyname"
// AttributeStoragePolicyID represents Storage Policy Id in the Storage Classs
// For Example: StoragePolicyId: "251bce41-cb24-41df-b46b-7c75aed3c4ee"
AttributeStoragePolicyID = "storagepolicyid"
// AttributeFsType represents filesystem type in the Storage Classs
// For Example: FsType: "ext4"
AttributeFsType = "fstype"
// DefaultFsType represents the default filesystem type which will be used to format the volume
// during mount if user does not specify the filesystem type in the Storage Class
DefaultFsType = "ext4"
//ProviderPrefix is the prefix used for the ProviderID set on the node
// Example: vsphere://4201794a-f26b-8914-d95a-edeb7ecc4a8f
ProviderPrefix = "vsphere://"
// AttributeFirstClassDiskUUID is the SCSI Disk Identifier
AttributeFirstClassDiskUUID = "diskUUID"
// BlockVolumeType is the VolumeType for CNS Volume
BlockVolumeType = "BLOCK"
// MinSupportedVCenterMajor is the minimum, major version of vCenter
// on which CNS is supported.
MinSupportedVCenterMajor int = 6
// MinSupportedVCenterMinor is the minimum, minor version of vCenter
// on which CNS is supported.
MinSupportedVCenterMinor int = 7
// MinSupportedVCenterPatch is the patch version supported with MinSupportedVCenterMajor and MinSupportedVCenterMinor
MinSupportedVCenterPatch int = 3
) | pkg/csi/service/common/constants.go | 0.606498 | 0.412353 | constants.go | starcoder |
package mbserver
import (
"encoding/binary"
"errors"
"math"
)
type (
bigEndian struct{}
littleEndian struct{}
)
// LittleEndian is the little-endian implementation of ByteOrder.
var LittleEndian littleEndian
// BigEndian is the big-endian implementation of ByteOrder.
var BigEndian bigEndian
// BytesToUint16 converts a big endian array of bytes to an array of unit16s
func (bigEndian) BytesToUint16(bytes []byte) []uint16 {
values := make([]uint16, len(bytes)/2)
for i := range values {
values[i] = binary.BigEndian.Uint16(bytes[i*2 : (i+1)*2])
}
return values
}
// Uint16ToBytes converts an array of uint16s to a big endian array of bytes
func (bigEndian) Uint16ToBytes(values []uint16) []byte {
bytes := make([]byte, len(values)*2)
for i, value := range values {
binary.BigEndian.PutUint16(bytes[i*2:(i+1)*2], value)
}
return bytes
}
// BytesToUint32 converts a big endian array of bytes to an array of unit32s
func (bigEndian) BytesToUint32(bytes []byte) []uint32 {
values := make([]uint32, len(bytes)/4)
for i := range values {
values[i] = binary.BigEndian.Uint32(bytes[i*4 : (i+1)*4])
}
return values
}
// Uint32ToBytes converts an array of uint32s to a big endian array of bytes
func (bigEndian) Uint32ToBytes(values []uint32) []byte {
bytes := make([]byte, len(values)*4)
for i, value := range values {
binary.BigEndian.PutUint32(bytes[i*4:(i+1)*4], value)
}
return bytes
}
// BytesToFloat32 converts a big endian array of bytes to an float32
func (bigEndian) BytesToFloat32(bytes []byte) float32 {
bits := binary.BigEndian.Uint32(bytes)
return math.Float32frombits(bits)
}
// Float32ToBytes converts an float32 to a big endian array of bytes
func (bigEndian) Float32ToBytes(value float32) []byte {
bits := math.Float32bits(value)
bytes := make([]byte, 4)
binary.BigEndian.PutUint32(bytes, bits)
return bytes
}
// Float32ToBytes converts an array of float32 to a big endian array of bytes
func (bigEndian) Float32sToBytes(values []float32) []byte {
buf := make([]byte, 0)
for _, value := range values {
bits := math.Float32bits(value)
bytes := make([]byte, 4)
binary.BigEndian.PutUint32(bytes, bits)
buf = append(buf, bytes...)
}
return buf
}
// 将一个uint16类型的数字转换为大端的字节充入一个数组的尾部
// 数组前面的内容可以不必是uint16类型
func (bigEndian) EncodeUint16(bytes *[]byte, value uint16) {
bArr := make([]byte, 2)
binary.BigEndian.PutUint16(bArr[0:2], value)
*bytes = append(*bytes, bArr...)
}
// 将一个uint32类型的数字转换为大端的字节充入一个数组的尾部
func (bigEndian) EncodeUint32(bytes *[]byte, value uint32) {
bArr := make([]byte, 4)
binary.BigEndian.PutUint32(bArr[0:4], value)
*bytes = append(*bytes, bArr...)
}
// 将一个float32类型的数字转换为大端的字节充入一个数组的尾部
func (bigEndian) EncodeFloat32(bytes *[]byte, value float32) {
bArr := BigEndian.Float32ToBytes(value)
*bytes = append(*bytes, bArr...)
}
// DecodeUint16s consumes num big-endian uint16 values from the front of the
// byte slice and returns them; the consumed bytes are removed from *bytes.
// Intended for byte streams that interleave values of different types.
// On insufficient input an error is returned and *bytes is left untouched.
func (bigEndian) DecodeUint16s(bytes *[]byte, num uint) (vals []uint16, err error) {
	needLen := (int)(2 * num)
	if len(*bytes) < needLen {
		err = errors.New("bytes is not Enough")
		return
	}
	vals = BigEndian.BytesToUint16((*bytes)[:needLen])
	*bytes = (*bytes)[needLen:]
	return
}
// DecodeUint32s consumes num big-endian uint32 values from the front of the
// byte slice and returns them; the consumed bytes are removed from *bytes.
// On insufficient input an error is returned and *bytes is left untouched.
func (bigEndian) DecodeUint32s(bytes *[]byte, num uint) (vals []uint32, err error) {
	needLen := (int)(4 * num)
	if len(*bytes) < needLen {
		err = errors.New("bytes is not Enough")
		return
	}
	vals = BigEndian.BytesToUint32((*bytes)[0:needLen])
	*bytes = (*bytes)[needLen:]
	return
}
// DecodeFloat32s consumes num big-endian float32 values from the front of the
// byte slice and returns them; the consumed bytes are removed from *bytes.
// On insufficient input an error is returned and *bytes is left untouched.
func (bigEndian) DecodeFloat32s(bytes *[]byte, num uint) (vals []float32, err error) {
	needLen := (int)(4 * num)
	if len(*bytes) < needLen {
		err = errors.New("bytes is not Enough")
		return
	}
	fp32vals := make([]float32, num)
	for i := (uint)(0); i < num; i++ {
		fp32vals[i] = BigEndian.BytesToFloat32((*bytes)[i*4 : (i+1)*4])
	}
	*bytes = (*bytes)[needLen:]
	return fp32vals, nil
}
// BytesToUint16 converts a little endian array of bytes to an array of uint16s
func (littleEndian) BytesToUint16(bytes []byte) []uint16 {
	values := make([]uint16, len(bytes)/2)
	for i := range values {
		values[i] = binary.LittleEndian.Uint16(bytes[i*2 : (i+1)*2])
	}
	return values
}
// Uint16ToBytes converts an array of uint16s to a little endian array of bytes
func (littleEndian) Uint16ToBytes(values []uint16) []byte {
	bytes := make([]byte, len(values)*2)
	for i, value := range values {
		binary.LittleEndian.PutUint16(bytes[i*2:(i+1)*2], value)
	}
	return bytes
}
// BytesToUint32 converts a little endian array of bytes to an array of uint32s
func (littleEndian) BytesToUint32(bytes []byte) []uint32 {
	values := make([]uint32, len(bytes)/4)
	for i := range values {
		values[i] = binary.LittleEndian.Uint32(bytes[i*4 : (i+1)*4])
	}
	return values
}
// Uint32ToBytes converts an array of uint32s to a little endian array of bytes
func (littleEndian) Uint32ToBytes(values []uint32) []byte {
	bytes := make([]byte, len(values)*4)
	for i, value := range values {
		binary.LittleEndian.PutUint32(bytes[i*4:(i+1)*4], value)
	}
	return bytes
}
// BytesToFloat32 converts a little endian array of bytes to a float32
func (littleEndian) BytesToFloat32(bytes []byte) float32 {
	bits := binary.LittleEndian.Uint32(bytes)
	return math.Float32frombits(bits)
}
// Float32ToBytes converts a float32 to a little endian array of bytes
func (littleEndian) Float32ToBytes(value float32) []byte {
	bits := math.Float32bits(value)
	bytes := make([]byte, 4)
	binary.LittleEndian.PutUint32(bytes, bits)
	return bytes
}
// Float32sToBytes converts a slice of float32 values to a little endian array
// of bytes (4 bytes per value, in input order).
// The comment previously named the wrong function (Float32ToBytes).
func (littleEndian) Float32sToBytes(values []float32) []byte {
	// Preallocate the exact size: every float32 encodes to 4 bytes. The old
	// code grew an empty slice and allocated a scratch buffer per element.
	buf := make([]byte, len(values)*4)
	for i, value := range values {
		binary.LittleEndian.PutUint32(buf[i*4:(i+1)*4], math.Float32bits(value))
	}
	return buf
}
// EncodeUint16 appends a uint16 value, encoded as two little-endian bytes,
// to the end of the supplied byte slice.
func (littleEndian) EncodeUint16(bytes *[]byte, value uint16) {
	bArr := make([]byte, 2)
	binary.LittleEndian.PutUint16(bArr[0:2], value)
	*bytes = append(*bytes, bArr...)
}
// EncodeUint32 appends a uint32 value, encoded as four little-endian bytes,
// to the end of the supplied byte slice.
func (littleEndian) EncodeUint32(bytes *[]byte, value uint32) {
	bArr := make([]byte, 4)
	binary.LittleEndian.PutUint32(bArr[0:4], value)
	*bytes = append(*bytes, bArr...)
}
// EncodeFloat32 appends a float32 value, encoded as four little-endian bytes,
// to the end of the supplied byte slice.
func (littleEndian) EncodeFloat32(bytes *[]byte, value float32) {
	bArr := LittleEndian.Float32ToBytes(value)
	*bytes = append(*bytes, bArr...)
}
// DecodeUint16s consumes num little-endian uint16 values from the front of
// the byte slice and returns them; the consumed bytes are removed from *bytes.
// On insufficient input an error is returned and *bytes is left untouched.
func (littleEndian) DecodeUint16s(bytes *[]byte, num uint) (vals []uint16, err error) {
	needLen := (int)(2 * num)
	if len(*bytes) < needLen {
		err = errors.New("bytes is not Enough")
		return
	}
	vals = LittleEndian.BytesToUint16((*bytes)[:needLen])
	*bytes = (*bytes)[needLen:]
	return
}
// DecodeUint32s consumes num little-endian uint32 values from the front of
// the byte slice and returns them; the consumed bytes are removed from *bytes.
// On insufficient input an error is returned and *bytes is left untouched.
func (littleEndian) DecodeUint32s(bytes *[]byte, num uint) (vals []uint32, err error) {
	needLen := (int)(4 * num)
	if len(*bytes) < needLen {
		err = errors.New("bytes is not Enough")
		return
	}
	vals = LittleEndian.BytesToUint32((*bytes)[0:needLen])
	*bytes = (*bytes)[needLen:]
	return
}
// DecodeFloat32s consumes num little-endian float32 values from the front of
// the byte slice and returns them; the consumed bytes are removed from *bytes.
// On insufficient input an error is returned and *bytes is left untouched.
func (littleEndian) DecodeFloat32s(bytes *[]byte, num uint) (vals []float32, err error) {
	needLen := (int)(4 * num)
	if len(*bytes) < needLen {
		err = errors.New("bytes is not Enough")
		return
	}
	fp32vals := make([]float32, num)
	for i := (uint)(0); i < num; i++ {
		fp32vals[i] = LittleEndian.BytesToFloat32((*bytes)[i*4 : (i+1)*4])
	}
	*bytes = (*bytes)[needLen:]
	return fp32vals, nil
}
package dev
import (
"errors"
"time"
"golang.org/x/exp/io/i2c"
)
const (
	// ConversionRegiserPointer selects the conversion (result) register.
	ConversionRegiserPointer byte = 0x00
	// ConfigRegiserPointer selects the 16-bit configuration register.
	ConfigRegiserPointer byte = 0x01
	//LoThreshRegiserPointer selects the low-threshold register.
	LoThreshRegiserPointer byte = 0x10
	// HiThreshRegiserPointer selects the high-threshold register.
	HiThreshRegiserPointer byte = 0x11
	// ComparatorQueueAssertAfterOne Assert after one conversion
	ComparatorQueueAssertAfterOne uint16 = 0x0000
	// ComparatorQueueAssertAfterTwo Assert after two conversions
	ComparatorQueueAssertAfterTwo uint16 = 0x0001
	// ComparatorQueueAssertAfterFour Assert after four conversions
	ComparatorQueueAssertAfterFour uint16 = 0x0002
	// ComparatorQueueDisable Disable comparator and set ALERT/RDY pin to high-impedance (default)
	ComparatorQueueDisable uint16 = 0x0003
	// LatchingComparatorLatching The ALERT/RDY pin does not latch when asserted (default)
	// NOTE(review): the identifier names of these two constants appear swapped
	// relative to their comments/values — confirm against the ADS1015 datasheet.
	LatchingComparatorLatching uint16 = 0x0000
	// LatchingComparatorNonLatching The asserted ALERT/RDY pin remains latched until
	LatchingComparatorNonLatching uint16 = 0x0004
	// ComparatorPolarityActiveLow This bit controls the polarity of the ALERT/RDY pin (default)
	ComparatorPolarityActiveLow uint16 = 0x0000
	// ComparatorPolarityActiveHigh This bit controls the polarity of the ALERT/RDY pin
	ComparatorPolarityActiveHigh uint16 = 0x0008
	// ComparatorModeTraditional this bit configures the comparator operating mode. (default)
	ComparatorModeTraditional uint16 = 0x0000
	// ComparatorModeWindow this bit configures the comparator operating mode.
	ComparatorModeWindow uint16 = 0x0010
	// // OperationalStatus determines the operational status of the device. OS can only be written
	// // when in power-down state and has no effect when a conversion is ongoing
	// OperationalStatus uint16 = 0x8000
	// // RegisterPointerConfig ...
	// RegisterPointerConfig byte = 0x01
	// // RegisterConversionConfig Conversion register contains the result of the last conversion in binary two's complement format.
	// RegisterConversionConfig byte = 0x00
	// DataRate128 control the data rate setting. 128 Samples Per Second
	DataRate128 uint16 = 0x0000
	// DataRate250 control the data rate setting. 250 Samples Per Second
	DataRate250 uint16 = 0x0020
	// DataRate490 control the data rate setting. 490 Samples Per Second
	DataRate490 uint16 = 0x0040
	// DataRate920 control the data rate setting. 920 Samples Per Second
	DataRate920 uint16 = 0x0060
	// DataRate1600 control the data rate setting. 1600 Samples Per Second
	DataRate1600 uint16 = 0x0080
	// DataRate2400 control the data rate setting. 2400 Samples Per Second
	DataRate2400 uint16 = 0x00A0
	// DataRate3300_0 control the data rate setting. 3300 Samples Per Second
	DataRate3300_0 uint16 = 0x00C0
	// DataRate3300_1 control the data rate setting. 3300 Samples Per Second
	DataRate3300_1 uint16 = 0x00E0
	// DeviceOperationModeContinous Continuous-conversion mode
	DeviceOperationModeContinous uint16 = 0x0000
	// DeviceOperationModeSingleShot Single-shot mode or power-down state
	DeviceOperationModeSingleShot uint16 = 0x0100
	// ProgramableGainAmplifier6144 These bits set the FSR of the programmable gain amplifier. For voltages in the range ±6.144
	ProgramableGainAmplifier6144 uint16 = 0x0000
	// ProgramableGainAmplifier4096 set the FSR of the programmable gain amplifier. For voltages in the range ±4.096
	ProgramableGainAmplifier4096 uint16 = 0x0200
	// ProgramableGainAmplifier2048 set the FSR of the programmable gain amplifier. For voltages in the range ±2.048
	ProgramableGainAmplifier2048 uint16 = 0x0400
	// ProgramableGainAmplifier1024 set the FSR of the programmable gain amplifier. For voltages in the range ±1.024
	ProgramableGainAmplifier1024 uint16 = 0x0600
	// ProgramableGainAmplifier0512 set the FSR of the programmable gain amplifier. For voltages in the range ±0.512
	ProgramableGainAmplifier0512 uint16 = 0x0800
	// ProgramableGainAmplifier0256_0 set the FSR of the programmable gain amplifier. For voltages in the range ±0.256
	ProgramableGainAmplifier0256_0 uint16 = 0x0A00
	// ProgramableGainAmplifier0256_1 set the FSR of the programmable gain amplifier. For voltages in the range ±0.256
	ProgramableGainAmplifier0256_1 uint16 = 0x0C00
	// ProgramableGainAmplifier0256_2 set the FSR of the programmable gain amplifier. For voltages in the range ±0.256
	ProgramableGainAmplifier0256_2 uint16 = 0x0E00
	// MultiplexerConfigurationAIN0 AINP = AIN0 and AINN = GND
	MultiplexerConfigurationAIN0 uint16 = 0x4000
	// MultiplexerConfigurationAIN1 AINP = AIN1 and AINN = GND
	MultiplexerConfigurationAIN1 uint16 = 0x5000
	// MultiplexerConfigurationAIN2 AIN2 = AIN2 and AINN = GND
	MultiplexerConfigurationAIN2 uint16 = 0x6000
	// MultiplexerConfigurationAIN3 AIN3 = AIN3 and AINN = GND
	MultiplexerConfigurationAIN3 uint16 = 0x7000
)
const (
	// I2C bus device file and the ADS1015's default I2C address.
	ads1015DevFile = "/dev/i2c-1"
	addrADS1015    = 0x48
)
var (
	// channelMuxConfig maps a single-ended input channel number (0-3)
	// to the corresponding multiplexer bits of the config register.
	channelMuxConfig = map[int]uint16{
		0: MultiplexerConfigurationAIN0,
		1: MultiplexerConfigurationAIN1,
		2: MultiplexerConfigurationAIN2,
		3: MultiplexerConfigurationAIN3,
	}
	// defaultConfig: comparator disabled, continuous conversion,
	// 3300 SPS, ±6.144 V full-scale range.
	defaultConfig = ComparatorQueueDisable | LatchingComparatorLatching | ComparatorPolarityActiveLow | ComparatorModeTraditional | DataRate3300_0 | DeviceOperationModeContinous | ProgramableGainAmplifier6144
)
// ADS1015 is a driver for the ADS1015 analog-to-digital converter on I2C.
type ADS1015 struct {
	dev    *i2c.Device // open I2C handle to the chip
	config uint16      // config-register bits applied on every Read
}
// NewADS1015 opens the I2C device and returns a driver using defaultConfig.
// It implements the AnalogDigitalConverter interface.
func NewADS1015() (*ADS1015, error) {
	dev, err := i2c.Open(&i2c.Devfs{Dev: ads1015DevFile}, addrADS1015)
	if err != nil {
		return nil, err
	}
	return &ADS1015{
		dev:    dev,
		config: defaultConfig,
	}, nil
}
// SetConfig replaces the config-register bits used by subsequent Reads.
func (m *ADS1015) SetConfig(config uint16) {
	m.config = config
}
// Read selects the given single-ended input channel (0-3), triggers/reads a
// conversion, and returns the measured value scaled to volts.
func (m *ADS1015) Read(channel int) (float64, error) {
	mux, ok := channelMuxConfig[channel]
	if !ok {
		return 0, errors.New("invalid channel number, should be 0~3")
	}
	// Write the 16-bit config register MSB-first to select the channel.
	conf := m.config | mux
	hiByte := byte(conf >> 8)
	loByte := byte(conf & 0x00FF)
	if err := m.dev.WriteReg(ConfigRegiserPointer, []byte{hiByte, loByte}); err != nil {
		return 0, err
	}
	// Allow the ADC time to produce a fresh sample before reading it back.
	time.Sleep(100 * time.Microsecond)
	data := make([]byte, 2)
	if err := m.dev.ReadReg(ConversionRegiserPointer, data); err != nil {
		return 0, err
	}
	// NOTE(review): the conversion register is two's complement; this treats
	// it as unsigned, so negative readings would wrap — confirm only positive
	// single-ended inputs are used with this driver.
	val := (uint32(data[0]) << 8) | uint32(data[1])
	// Scale by the ±6.144 V full-scale range. The previous expression
	// float64(val*6144/1000) performed integer division first, truncating
	// the result to whole millivolt-of-full-scale steps.
	v := float64(val) * 6.144 / 32768.0
	return v, nil
}
// Close releases the underlying I2C device handle.
// Note: any error from the close is discarded.
func (m *ADS1015) Close() {
	m.dev.Close()
}
package equileader
import "math"
// We utilize the Leader implementation internals
// to maintain a list of subleaders in a map
// thirdpartySolution counts the positions at which A can be split into two
// non-empty halves that share the same leader (a value occurring in more than
// half of each part). It first finds the leader of the whole array via the
// stack-simulation used by Leader, then scans left-to-right keeping running
// counts for both sides of each candidate split point.
func thirdpartySolution(A []int) int {
	leadersCount := 0
	arrayLen := len(A)
	l := NewIntStack(arrayLen)
	candidate := -1
	leader := -1
	count := 0
	leftLeadersCount := 0
	leftSequenceLength := 0
	rightSequenceLength := 0
	if arrayLen == 1 {
		return 0
	}
	for _, value := range A {
		//O(n)
		if l.size == 0 {
			l.Push(value)
		} else {
			if value != l.Front() {
				l.Pop()
			} else {
				l.Push(value)
			}
		}
	}
	if l.size > 0 {
		candidate = l.Front()
	} else {
		// Empty stack means no value can be a leader, so no equi-leader exists.
		return 0
	}
	for _, value := range A {
		if value == candidate {
			count += 1
		}
	}
	if count > int(math.Floor(float64(arrayLen)/2.0)) {
		leader = candidate
	} else {
		return 0
	}
	for i, value := range A {
		if value == leader {
			// The key here is to compare the current leader with the current value
			leftLeadersCount += 1
		}
		/* validate left sequence */
		leftSequenceLength = i + 1
		if leftLeadersCount > int(math.Floor(float64(leftSequenceLength)/2.0)) {
			/* validate right sequence */
			rightSequenceLength = arrayLen - leftSequenceLength
			// Here we check if the remaining leaders count on the left side is more than have of the remaining on the right side
			if count-leftLeadersCount > int(math.Floor(float64(rightSequenceLength)/2.0)) {
				/* both sequences have valid leaders of the same value */
				leadersCount += 1
			}
		}
	}
	return leadersCount
}
// Leader returns the value that occurs in more than half of the elements of
// A, or -1 if no such value exists. It uses the Boyer-Moore majority-vote
// algorithm: O(n) time and O(1) extra space, replacing the previous
// stack-based simulation which allocated O(n) auxiliary memory.
func Leader(A []int) int {
	arrayLen := len(A)
	if arrayLen == 1 {
		return A[0]
	}
	// Phase 1: keep a candidate and a vote balance. Matching elements
	// increment the balance, mismatches decrement it; a zero balance
	// installs the current element as the new candidate.
	candidate := -1
	balance := 0
	for _, value := range A {
		switch {
		case balance == 0:
			candidate = value
			balance = 1
		case value == candidate:
			balance++
		default:
			balance--
		}
	}
	// A zero final balance means no value can be a majority element.
	if balance == 0 {
		return -1
	}
	// Phase 2: verify the surviving candidate really is a leader.
	count := 0
	for _, value := range A {
		if value == candidate {
			count++
		}
	}
	if count > arrayLen/2 {
		return candidate
	}
	return -1
}
// IntStack is a fixed-capacity LIFO stack of ints backed by a slice.
type IntStack struct {
	size int   // number of elements currently on the stack
	data []int // backing storage; capacity fixed at construction
}
// NewIntStack creates a stack able to hold up to len elements.
// (Note: the parameter name shadows the builtin len inside this function.)
func NewIntStack(len int) *IntStack {
	return &IntStack{
		size: 0,
		data: make([]int, len),
	}
}
// Push places item on top of the stack.
// If the stack is already full the item is silently dropped.
func (s *IntStack) Push(item int) {
	if s.size < len(s.data) {
		s.data[s.size] = item
		s.size++
	}
}
// Pop removes and returns the top item.
// Calling Pop on an empty stack panics (index out of range).
func (s *IntStack) Pop() int {
	item := s.data[s.size-1]
	s.size -= 1
	return item
}
// Front returns the top item without removing it.
// Calling Front on an empty stack panics (index out of range).
func (s *IntStack) Front() int {
	return s.data[s.size-1]
}
package transactioncounter
import (
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/amoskyler/fake_stock_alerts/transaction"
)
type (
	// TickerMap is a hash keyed by the Ticker id with a value being the total count of transactions
	TickerMap map[transaction.Ticker]int
	// Counter is responsible for enumerating and describing properties of a group of Transactions
	Counter struct {
		transactions         []transaction.Transaction // all transactions ever added
		TickerMap            TickerMap                 // net buy/sell count per ticker
		filteredTransactions []transaction.Transaction // result of the most recent applyFilters pass
	}
)
// New constructs a new transactioncounter - it builds the counter with copies
// of the transactions. The slice is actually cloned now (it was previously
// aliased, so later mutations of the caller's slice leaked into the counter,
// contradicting this documented contract).
func New(transactions []transaction.Transaction) *Counter {
	owned := make([]transaction.Transaction, len(transactions))
	copy(owned, transactions)
	counter := &Counter{owned, map[transaction.Ticker]int{}, []transaction.Transaction{}}
	return counter
}
// GetTransactions returns a copy of the internal transaction slice.
// A real copy is returned now; previously the internal slice itself leaked
// out, letting callers mutate the counter's state despite the documentation.
func (counter *Counter) GetTransactions() []transaction.Transaction {
	out := make([]transaction.Transaction, len(counter.transactions))
	copy(out, counter.transactions)
	return out
}
// AddTransactions adds a slice of transactions to the transaction slice - if applyTransaction is true, returns the updated transaction TickerMap
func (counter *Counter) AddTransactions(transactions []transaction.Transaction, applyTransactions bool) (TickerMap, error) {
	// Each transaction is appended without applying; a single recomputation
	// at the end (if requested) keeps this O(n) instead of O(n^2).
	for _, t := range transactions {
		counter.AddTransaction(t, false)
	}
	if applyTransactions {
		return counter.ApplyAllTransactions(true)
	}
	return counter.TickerMap, nil
}
// AddTransaction adds a transaction to the transaction slice - if applyTransaction is true, returns the updated transaction TickerMap
func (counter *Counter) AddTransaction(t transaction.Transaction, applyTransaction bool) (TickerMap, error) {
	counter.transactions = append(counter.transactions, t)
	if applyTransaction {
		return counter.ApplyAllTransactions(true)
	}
	// With applyTransaction == false this path never produces an error.
	return counter.TickerMap, nil
}
// ApplyAllTransactions naively calculates the TickerMap.
// If a true applyFilters parameter is passed in, the TickerMap is calculated
// with a refreshed filteredTransactions list.
func (counter *Counter) ApplyAllTransactions(applyFilters bool) (TickerMap, error) {
	var transactions *[]transaction.Transaction
	if applyFilters {
		transactions = counter.applyFilters()
	} else {
		transactions = &counter.transactions
	}
	// Loop variable named t so it does not shadow the imported
	// "transaction" package (the previous name did).
	for i, t := range *transactions {
		if ok := validateTransaction(t); !ok {
			// fmt.Errorf formats directly; no need for a nested Sprintf.
			return counter.TickerMap, fmt.Errorf("Invalid transaction %+v", t)
		}
		if _, err := counter.ApplyTransaction(t); err != nil {
			// %w preserves the underlying error for errors.Is/As.
			return counter.TickerMap, fmt.Errorf("Invalid transaction on index %d of %d. PrevErr: %w", i, len(counter.transactions), err)
		}
	}
	return counter.TickerMap, nil
}
// applyFilters rebuilds filteredTransactions, keeping only the transactions
// that pass applyFilter, and returns a pointer to the filtered slice.
func (counter *Counter) applyFilters() *[]transaction.Transaction {
	filtered := &[]transaction.Transaction{}
	for _, t := range counter.transactions {
		if ok := applyFilter(t); !ok {
			continue
		}
		*filtered = append(*filtered, t)
	}
	counter.filteredTransactions = *filtered
	return filtered
}
// applyFilter reports whether a transaction passes the filter set.
// Currently the only filter drops transactions older than one week.
func applyFilter(t transaction.Transaction) bool {
	// apply filters here...
	elapsed := time.Since(t.Date)
	if elapsed > time.Hour*24*7 {
		return false
	}
	return true
}
// validateTransaction reports whether a transaction is valid;
// transactions dated in the future are rejected.
func validateTransaction(t transaction.Transaction) bool {
	if time.Now().Before(t.Date) {
		return false
	}
	return true
}
// ApplyTransaction applies an individual transaction to the TickerMap.
// Buys increment and sells decrement the ticker's net count; a ticker whose
// count returns to zero is removed from the map entirely.
// Note: this does not pass through the filter logic.
func (counter *Counter) ApplyTransaction(t transaction.Transaction) (TickerMap, error) {
	tickerMap := counter.TickerMap
	if _, ok := tickerMap[t.Ticker]; !ok {
		tickerMap[t.Ticker] = 0
	}
	// Go switch cases do not fall through, so the previous explicit
	// break statements were redundant and have been removed.
	switch t.Type {
	case transaction.Buy:
		tickerMap[t.Ticker]++
	case transaction.Sell:
		tickerMap[t.Ticker]--
	default:
		return tickerMap, errors.New("Unsupported transaction type")
	}
	if tickerMap[t.Ticker] == 0 {
		delete(tickerMap, t.Ticker)
	}
	counter.TickerMap = tickerMap
	return tickerMap, nil
}
// ToSprintf verbosly returns the transactions associated to a counter
func (counter *Counter) ToSprintf() string {
transactions := counter.transactions
out := ""
for i, t := range transactions {
out += fmt.Sprintf("Transaction-%d:\n\tDate: %s\n\tTicker: %s\n\tType: %s\n", i, t.Date, t.Ticker, t.Type)
}
return out
} | transactioncounter/transactioncounter.go | 0.807537 | 0.448004 | transactioncounter.go | starcoder |
package gokalman
import (
"fmt"
"strings"
"github.com/gonum/matrix/mat64"
"github.com/gonum/stat"
)
// MonteCarloRuns stores MC runs.
type MonteCarloRuns struct {
	runs, steps int             // number of samples and time steps per sample
	Runs        []MonteCarloRun // one entry per Monte Carlo sample
}
// Mean returns the mean of all the samples for the given time step.
func (mc MonteCarloRuns) Mean(step int) (mean []float64) {
	// Take the first run in order to know the size.
	states := make(map[int][]float64)
	rows, _ := mc.Runs[0].Estimates[0].State().Dims()
	for i := 0; i < rows; i++ {
		states[i] = make([]float64, len(mc.Runs))
	}
	// Gather information: states[i][r] is row i of run r's state at this step.
	for r, run := range mc.Runs {
		state := run.Estimates[step].State()
		for i := 0; i < rows; i++ {
			states[i][r] = state.At(i, 0)
		}
	}
	// Per-row mean across all runs.
	means := make([]float64, rows)
	for i := 0; i < rows; i++ {
		means[i] = stat.Mean(states[i], nil)
	}
	return means
}
// StdDev returns the standard deviation of all the samples for the given
// time step, one value per row of the state vector.
// The named result is now devs; it was previously named mean, which was
// misleading and shadowed by a local variable.
func (mc MonteCarloRuns) StdDev(step int) (devs []float64) {
	// Take the first run in order to know the state-vector size.
	states := make(map[int][]float64)
	rows, _ := mc.Runs[0].Estimates[0].State().Dims()
	for i := 0; i < rows; i++ {
		states[i] = make([]float64, len(mc.Runs))
	}
	// Gather information: states[i][r] is row i of run r's state at this step.
	for r, run := range mc.Runs {
		state := run.Estimates[step].State()
		for i := 0; i < rows; i++ {
			states[i][r] = state.At(i, 0)
		}
	}
	// Per-row standard deviation across all runs.
	devs = make([]float64, rows)
	for i := 0; i < rows; i++ {
		devs[i] = stat.StdDev(states[i], nil)
	}
	return devs
}
// AsCSV is used as a CSV serializer. Does not include the header.
// One CSV document is returned per state-vector row: columns are the value of
// that row in each run, followed by the per-step mean and standard deviation.
func (mc MonteCarloRuns) AsCSV(headers []string) []string {
	rows, _ := mc.Runs[0].Estimates[0].State().Dims()
	rtn := make([]string, rows)
	for i := 0; i < rows; i++ {
		header := headers[i]
		lines := make([]string, mc.steps+1) // One line per step, plus header.
		for rNo := 0; rNo < mc.runs; rNo++ {
			lines[0] += fmt.Sprintf("%s-%d,", header, rNo)
		}
		lines[0] += header + "-mean," + header + "-stddev"
		for k := 0; k < mc.steps; k++ {
			for rNo, run := range mc.Runs {
				lines[k+1] += fmt.Sprintf("%f,", run.Estimates[k].State().At(i, 0))
				if rNo == mc.runs-1 {
					// Last run reached, let's add the mean and stddev for this step.
					mean := mc.Mean(k)
					stddev := mc.StdDev(k)
					lines[k+1] += fmt.Sprintf("%f,%f", mean[i], stddev[i])
				}
			}
		}
		rtn[i] = strings.Join(lines, "\n")
	}
	return rtn
}
// NewMonteCarloRuns run monte carlos on the provided filter.
// The filter must be prediction-only; it is Reset between samples. Passing a
// single control vector expands it to a zero control for every step.
func NewMonteCarloRuns(samples, steps, rowsH int, controls []*mat64.Vector, kf *Vanilla) MonteCarloRuns {
	if !kf.predictionOnly {
		panic("the Kalman filter needed for the Monte Carlo runs must be a pure predictor")
	}
	runs := make([]MonteCarloRun, samples)
	if len(controls) == 1 {
		ctrlSize, _ := controls[0].Dims()
		controls = make([]*mat64.Vector, steps)
		// Populate with zero controls
		for k := 0; k < steps; k++ {
			controls[k] = mat64.NewVector(ctrlSize, nil)
		}
	} else if len(controls) != steps {
		panic("must provide as much control vectors as steps, or just one control vector")
	}
	for sample := 0; sample < samples; sample++ {
		MCRun := MonteCarloRun{Estimates: make([]Estimate, steps)}
		for k := 0; k < steps; k++ {
			// Update with a zero measurement: the filter is prediction-only.
			est, _ := kf.Update(mat64.NewVector(rowsH, nil), controls[k])
			MCRun.Estimates[k] = est
		}
		runs[sample] = MCRun
		// Must reinitialize the KF at every new sample.
		kf.Reset()
	}
	return MonteCarloRuns{samples, steps, runs}
}
// MonteCarloRun stores the results of an MC run.
type MonteCarloRun struct {
	Estimates []Estimate // one filter estimate per time step
}
package main
import (
"fmt"
"strings"
"github.com/AntonKosov/advent-of-code-2021/aoc"
)
/*
All algorithms for all digits are similar. There are differences in three variables only and z which
is the only value which goes outside. This algorithm may be simplified.
inp w | w = [1..9]
mul x 0 | x = 0
add x z | x = x + z
mod x 26 | x = x % 26
div z 1 <- a | z = z / a
add x 14 <- b | x = x + b
eql x w | x = x == w ? 1 : 0
eql x 0 | x = 1 - x // ("not" x)
mul y 0 | y = 0
add y 25 | y = y + 25
mul y x | y = y * x
add y 1 | y = y + 1
mul z y | z = z * y
mul y 0 | y = 0
add y w | y = y + w
add y 0 <- c | y = y + c
mul y x | y = y * x
add z y | z = z * y
So, here is a simplified version:
z = <the result of previous iteration>
w = <digit>
x = z%26 + b
if x == w {
z = z/a
} else {
z = z/a*26 + w + c
}
*/
func main() {
	data := read()
	r := process(data)
	fmt.Printf("Answer: %v\n", r)
}
// digits is the length of the MONAD model number.
const digits = 14
// args holds the three per-digit constants that vary between the 14
// otherwise-identical instruction blocks of the puzzle program.
type args struct {
	a, b, c int
}
// read extracts the (a, b, c) constants for each digit from the puzzle
// input: each digit's block is 18 lines long, with the constants on the
// 5th, 6th and 16th line of the block.
func read() (arguments [digits]args) {
	lines := aoc.ReadAllInput()
	for i := 0; i < digits; i++ {
		a := aoc.StrToInt(strings.Split(lines[i*18+4], " ")[2])
		b := aoc.StrToInt(strings.Split(lines[i*18+5], " ")[2])
		c := aoc.StrToInt(strings.Split(lines[i*18+15], " ")[2])
		arguments[i] = args{a: a, b: b, c: c}
	}
	return arguments
}
// process runs the memoized search over all model numbers accepted by the
// program (z == 0 after the last digit) and returns the largest one.
func process(data [digits]args) int64 {
	solvedVariants := map[variant][]int64{}
	variants := find(0, &data, 0, solvedVariants)
	var max int64
	for _, v := range variants {
		if max < v {
			max = v
		}
	}
	return max
}
// variant is the memoization key for find: the digit position being decided
// and the z value carried in from the previous digits.
type variant struct {
	digitIndex int
	z          int
}
// pows[i] is the place value (power of ten) of digit i in a 14-digit number.
var pows [digits]int64
func init() {
	pows[digits-1] = 1
	for i := digits - 2; i >= 0; i-- {
		pows[i] = pows[i+1] * 10
	}
}
// find returns every accepted number suffix (as an int64 with the correct
// place values) reachable from the given z value at the given digit position.
// Results are memoized in solvedVariants keyed by (digitIndex, z).
func find(inputZ int, args *[digits]args, digitIndex int, solvedVariants map[variant][]int64) []int64 {
	startVariant := variant{digitIndex: digitIndex, z: inputZ}
	if variants, ok := solvedVariants[startVariant]; ok {
		return variants
	}
	variants := []int64{}
	arguments := args[digitIndex]
	for w := 1; w <= 9; w++ {
		if digitIndex < 2 {
			// Progress logging for the two outermost digits only.
			fmt.Printf("digitIndex: %v, d=%v, cache=%v\n", digitIndex, w, len(solvedVariants))
		}
		// Simplified MONAD step (see the file-header derivation).
		z := inputZ
		x := z%26 + arguments.b
		if x == w {
			z = z / arguments.a
		} else {
			z = z/arguments.a*26 + w + arguments.c
		}
		if digitIndex == digits-1 {
			// A model number is accepted only when z ends at 0.
			if z == 0 {
				variants = append(variants, int64(w))
			}
			continue
		}
		sv := find(z, args, digitIndex+1, solvedVariants)
		firstDigit := pows[digitIndex] * int64(w)
		for _, v := range sv {
			variants = append(variants, firstDigit+v)
		}
		// Don't need to look for other values
		if digitIndex == 0 && len(variants) > 0 {
			return variants
		}
	}
	solvedVariants[startVariant] = variants
	return variants
}
package float
import (
"errors"
"github.com/itrabbit/go-stp/conversion"
"reflect"
"strconv"
"time"
)
// Type returns the reflect.Type of the float with the requested bit size:
// 64 yields float64, while 32 — or any other value — yields float32.
func Type(bitSize int) reflect.Type {
	if bitSize == 64 {
		return reflect.TypeOf(float64(0))
	}
	// Both the explicit 32 case and the original default fall through here.
	return reflect.TypeOf(float32(0))
}
// From converts obj to a float64 via the registered conversion methods.
// bitSize selects which target float type (32 or 64) the conversion is
// registered for; the result is always widened to float64.
func From(obj interface{}, bitSize int) (float64, error) {
	t := reflect.TypeOf(obj)
	// Fast path: obj already has exactly the requested float type.
	if t == Type(bitSize) {
		return reflect.ValueOf(obj).Float(), nil
	}
	m, err := conversion.GetMethod(t, Type(bitSize))
	if err != nil {
		return 0, err
	}
	res, err := m(obj)
	if err != nil {
		return 0, err
	}
	return reflect.ValueOf(res).Float(), nil
}
// FromByDef converts obj to a float64 like From, but returns the provided
// default value instead of an error when no conversion is possible.
func FromByDef(obj interface{}, bitSize int, def float64) float64 {
	res, err := From(obj, bitSize)
	if err != nil {
		return def
	}
	return res
}
func init() {
// Float 32 To Float 64
conversion.SetMethod(Type(32), Type(64), func(obj interface{}) (interface{}, error) {
if f, ok := obj.(float32); ok {
return float64(f), nil
}
return nil, errors.New("Conversion object is not float32")
})
// Float 64 To Float 32
conversion.SetMethod(Type(64), Type(32), func(obj interface{}) (interface{}, error) {
if f, ok := obj.(float64); ok {
return float32(f), nil
}
return nil, errors.New("Conversion object is not float64")
})
// Signed Integers to Float 32
conversion.SetMultiMethod([]reflect.Type{
reflect.TypeOf(int(0)),
reflect.TypeOf(int8(0)),
reflect.TypeOf(int16(0)),
reflect.TypeOf(int32(0)),
reflect.TypeOf(int64(0)),
}, Type(32), func(obj interface{}) (interface{}, error) {
return float32(reflect.ValueOf(obj).Int()), nil
})
// Unsigned Integers to Float 32
conversion.SetMultiMethod([]reflect.Type{
reflect.TypeOf(uint(0)),
reflect.TypeOf(uint8(0)),
reflect.TypeOf(uint16(0)),
reflect.TypeOf(uint32(0)),
reflect.TypeOf(uint64(0)),
}, Type(32), func(obj interface{}) (interface{}, error) {
return float32(reflect.ValueOf(obj).Uint()), nil
})
// Signed Integers to Float 64
conversion.SetMultiMethod([]reflect.Type{
reflect.TypeOf(int(0)),
reflect.TypeOf(int8(0)),
reflect.TypeOf(int16(0)),
reflect.TypeOf(int32(0)),
reflect.TypeOf(int64(0)),
}, Type(64), func(obj interface{}) (interface{}, error) {
return float64(reflect.ValueOf(obj).Int()), nil
})
// Unsigned Integers to Float 64
conversion.SetMultiMethod([]reflect.Type{
reflect.TypeOf(uint(0)),
reflect.TypeOf(uint8(0)),
reflect.TypeOf(uint16(0)),
reflect.TypeOf(uint32(0)),
reflect.TypeOf(uint64(0)),
}, Type(64), func(obj interface{}) (interface{}, error) {
return float64(reflect.ValueOf(obj).Uint()), nil
})
// String To Float 32
conversion.SetMethod(reflect.TypeOf(string("")), Type(32), func(obj interface{}) (interface{}, error) {
if s, ok := obj.(string); ok {
f, err := strconv.ParseFloat(s, 32)
if err != nil {
return nil, err
}
return float32(f), nil
}
return nil, errors.New("Conversion object is not string")
})
// String To Float 64
conversion.SetMethod(reflect.TypeOf(string("")), Type(64), func(obj interface{}) (interface{}, error) {
if s, ok := obj.(string); ok {
return strconv.ParseFloat(s, 64)
}
return nil, errors.New("Conversion object is not string")
})
// Bool to Float 32
conversion.SetMethod(reflect.TypeOf(bool(false)), Type(32), func(obj interface{}) (interface{}, error) {
if b, ok := obj.(bool); ok {
if b {
return float32(1.0), nil
}
return float32(0.0), nil
}
return nil, errors.New("Conversion object is not bool")
})
// Bool to Float 64
conversion.SetMethod(reflect.TypeOf(bool(false)), Type(32), func(obj interface{}) (interface{}, error) {
if b, ok := obj.(bool); ok {
if b {
return float64(1.0), nil
}
return float64(0.0), nil
}
return nil, errors.New("Conversion object is not bool")
})
// time.Time To Float 32
conversion.SetMethod(reflect.TypeOf(time.Time{}), Type(32), func(obj interface{}) (interface{}, error) {
if t, ok := obj.(time.Time); ok {
return float32(t.Unix()), nil
}
return nil, errors.New("Conversion object is not time.Time")
})
// time.Time To Float 64
conversion.SetMethod(reflect.TypeOf(time.Time{}), Type(64), func(obj interface{}) (interface{}, error) {
if t, ok := obj.(time.Time); ok {
return float64(t.Unix()), nil
}
return nil, errors.New("Conversion object is not time.Time")
})
} | conversion/float/float.go | 0.6508 | 0.446133 | float.go | starcoder |
package gocvsimd
import (
"unsafe"
)
// Assembly implementations (see the accompanying .s file). Each //go:noescape
// directive must immediately precede its declaration, so descriptions live here.
// _SimdSse2AbsDifferenceSum: sum of |a-b| over a width*height 8-bit image pair.
// _SimdSse2AbsDifferenceSumMasked: same, restricted to pixels where mask==index.
// _SimdSse2AbsDifferenceSums3x3: 9 sums for the 3x3 neighborhood shifts.
// _SimdSse2AbsDifferenceSums3x3Masked: masked variant of the 3x3 sums.
//go:noescape
func _SimdSse2AbsDifferenceSum(a unsafe.Pointer, aStride uint64, b unsafe.Pointer, bStride uint64, width, height uint64, sum unsafe.Pointer)
//go:noescape
func _SimdSse2AbsDifferenceSumMasked(a unsafe.Pointer, aStride uint64, b unsafe.Pointer, bStride uint64, mask unsafe.Pointer, maskStride uint64, index uint64/*uint8*/, width, height uint64, sum unsafe.Pointer)
//go:noescape
func _SimdSse2AbsDifferenceSums3x3(current unsafe.Pointer, currentStride uint64, background unsafe.Pointer, backgroundStride uint64, width, height uint64, sums unsafe.Pointer)
//go:noescape
func _SimdSse2AbsDifferenceSums3x3Masked(current unsafe.Pointer, currentStride uint64, background unsafe.Pointer, backgroundStride uint64, mask unsafe.Pointer, maskStride uint64, index uint64/*uint8*/, width, height uint64, sums unsafe.Pointer)
// SimdSse2AbsDifferenceSum gets sum of absolute difference of two gray 8-bit images.
// Both images must have the same width and height.
func SimdSse2AbsDifferenceSum(a, b View) uint64 {
	// The assembly routine writes the result through the sum pointer.
	sum := uint64(0)
	_SimdSse2AbsDifferenceSum(a.GetData(), uint64(a.GetStride()), b.GetData(), uint64(b.GetStride()), uint64(a.GetWidth()), uint64(a.GetHeight()), unsafe.Pointer(&sum))
	return sum
}
// SimdSse2AbsDifferenceSumMasked gets sum of absolute difference of two gray 8-bit images based on gray 8-bit mask.
// Gets the absolute difference sum for all points when mask[i] == index.
// Both images and mask must have the same width and height.
func SimdSse2AbsDifferenceSumMasked(a, b, mask View, index uint64/*uint8*/) uint64 {
	sum := uint64(0)
	_SimdSse2AbsDifferenceSumMasked(a.GetData(), uint64(a.GetStride()), b.GetData(), uint64(b.GetStride()), mask.GetData(), uint64(mask.GetStride()), index, uint64(a.GetWidth()), uint64(a.GetHeight()), unsafe.Pointer(&sum))
	return sum
}
// SimdSse2AbsDifferenceSums3x3 gets 9 sums of absolute difference of two gray 8-bit images with various relative shifts in neighborhood 3x3.
// Both images must have the same width and height. The image height and width must be equal or greater 3.
// The sums are calculated with central part (indent width = 1) of the current image and with part of the background image with corresponding shift.
// The shifts are lain in the range [-1, 1] for axis x and y.
func SimdSse2AbsDifferenceSums3x3(current, background View) [9]uint64 {
	// The assembly routine fills all 9 entries through the sums pointer.
	sums := [9]uint64{}
	_SimdSse2AbsDifferenceSums3x3(current.GetData(), uint64(current.GetStride()), background.GetData(), uint64(background.GetStride()), uint64(current.GetWidth()), uint64(current.GetHeight()), unsafe.Pointer(&sums[0]))
	return sums
}
// SimdSse2AbsDifferenceSums3x3Masked gets 9 sums of absolute difference of two gray 8-bit images with various relative shifts in neighborhood 3x3 based on gray 8-bit mask.
// Gets the absolute difference sums for all points when mask[i] == index.
// Both images and mask must have the same width and height. The image height and width must be equal or greater 3.
// The sums are calculated with central part (indent width = 1) of the current image and with part of the background image with the corresponding shift.
// The shifts are lain in the range [-1, 1] for axis x and y.
func SimdSse2AbsDifferenceSums3x3Masked(current, background, mask View, index uint64/*uint8*/) [9]uint64 {
	sums := [9]uint64{}
	_SimdSse2AbsDifferenceSums3x3Masked(current.GetData(), uint64(current.GetStride()), background.GetData(), uint64(background.GetStride()), mask.GetData(), uint64(mask.GetStride()), index, uint64(current.GetWidth()), uint64(current.GetHeight()), unsafe.Pointer(&sums[0]))
	return sums
}
package iso20022
// TotalTaxes3 carries information regarding the total amount of taxes,
// as defined by the ISO 20022 message schema (XML element names in tags).
type TotalTaxes3 struct {
	// Total value of the taxes for a specific order.
	TotalAmountOfTaxes *ActiveCurrencyAnd13DecimalAmount `xml:"TtlAmtOfTaxs,omitempty"`
	// Amount included in the dividend that corresponds to gains directly or indirectly derived from interest payment in the scope of the European Directive on taxation of savings income in the form of interest payments.
	TaxableIncomePerDividend *ActiveCurrencyAndAmount `xml:"TaxblIncmPerDvdd,omitempty"`
	// Specifies whether capital gain is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June), or an income realised upon sale, a refund or redemption of shares and units, etc.
	EUCapitalGain *EUCapitalGain2Code `xml:"EUCptlGn,omitempty"`
	// Specifies whether capital gain is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June), or an income realised upon sale, a refund or redemption of shares and units, etc.
	ExtendedEUCapitalGain *Extended350Code `xml:"XtndedEUCptlGn,omitempty"`
	// Specifies whether dividend is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June), or an income realised upon sale, a refund or redemption of shares and units, etc.
	EUDividendStatus *EUDividendStatus1Code `xml:"EUDvddSts,omitempty"`
	// Specifies whether dividend is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June), or an income realised upon sale, a refund or redemption of shares and units, etc.
	ExtendedEUDividendStatus *Extended350Code `xml:"XtndedEUDvddSts,omitempty"`
	// Percentage of the underlying assets of the funds that represents a debt and is in the scope of the European directive on taxation of savings income in the form of interest payments (Council Directive 2003/48/EC 3 June).
	PercentageOfDebtClaim *PercentageRate `xml:"PctgOfDebtClm,omitempty"`
	// Information related to a specific tax.
	TaxDetails []*Tax14 `xml:"TaxDtls,omitempty"`
}
// SetTotalAmountOfTaxes sets TotalAmountOfTaxes from a value/currency pair.
func (t *TotalTaxes3) SetTotalAmountOfTaxes(value, currency string) {
	t.TotalAmountOfTaxes = NewActiveCurrencyAnd13DecimalAmount(value, currency)
}

// SetTaxableIncomePerDividend sets TaxableIncomePerDividend from a value/currency pair.
func (t *TotalTaxes3) SetTaxableIncomePerDividend(value, currency string) {
	t.TaxableIncomePerDividend = NewActiveCurrencyAndAmount(value, currency)
}

// SetEUCapitalGain sets EUCapitalGain from its code string.
func (t *TotalTaxes3) SetEUCapitalGain(value string) {
	t.EUCapitalGain = (*EUCapitalGain2Code)(&value)
}

// SetExtendedEUCapitalGain sets ExtendedEUCapitalGain from its code string.
func (t *TotalTaxes3) SetExtendedEUCapitalGain(value string) {
	t.ExtendedEUCapitalGain = (*Extended350Code)(&value)
}

// SetEUDividendStatus sets EUDividendStatus from its code string.
func (t *TotalTaxes3) SetEUDividendStatus(value string) {
	t.EUDividendStatus = (*EUDividendStatus1Code)(&value)
}

// SetExtendedEUDividendStatus sets ExtendedEUDividendStatus from its code string.
func (t *TotalTaxes3) SetExtendedEUDividendStatus(value string) {
	t.ExtendedEUDividendStatus = (*Extended350Code)(&value)
}

// SetPercentageOfDebtClaim sets PercentageOfDebtClaim from its rate string.
func (t *TotalTaxes3) SetPercentageOfDebtClaim(value string) {
	t.PercentageOfDebtClaim = (*PercentageRate)(&value)
}
// AddTaxDetails appends a new, empty Tax14 entry to TaxDetails and returns it
// so the caller can populate it in place.
func (t *TotalTaxes3) AddTaxDetails() *Tax14 {
	newValue := new (Tax14) // NOTE(review): gofmt would write new(Tax14)
	t.TaxDetails = append(t.TaxDetails, newValue)
	return newValue
} | TotalTaxes3.go | 0.771843 | 0.6466 | TotalTaxes3.go | starcoder |
package optional
import "time"
// Duration represents optional duration value
type Duration struct {
	value    time.Duration // the wrapped duration; meaningful only when presents is true
	presents bool          // true once a value has been explicitly set
}

// set stores dd and marks the optional as present.
func (d *Duration) set(dd time.Duration) {
	d.value = dd
	d.presents = true
}
// OfDuration creates new optional time.Duration containing provided value
func OfDuration(d time.Duration) Duration {
	var opt Duration
	opt.set(d)
	return opt
}

// OfDurationRef creates new optional time.Duration containing provided value
func OfDurationRef(d *time.Duration) Duration {
	if d != nil {
		return OfDuration(*d)
	}
	return Duration{}
}
// OfSeconds creates new optional time.Duration containing provided duration in seconds
func OfSeconds(sec int) DurationSeconds {
	return DurationSeconds{Duration: OfDuration(time.Duration(sec) * time.Second)}
}

// OfSecondsRef creates new optional time.Duration containing provided duration in seconds
func OfSecondsRef(sec *int) DurationSeconds {
	if sec != nil {
		return OfSeconds(*sec)
	}
	return DurationSeconds{}
}

// OfMilliseconds creates new optional time.Duration containing provided duration in milliseconds
func OfMilliseconds(millis int) DurationMillis {
	return DurationMillis{Duration: OfDuration(time.Duration(millis) * time.Millisecond)}
}

// OfMillisecondsRef creates new optional time.Duration containing provided duration in milliseconds
func OfMillisecondsRef(millis *int) DurationMillis {
	if millis != nil {
		return OfMilliseconds(*millis)
	}
	return DurationMillis{}
}

// OfMinutes creates new optional time.Duration containing provided duration in minutes
func OfMinutes(minutes int) DurationMinutes {
	return DurationMinutes{Duration: OfDuration(time.Duration(minutes) * time.Minute)}
}

// OfMinutesRef creates new optional time.Duration containing provided duration in minutes
func OfMinutesRef(minutes *int) DurationMinutes {
	if minutes != nil {
		return OfMinutes(*minutes)
	}
	return DurationMinutes{}
}
// FilterZero applies zero value filtering
// This method will return empty optional if value inside optional is zero or missing
func (d Duration) FilterZero() Duration {
	if d.IsPresent() && d.value != 0 {
		return d
	}
	return Duration{}
}
// Each wrapper below embeds Duration; the distinct type presumably selects the
// unit used by the JSON/SQL mapper code — confirm against those mappers.

// DurationSeconds is wrapper over Duration to be used in JSON and SQL mappers
type DurationSeconds struct {
	Duration
}

// DurationMillis is wrapper over Duration to be used in JSON and SQL mappers
type DurationMillis struct {
	Duration
}

// DurationMinutes is wrapper over Duration to be used in JSON and SQL mappers
type DurationMinutes struct {
	Duration
} | duration.go | 0.841337 | 0.486149 | duration.go | starcoder |
package main
import (
"github.com/ByteArena/box2d"
"github.com/wdevore/RangerGo/api"
"github.com/wdevore/RangerGo/engine/nodes"
"github.com/wdevore/RangerGo/engine/nodes/custom"
"github.com/wdevore/RangerGo/engine/rendering"
)
// gameLayer is the demo scene layer: it owns the visual nodes plus the Box2D
// world simulating a dynamic circle dropping onto a static ground edge.
type gameLayer struct {
	nodes.Node

	textColor api.IPalette

	// Visual counterparts of the two physics bodies below.
	circleNode     api.INode
	groundLineNode api.INode

	// Box 2D system
	b2Gravity box2d.B2Vec2
	b2World   box2d.B2World
	// b2CircleBody's transform is mirrored onto circleNode each Update.
	b2CircleBody *box2d.B2Body
	b2GroundBody *box2d.B2Body
}
// newBasicGameLayer constructs the demo layer and initializes its base Node.
func newBasicGameLayer(name string) api.INode {
	layer := &gameLayer{}
	layer.Initialize(name)
	return layer
}
// Build assembles the layer's scene graph — checkerboard backdrop, axis
// lines, the circle and ground visuals, an instruction text — and finally
// constructs the Box2D world that drives the circle (buildPhysicsWorld reads
// the visual nodes' positions/scales, so it must run after they are set).
func (g *gameLayer) Build(world api.IWorld) {
	vw, vh := world.ViewSize().Components()
	x := -vw / 2.0
	y := -vh / 2.0

	g.textColor = rendering.NewPaletteInt64(rendering.White)

	// Checkerboard backdrop.
	cb := custom.NewCheckBoardNode("CheckerBoard", world, g)
	cbr := cb.(*custom.CheckerBoardNode)
	cbr.Configure(25.0)

	// Horizontal and vertical axis lines spanning the whole view.
	hLine := custom.NewLineNode("HLine", world, g)
	n := hLine.(*custom.LineNode)
	n.SetColor(rendering.NewPaletteInt64(rendering.LightPurple))
	n.SetPoints(x, 0.0, -x, 0.0)

	vLine := custom.NewLineNode("VLine", world, g)
	n = vLine.(*custom.LineNode)
	n.SetColor(rendering.NewPaletteInt64(rendering.LightPurple))
	n.SetPoints(0.0, -y, 0.0, y)

	// -------------------------------------------
	// Visuals for Box2D
	g.circleNode = NewCircleNode("Orange Circle", world, g)
	gr := g.circleNode.(*CircleNode)
	gr.Configure(6, 1.0)
	gr.SetColor(rendering.NewPaletteInt64(rendering.Orange))
	gr.SetScale(3.0)
	gr.SetPosition(100.0, -100.0) // starting pose; Handle resets to this

	g.groundLineNode = custom.NewLineNode("Ground", world, g)
	gln := g.groundLineNode.(*custom.LineNode)
	gln.SetColor(rendering.NewPaletteInt64(rendering.White))
	gln.SetPoints(-1.0, 0.0, 1.0, 0.0) // Set by unit coordinates
	gln.SetPosition(76.0+50.0, 0.0)
	gln.SetScale(25.0) // scales the unit line; also used as the edge half-length

	t := custom.NewRasterTextNode("RasterText", world, g)
	tr := t.(*custom.RasterTextNode)
	tr.SetFontScale(2)
	tr.SetFill(2)
	tr.SetText("Press key to reset.")
	tr.SetPosition(50.0, 50.0)
	tr.SetColor(rendering.NewPaletteInt64(rendering.White))

	buildPhysicsWorld(g)
}
// --------------------------------------------------------
// Timing
// --------------------------------------------------------
// Update advances the Box2D simulation by one fixed step and mirrors the
// circle body's transform onto its visual node.
func (g *gameLayer) Update(msPerUpdate, secPerUpdate float64) {
	// Box2D wants a fractional dt in seconds, hence secPerUpdate rather than
	// msPerUpdate. Keeping the step and iteration counts fixed is recommended.
	g.b2World.Step(secPerUpdate, api.VelocityIterations, api.PositionIterations)

	if !g.b2CircleBody.IsActive() {
		return
	}
	pos := g.b2CircleBody.GetPosition()
	g.circleNode.SetPosition(pos.X, pos.Y)
	g.circleNode.SetRotation(g.b2CircleBody.GetAngle())
}
// -----------------------------------------------------
// Node lifecycles
// -----------------------------------------------------
// EnterNode called when a node is entering the stage. Registers the layer for
// update ticks and for IO events (handled in Handle).
func (g *gameLayer) EnterNode(man api.INodeManager) {
	man.RegisterTarget(g)
	// Register for IO events so we can detect keyboard clicks
	man.RegisterEventTarget(g)
}

// ExitNode called when a node is exiting stage. Unregisters both targets and
// destroys the Box2D world created during Build.
func (g *gameLayer) ExitNode(man api.INodeManager) {
	man.UnRegisterTarget(g)
	man.UnRegisterEventTarget(g)
	g.b2World.Destroy()
}
// -----------------------------------------------------
// IO events
// -----------------------------------------------------
// Handle resets the circle's node and body back to the starting pose when a
// keyboard event with state 1 (key press) arrives. The event is never
// consumed: false is always returned.
func (g *gameLayer) Handle(event api.IEvent) bool {
	if event.GetType() != api.IOTypeKeyboard || event.GetState() != 1 {
		return false
	}

	// Restore node and body to the initial position with no motion.
	const x, y = 100.0, -100.0
	g.circleNode.SetPosition(x, y)
	g.b2CircleBody.SetTransform(box2d.MakeB2Vec2(x, y), 0.0)
	g.b2CircleBody.SetLinearVelocity(box2d.MakeB2Vec2(0.0, 0.0))
	g.b2CircleBody.SetAngularVelocity(0.0)
	return false
}
// -----------------------------------------------------
// Misc private
// -----------------------------------------------------
// buildPhysicsWorld creates the Box2D world and its two bodies: a dynamic
// circle and a static ground edge, positioned and sized from their visual
// nodes (so Build must have configured those nodes first).
func buildPhysicsWorld(g *gameLayer) {
	// --------------------------------------------
	// Box 2d configuration
	// --------------------------------------------

	// Define the gravity vector.
	// Ranger's coordinate space is defined as:
	// .--------> +X
	// |
	// |
	// |
	// v +Y
	// Thus gravity is specified as positive for downward motion.
	g.b2Gravity = box2d.MakeB2Vec2(0.0, 9.8)

	// Construct a world object, which will hold and simulate the rigid bodies.
	g.b2World = box2d.MakeB2World(g.b2Gravity)

	// -------------------------------------------
	// A body def used to create bodies
	bDef := box2d.MakeB2BodyDef()
	bDef.Type = box2d.B2BodyType.B2_dynamicBody
	bDef.Position.Set(g.circleNode.Position().X(), g.circleNode.Position().Y())

	// An instance of a body to contain Fixtures
	g.b2CircleBody = g.b2World.CreateBody(&bDef)

	// Every Fixture has a shape
	circleShape := box2d.MakeB2CircleShape()
	circleShape.M_p.Set(0.0, 0.0) // Relative to body position
	circleShape.M_radius = g.circleNode.Scale()

	fd := box2d.MakeB2FixtureDef()
	fd.Shape = &circleShape
	fd.Density = 1.0
	g.b2CircleBody.CreateFixtureFromDef(&fd) // attach Fixture to body

	// -------------------------------------------
	// The Ground = body + fixture + shape
	// Note: bDef is reused; Type and Position are overwritten for the ground.
	bDef.Type = box2d.B2BodyType.B2_staticBody
	bDef.Position.Set(g.groundLineNode.Position().X(), g.groundLineNode.Position().Y())
	g.b2GroundBody = g.b2World.CreateBody(&bDef)

	// Edge spans [-scale, +scale] on x in body-local coordinates, matching the
	// line node's unit points scaled by SetScale.
	groundShape := box2d.MakeB2EdgeShape()
	groundShape.Set(box2d.MakeB2Vec2(-g.groundLineNode.Scale(), 0.0), box2d.MakeB2Vec2(g.groundLineNode.Scale(), 0.0))

	fDef := box2d.MakeB2FixtureDef()
	fDef.Shape = &groundShape
	fDef.Density = 1.0
	g.b2GroundBody.CreateFixtureFromDef(&fDef) // attach Fixture to body
} | examples/physics/basics/ground/basic_game_layer.go | 0.615435 | 0.408808 | basic_game_layer.go | starcoder |
package newhope
import (
"encoding/binary"
//"git.schwanenlied.me/yawning/chacha20.git"
"github.com/Yawning/chacha20"
"golang.org/x/crypto/sha3"
)
const (
// PolyBytes is the length of an encoded polynomial in bytes.
PolyBytes = 1792
shake128Rate = 168 // Stupid that this isn't exposed.
)
type poly struct {
coeffs [paramN]uint16
}
func (p *poly) reset() {
for i := range p.coeffs {
p.coeffs[i] = 0
}
}
func (p *poly) fromBytes(a []byte) {
for i := 0; i < paramN/4; i++ {
p.coeffs[4*i+0] = uint16(a[7*i+0]) | ((uint16(a[7*i+1]) & 0x3f) << 8)
p.coeffs[4*i+1] = (uint16(a[7*i+1]) >> 6) | (uint16(a[7*i+2]) << 2) | ((uint16(a[7*i+3]) & 0x0f) << 10)
p.coeffs[4*i+2] = (uint16(a[7*i+3]) >> 4) | (uint16(a[7*i+4]) << 4) | ((uint16(a[7*i+5]) & 0x03) << 12)
p.coeffs[4*i+3] = (uint16(a[7*i+5]) >> 2) | (uint16(a[7*i+6]) << 6)
}
}
func (p *poly) toBytes(r []byte) {
for i := 0; i < paramN/4; i++ {
// Make sure that coefficients have only 14 bits.
t0 := barrettReduce(p.coeffs[4*i+0])
t1 := barrettReduce(p.coeffs[4*i+1])
t2 := barrettReduce(p.coeffs[4*i+2])
t3 := barrettReduce(p.coeffs[4*i+3])
// Make sure that coefficients are in [0,q]
m := t0 - paramQ
c := int16(m)
c >>= 15
t0 = m ^ ((t0 ^ m) & uint16(c))
m = t1 - paramQ
c = int16(m)
c >>= 15
t1 = m ^ ((t1 ^ m) & uint16(c))
m = t2 - paramQ
c = int16(m)
c >>= 15
t2 = m ^ ((t2 ^ m) & uint16(c))
m = t3 - paramQ
c = int16(m)
c >>= 15
t3 = m ^ ((t3 ^ m) & uint16(c))
r[7*i+0] = byte(t0 & 0xff)
r[7*i+1] = byte(t0>>8) | byte(t1<<6)
r[7*i+2] = byte(t1 >> 2)
r[7*i+3] = byte(t1>>10) | byte(t2<<4)
r[7*i+4] = byte(t2 >> 4)
r[7*i+5] = byte(t2>>12) | byte(t3<<2)
r[7*i+6] = byte(t3 >> 6)
}
}
func (p *poly) discardTo(xbuf []byte) bool {
var x [shake128Rate * 16 / 2]uint16
for i := range x {
x[i] = binary.LittleEndian.Uint16(xbuf[i*2:])
}
for i := 0; i < 16; i++ {
batcher84(x[i:])
}
// Check whether we're safe:
r := uint16(0)
for i := 1000; i < 1024; i++ {
r |= 61444 - x[i]
}
if r>>31 != 0 {
return true
}
// If we are, copy coefficients to polynomial:
for i := range p.coeffs {
p.coeffs[i] = x[i]
}
return false
}
func (p *poly) uniform(seed *[SeedBytes]byte, torSampling bool) {
if !torSampling {
// Reference version, vartime.
nBlocks := 14
var buf [shake128Rate * 14]byte
// h and buf are left unscrubbed because the output is public.
h := sha3.NewShake128()
h.Write(seed[:])
h.Read(buf[:])
for ctr, pos := 0, 0; ctr < paramN; {
val := binary.LittleEndian.Uint16(buf[pos:])
if val < 5*paramQ {
p.coeffs[ctr] = val
ctr++
}
pos += 2
if pos > shake128Rate*nBlocks-2 {
nBlocks = 1
h.Read(buf[:shake128Rate])
pos = 0
}
}
} else {
// `torref` version, every valid `a` is generate in constant time,
// though the number of attempts varies.
const nBlocks = 16
var buf [shake128Rate * nBlocks]byte
// h and buf are left unscrubbed because the output is public.
h := sha3.NewShake128()
h.Write(seed[:])
for {
h.Read(buf[:])
if !p.discardTo(buf[:]) {
break
}
}
}
}
func (p *poly) getNoise(seed *[SeedBytes]byte, nonce byte) {
// The `ref` code uses a uint32 vector instead of a byte vector,
// but converting between the two in Go is cumbersome.
var buf [4 * paramN]byte
var n [8]byte
n[0] = nonce
stream, err := chacha20.NewCipher(seed[:], n[:])
if err != nil {
panic(err)
}
stream.KeyStream(buf[:])
stream.Reset()
for i := 0; i < paramN; i++ {
t := binary.LittleEndian.Uint32(buf[4*i:])
d := uint32(0)
for j := uint(0); j < 8; j++ {
d += (t >> j) & 0x01010101
}
a := ((d >> 8) & 0xff) + (d & 0xff)
b := (d >> 24) + ((d >> 16) & 0xff)
p.coeffs[i] = uint16(a) + paramQ - uint16(b)
}
// Scrub the random bits...
memwipe(buf[:])
}
func (p *poly) pointwise(a, b *poly) {
for i := range p.coeffs {
t := montgomeryReduce(3186 * uint32(b.coeffs[i])) // t is now in Montgomery domain
p.coeffs[i] = montgomeryReduce(uint32(a.coeffs[i]) * uint32(t)) // p.coeffs[i] is back in normal domain
}
}
func (p *poly) add(a, b *poly) {
for i := range p.coeffs {
p.coeffs[i] = barrettReduce(a.coeffs[i] + b.coeffs[i])
}
}
func (p *poly) ntt() {
p.mulCoefficients(&psisBitrevMontgomery)
ntt(&p.coeffs, &omegasMontgomery)
}
func (p *poly) invNtt() {
p.bitrev()
ntt(&p.coeffs, &omegasInvMontgomery)
p.mulCoefficients(&psisInvMontgomery)
}
func init() {
if paramK != 16 {
panic("poly.getNoise() only supports k=16")
}
} | poly.go | 0.514156 | 0.413063 | poly.go | starcoder |
package world
import (
"fmt"
"github.com/df-mc/dragonfly/server/block/cube"
"github.com/go-gl/mathgl/mgl64"
"math"
)
// ChunkPos identifies a chunk by its X/Z chunk coordinates. It is a plain
// utility type: chunks do not themselves record their position. One step in
// chunk coordinates corresponds to sixteen blocks along the same axis.
type ChunkPos [2]int32

// String implements fmt.Stringer, rendering the position as "(x, z)".
func (p ChunkPos) String() string {
	return fmt.Sprintf("(%d, %d)", p[0], p[1])
}

// X returns the X coordinate of the chunk position.
func (p ChunkPos) X() int32 {
	return p[0]
}

// Z returns the Z coordinate of the chunk position.
func (p ChunkPos) Z() int32 {
	return p[1]
}
// SubChunkPos identifies a sub-chunk by its X/Y/Z sub-chunk coordinates. It
// is a plain utility type: sub-chunks do not themselves record their
// position. One step in sub-chunk coordinates corresponds to sixteen blocks
// along the same axis.
type SubChunkPos [3]int32

// String implements fmt.Stringer, rendering the position as "(x, y, z)".
func (p SubChunkPos) String() string {
	return fmt.Sprintf("(%d, %d, %d)", p[0], p[1], p[2])
}

// X returns the X coordinate of the sub-chunk position.
func (p SubChunkPos) X() int32 {
	return p[0]
}

// Y returns the Y coordinate of the sub-chunk position.
func (p SubChunkPos) Y() int32 {
	return p[1]
}

// Z returns the Z coordinate of the sub-chunk position.
func (p SubChunkPos) Z() int32 {
	return p[2]
}
// blockPosFromNBT builds a cube.Pos from the "x", "y" and "z" entries of the
// NBT data map passed. Entries that are absent or not int32 default to zero.
func blockPosFromNBT(data map[string]any) cube.Pos {
	coord := func(key string) int {
		v, _ := data[key].(int32)
		return int(v)
	}
	return cube.Pos{coord("x"), coord("y"), coord("z")}
}
// chunkPosFromVec3 returns the chunk position containing the Vec3 passed:
// each horizontal coordinate is floored and then divided by 16 via an
// arithmetic right shift (correct for negative coordinates too).
func chunkPosFromVec3(vec3 mgl64.Vec3) ChunkPos {
	x := int32(math.Floor(vec3[0]))
	z := int32(math.Floor(vec3[2]))
	return ChunkPos{x >> 4, z >> 4}
}
// chunkPosFromBlockPos returns the ChunkPos of the chunk that a block at a cube.Pos is in.
// The arithmetic right shift by 4 divides by 16 rounding toward negative
// infinity, which selects the correct chunk for negative block coordinates.
func chunkPosFromBlockPos(p cube.Pos) ChunkPos {
	return ChunkPos{int32(p[0] >> 4), int32(p[2] >> 4)}
} | server/world/position.go | 0.878432 | 0.616907 | position.go | starcoder |
package nexus
// Alignment is a collection of sequences, all assumed to have equal length.
type Alignment []string

// Column returns the letter of every sequence at position p.
func (aln Alignment) Column(p uint) []byte {
	col := make([]byte, 0, len(aln))
	for _, seq := range aln {
		col = append(col, seq[p])
	}
	return col
}

// NSeq is the number of sequences in the alignment.
// Note: len(alignment) == alignment.NSeq()
func (aln Alignment) NSeq() uint {
	n := len(aln)
	return uint(n)
}

// Seq returns the i-th sequence in the alignment.
func (aln Alignment) Seq(i uint) string {
	return aln[i]
}
// Subseq creates a slice from the original alignment
// An argument out of bounds is interpreted as ultimate start or end of alignment, relatively
//
// s and e are the half-open [s, e) column bounds. Bounds are validated
// against aln.Len(), the length of the LONGEST sequence — NOTE(review): a
// shorter sequence would panic on slicing, so this relies on the equal-length
// assumption documented on Alignment.
func (aln Alignment) Subseq(s, e int) Alignment {
	subseqs := make(Alignment, aln.NSeq())
	for i, seq := range aln {
		switch {
		case 0 <= s && s < aln.Len() && 0 <= e && e < aln.Len(): // Defined start to defined end
			subseqs[i] = seq[s:e]
		case 0 <= s && s < aln.Len() && (e < 0 || aln.Len() <= e): // Defined start to ultimate end
			subseqs[i] = seq[s:]
		case (s < 0 || aln.Len() <= s) && 0 <= e && e < aln.Len(): // Ultimate start to defined end
			subseqs[i] = seq[:e]
		default:
			subseqs[i] = seq[:] // Whole alignment
		}
	}
	return subseqs
}
// String renders all sequences, each followed by a newline.
func (aln Alignment) String() string {
	out := ""
	for _, seq := range aln {
		out += seq + "\n"
	}
	return out
}
// Len is the length of the alignment: the length of its longest sequence.
// Note that len(alignment) != alignment.Len(); the former equals alignment.NSeq().
func (aln Alignment) Len() int {
	longest := 0
	for _, seq := range aln {
		if len(seq) > longest {
			longest = len(seq)
		}
	}
	return longest
}
// Count returns the number of times each base in a set is found
//
// Only characters belonging to bases are tallied. Previously every character
// in the alignment was counted and inserted into the returned map, so the
// result could contain keys outside the requested set — contradicting this
// doc and making Frequency's "bases not in the set are ignored" claim false.
func (aln Alignment) Count(bases []byte) map[byte]int {
	counts := make(map[byte]int, len(bases))
	for _, b := range bases {
		counts[b] = 0
	}
	for _, seq := range aln {
		for _, char := range seq {
			// Tally only characters pre-registered from the base set.
			if _, ok := counts[byte(char)]; ok {
				counts[byte(char)]++
			}
		}
	}
	return counts
}
// Frequency returns the normalized frequency of each base in a set
// Note that bases that exist in the Alignment, but not in the set are ignored
func (aln Alignment) Frequency(bases []byte) map[byte]float64 {
	freqs := make(map[byte]float64, len(bases))
	for _, b := range bases {
		freqs[b] = 0.0
	}
	baseCounts := aln.Count(bases)
	// Denominator: total of all tallies returned by Count.
	// NOTE(review): if Count tallies characters outside the requested set,
	// those counts inflate this denominator and appear as extra keys in the
	// result — verify against Count's behavior.
	sumCounts := 0.0
	for _, count := range baseCounts {
		sumCounts += float64(count)
	}
	// Guard against division by zero when nothing was counted.
	if sumCounts == 0 {
		sumCounts = 1.0
	}
	for char, count := range baseCounts {
		freqs[char] = float64(count) / sumCounts
	}
	return freqs
} | internal/nexus/alignment.go | 0.777933 | 0.487185 | alignment.go | starcoder |
package leb128
import (
"math/bits"
)
// Based on the explanation here: https://en.wikipedia.org/wiki/LEB128.
// UnsignedEncode encodes an uint64 to LEB128 encoded byte array
func UnsignedEncode(value uint64) []byte {
	if value == 0 { // Special case
		return []byte{0x00}
	}
	var enc []byte
	for value > 0 {
		bits := byte(value & 0x7F) // Extract the last 7 bits
		if value>>7 > 0 {          // Add high 1 bits on all but last
			bits |= 0x80
		}
		enc = append(enc, bits)
		value >>= 7 // Shift value right by 7 bits
	}
	return enc
}

// UnsignedDecode decodes a LEB128 encoded byte array back to uint64
func UnsignedDecode(enc []byte) uint64 {
	// A uint64 occupies at most ceil(64/7) = 10 LEB128 groups. The previous
	// limit of 8 rejected valid encodings of large values — including the
	// output of UnsignedEncode itself for any value >= 2^56.
	if len(enc) > 10 {
		panic("Error decoding byte array. Cannot fit in uint64.")
	}
	if len(enc) == 1 && enc[0] == 0x00 { // Special case
		return 0
	}
	var dec uint64
	for i := len(enc) - 1; i >= 0; i-- {
		bits := enc[i] & 0x7F // Extract the last 7 bits
		dec = (dec << 7) | uint64(bits)
	}
	return dec
}

// SignedEncode encodes an int64 to LEB128 encoded byte array
//
// Groups of 7 bits are emitted low-to-high using an arithmetic shift;
// encoding stops once the remaining value is pure sign extension AND bit 6 of
// the final group matches the sign. This fixes positive inputs whose top
// group has bit 6 set: the previous implementation delegated positives to
// UnsignedEncode, so e.g. SignedEncode(64) produced {0x40}, which decodes as
// -64. Negative values encode byte-for-byte as before.
func SignedEncode(value int64) []byte {
	var enc []byte
	for {
		group := byte(value & 0x7F) // Extract the last 7 bits
		value >>= 7                 // Arithmetic shift preserves the sign
		done := (value == 0 && group&0x40 == 0) || (value == -1 && group&0x40 != 0)
		if !done {
			group |= 0x80 // Continuation bit on all but the last group
		}
		enc = append(enc, group)
		if done {
			return enc
		}
	}
}

// SignedDecode decodes a LEB128 encoded byte array back to int64
func SignedDecode(enc []byte) int64 {
	// An int64 occupies at most ceil(64/7) = 10 LEB128 groups (was 8, which
	// rejected valid encodings of large-magnitude values).
	if len(enc) > 10 {
		panic("Error decoding byte array. Cannot fit in int64.")
	}
	// Bit 6 of the final group is the sign bit; non-negative values decode
	// exactly like unsigned ones.
	if enc[len(enc)-1]&0x40 != 0x40 {
		return int64(UnsignedDecode(enc))
	}
	var dec uint64
	for i := len(enc) - 1; i >= 0; i-- {
		bits := enc[i] & 0x7F
		dec = (dec << 7) | uint64(bits)
	}
	// Convert from 2's complement within the encoded bit width.
	bitLength := bits.Len64(dec)
	dec = ^dec
	dec = (dec << (64 - bitLength)) >> (64 - bitLength)
	dec++
	return int64(-dec)
}
package pgs
// Node represents any member of the proto descriptor AST. Typically, the
// highest level Node is the Package.
type Node interface {
accept(Visitor) error
}
// A Visitor exposes methods to walk an AST Node and its children in a depth-
// first manner. If the returned Visitor v is non-nil, it will be used to
// descend into the children of the current node. If nil, those children will
// be skipped. Any error returned will immediately halt execution.
type Visitor interface {
VisitPackage(Package) (v Visitor, err error)
VisitFile(File) (v Visitor, err error)
VisitMessage(Message) (v Visitor, err error)
VisitEnum(Enum) (v Visitor, err error)
VisitEnumValue(EnumValue) (v Visitor, err error)
VisitField(Field) (v Visitor, err error)
VisitOneOf(OneOf) (v Visitor, err error)
VisitService(Service) (v Visitor, err error)
VisitMethod(Method) (v Visitor, err error)
}
// Walk applies a depth-first visitor pattern with v against Node n.
func Walk(v Visitor, n Node) error { return n.accept(v) }
type nilVisitor struct{}
// NilVisitor returns a Visitor that always responds with (nil, nil) for all
// methods. This is useful as an anonymous embedded struct to satisfy the
// Visitor interface for implementations that don't require visiting every Node
// type. NilVisitor should be used over PassThroughVisitor if short-circuiting
// behavior is desired.
func NilVisitor() Visitor { return nilVisitor{} }
func (nv nilVisitor) VisitPackage(p Package) (v Visitor, err error) { return nil, nil }
func (nv nilVisitor) VisitFile(f File) (v Visitor, err error) { return nil, nil }
func (nv nilVisitor) VisitMessage(m Message) (v Visitor, err error) { return nil, nil }
func (nv nilVisitor) VisitEnum(e Enum) (v Visitor, err error) { return nil, nil }
func (nv nilVisitor) VisitEnumValue(e EnumValue) (v Visitor, err error) { return nil, nil }
func (nv nilVisitor) VisitField(f Field) (v Visitor, err error) { return nil, nil }
func (nv nilVisitor) VisitOneOf(o OneOf) (v Visitor, err error) { return nil, nil }
func (nv nilVisitor) VisitService(s Service) (v Visitor, err error) { return nil, nil }
func (nv nilVisitor) VisitMethod(m Method) (v Visitor, err error) { return nil, nil }
var _ Visitor = nilVisitor{}
type passVisitor struct {
v Visitor
}
// PassThroughVisitor returns a Visitor that always responds with (v, nil) for
// all methods. This is useful as an anonymous embedded struct to satisfy the
// Visitor interface for implementations that need access to deep child nodes
// (eg, EnumValue, Field, Method) without implementing each method of the
// interface explicitly.
func PassThroughVisitor(v Visitor) Visitor { return passVisitor{v: v} }
func (pv passVisitor) VisitPackage(Package) (v Visitor, err error) { return pv.v, nil }
func (pv passVisitor) VisitFile(File) (v Visitor, err error) { return pv.v, nil }
func (pv passVisitor) VisitMessage(Message) (v Visitor, err error) { return pv.v, nil }
func (pv passVisitor) VisitEnum(Enum) (v Visitor, err error) { return pv.v, nil }
func (pv passVisitor) VisitEnumValue(EnumValue) (v Visitor, err error) { return pv.v, nil }
func (pv passVisitor) VisitField(Field) (v Visitor, err error) { return pv.v, nil }
func (pv passVisitor) VisitOneOf(OneOf) (v Visitor, err error) { return pv.v, nil }
func (pv passVisitor) VisitService(Service) (v Visitor, err error) { return pv.v, nil }
func (pv passVisitor) VisitMethod(Method) (v Visitor, err error) { return pv.v, nil }
var (
_ Visitor = nilVisitor{}
_ Visitor = passVisitor{}
) | node.go | 0.803328 | 0.434281 | node.go | starcoder |
package iso20022
// Provides the additional information for an NDF as supplied on a fixing instruction.
type FixingConditions1 struct {
// The date on which the trade was executed.
TradeDate *ISODate `xml:"TradDt"`
// Represents the original reference of the instruction for which the status is given, as assigned by the participant that submitted the foreign exchange trade.
OriginatorReference *Max35Text `xml:"OrgtrRef"`
// Reference common to both parties of the trade.
CommonReference *Max35Text `xml:"CmonRef,omitempty"`
// Reference to the identification of a previous event in the life of a trade which is amended or cancelled.
RelatedReference *Max35Text `xml:"RltdRef,omitempty"`
// Currency and amount bought in a foreign exchange trade.
TradingSideBuyAmount *ActiveOrHistoricCurrencyAndAmount `xml:"TradgSdBuyAmt"`
// Currency and amount sold in a foreign exchange trade.
TradingSideSellAmount *ActiveOrHistoricCurrencyAndAmount `xml:"TradgSdSellAmt"`
// The value of one currency expressed in relation to another currency. ExchangeRate expresses the ratio between UnitCurrency and QuotedCurrency (ExchangeRate = UnitCurrency/QuotedCurrency).
ExchangeRate *BaseOneRate `xml:"XchgRate"`
}
func (f *FixingConditions1) SetTradeDate(value string) {
f.TradeDate = (*ISODate)(&value)
}
func (f *FixingConditions1) SetOriginatorReference(value string) {
f.OriginatorReference = (*Max35Text)(&value)
}
func (f *FixingConditions1) SetCommonReference(value string) {
f.CommonReference = (*Max35Text)(&value)
}
func (f *FixingConditions1) SetRelatedReference(value string) {
f.RelatedReference = (*Max35Text)(&value)
}
func (f *FixingConditions1) SetTradingSideBuyAmount(value, currency string) {
f.TradingSideBuyAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (f *FixingConditions1) SetTradingSideSellAmount(value, currency string) {
f.TradingSideSellAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}
func (f *FixingConditions1) SetExchangeRate(value string) {
f.ExchangeRate = (*BaseOneRate)(&value)
} | FixingConditions1.go | 0.839635 | 0.471406 | FixingConditions1.go | starcoder |
package unit
import (
"fmt"
"math"
)
type Time float64
const (
Nanosecond Time = 1
Microsecond = Nanosecond * 1000
Millisecond = Microsecond * 1000
Second = Millisecond * 1000
Minute = Second * 60
Hour = Minute * 60
Day = Hour * 24
Week = Day * 7
Year = Day * 365
)
var timeUnitTable = map[Time]func(value Time) string{
Nanosecond: func(value Time) string { return "ns" },
Microsecond: func(value Time) string { return "µs" },
Millisecond: func(value Time) string { return "ms" },
Second: func(value Time) string { return "sec" },
Minute: func(value Time) string { return "min" },
Hour: func(value Time) string { return declensions(float64(value), "hour", "hours", "hours") },
Day: func(value Time) string { return declensions(float64(value), "day", "days", "days") },
Week: func(value Time) string { return declensions(float64(value), "week", "weeks", "weeks") },
Year: func(value Time) string { return declensions(float64(value), "year", "years", "years") },
}
func Nanoseconds(value float64) string {
return formatTimeValue(value, Nanosecond)
}
func Microseconds(value float64) string {
return formatTimeValue(value, Microsecond)
}
func Milliseconds(value float64) string {
return formatTimeValue(value, Millisecond)
}
func Seconds(value float64) string {
return formatTimeValue(value, Second)
}
func formatTimeValue(value float64, time Time) string {
ns := Time(math.Abs(value * float64(time)))
switch {
case ns < Microsecond:
return formatValueWithUnit(ns, Nanosecond, 3)
case ns < Millisecond:
return formatValueWithUnit(ns, Microsecond, 3)
case ns < Second:
return formatValueWithUnit(ns, Millisecond, 3)
case ns < Minute:
return formatValueWithUnit(ns, Second, 3)
case ns < Hour:
return formatValueWithUnit(ns, Minute, 2)
case ns < Day:
return formatValueWithUnit(ns, Hour, 2)
case ns < Week:
return formatValueWithUnit(ns, Day, 2)
case ns < Year:
return formatValueWithUnit(ns, Week, 2)
default:
return formatValueWithUnit(ns, Year, 2)
}
}
func formatValueWithUnit(value Time, unit Time, decimals int) string {
return fmt.Sprintf("%v %s", Round(float64(value/unit), decimals), timeUnitTable[unit](value))
}
func Round(value float64, decimals int) float64 {
multiplier := math.Pow10(decimals)
return math.Round(value*multiplier) / multiplier
} | time.go | 0.825906 | 0.40592 | time.go | starcoder |
package opengl
import "github.com/go-gl/gl/v3.3-core/gl"
// Mesh is a mesh that can be drawn
type Mesh struct {
vertices []float32
indices []uint32
vao uint32
vbo uint32
ebo uint32
Shader *Shader
ownshader bool
}
// MakeMesh creates a mesh with given vertices and an optional shader
func MakeMesh(vertices []float32, indices []uint32, shader *Shader) *Mesh {
mesh := new(Mesh)
mesh.vertices = vertices
mesh.indices = indices
mesh.Shader = shader
if mesh.Shader == nil {
// Fall back to default shader if not specified
mesh.Shader = DefaultShader()
mesh.ownshader = true // Since we use a custom shader instance we should also clear it
}
// Generate vertex array object
gl.GenVertexArrays(1, &mesh.vao)
gl.BindVertexArray(mesh.vao)
// Generate vertex buffer object
gl.GenBuffers(1, &mesh.vbo)
gl.BindBuffer(gl.ARRAY_BUFFER, mesh.vbo)
gl.BufferData(gl.ARRAY_BUFFER, len(vertices)*4, gl.Ptr(vertices), gl.STATIC_DRAW)
// Generate element buffer object
gl.GenBuffers(1, &mesh.ebo)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, mesh.ebo)
gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(indices)*4, gl.Ptr(indices), gl.STATIC_DRAW)
return mesh
}
// Destroy cleans up all used resources from Mesh
func (m *Mesh) Destroy() {
if m.ownshader {
m.Shader.Destroy()
}
gl.BindVertexArray(m.vao)
gl.BindBuffer(gl.ARRAY_BUFFER, 0)
gl.DeleteBuffers(1, &m.vbo)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, 0)
gl.DeleteBuffers(1, &m.ebo)
gl.BindVertexArray(0)
gl.DeleteVertexArrays(1, &m.vao)
}
// Draw sets the quad's shader and draws it
func (m *Mesh) Draw() {
// Load shader
m.Shader.MustGetProgram() // Make sure it's updated
m.Shader.Use()
// Setup uniforms
m.Shader.BindUniforms()
// Bind VAO and EBO
gl.BindVertexArray(m.vao)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, m.ebo)
// Draw vertices
gl.DrawElements(gl.TRIANGLES, int32(len(m.indices)), gl.UNSIGNED_INT, nil)
// Unbind VAO and EBO
gl.BindVertexArray(0)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, 0)
}
var quadVertices = []float32{
-1, -1, 0, 0, 1,
-1, 1, 0, 0, 0,
1, 1, 0, 1, 0,
1, -1, 0, 1, 1,
}
// quadIndices splits the quad into two triangles (0-1-2 and 0-3-2).
var quadIndices = []uint32{
	0, 1, 2,
	0, 3, 2,
}
// MakeQuad creates a quad with either a provided shader or a default one
func MakeQuad(shader *Shader) *Mesh {
return MakeMesh(quadVertices, quadIndices, shader)
} | opengl/mesh.go | 0.808408 | 0.423041 | mesh.go | starcoder |
package raytracing
import (
"math"
"math/rand"
)
// Material describes how a surface interacts with light.
type Material interface {
	// scatter computes the outgoing ray for a hit; the bool is false when
	// the ray is absorbed instead of scattered.
	scatter(hr hitRecord) (ray, bool)
	// attenuation returns the per-bounce color filter applied to scattered light.
	attenuation() Color
}
// lambertian is a perfectly diffuse Material tinted by its albedo.
type lambertian struct {
	albedo Color
}
// NewLambertian returns a diffuse Material with the given albedo color.
func NewLambertian(c Color) Material {
	material := lambertian{albedo: c}
	return material
}
// scatter implements Lambertian (diffuse) reflection: the bounce direction
// is the surface normal perturbed by a random unit vector.
func (l lambertian) scatter(hr hitRecord) (ray, bool) {
	// Sample a uniform point on the unit sphere: pick z in [-1,1] and an
	// azimuth phi; the radius in the xy-plane is then sqrt(1-z*z).
	phi := 2 * math.Pi * rand.Float64()
	z := 2*rand.Float64() - 1
	// Fix: the radius must be sqrt(1-z*z). The previous (1 - z*z) produced
	// non-unit vectors, biasing the scatter distribution toward the poles.
	r := math.Sqrt(1 - z*z)
	random := NewVector(r*math.Cos(phi), r*math.Sin(phi), z)
	return newRay(hr.point, hr.normal.add(random)), true
}
// attenuation returns the albedo used to tint light scattered by this surface.
func (l lambertian) attenuation() Color {
	return l.albedo
}
// metal is a reflective Material; fuzziness in [0,1] blurs the reflection.
type metal struct {
	albedo Color
	fuzziness float64
}
// NewMetal returns a reflective Material with the given albedo and
// fuzziness; the fuzziness is clamped into [0, 1].
func NewMetal(c Color, f float64) Material {
	switch {
	case f <= 0:
		f = 0
	case f >= 1:
		f = 1
	}
	return metal{albedo: c, fuzziness: f}
}
// scatter mirrors the incident ray about the surface normal and perturbs
// the result by a random offset scaled by the metal's fuzziness.
// Rays whose fuzzed reflection points into the surface are absorbed.
func (m metal) scatter(hr hitRecord) (ray, bool) {
	f := randomInUnitSphere().mul(m.fuzziness)
	reflected := reflect(hr.incident.direction, hr.normal).add(f)
	if reflected.dot(hr.normal) < 0 {
		// Absorbed: the perturbed reflection ended up below the surface.
		return ray{}, false
	}
	return newRay(hr.point, reflected), true
}
// randomInUnitSphere returns a uniformly distributed point strictly inside
// the unit sphere, found by rejection sampling over the enclosing cube.
func randomInUnitSphere() Vector {
	var x, y, z float64
	for {
		x = 2*rand.Float64() - 1
		y = 2*rand.Float64() - 1
		z = 2*rand.Float64() - 1
		if x*x+y*y+z*z < 1 {
			break
		}
	}
	return NewVector(x, y, z)
}
// attenuation returns the metal's albedo, tinting reflected light.
func (m metal) attenuation() Color {
	return m.albedo
}
// dielectric is a transparent Material (glass, water) characterised by its
// refractive index.
type dielectric struct {
	refractiveIndex float64
}
// NewDielectric returns a transparent Material with the given refractive index.
func NewDielectric(idx float64) Material {
	return dielectric{refractiveIndex: idx}
}
// scatter refracts the incident ray via Snell's law, reflecting instead on
// total internal reflection or (probabilistically) per Schlick's Fresnel
// approximation.
// NOTE(review): the math assumes the incident direction is normalised and
// that Vector.norm() returns the *squared* length (so sqrt(norm-norm) below
// yields the parallel component's magnitude) — confirm against Vector.
func (d dielectric) scatter(hr hitRecord) (ray, bool) {
	var relIdx float64
	if hr.incident.direction.dot(hr.normal) < 0 {
		// Ray enters the medium: air -> material.
		relIdx = 1.0 / d.refractiveIndex
	} else {
		// Ray leaves the medium: material -> air.
		relIdx = d.refractiveIndex
	}
	in := hr.incident.direction
	// Component of the refracted direction orthogonal to the normal.
	orthogonal := in.sub(hr.normal.mul(in.dot(hr.normal))).mul(relIdx)
	if in.norm() < orthogonal.norm() {
		// Total internal reflection: no refracted ray exists.
		reflected := reflect(hr.incident.direction, hr.normal)
		return newRay(hr.point, reflected), true
	}
	if rand.Float64() < schlick(in, hr.normal, relIdx) {
		// Fresnel reflection, sampled using Schlick's approximation.
		reflected := reflect(hr.incident.direction, hr.normal)
		return newRay(hr.point, reflected), true
	}
	// Component parallel to the normal; the sign keeps the refracted ray on
	// the far side of the surface.
	var parallel Vector
	if hr.incident.direction.dot(hr.normal) < 0 {
		parallel = hr.normal.mul(-math.Sqrt(in.norm() - orthogonal.norm()))
	} else {
		parallel = hr.normal.mul(math.Sqrt(in.norm() - orthogonal.norm()))
	}
	refracted := orthogonal.add(parallel)
	return newRay(hr.point, refracted), true
}
// attenuation returns white: a clear dielectric absorbs no light.
func (d dielectric) attenuation() Color {
	return NewColor(1, 1, 1)
}
// reflect mirrors the incident vector about the surface normal:
// out = in - 2*(in . normal)*normal.
func reflect(in Vector, normal Vector) Vector {
	d := in.dot(normal)
	return in.add(normal.mul(-2 * d))
}
// schlick returns Schlick's polynomial approximation of the Fresnel
// reflectance for the given incidence direction, surface normal and
// relative refractive index.
// NOTE(review): this uses in.length() while the rest of this file uses
// in.norm() — confirm both exist and that length() is the Euclidean norm.
func schlick(in, normal Vector, relIdx float64) float64 {
	cos := in.dot(normal) / in.length()
	if cos < 0 {
		cos = -cos
	}
	r := (relIdx - 1) / (relIdx + 1)
	r0 := r * r
	return r0 + (1-r0)*math.Pow(1-cos, 5)
}
package level
// TileFlag describes simple properties of a map tile.
// The 32 bits are partitioned into sub-fields; the accessor methods below
// extract and update individual fields without disturbing the other bits.
type TileFlag uint32
// RealWorldFlag describes simple properties of a map tile in the real world.
// Obtain one via TileFlag.ForRealWorld.
type RealWorldFlag TileFlag
// CyberspaceFlag describes simple properties of a map tile in cyberspace.
// Obtain one via TileFlag.ForCyberspace.
type CyberspaceFlag TileFlag
// ForRealWorld interprets the flag value for the real world.
// The bits are reinterpreted as-is; no value conversion takes place.
func (flag TileFlag) ForRealWorld() RealWorldFlag {
	return RealWorldFlag(flag)
}
// ForCyberspace interprets the flag value for cyberspace.
// The bits are reinterpreted as-is; no value conversion takes place.
func (flag TileFlag) ForCyberspace() CyberspaceFlag {
	return CyberspaceFlag(flag)
}
// MusicIndex returns the music identifier stored in bits 12-15. Range: [0..15].
func (flag TileFlag) MusicIndex() int {
	return int(flag>>12) & 0xF
}
// WithMusicIndex returns a new flag value with the given music index stored
// in bits 12-15. Values beyond the allowed range [0..15] are ignored and the
// flag is returned unchanged.
func (flag TileFlag) WithMusicIndex(value int) TileFlag {
	if value < 0 || value > 15 {
		return flag
	}
	cleared := flag &^ 0x0000F000
	return cleared | TileFlag(value)<<12
}
// SlopeControl returns the slope control as per flags (bits 10-11).
func (flag TileFlag) SlopeControl() TileSlopeControl {
	return TileSlopeControl((flag & 0x00000C00) >> 10)
}
// WithSlopeControl returns a new flag value with the given slope control set.
// Only the low two bits of ctrl are used (stored in bits 10-11).
func (flag TileFlag) WithSlopeControl(ctrl TileSlopeControl) TileFlag {
	return TileFlag(uint32(flag&^0x00000C00) | (uint32(ctrl&0x3) << 10))
}
// AsTileFlag returns the flags as regular tile flag value.
// Inverse of TileFlag.ForRealWorld; the bits are unchanged.
func (flag RealWorldFlag) AsTileFlag() TileFlag {
	return TileFlag(flag)
}
// WallTextureOffset returns the vertical offset (in tile height units) to
// apply for wall textures (bits 0-4).
func (flag RealWorldFlag) WallTextureOffset() TileHeightUnit {
	return TileHeightUnit(flag & 0x0000001F)
}
// WithWallTextureOffset returns a flag value with the given wall texture
// offset stored in bits 0-4; only the low five bits of value are used.
func (flag RealWorldFlag) WithWallTextureOffset(value TileHeightUnit) RealWorldFlag {
	return RealWorldFlag(uint32(flag&^0x0000001F) | uint32(value&0x1F))
}
// WallTexturePattern returns the pattern to apply for walls (bits 5-6).
func (flag RealWorldFlag) WallTexturePattern() WallTexturePattern {
	return WallTexturePattern(byte(flag&0x00000060) >> 5)
}
// WithWallTexturePattern returns a flag with the given pattern stored in
// bits 5-6; only the low two bits of value are used.
func (flag RealWorldFlag) WithWallTexturePattern(value WallTexturePattern) RealWorldFlag {
	return RealWorldFlag(uint32(flag&^0x00000060) | (uint32(value&0x3) << 5))
}
// UseAdjacentWallTexture returns whether the wall texture from the adjacent
// tile should be used for each side (bit 8).
func (flag RealWorldFlag) UseAdjacentWallTexture() bool {
	return flag&0x00000100 != 0
}
// WithUseAdjacentWallTexture returns a flag with the given usage set (bit 8).
func (flag RealWorldFlag) WithUseAdjacentWallTexture(value bool) RealWorldFlag {
	cleared := uint32(flag &^ 0x00000100)
	if !value {
		return RealWorldFlag(cleared)
	}
	return RealWorldFlag(cleared | 0x00000100)
}
// Deconstructed returns whether the tile is marked as heavily deconstructed
// (should play spooky music). Stored in bit 9.
func (flag RealWorldFlag) Deconstructed() bool {
	return (flag & 0x00000200) != 0
}
// WithDeconstructed returns a flag with the given deconstruction set (bit 9).
func (flag RealWorldFlag) WithDeconstructed(value bool) RealWorldFlag {
	var valueFlag uint32
	if value {
		valueFlag = 0x00000200
	}
	return RealWorldFlag(uint32(flag&^0x00000200) | valueFlag)
}
// FloorShadow returns the floor shadow value (bits 16-19). Range: [0..GradesOfShadow].
func (flag RealWorldFlag) FloorShadow() int {
	return int((flag & 0x000F0000) >> 16)
}
// WithFloorShadow returns a new flag value with the given floor shadow set
// in bits 16-19. Values beyond allowed range are ignored and the flag is
// returned unchanged.
func (flag RealWorldFlag) WithFloorShadow(value int) RealWorldFlag {
	if (value < 0) || (value >= GradesOfShadow) {
		return flag
	}
	return RealWorldFlag(uint32(flag&^0x000F0000) | (uint32(value) << 16))
}
// CeilingShadow returns the ceiling shadow value (bits 24-27). Range: [0..GradesOfShadow].
func (flag RealWorldFlag) CeilingShadow() int {
	return int((flag & 0x0F000000) >> 24)
}
// WithCeilingShadow returns a new flag value with the given ceiling shadow
// set in bits 24-27. Values beyond allowed range are ignored and the flag is
// returned unchanged.
func (flag RealWorldFlag) WithCeilingShadow(value int) RealWorldFlag {
	if (value < 0) || (value >= GradesOfShadow) {
		return flag
	}
	return RealWorldFlag(uint32(flag&^0x0F000000) | (uint32(value) << 24))
}
// TileVisited returns whether the tile is marked as being visited (seen).
// Stored in bit 31.
func (flag RealWorldFlag) TileVisited() bool {
	return (flag & 0x80000000) != 0
}
// WithTileVisited returns a flag with the given visited state set (bit 31).
func (flag RealWorldFlag) WithTileVisited(value bool) RealWorldFlag {
	var valueFlag uint32
	if value {
		valueFlag = 0x80000000
	}
	return RealWorldFlag(uint32(flag&^0x80000000) | valueFlag)
}
// AsTileFlag returns the flags as regular tile flag value.
// Inverse of TileFlag.ForCyberspace; the bits are unchanged.
func (flag CyberspaceFlag) AsTileFlag() TileFlag {
	return TileFlag(flag)
}
// GameOfLifeState returns the current game-of-life state (bits 5-6). Range: [0..3].
func (flag CyberspaceFlag) GameOfLifeState() int {
	return int(byte(flag&0x00000060) >> 5)
}
// WithGameOfLifeState returns a flag with the given game-of-life state set
// in bits 5-6. Note: unlike WithMusicIndex, out-of-range values are masked
// to their low two bits rather than ignored.
func (flag CyberspaceFlag) WithGameOfLifeState(value int) CyberspaceFlag {
	return CyberspaceFlag(uint32(flag&^0x00000060) | (uint32(value&0x3) << 5))
}
// FlightPull returns the pull applying in a tile. The value is a 5-bit
// quantity assembled from flag bit 24 (becomes pull bit 4) and flag bits
// 16-19 (become pull bits 0-3).
func (flag CyberspaceFlag) FlightPull() CyberspaceFlightPull {
	return CyberspaceFlightPull((uint32(flag&0x01000000) >> 20) | (uint32(flag&0x000F0000) >> 16))
}
// WithFlightPull returns a flag instance with the given pull applied.
// Inverse of FlightPull: pull bits 0-3 go to flag bits 16-19, pull bit 4
// goes to flag bit 24.
func (flag CyberspaceFlag) WithFlightPull(value CyberspaceFlightPull) CyberspaceFlag {
	newFlag := uint32(flag &^ 0x010F0000)
	newFlag |= uint32(value&0x0F) << 16
	newFlag |= uint32(value&0x10) << 20
	return CyberspaceFlag(newFlag)
}
package serialization
import (
i "io"
"time"
"github.com/google/uuid"
)
// SerializationWriter defines an interface for serialization of objects to a byte array.
// Implementations are format specific (e.g. JSON); Close must be called to
// release any underlying resources.
type SerializationWriter interface {
	i.Closer
	// Writes a String value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the String value to write.
	// Returns:
	//  - An error if any.
	WriteStringValue(key string, value *string) error
	// Writes a Bool value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the Bool value to write.
	// Returns:
	//  - An error if any.
	WriteBoolValue(key string, value *bool) error
	// Writes a Int32 value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the Int32 value to write.
	// Returns:
	//  - An error if any.
	WriteInt32Value(key string, value *int32) error
	// Writes a Int64 value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the Int64 value to write.
	// Returns:
	//  - An error if any.
	WriteInt64Value(key string, value *int64) error
	// Writes a Float32 value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the Float32 value to write.
	// Returns:
	//  - An error if any.
	WriteFloat32Value(key string, value *float32) error
	// Writes a Float64 value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the Float64 value to write.
	// Returns:
	//  - An error if any.
	WriteFloat64Value(key string, value *float64) error
	// Writes a ByteArray value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the ByteArray value to write.
	// Returns:
	//  - An error if any.
	WriteByteArrayValue(key string, value []byte) error
	// Writes a Time value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the Time value to write.
	// Returns:
	//  - An error if any.
	WriteTimeValue(key string, value *time.Time) error
	// Writes a UUID value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - value - the UUID value to write.
	// Returns:
	//  - An error if any.
	WriteUUIDValue(key string, value *uuid.UUID) error
	// Writes a Parsable value to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - item - the Parsable value to write.
	// Returns:
	//  - An error if any.
	WriteObjectValue(key string, item Parsable) error
	// Writes a collection of Parsable values to the byte array.
	// Parameters:
	//  - key - the key of the value to write (optional).
	//  - collection - the collection of Parsable value to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfObjectValues(key string, collection []Parsable) error
	// Writes a collection of String values to the byte array.
	// Parameters:
	//  - key - the key to write (optional).
	//  - collection - the collection to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfStringValues(key string, collection []string) error
	// Writes a collection of Bool values to the byte array.
	// Parameters:
	//  - key - the key to write (optional).
	//  - collection - the collection to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfBoolValues(key string, collection []bool) error
	// Writes a collection of Int32 values to the byte array.
	// Parameters:
	//  - key - the key to write (optional).
	//  - collection - the collection to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfInt32Values(key string, collection []int32) error
	// Writes a collection of Int64 values to the byte array.
	// Parameters:
	//  - key - the key to write (optional).
	//  - collection - the collection to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfInt64Values(key string, collection []int64) error
	// Writes a collection of Float32 values to the byte array.
	// Parameters:
	//  - key - the key to write (optional).
	//  - collection - the collection to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfFloat32Values(key string, collection []float32) error
	// Writes a collection of Float64 values to the byte array.
	// Parameters:
	//  - key - the key to write (optional).
	//  - collection - the collection to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfFloat64Values(key string, collection []float64) error
	// Writes a collection of Time values to the byte array.
	// Parameters:
	//  - key - the key to write (optional).
	//  - collection - the collection to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfTimeValues(key string, collection []time.Time) error
	// Writes a collection of UUID values to the byte array.
	// Parameters:
	//  - key - the key to write (optional).
	//  - collection - the collection to write.
	// Returns:
	//  - An error if any.
	WriteCollectionOfUUIDValues(key string, collection []uuid.UUID) error
	// Gets the resulting byte array from the serialization writer.
	// Returns:
	//  - The resulting byte array.
	//  - An error if any.
	GetSerializedContent() ([]byte, error)
	// Writes additional data to the byte array.
	// Parameters:
	//  - value - the data to write.
	// Returns:
	//  - An error if any.
	WriteAdditionalData(value map[string]interface{}) error
}
package cnns
import (
"math"
"gonum.org/v1/gonum/mat"
)
// Pool2D Pooling of matrix with defined window: windowSize/stride/pooling_type. See ref. https://en.wikipedia.org/wiki/Convolutional_neural_network#Pooling_layer
/*
	matrix - source matrix (channels stacked vertically)
	outRows - number of output rows (per channel)
	outCols - number of output columns
	channels - number of input channels
	windowsSize - size of "kernel"
	stride - step
	ptype - type of pooling (max/min/avg)
	returnMasks - when true, also return the binary masks and per-window
	              argmax indices needed for backpropagation (training mode);
	              when false, the last two return values are nil
*/
func Pool2D(matrix *mat.Dense, outRows, outCols, channels, windowSize, stride int, ptype poolingType, returnMasks bool) (*mat.Dense, *mat.Dense, [][][2]int) {
	sourceR, sourceC := matrix.Dims()
	flattenSlice := []float64{}
	if !returnMasks {
		// Inference path: pool each channel independently and stack the results.
		for c := 0; c < channels; c++ {
			partialSlice := make([]float64, outRows*outCols)
			tmpMatrix := ExtractChannel(matrix, sourceR, sourceC, channels, c)
			for y := 0; y < outRows; y++ {
				startYi := y * stride
				startYj := startYi + windowSize
				pool2D(tmpMatrix, partialSlice, y, startYi, startYj, outCols, windowSize, stride, ptype)
			}
			flattenSlice = append(flattenSlice, partialSlice...)
		}
		return mat.NewDense(outRows*channels, outCols, flattenSlice), nil, nil
	}
	// Training path: additionally build a source-sized 0/1 mask marking the
	// chosen element of each window, plus explicit (row, col) argmax indices.
	masks := &mat.Dense{}
	masksIndices := [][][2]int{}
	for c := 0; c < channels; c++ {
		partialSlice := make([]float64, outRows*outCols)
		tmpMatrix := ExtractChannel(matrix, sourceR, sourceC, channels, c)
		tmpR, tmpC := tmpMatrix.Dims()
		partialMasks := mat.NewDense(tmpR, tmpC, nil)
		partialMasks.Zero()
		partialMasksIndices := make([][][2]int, outRows)
		for y := 0; y < outRows; y++ {
			startYi := y * stride
			startYj := startYi + windowSize
			pool2DWithMasks(tmpMatrix, partialMasks, partialMasksIndices, partialSlice, y, startYi, startYj, outCols, windowSize, stride, ptype)
		}
		// Stack this channel's mask below the masks accumulated so far.
		if masks.IsEmpty() {
			masks = partialMasks
		} else {
			tmp := &mat.Dense{}
			tmp.Stack(masks, partialMasks)
			masks = tmp
		}
		flattenSlice = append(flattenSlice, partialSlice...)
		masksIndices = append(masksIndices, partialMasksIndices...)
	}
	return mat.NewDense(outRows*channels, outCols, flattenSlice), masks, masksIndices
}
// pool2D fills output row y of flattenMatrix by sliding the pooling window
// across the source matrix. See ref. Pool2D()
func pool2D(matrix *mat.Dense, flattenMatrix []float64, y, startYi, startYj, outCols, windowSize, stride int, ptype poolingType) {
	for x := 0; x < outCols; x++ {
		startX := x * stride
		window := matrix.Slice(startYi, startYj, startX, startX+windowSize)
		switch ptype {
		case poolMAX:
			flattenMatrix[y*outCols+x] = maxPool(window)
		case poolMIN:
			panic("poolMIN is not implemented")
		case poolAVG:
			panic("poolAVG is not implemented")
		default:
			panic("default behaviour for pool_%TYPE% is not implemented")
		}
	}
}
// pool2DWithMasks fills output row y of flattenMatrix like pool2D, and for
// each window additionally sets a 1 in masks at the chosen element's position
// and records its (row, col) index in partialMasksIndices[y].
func pool2DWithMasks(matrix, masks *mat.Dense, partialMasksIndices [][][2]int, flattenMatrix []float64, y, startYi, startYj, outCols, windowSize, stride int, ptype poolingType) {
	partialMasks := make([][2]int, outCols)
	for x := 0; x < outCols; x++ {
		startX := x * stride
		part := matrix.Slice(startYi, startYj, startX, startX+windowSize).(*mat.Dense)
		// partMask is a view into masks, so writes land in the full mask matrix.
		partMask := masks.Slice(startYi, startYj, startX, startX+windowSize).(*mat.Dense)
		switch ptype {
		case poolMAX:
			maxX, maxY, k := maxPoolIdx(part)
			partMask.Set(maxX, maxY, 1)
			partialMasks[x] = [2]int{maxX, maxY}
			flattenMatrix[y*outCols+x] = k
			break
		case poolMIN:
			panic("poolMIN is not implemented (with masks)")
		case poolAVG:
			panic("poolAVG is not implemented (with masks)")
		default:
			panic("default behaviour for pool_%TYPE% is not implemented (with masks)")
		}
	}
	partialMasksIndices[y] = partialMasks
}
// maxPoolIdx returns the row index, column index and value of the largest
// element of m. For an empty matrix it returns (-1, -1, -Inf).
func maxPoolIdx(m mat.Matrix) (int, int, float64) {
	rows, cols := m.Dims()
	bestVal := math.Inf(-1)
	bestRow, bestCol := -1, -1
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if v := m.At(r, c); v > bestVal {
				bestVal = v
				bestRow = r
				bestCol = c
			}
		}
	}
	return bestRow, bestCol, bestVal
}
// maxPool returns the largest element of m (delegates to gonum's mat.Max).
func maxPool(m mat.Matrix) float64 {
	return mat.Max(m)
}
package stl
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/stefanom/peano/geom"
"io"
"io/ioutil"
"strconv"
)
// Model holds a parsed STL solid: the raw 80-byte header, the facet count
// and the facets themselves.
type Model struct {
	Header [80]byte
	Length int32
	Facets []geom.Facet
}
// Parser reads STL content from r; s tokenises the input on the ASCII path.
type Parser struct {
	r io.Reader
	s *Scanner
	// buf is a one-token pushback buffer used by scan/unscan.
	buf struct {
		tok Token // last read token
		lit string // last read literal
		n int // buffer size (max=1)
	}
}
// NewParser returns a new instance of Parser.
// The reader is consumed entirely on the first call to Parse.
func NewParser(r io.Reader) *Parser {
	return &Parser{r: r}
}
// Parse the STL file into a Model.
// Inputs whose 80-byte header starts with "solid " are tokenised as ASCII
// STL; everything else is decoded as binary STL (little-endian facets after
// a 4-byte facet count).
func (p *Parser) Parse() (*Model, error) {
	data, err := ioutil.ReadAll(p.r)
	if err != nil {
		// Return the read failure instead of panicking; every other failure
		// mode of this method is already reported through the error result.
		return nil, err
	}
	if len(data) < 80 {
		return nil, fmt.Errorf("stl: input too short for 80-byte header: %d bytes", len(data))
	}
	m := new(Model)
	err = binary.Read(bytes.NewBuffer(data[0:80]), binary.LittleEndian, &m.Header)
	if err != nil {
		return m, err
	}
	start := string(m.Header[:6])
	if start == "solid " {
		p.s = NewScanner(bytes.NewReader(data))
		m.Facets = make([]geom.Facet, 0)
		// First token should be the "solid" keyword.
		if tok, lit := p.scanIgnoreWhitespace(); tok != SOLID {
			return nil, fmt.Errorf("found %q, expected 'solid'", lit)
		}
		// Next is the name of the solid, which we ignore.
		p.scanIgnoreWhitespace()
		// Then we loop over the facets.
		for {
			// Read a field.
			tok, lit := p.scanIgnoreWhitespace()
			if tok != FACET && tok != ENDSOLID {
				return nil, fmt.Errorf("found %q, expected 'facet' or 'endsolid'", lit)
			}
			if tok == ENDSOLID {
				// Push the token back so the check after the loop sees it.
				p.unscan()
				break
			}
			facet := new(geom.Facet)
			// Now we read the facet normal.
			if tok, lit := p.scanIgnoreWhitespace(); tok != NORMAL {
				return nil, fmt.Errorf("found %q, expected 'normal'", lit)
			}
			normal := new(geom.Vector)
			for j := 0; j < 3; j++ {
				tok, lit := p.scanIgnoreWhitespace()
				if tok != NUMBER {
					return nil, fmt.Errorf("found %q, expected number", lit)
				}
				coordinate, err := strconv.ParseFloat(lit, 32)
				if err != nil {
					return nil, err
				}
				normal[j] = float32(coordinate)
			}
			facet.Normal = *normal
			// Now we read the facet vertices.
			if tok, lit := p.scanIgnoreWhitespace(); tok != OUTER {
				return nil, fmt.Errorf("found %q, expected 'outer'", lit)
			}
			if tok, lit := p.scanIgnoreWhitespace(); tok != LOOP {
				return nil, fmt.Errorf("found %q, expected 'loop'", lit)
			}
			vectors := make([]geom.Vector, 3)
			for i := 0; i < 3; i++ {
				if tok, lit := p.scanIgnoreWhitespace(); tok != VERTEX {
					return nil, fmt.Errorf("found %q, expected 'vertex'", lit)
				}
				for j := 0; j < 3; j++ {
					tok, lit := p.scanIgnoreWhitespace()
					if tok != NUMBER {
						return nil, fmt.Errorf("found %q, expected number", lit)
					}
					coordinate, err := strconv.ParseFloat(lit, 32)
					if err != nil {
						return nil, err
					}
					vectors[i][j] = float32(coordinate)
				}
			}
			facet.Vertex1 = vectors[0]
			facet.Vertex2 = vectors[1]
			facet.Vertex3 = vectors[2]
			if tok, lit := p.scanIgnoreWhitespace(); tok != ENDLOOP {
				return nil, fmt.Errorf("found %q, expected 'endloop'", lit)
			}
			if tok, lit := p.scanIgnoreWhitespace(); tok != ENDFACET {
				return nil, fmt.Errorf("found %q, expected 'endfacet'", lit)
			}
			m.Facets = append(m.Facets, *facet)
		}
		// Finally the "endsolid" keyword that terminated the loop above.
		if tok, lit := p.scanIgnoreWhitespace(); tok != ENDSOLID {
			return nil, fmt.Errorf("found %q, expected 'endsolid'", lit)
		}
		// The very end is the solid name but we can ignore that.
		// Make sure the model length reflects the facets found.
		m.Length = int32(len(m.Facets))
		// Return the successfully parsed model.
		return m, nil
	}
	// Binary STL: a little-endian facet count follows the 80-byte header.
	if len(data) < 84 {
		return nil, fmt.Errorf("stl: input too short for facet count: %d bytes", len(data))
	}
	err = binary.Read(bytes.NewBuffer(data[80:84]), binary.LittleEndian, &m.Length)
	if err != nil {
		return m, err
	}
	if m.Length < 0 {
		return nil, fmt.Errorf("stl: negative facet count %d", m.Length)
	}
	// Create the slice of Facets.
	m.Facets = make([]geom.Facet, m.Length)
	// Read the slice of Facets directly from their binary representation.
	err = binary.Read(bytes.NewBuffer(data[84:]), binary.LittleEndian, &m.Facets)
	if err != nil {
		return m, err
	}
	return m, nil
}
// scanIgnoreWhitespace scans the next token, skipping a single leading
// whitespace token.
func (p *Parser) scanIgnoreWhitespace() (tok Token, lit string) {
	if tok, lit = p.scan(); tok != WS {
		return tok, lit
	}
	return p.scan()
}
// scan returns the next token from the underlying scanner.
// If a token has been unscanned then read that instead.
func (p *Parser) scan() (tok Token, lit string) {
	// If we have a token on the buffer, then return it.
	if p.buf.n != 0 {
		p.buf.n = 0
		return p.buf.tok, p.buf.lit
	}
	// Otherwise read the next token from the scanner.
	tok, lit = p.s.Scan()
	// Save it to the buffer in case we unscan later.
	p.buf.tok, p.buf.lit = tok, lit
	return
}
// unscan pushes the previously read token back onto the buffer.
// Only one token of lookahead is supported (buf.n is at most 1).
func (p *Parser) unscan() { p.buf.n = 1 }
package insulter
import (
"math/rand"
"time"
)
// https://github.com/aimxhaisse/fuu/blob/master/cfuu/dictionnary.json
// insultGender holds the gendered fragments of an insult: a prefix, a name
// and a suffix pool to draw from.
type insultGender struct {
	Prefix, Name, Suffix []string
}
// Insults struct: gender-neutral global prefixes plus one fragment pool per
// gender. Build one with CreateInsultDict.
type Insults struct {
	Prefix []string
	Man, Woman insultGender
}
// CreateInsultDict generate the insult dict (French insult fragments split
// by gender) and seeds the global math/rand source with the current time.
// NOTE(review): rand.Seed is deprecated since Go 1.20, and seeding inside a
// constructor is a surprising side effect — consider moving it to main.
func CreateInsultDict() Insults {
	rand.Seed(time.Now().UnixNano())
	maleInsults := insultGender{
		Prefix: []string{
			"gros", "con de", "moche", "fils de", "ptit",
			"bite de", "crouton de", "foutre de", "poil de",
			"marchand de", "suceur de", "mangeur de",
			"buveur de", "dealer de", "fabriquant de",
			"rongeur de", "voleur de",
		},
		Name: []string{
			"raton-laveur", "grumly", "lépreux",
			"hamster", "branloman", "<NAME>",
			"morbak", "jésus", "jedi", "haricot", "poireau",
			"triton", "larve", "chnoque", "sax alto", "urinoir", "tes morts",
		},
		Suffix: []string{
			"vert", "orange", "communiste", "capitaliste",
			"immonde", "gaulliste", "jaune", "bleu", "marron",
			"sale", "obèse", "infecte", "poilu", "puant",
			"végétarien", "pédéraste", "végétatif",
			"néandertalien", "de nazareth",
		},
	}
	femaleInsults := insultGender{
		Prefix: []string{
			"grosse", "conne", "moche", "immonde", "ptite",
			"bite de", "crouton de", "foutre de", "poil de",
			"marchande de", "suceuse de", "mangeuse de",
			"buveuse de", "dealeuse de", "rongeuse de",
			"voleuse de",
		},
		Name: []string{
			"conne", "grosse autiste", "vache", "louloute", "tache",
			"punaise", "mamie", "pomme-de-terre", "moule",
			"raclure", "vieille ordure", "larve", "chnoque", "mamans",
		},
		Suffix: []string{
			"verte", "orange", "communiste", "poilue", "puante",
			"vegetarienne", "moche", "neandertalienne",
		},
	}
	return Insults{
		Prefix: []string{
			"espèce de", "", "putain de",
		},
		Man: maleInsults,
		Woman: femaleInsults,
	}
}
// GenerateInsult returns an insult assembled from a random global prefix,
// a random gender, and that gender's random prefix/name/suffix fragments.
func (insults *Insults) GenerateInsult() string {
	globalPrefix := getRandomFromSliceString(insults.Prefix)
	// The gender is drawn before any gendered fragment.
	insultsPerGender := insults.Woman
	if getRandomFromSliceString([]string{"man", "woman"}) == "man" {
		insultsPerGender = insults.Man
	}
	insult := globalPrefix
	insult += " " + getRandomFromSliceString(insultsPerGender.Prefix)
	insult += " " + getRandomFromSliceString(insultsPerGender.Name)
	insult += " " + getRandomFromSliceString(insultsPerGender.Suffix)
	return insult
}
// getRandomFromSliceString returns a uniformly random element of s.
// It panics for an empty slice (rand.Intn panics on 0).
func getRandomFromSliceString(s []string) string {
	i := rand.Intn(len(s))
	return s[i]
}
package ciede2000
import (
"image/color"
"math"
)
// LAB is a color in the CIE L*a*b* color space: L is lightness, A the
// green-red axis, B the blue-yellow axis.
type LAB struct {
	L float64
	A float64
	B float64
}
func ToXYZ(c color.Color) (float64, float64, float64) {
ta, tg, tb, _ := c.RGBA()
r := float64(ta) / 65535.0
g := float64(tg) / 65535.0
b := float64(tb) / 65535.0
if r > 0.04045 {
r = math.Pow(((r + 0.055) / 1.055), 2.4)
} else {
r = r / 12.92
}
if g > 0.04045 {
g = math.Pow(((g + 0.055) / 1.055), 2.4)
} else {
g = g / 12.92
}
if b > 0.04045 {
b = math.Pow(((b + 0.055) / 1.055), 2.4)
} else {
b = b / 12.92
}
r *= 100
g *= 100
b *= 100
return r*0.4124 + g*0.3576 + b*0.1805, r*0.2126 + g*0.7152 + b*0.0722, r*0.0193 + g*0.1192 + b*0.9505
}
// ToLAB converts a color to CIE L*a*b* (D65 reference white), clamping a
// slightly negative L (from rounding) to zero.
func ToLAB(c color.Color) *LAB {
	x, y, z := ToXYZ(c)
	x /= 95.047
	y /= 100.000
	z /= 108.883
	// CIE transfer function f(t) = t^(1/3) for t > 0.008856,
	// else 7.787*t + 16/116.
	// Fix: the offset must be the floating-point constant 16.0/116.0.
	// The previous untyped integer expression (16 / 116) evaluates to the
	// constant 0 in Go, which zeroed the linear segment for dark colors.
	if x > 0.008856 {
		x = math.Pow(x, (1.0 / 3.0))
	} else {
		x = (7.787 * x) + (16.0 / 116.0)
	}
	if y > 0.008856 {
		y = math.Pow(y, (1.0 / 3.0))
	} else {
		y = (7.787 * y) + (16.0 / 116.0)
	}
	if z > 0.008856 {
		z = math.Pow(z, (1.0 / 3.0))
	} else {
		z = (7.787 * z) + (16.0 / 116.0)
	}
	l := (116 * y) - 16
	a := 500 * (x - y)
	b := 200 * (y - z)
	if l < 0.0 {
		// Guard against tiny negative rounding errors near black.
		l = 0.0
	}
	return &LAB{l, a, b}
}
// deg2Rad converts an angle from degrees to radians.
func deg2Rad(deg float64) float64 {
	const factor = math.Pi / 180.0
	return deg * factor
}
// rad2Deg converts an angle from radians to degrees.
func rad2Deg(rad float64) float64 {
	return rad * (180.0 / math.Pi)
}
// CIEDE2000 returns the CIE Delta E 2000 color difference between two LAB
// colors. The equation and page numbers in the comments below refer to an
// implementation-notes paper on the CIEDE2000 formula (apparently Sharma,
// Wu & Dalal — confirm before relying on the citation).
func CIEDE2000(lab1, lab2 *LAB) float64 {
	/*
	 * "For these and all other numerical/graphical delta E00 values
	 * reported in this article, we set the parametric weighting factors
	 * to unity(i.e., k_L = k_C = k_H = 1.0)." (Page 27).
	 */
	k_L, k_C, k_H := 1.0, 1.0, 1.0
	deg360InRad := deg2Rad(360.0)
	deg180InRad := deg2Rad(180.0)
	pow25To7 := 6103515625.0 /* pow(25, 7) */
	/*
	 * Step 1
	 */
	/* Equation 2 */
	C1 := math.Sqrt((lab1.A * lab1.A) + (lab1.B * lab1.B))
	C2 := math.Sqrt((lab2.A * lab2.A) + (lab2.B * lab2.B))
	/* Equation 3 */
	barC := (C1 + C2) / 2.0
	/* Equation 4 */
	G := 0.5 * (1 - math.Sqrt(math.Pow(barC, 7)/(math.Pow(barC, 7)+pow25To7)))
	/* Equation 5 */
	a1Prime := (1.0 + G) * lab1.A
	a2Prime := (1.0 + G) * lab2.A
	/* Equation 6 */
	CPrime1 := math.Sqrt((a1Prime * a1Prime) + (lab1.B * lab1.B))
	CPrime2 := math.Sqrt((a2Prime * a2Prime) + (lab2.B * lab2.B))
	/* Equation 7 */
	var hPrime1 float64
	if lab1.B == 0 && a1Prime == 0 {
		hPrime1 = 0.0
	} else {
		hPrime1 = math.Atan2(lab1.B, a1Prime)
		/*
		 * This must be converted to a hue angle in degrees between 0
		 * and 360 by addition of 2 to negative hue angles.
		 */
		if hPrime1 < 0 {
			hPrime1 += deg360InRad
		}
	}
	var hPrime2 float64
	if lab2.B == 0 && a2Prime == 0 {
		hPrime2 = 0.0
	} else {
		hPrime2 = math.Atan2(lab2.B, a2Prime)
		/*
		 * This must be converted to a hue angle in degrees between 0
		 * and 360 by addition of 2 to negative hue angles.
		 */
		if hPrime2 < 0 {
			hPrime2 += deg360InRad
		}
	}
	/*
	 * Step 2
	 */
	/* Equation 8 */
	deltaLPrime := lab2.L - lab1.L
	/* Equation 9 */
	deltaCPrime := CPrime2 - CPrime1
	/* Equation 10: hue difference, wrapped into (-180, 180] degrees */
	var deltahPrime float64
	CPrimeProduct := CPrime1 * CPrime2
	if CPrimeProduct == 0 {
		deltahPrime = 0
	} else {
		/* Avoid the fabs() call */
		deltahPrime = hPrime2 - hPrime1
		if deltahPrime < -deg180InRad {
			deltahPrime += deg360InRad
		} else if deltahPrime > deg180InRad {
			deltahPrime -= deg360InRad
		}
	}
	/* Equation 11 */
	deltaHPrime := 2.0 * math.Sqrt(CPrimeProduct) * math.Sin(deltahPrime/2.0)
	/*
	 * Step 3
	 */
	/* Equation 12 */
	barLPrime := (lab1.L + lab2.L) / 2.0
	/* Equation 13 */
	barCPrime := (CPrime1 + CPrime2) / 2.0
	/* Equation 14: mean hue, accounting for the circular wrap-around */
	var barhPrime float64
	hPrimeSum := hPrime1 + hPrime2
	if CPrime1*CPrime2 == 0 {
		barhPrime = hPrimeSum
	} else {
		if math.Abs(hPrime1-hPrime2) <= deg180InRad {
			barhPrime = hPrimeSum / 2.0
		} else {
			if hPrimeSum < deg360InRad {
				barhPrime = (hPrimeSum + deg360InRad) / 2.0
			} else {
				barhPrime = (hPrimeSum - deg360InRad) / 2.0
			}
		}
	}
	/* Equation 15 */
	T := 1.0 - (0.17 * math.Cos(barhPrime-deg2Rad(30.0))) +
		(0.24 * math.Cos(2.0*barhPrime)) +
		(0.32 * math.Cos((3.0*barhPrime)+deg2Rad(6.0))) -
		(0.20 * math.Cos((4.0*barhPrime)-deg2Rad(63.0)))
	/* Equation 16 */
	deltaTheta := deg2Rad(30.0) * math.Exp(-math.Pow((barhPrime-deg2Rad(275.0))/deg2Rad(25.0), 2.0))
	/* Equation 17 */
	R_C := 2.0 * math.Sqrt(math.Pow(barCPrime, 7.0)/(math.Pow(barCPrime, 7.0)+pow25To7))
	/* Equation 18 */
	S_L := 1 + ((0.015 * math.Pow(barLPrime-50.0, 2.0)) /
		math.Sqrt(20+math.Pow(barLPrime-50.0, 2.0)))
	/* Equation 19 */
	S_C := 1 + (0.045 * barCPrime)
	/* Equation 20 */
	S_H := 1 + (0.015 * barCPrime * T)
	/* Equation 21 */
	R_T := (-math.Sin(2.0 * deltaTheta)) * R_C
	/* Equation 22 */
	return math.Sqrt(
		math.Pow(deltaLPrime/(k_L*S_L), 2.0) +
			math.Pow(deltaCPrime/(k_C*S_C), 2.0) +
			math.Pow(deltaHPrime/(k_H*S_H), 2.0) +
			(R_T * (deltaCPrime / (k_C * S_C)) * (deltaHPrime / (k_H * S_H))))
}
func Diff(c1, c2 color.Color) float64 {
return CIEDE2000(ToLAB(c1), ToLAB(c2))
} | vendor/github.com/mattn/go-ciede2000/ciede2000.go | 0.577138 | 0.42925 | ciede2000.go | starcoder |
package texture
import (
"github.com/jphsd/graphics2d"
"github.com/jphsd/graphics2d/util"
"image/color"
"math"
)
// Reflect contains a line along which a reflection is performed. The line defines where the
// mirror is. Points on the + side of the line remain untransformed, points on the other are
// reflected through the transformation.
type Reflect struct {
	Src Field // field being mirrored
	Start []float64 // first point on the mirror line
	End []float64 // second point on the mirror line
	Xfm *graphics2d.Aff3 // reflection transform about the Start-End line
}
// NewReflect creates a new Reflection placing the mirror along lp1, lp2.
// The reflection transform is precomputed once and reused in Eval2.
func NewReflect(src Field, lp1, lp2 []float64) *Reflect {
	xfm := graphics2d.NewAff3()
	xfm.Reflect(lp1[0], lp1[1], lp2[0], lp2[1])
	return &Reflect{src, lp1, lp2, xfm}
}
// Eval2 implements the Field interface.
// Points on the negative side of the mirror line are mapped through the
// reflection before sampling the source field.
func (r *Reflect) Eval2(x, y float64) float64 {
	pt := []float64{x, y}
	if util.SideOfLine(r.Start, r.End, pt) < 0 {
		pt = r.Xfm.Apply(pt)[0]
	}
	return r.Src.Eval2(pt[0], pt[1])
}
// ReflectVF contains a line along which a reflection is performed. The line defines where the
// mirror is. Points on the + side of the line remain untransformed, points on the other are
// reflected through the transformation.
type ReflectVF struct {
	Src VectorField // vector field being mirrored
	Start []float64 // first point on the mirror line
	End []float64 // second point on the mirror line
	Xfm *graphics2d.Aff3 // reflection transform about the Start-End line
}
// NewReflectVF creates a new ReflectVF placing the mirror along lp1, lp2.
func NewReflectVF(src VectorField, lp1, lp2 []float64) *ReflectVF {
	xfm := graphics2d.NewAff3()
	xfm.Reflect(lp1[0], lp1[1], lp2[0], lp2[1])
	return &ReflectVF{src, lp1, lp2, xfm}
}
// Eval2 implements the Field interface.
// NOTE(review): this reflects points on the *positive* side of the line
// (> 0), the opposite of Reflect.Eval2 (< 0) and of the doc on the type —
// confirm which convention is intended before changing either.
func (r *ReflectVF) Eval2(x, y float64) []float64 {
	pt := []float64{x, y}
	if util.SideOfLine(r.Start, r.End, pt) > 0 {
		pt = r.Xfm.Apply(pt)[0]
	}
	return r.Src.Eval2(pt[0], pt[1])
}
// ReflectCF contains a line along which a reflection is performed. The line defines where the
// mirror is. Points on the + side of the line remain untransformed, points on the other are
// reflected through the transformation.
type ReflectCF struct {
	Src ColorField // color field being mirrored
	Start []float64 // first point on the mirror line
	End []float64 // second point on the mirror line
	Xfm *graphics2d.Aff3 // reflection transform about the Start-End line
}
// NewReflectCF creates a new ReflectCF placing the mirror along lp1, lp2.
func NewReflectCF(src ColorField, lp1, lp2 []float64) *ReflectCF {
	xfm := graphics2d.NewAff3()
	xfm.Reflect(lp1[0], lp1[1], lp2[0], lp2[1])
	return &ReflectCF{src, lp1, lp2, xfm}
}
// Eval2 implements the Field interface.
// NOTE(review): like ReflectVF.Eval2 this reflects the *positive* side of
// the line (> 0), while Reflect.Eval2 reflects the negative side — confirm
// the intended convention.
func (r *ReflectCF) Eval2(x, y float64) color.Color {
	pt := []float64{x, y}
	if util.SideOfLine(r.Start, r.End, pt) > 0 {
		pt = r.Xfm.Apply(pt)[0]
	}
	return r.Src.Eval2(pt[0], pt[1])
}
// Kaleidoscope creates a new field by placing n mirrors, evenly spaced, starting at an angle, offs, and
// meeting at point c. The n mirror lines span pi radians (spacing pi/n).
func Kaleidoscope(src Field, c []float64, n int, offs float64) Field {
	th := math.Pi / float64(n)
	ca := offs
	last := src
	for i := 0; i < n; i++ {
		// Second point placed 10 units out from c; only the line's
		// direction matters to the reflection, not its length.
		pt := []float64{c[0] + math.Cos(ca)*10.0, c[1] + math.Sin(ca)*10.0}
		ca += th
		// Stack each mirror on top of the previous result.
		last = NewReflect(last, c, pt)
	}
	return last
}

// KaleidoscopeCF creates a new color field by placing n mirrors, evenly spaced, starting at an angle, offs, and
// meeting at point c. Color-field analogue of Kaleidoscope.
func KaleidoscopeCF(src ColorField, c []float64, n int, offs float64) ColorField {
	th := math.Pi / float64(n)
	ca := offs
	last := src
	for i := 0; i < n; i++ {
		pt := []float64{c[0] + math.Cos(ca)*10.0, c[1] + math.Sin(ca)*10.0}
		ca += th
		last = NewReflectCF(last, c, pt)
	}
	return last
}
// Kaleidoscope2 creates a new field by placing n mirrors, evenly spaced, starting at an angle, offs, and
// meeting at point c. It then places a second set of mirrors at a distance d from c joining the spokes.
func Kaleidoscope2(src Field, c []float64, d float64, n int, offs float64) Field {
	th := math.Pi / float64(n)
	ca := offs
	last := src
	pts := make([][]float64, n)
	// First pass: radial "spoke" mirrors from c, recording each spoke's
	// endpoint at distance d for the second pass.
	for i := 0; i < n; i++ {
		pt := []float64{c[0] + math.Cos(ca)*d, c[1] + math.Sin(ca)*d}
		pts[i] = pt
		ca += th
		last = NewReflect(last, c, pt)
	}
	// Second pass: mirrors along the polygon edges joining consecutive
	// spoke endpoints.
	prev := pts[0]
	for i := 1; i < n; i++ {
		cur := pts[i]
		last = NewReflect(last, prev, cur)
		prev = cur
	}
	// Close the polygon: last endpoint back to the first.
	last = NewReflect(last, prev, pts[0])
	return last
}

// Kaleidoscope2CF creates a new color field by placing n mirrors, evenly spaced, starting at an angle, offs, and
// meeting at point c. It then places a second set of mirrors at a distance d from c joining the spokes.
// Color-field analogue of Kaleidoscope2.
func Kaleidoscope2CF(src ColorField, c []float64, d float64, n int, offs float64) ColorField {
	th := math.Pi / float64(n)
	ca := offs
	last := src
	pts := make([][]float64, n)
	for i := 0; i < n; i++ {
		pt := []float64{c[0] + math.Cos(ca)*d, c[1] + math.Sin(ca)*d}
		pts[i] = pt
		ca += th
		last = NewReflectCF(last, c, pt)
	}
	prev := pts[0]
	for i := 1; i < n; i++ {
		cur := pts[i]
		last = NewReflectCF(last, prev, cur)
		prev = cur
	}
	last = NewReflectCF(last, prev, pts[0])
	return last
}
package gosort
// heapsort performs an in-place sort of the provided values.
// Overall O(n log n) time, O(1) extra space. The heap is laid out in the
// slice itself: children of index j live at 2j+1 and 2j+2.
func heapsort(values []int) {
	lv := len(values)
	// STAGE 1: Re-order elements to satisfy the heap property by creating a max
	// heap, where no child has a greater value than its parent.
	debug("stage 1: build max heap: %v\n", values)
	// Outside loop is O(n) iterations, and we can skip first item because it
	// is already sorted when in a list by itself.
	for i := 1; i < lv; i++ {
		debug("\ti: %d\n", i)
		// Inside loop is O(log n): shift ancestors down while the new value
		// is not smaller than them, then drop the new value into the hole.
		// (Equal ancestors are also shifted; that is harmless, just extra
		// writes.)
		j := i
		v := values[j]
		debug("\tj: %v; value: %v\n", j, v)
		for j > 0 {
			parentIndex := (j - 1) >> 1
			parentValue := values[parentIndex]
			debug("\tparent index: %v\n\tparent value: %v\n", parentIndex, parentValue)
			if v < parentValue {
				debug("\tcannot bubble above a larger value\n")
				break
			}
			values[j] = parentValue // move parent value down
			j = parentIndex         // next iteration look at parent position
		}
		if j != i {
			values[j] = v
		}
	}
	// POST: values is a max-heap where no child has a value greater than its
	// parent.
	// STAGE 2: The first element in the max heap is the largest item in the
	// list. Iteratively take the first element and place it at the end of the
	// list, re-balance the heap, and then shrink the list by one element.
	debug("stage 2: sort max heap: %v\n", values)
	// Outside loop is O(n), where we take the first and largest element in the
	// max heap, place it at the end of the list, then use a loop to re-balance
	// the list.
	for i := lv - 1; i > 0; i-- {
		// Swap first and final element. i is the boundary of the unsorted
		// region; values[i:] is already sorted.
		t := values[i]        // save final element
		values[i] = values[0] // move largest element into final position
		// Sift t down from the root: at each level follow the larger child,
		// shifting it up, until t is no smaller than both children.
		j := 0
		for {
			left := j<<1 + 1
			if left >= i {
				debug("\tleft >= i (%v >= %v)\n", left, i)
				break
			}
			vl := values[left]
			right := left + 1
			if right < i {
				// Two children: prefer the right one when it is strictly
				// larger, so the heap property holds after the shift.
				if vr := values[right]; vl < vr {
					if vr < t {
						debug("\tvr < t (%v < %v)\n", vr, t)
						break
					}
					values[j] = vr
					j = right // go right
					continue
				}
			}
			if vl < t {
				debug("\tvl < t (%v < %v)\n", vl, t)
				break
			}
			values[j] = vl
			j = left // go left
		}
		values[j] = t
	}
	debug("complete: %v\n", values)
}
package json
import (
"encoding/base64"
"fmt"
"math"
"strconv"
"time"
"unicode/utf8"
"github.com/novln/soba/encoder"
)
// Source forked from https://github.com/uber-go/zap and from https://github.com/rs/zerolog
// hex holds the lowercase hexadecimal digits used to emit \u00XX escape
// sequences. For JSON-escaping. See Encoder.safeAddString(string) below.
const hex = "0123456789abcdef"
// AddArray adds the field key with given ArrayMarshaler to the encoder buffer.
// NOTE: the receiver name shadows the imported "encoder" package inside the
// body; the package is only referenced in the signatures, where it resolves
// correctly.
func (encoder *Encoder) AddArray(key string, value encoder.ArrayMarshaler) {
	encoder.AppendKey(key)
	encoder.AppendArray(value)
}

// AddObject adds the field key with given ObjectMarshaler to the encoder buffer.
func (encoder *Encoder) AddObject(key string, value encoder.ObjectMarshaler) {
	encoder.AppendKey(key)
	encoder.AppendObject(value)
}

// AddObjects adds the field key with given list of ObjectMarshaler to the
// encoder buffer, encoded as a JSON array of objects.
func (encoder *Encoder) AddObjects(key string, values []encoder.ObjectMarshaler) {
	encoder.AppendKey(key)
	encoder.AppendArrayStart()
	for i := range values {
		encoder.AppendObject(values[i])
	}
	encoder.AppendArrayEnd()
}
// AddInt adds the field key with given integer to the encoder buffer.
func (encoder *Encoder) AddInt(key string, value int) {
encoder.AppendKey(key)
encoder.AppendInt(value)
}
// AddInts adds the field key with given list of integer to the encoder buffer.
func (encoder *Encoder) AddInts(key string, values []int) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendInt(values[i])
}
encoder.AppendArrayEnd()
}
// AddInt8 adds the field key with given integer to the encoder buffer.
func (encoder *Encoder) AddInt8(key string, value int8) {
encoder.AppendKey(key)
encoder.AppendInt8(value)
}
// AddInt8s adds the field key with given list of integer to the encoder buffer.
func (encoder *Encoder) AddInt8s(key string, values []int8) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendInt8(values[i])
}
encoder.AppendArrayEnd()
}
// AddInt16 adds the field key with given integer to the encoder buffer.
func (encoder *Encoder) AddInt16(key string, value int16) {
encoder.AppendKey(key)
encoder.AppendInt16(value)
}
// AddInt16s adds the field key with given list of integer to the encoder buffer.
func (encoder *Encoder) AddInt16s(key string, values []int16) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendInt16(values[i])
}
encoder.AppendArrayEnd()
}
// AddInt32 adds the field key with given integer to the encoder buffer.
func (encoder *Encoder) AddInt32(key string, value int32) {
encoder.AppendKey(key)
encoder.AppendInt32(value)
}
// AddInt32s adds the field key with given list of integer to the encoder buffer.
func (encoder *Encoder) AddInt32s(key string, values []int32) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendInt32(values[i])
}
encoder.AppendArrayEnd()
}
// AddInt64 adds the field key with given integer to the encoder buffer.
func (encoder *Encoder) AddInt64(key string, value int64) {
encoder.AppendKey(key)
encoder.AppendInt64(value)
}
// AddInt64s adds the field key with given list of integer to the encoder buffer.
func (encoder *Encoder) AddInt64s(key string, values []int64) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendInt64(values[i])
}
encoder.AppendArrayEnd()
}
// AddUint adds the field key with given unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint(key string, value uint) {
encoder.AppendKey(key)
encoder.AppendUint(value)
}
// AddUints adds the field key with given list of unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUints(key string, values []uint) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendUint(values[i])
}
encoder.AppendArrayEnd()
}
// AddUint8 adds the field key with given unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint8(key string, value uint8) {
encoder.AppendKey(key)
encoder.AppendUint8(value)
}
// AddUint8s adds the field key with given list of unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint8s(key string, values []uint8) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendUint8(values[i])
}
encoder.AppendArrayEnd()
}
// AddUint16 adds the field key with given unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint16(key string, value uint16) {
encoder.AppendKey(key)
encoder.AppendUint16(value)
}
// AddUint16s adds the field key with given list of unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint16s(key string, values []uint16) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendUint16(values[i])
}
encoder.AppendArrayEnd()
}
// AddUint32 adds the field key with given unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint32(key string, value uint32) {
encoder.AppendKey(key)
encoder.AppendUint32(value)
}
// AddUint32s adds the field key with given list of unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint32s(key string, values []uint32) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendUint32(values[i])
}
encoder.AppendArrayEnd()
}
// AddUint64 adds the field key with given unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint64(key string, value uint64) {
encoder.AppendKey(key)
encoder.AppendUint64(value)
}
// AddUint64s adds the field key with given list of unsigned integer to the encoder buffer.
func (encoder *Encoder) AddUint64s(key string, values []uint64) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendUint64(values[i])
}
encoder.AppendArrayEnd()
}
// AddFloat32 adds the field key with given number to the encoder buffer.
func (encoder *Encoder) AddFloat32(key string, value float32) {
encoder.AppendKey(key)
encoder.AppendFloat32(value)
}
// AddFloat32s adds the field key with given list of number to the encoder buffer.
func (encoder *Encoder) AddFloat32s(key string, values []float32) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendFloat32(values[i])
}
encoder.AppendArrayEnd()
}
// AddFloat64 adds the field key with given number to the encoder buffer.
func (encoder *Encoder) AddFloat64(key string, value float64) {
encoder.AppendKey(key)
encoder.AppendFloat64(value)
}
// AddFloat64s adds the field key with given list of number to the encoder buffer.
func (encoder *Encoder) AddFloat64s(key string, values []float64) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendFloat64(values[i])
}
encoder.AppendArrayEnd()
}
// AddString adds the field key with given string to the encoder buffer.
func (encoder *Encoder) AddString(key string, value string) {
encoder.AppendKey(key)
encoder.AppendString(value)
}
// AddStrings adds the field key with given list of string to the encoder buffer.
func (encoder *Encoder) AddStrings(key string, values []string) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendString(values[i])
}
encoder.AppendArrayEnd()
}
// AddStringer adds the field key with given Stringer to the encoder buffer.
func (encoder *Encoder) AddStringer(key string, value fmt.Stringer) {
encoder.AppendKey(key)
encoder.AppendString(value.String())
}
// AddStringers adds the field key with given list of Stringer to the encoder buffer.
func (encoder *Encoder) AddStringers(key string, values []fmt.Stringer) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendString(values[i].String())
}
encoder.AppendArrayEnd()
}
// AddTime adds the field key with given Time to the encoder buffer.
func (encoder *Encoder) AddTime(key string, value time.Time) {
encoder.AppendKey(key)
encoder.AppendTime(value)
}
// AddTimes adds the field key with given list of Time to the encoder buffer.
func (encoder *Encoder) AddTimes(key string, values []time.Time) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendTime(values[i])
}
encoder.AppendArrayEnd()
}
// AddDuration adds the field key with given Duration to the encoder buffer.
func (encoder *Encoder) AddDuration(key string, value time.Duration) {
encoder.AppendKey(key)
encoder.AppendDuration(value)
}
// AddDurations adds the field key with given list of Duration to the encoder buffer.
func (encoder *Encoder) AddDurations(key string, values []time.Duration) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendDuration(values[i])
}
encoder.AppendArrayEnd()
}
// AddBool adds the field key with given boolean to the encoder buffer.
func (encoder *Encoder) AddBool(key string, value bool) {
encoder.AppendKey(key)
encoder.AppendBool(value)
}
// AddBools adds the field key with given list of boolean to the encoder buffer.
func (encoder *Encoder) AddBools(key string, values []bool) {
encoder.AppendKey(key)
encoder.AppendArrayStart()
for i := range values {
encoder.AppendBool(values[i])
}
encoder.AppendArrayEnd()
}
// AddBinary adds the field key with given buffer or bytes to the encoder buffer.
func (encoder *Encoder) AddBinary(key string, value []byte) {
encoder.AppendKey(key)
encoder.AppendBinary(value)
}
// AddNull adds the field key as a null value to the encoder buffer.
func (encoder *Encoder) AddNull(key string) {
encoder.AppendKey(key)
encoder.AppendNull()
}
// AppendArray converts the input array marshaler and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendArray(value encoder.ArrayMarshaler) {
encoder.AppendElementSeparator()
encoder.AppendArrayStart()
value.Encode(encoder)
encoder.AppendArrayEnd()
}
// AppendObject converts the input object marshaler and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendObject(value encoder.ObjectMarshaler) {
encoder.AppendElementSeparator()
encoder.AppendBeginMarker()
value.Encode(encoder)
encoder.AppendEndMarker()
}
// AppendInt converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendInt(value int) {
encoder.AppendInt64(int64(value))
}
// AppendInt8 converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendInt8(value int8) {
encoder.AppendInt64(int64(value))
}
// AppendInt16 converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendInt16(value int16) {
encoder.AppendInt64(int64(value))
}
// AppendInt32 converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendInt32(value int32) {
encoder.AppendInt64(int64(value))
}
// AppendInt64 converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendInt64(value int64) {
encoder.AppendElementSeparator()
encoder.buffer = strconv.AppendInt(encoder.buffer, value, 10)
}
// AppendUint converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendUint(value uint) {
encoder.AppendUint64(uint64(value))
}
// AppendUint8 converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendUint8(value uint8) {
encoder.AppendUint64(uint64(value))
}
// AppendUint16 converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendUint16(value uint16) {
encoder.AppendUint64(uint64(value))
}
// AppendUint32 converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendUint32(value uint32) {
encoder.AppendUint64(uint64(value))
}
// AppendUint64 converts the input integer and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendUint64(value uint64) {
encoder.AppendElementSeparator()
encoder.buffer = strconv.AppendUint(encoder.buffer, value, 10)
}
// AppendFloat32 converts the input number and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendFloat32(value float32) {
	encoder.appendFloat(float64(value), 32)
}

// AppendFloat64 converts the input number and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendFloat64(value float64) {
	encoder.appendFloat(value, 64)
}

// AppendString converts and escapes the input string and appends the encoded value
// (double-quoted) to the encoder buffer.
func (encoder *Encoder) AppendString(value string) {
	encoder.AppendElementSeparator()
	encoder.buffer = append(encoder.buffer, '"')
	encoder.safeAddString(value)
	encoder.buffer = append(encoder.buffer, '"')
}

// AppendBool converts the input bool to a string ("true"/"false") and appends
// the encoded value to the encoder buffer.
func (encoder *Encoder) AppendBool(value bool) {
	encoder.AppendElementSeparator()
	encoder.buffer = strconv.AppendBool(encoder.buffer, value)
}
// AppendTime converts the input time to an RFC 3339 (nanosecond-precision)
// string and appends the encoded value to the encoder buffer.
func (encoder *Encoder) AppendTime(value time.Time) {
	encoder.AppendElementSeparator()
	encoder.buffer = append(encoder.buffer, '"')
	// RFC 3339 output contains only digits, 'T', 'Z', ':', '+', '-' and '.',
	// none of which require JSON escaping, so format directly into the
	// buffer instead of allocating a scratch slice and re-scanning it.
	encoder.buffer = value.AppendFormat(encoder.buffer, time.RFC3339Nano)
	encoder.buffer = append(encoder.buffer, '"')
}
// AppendDuration converts the input duration to its String() form (e.g.
// "1.5s", "200µs") and appends the encoded value, double-quoted, to the
// encoder buffer. The escaping pass is kept because 'µ' is multi-byte UTF-8.
func (encoder *Encoder) AppendDuration(value time.Duration) {
	encoder.AppendElementSeparator()
	encoder.buffer = append(encoder.buffer, '"')
	encoder.safeAddString(value.String())
	encoder.buffer = append(encoder.buffer, '"')
}
// AppendBinary base64-encodes the input bytes (standard alphabet) and
// appends the encoded value, double-quoted, to the encoder buffer.
func (encoder *Encoder) AppendBinary(value []byte) {
	b64 := base64.StdEncoding
	encoder.AppendElementSeparator()
	encoder.buffer = append(encoder.buffer, '"')
	// The standard base64 alphabet (A-Z, a-z, 0-9, '+', '/', '=') never
	// requires JSON escaping, so encode directly into the buffer rather
	// than through a scratch slice followed by an escaping copy pass.
	offset := len(encoder.buffer)
	encoder.buffer = append(encoder.buffer, make([]byte, b64.EncodedLen(len(value)))...)
	b64.Encode(encoder.buffer[offset:], value)
	encoder.buffer = append(encoder.buffer, '"')
}
// AppendNull appends a null value to the encoder buffer.
func (encoder *Encoder) AppendNull() {
	encoder.AppendElementSeparator()
	encoder.buffer = append(encoder.buffer, 'n', 'u', 'l', 'l')
}

// appendFloat converts a number and appends it to the encoder buffer.
// Since JSON does not permit NaN or Infinity, we make a tradeoff and store those types as string.
// size is the bit width (32 or 64) passed to strconv for shortest-round-trip
// formatting.
func (encoder *Encoder) appendFloat(value float64, size int) {
	encoder.AppendElementSeparator()
	switch {
	case math.IsNaN(value):
		encoder.buffer = append(encoder.buffer, `"NaN"`...)
	case math.IsInf(value, 1):
		encoder.buffer = append(encoder.buffer, `"+Inf"`...)
	case math.IsInf(value, -1):
		encoder.buffer = append(encoder.buffer, `"-Inf"`...)
	default:
		// 'f' format, -1 precision: shortest decimal that round-trips.
		encoder.buffer = strconv.AppendFloat(encoder.buffer, value, 'f', -1, size)
	}
}
// safeAddString JSON-escapes a string and appends it to the encoder buffer.
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
// Single-byte runs are handled by tryAddRuneSelf; invalid UTF-8 bytes are
// replaced with an escaped U+FFFD; valid multi-byte runes are copied raw
// (JSON strings may contain raw UTF-8).
func (encoder *Encoder) safeAddString(value string) {
	i := 0
	for i < len(value) {
		if encoder.tryAddRuneSelf(value[i]) {
			i++
			continue
		}
		char, size := utf8.DecodeRuneInString(value[i:])
		if encoder.tryAddRuneError(char, size) {
			i++
			continue
		}
		encoder.buffer = append(encoder.buffer, value[i:i+size]...)
		i += size
	}
}

// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for a slice of byte.
func (encoder *Encoder) safeAddByteString(value []byte) {
	i := 0
	for i < len(value) {
		if encoder.tryAddRuneSelf(value[i]) {
			i++
			continue
		}
		char, size := utf8.DecodeRune(value[i:])
		if encoder.tryAddRuneError(char, size) {
			i++
			continue
		}
		encoder.buffer = append(encoder.buffer, value[i:i+size]...)
		i += size
	}
}
// tryAddRuneSelf appends the given byte when it is a valid single-byte UTF-8
// character (below utf8.RuneSelf), applying JSON escaping as needed.
// It reports whether the byte was consumed.
func (encoder *Encoder) tryAddRuneSelf(char byte) bool {
	if char >= utf8.RuneSelf {
		return false
	}
	switch {
	case char >= 0x20 && char != '\\' && char != '"':
		// Printable byte needing no escape.
		encoder.buffer = append(encoder.buffer, char)
	case char == '\\' || char == '"':
		encoder.buffer = append(encoder.buffer, '\\', char)
	case char == '\n':
		encoder.buffer = append(encoder.buffer, '\\', 'n')
	case char == '\r':
		encoder.buffer = append(encoder.buffer, '\\', 'r')
	case char == '\t':
		encoder.buffer = append(encoder.buffer, '\\', 't')
	default:
		// Remaining control bytes (< 0x20) become \u00XX escapes.
		encoder.buffer = append(encoder.buffer, '\\', 'u', '0', '0', hex[char>>4], hex[char&0xF])
	}
	return true
}
// tryAddRuneError appends an escaped U+FFFD replacement character when the
// decoded rune signals invalid UTF-8 (RuneError with size 1). It reports
// whether the error case was handled.
func (encoder *Encoder) tryAddRuneError(char rune, size int) bool {
	if char == utf8.RuneError && size == 1 {
		encoder.buffer = append(encoder.buffer, '\\', 'u', 'f', 'f', 'f', 'd')
		return true
	}
	return false
}
package semver
import (
"fmt"
"testing"
)
// ValuesGenerator enumerates every combination of a set of inclusive
// integer ranges, primarily for exhaustive table-driven tests.
type ValuesGenerator struct {
	valueDefs []valueConstraint
}

// valueConstraint bounds one generated value; min and max are inclusive.
type valueConstraint struct {
	min int
	max int
}

// NewValuesGenerator returns an empty generator; configure it with AddValue.
func NewValuesGenerator() *ValuesGenerator {
	return &ValuesGenerator{}
}

// AddValue appends an inclusive [min, max] range for the next value and
// returns the generator to allow chaining.
func (g *ValuesGenerator) AddValue(min, max int) *ValuesGenerator {
	g.valueDefs = append(g.valueDefs, valueConstraint{min, max})
	return g
}

// MakeAllPermutations returns every combination of the configured ranges in
// odometer order: the first value varies fastest. It returns nil when no
// ranges have been configured (the previous implementation looped forever
// in that case, because the carry loop never executed).
func (g ValuesGenerator) MakeAllPermutations() [][]int {
	numValues := len(g.valueDefs)
	if numValues == 0 {
		return nil
	}
	values := make([]int, numValues)
	for i := 0; i < numValues; i++ {
		values[i] = g.valueDefs[i].min
	}
	var ret [][]int
	for {
		copied := make([]int, numValues)
		copy(copied, values)
		ret = append(ret, copied)
		// Increment from the left, rolling each exhausted position back to
		// its minimum and carrying into the next position to the right.
		for pos := 0; pos < numValues; pos++ {
			if values[pos] < g.valueDefs[pos].max {
				values[pos]++
				break
			}
			if pos == numValues-1 {
				return ret // every position is at its max: done
			}
			values[pos] = g.valueDefs[pos].min
		}
	}
}
// TestAll runs action as a subtest for every permutation of the configured
// value ranges. Each subtest is named "values" followed by the values it
// receives.
func (g ValuesGenerator) TestAll(t *testing.T, action func(*testing.T, []int)) {
	for _, perm := range g.MakeAllPermutations() {
		values := perm
		desc := "values"
		for _, v := range values {
			desc = desc + fmt.Sprintf(" %d", v)
		}
		t.Run(desc, func(t *testing.T) {
			action(t, values)
		})
	}
}
// TestAll1 is a convenience wrapper around TestAll for a single value.
func (g ValuesGenerator) TestAll1(t *testing.T, action func(*testing.T, int)) {
	g.TestAll(t, func(t *testing.T, values []int) {
		action(t, values[0])
	})
}

// TestAll2 is a convenience wrapper around TestAll for two values.
func (g ValuesGenerator) TestAll2(t *testing.T, action func(*testing.T, int, int)) {
	g.TestAll(t, func(t *testing.T, values []int) {
		action(t, values[0], values[1])
	})
}

// TestAll3 is a convenience wrapper around TestAll for three values.
func (g ValuesGenerator) TestAll3(t *testing.T, action func(*testing.T, int, int, int)) {
	g.TestAll(t, func(t *testing.T, values []int) {
		action(t, values[0], values[1], values[2])
	})
}

// TestAll4 is a convenience wrapper around TestAll for four values.
func (g ValuesGenerator) TestAll4(t *testing.T, action func(*testing.T, int, int, int, int)) {
	g.TestAll(t, func(t *testing.T, values []int) {
		action(t, values[0], values[1], values[2], values[3])
	})
}
package rand
import (
"errors"
"io"
"math/big"
)
// uint64Max is the largest value representable in a uint64.
const uint64Max = (1 << 64) - 1

// smallPrimes lists the first primes, used for cheap trial division of
// candidates before the expensive probabilistic primality test.
var smallPrimes = []uint8{
	2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53,
}

// smallPrimesProduct is the product of all values in smallPrimes; a single
// modulus by it yields residues usable against every small prime at once.
var smallPrimesProduct = new(big.Int).SetUint64(16294579238595022365)

// oneInt is a reusable big.Int constant holding 1.
var oneInt = new(big.Int).SetUint64(1)
// Prime generates a random prime p with exactly `bits` bits such that
// r divides p - 1. (The candidate is only accepted when p.BitLen() == bits.)
// Randomness is drawn from rand; the result passes 20 rounds of
// Miller-Rabin via ProbablyPrime, so it is prime with overwhelming
// probability but is not provably prime.
func Prime(rand io.Reader, bits int, r uint64) (p *big.Int, err error) {
	if bits < 2 {
		err = errors.New("crypto/rand: prime size must be at least 2-bit")
		return
	}
	// b is the number of significant bits in the most significant byte.
	b := uint(bits % 8)
	if b == 0 {
		b = 8
	}
	bytes := make([]byte, (bits+7)/8)
	p = new(big.Int)
	bigMod := new(big.Int)
	rBig := new(big.Int).SetUint64(r)
	for {
		_, err = io.ReadFull(rand, bytes)
		if err != nil {
			return nil, err
		}
		// Clear bits in the first byte to make sure the candidate has a size <= bits.
		bytes[0] &= uint8(int(1<<b) - 1)
		// Don't let the value be too small, i.e, set the most significant two bits.
		// Setting the top two bits, rather than just the top bit,
		// means that when two of these values are multiplied together,
		// the result isn't ever one bit short.
		if b >= 2 {
			bytes[0] |= 3 << (b - 2)
		} else {
			// Here b==1, because b cannot be zero.
			bytes[0] |= 1
			if len(bytes) > 1 {
				bytes[1] |= 0x80
			}
		}
		// Make the value odd since an even number this large certainly isn't prime.
		bytes[len(bytes)-1] |= 1
		p.SetBytes(bytes)
		// If p > r then make p be equivalent to 1 mod r
		// By taking p = p - ((p - 1) mod r)
		if p.Cmp(rBig) > 0 {
			// p - 1
			bigMod.Sub(p, oneInt)
			// (p - 1) mod r
			bigMod.Mod(bigMod, rBig)
			// p - ((p - 1) mod r)
			p.Sub(p, bigMod)
		} else {
			// Candidate too small to satisfy the r | p-1 constraint; redraw.
			continue
		}
		// Calculate the value mod the product of smallPrimes. If it's
		// a multiple of any of these primes we add two until it isn't.
		// The probability of overflowing is minimal and can be ignored
		// because we still perform Miller-Rabin tests on the result.
		bigMod.Mod(p, smallPrimesProduct)
		mod := bigMod.Uint64()
	NextDelta:
		// Step in multiples of r so the r | p-1 invariant is preserved;
		// deltaMax keeps mod + delta*r from overflowing uint64.
		for delta, deltaMax := uint64(0), (uint64Max-mod)/r; delta < 1<<20 && delta <= deltaMax; delta += 2 {
			m := mod + delta*r
			for _, prime := range smallPrimes {
				if m%uint64(prime) == 0 && (bits > 6 || m != uint64(prime)) {
					continue NextDelta
				}
			}
			if delta > 0 {
				bigMod.SetUint64(delta * r)
				p.Add(p, bigMod)
			}
			break
		}
		// There is a tiny possibility that, by adding delta, we caused
		// the number to be one bit too long. Thus we check BitLen
		// here.
		if p.ProbablyPrime(20) && p.BitLen() == bits {
			return
		}
	}
}
package trie
// Trie defines a search tree based on runes. A '*' child acts as a
// wildcard that consumes a run of non-space characters (see Get/Match).
type Trie struct {
	root *Node
}

// Node defines a node in a trie.
type Node struct {
	value *Value // payload attached when end is true
	nodes map[rune]*Node // children keyed by rune
	end bool // true when a stored value terminates at this node
}

// New creates a new trie.
func New() *Trie {
	root := &Node{nodes: make(map[rune]*Node), end: false}
	return &Trie{root}
}
// Put adds values to the trie. Each val is wrapped via NewValue(name, val)
// and inserted along the rune path of its string form, creating nodes as
// needed; the terminal node is marked and carries the payload.
func (t *Trie) Put(name string, vals ...string) {
	for _, val := range vals {
		v := NewValue(name, val)
		n := t.root
		for _, r := range v.val {
			m, ok := n.nodes[r]
			if !ok {
				m = &Node{nodes: make(map[rune]*Node), end: false}
				n.nodes[r] = m
			}
			n = m
		}
		n.value = v
		n.end = true
	}
}
// Get gets a value for the string s. A '*' child acts as a wildcard that
// consumes a run of non-space characters; matching resumes at the next
// space. The boolean result reports whether a stored value terminates at
// the matched node.
func (t *Trie) Get(s string) (*Value, bool) {
	n := t.root
	i := 0
	runes := []rune(s)
	for ; i < len(runes); i++ {
		m, ok := n.nodes['*']
		if ok {
			// Wildcard: swallow characters until the next space.
			// (runes[i] is already a rune; the previous redundant
			// rune(...) conversion is dropped, matching Match below.)
			for ; i < len(runes) && runes[i] != ' '; i++ {
			}
			if i == len(runes) {
				// Wildcard consumed the rest of the input; the
				// post-loop check below descends into it.
				break
			}
			i-- // re-examine the space on the next iteration
			n = m
		} else {
			r := runes[i]
			if m, ok := n.nodes[r]; ok {
				n = m
			} else {
				return nil, false
			}
		}
	}
	// If we stopped on a non-terminal node, a trailing wildcard child may
	// hold the value.
	if !n.end {
		if m, ok := n.nodes['*']; ok {
			n = m
		}
	}
	return n.value, n.end
}
// Contains returns true if the string s is in the trie.
func (t *Trie) Contains(s string) bool {
	_, ok := t.Get(s)
	return ok
}

// Match returns true if the string matches a rune path in the trie regardless
// whether the trie contains a corresponding value.
func (t *Trie) Match(s string) bool {
	n := t.root
	i := 0
	runes := []rune(s)
	for ; i < len(runes); i++ {
		m, ok := n.nodes['*']
		if ok {
			// Wildcard: swallow characters until the next space; if it
			// consumes the rest of the string, that is a match.
			for ; i < len(runes) && runes[i] != ' '; i++ {
			}
			if i == len(runes) {
				return true
			}
			i-- // re-examine the space on the next iteration
			n = m
		} else {
			r := runes[i]
			if m, ok := n.nodes[r]; ok {
				n = m
			} else {
				return false
			}
		}
	}
	// Input exhausted: it matches when we reached a leaf or terminal node,
	// or when the path can continue with a space or a wildcard.
	if len(n.nodes) == 0 || n.end {
		return true
	}
	if _, ok := n.nodes[' ']; ok {
		return true
	}
	_, ok := n.nodes['*']
	return ok
}
// AllValues returns all the values of the node and its child nodes.
// Order is unspecified because map iteration order is random.
func (n *Node) AllValues() Values {
	results := Values{}
	if n.end {
		results = append(results, n.value)
	}
	for _, m := range n.nodes {
		results = append(results, m.AllValues()...)
	}
	return results
}
// Autocomplete returns all the matching values of the prefix, sorted.
// It returns an empty Values when the prefix is not a path in the trie.
func (t *Trie) Autocomplete(prefix string) Values {
	n := t.root
	// Descend to the node matching the prefix, rune by rune.
	for _, r := range prefix {
		if m, ok := n.nodes[r]; ok {
			n = m
		} else {
			return Values{}
		}
	}
	values := n.AllValues()
	values.Sort()
	return values
}
package seal
import (
"github.com/rumis/seal/expr"
)
// Eq generates a Standard equal expression
func Eq(col string, val interface{}) expr.Expr {
return expr.Op(col, "=", val)
}
// StaticEq generates a static equal expression which without params
func StaticEq(col1 string, col2 string) expr.Expr {
return StaticOp(col1, "=", col2)
}
// Op generates a Standard expression
// eg. >,>=,<,<=,....
func Op(col string, op string, val interface{}) expr.Expr {
return expr.Op(col, op, val)
}
// StaticOp generates a Standard expression
// Which always used for multi table select
func StaticOp(col1 string, op string, col2 string) expr.Expr {
return expr.New(col1 + op + col2)
}
// Not generates a NOT expression which prefixes "NOT" to the specified expression.
func Not(e expr.Expr) expr.Expr {
return expr.Not(e)
}
// And generates an AND expression which concatenates the given expressions with "AND".
func And(exps ...expr.Expr) expr.Expr {
if len(exps) == 1 {
return exps[0]
}
return expr.Group("AND", exps...)
}
// Or generates an OR expression which concatenates the given expressions with "OR".
func Or(exps ...expr.Expr) expr.Expr {
if len(exps) == 1 {
return exps[0]
}
return expr.Group("OR", exps...)
}
// In generates an IN expression for the specified column and the list of allowed values.
// If values is empty, a SQL "0=1" will be generated which represents a false expression.
func In(col string, values ...interface{}) expr.Expr {
return expr.In(col, values...)
}
// NotIn generates an NOT IN expression for the specified column and the list of disallowed values.
// If values is empty, an empty string will be returned indicating a true expression.
func NotIn(col string, values ...interface{}) expr.Expr {
return expr.NotIn(col, values...)
}
// Like generates a LIKE expression for the specified column and the possible strings that the column should be like.
// If multiple values are present, the column should be like *all* of them. For example, Like("name", "key", "word")
// will generate a SQL expression: "name" LIKE "%key%" AND "name" LIKE "%word%".
func Like(col string, value string) expr.LikeExp {
return expr.Like(col, value)
}
// NotLike generates a NOT LIKE expression.
// For example, NotLike("name", "key", "word") will generate a SQL expression:
// "name" NOT LIKE "%key%" AND "name" NOT LIKE "%word%". Please see Like() for more details.
func NotLike(col string, value string) expr.LikeExp {
return expr.NotLike(col, value)
}
// Exists generates an EXISTS expression by prefixing "EXISTS" to the given expression.
func Exists(exp expr.Expr) expr.Expr {
return expr.Exists(exp)
}
// NotExists generates an EXISTS expression by prefixing "NOT EXISTS" to the given expression.
func NotExists(exp expr.Expr) expr.Expr {
return expr.NotExists(exp)
}
// Between generates a BETWEEN expression.
// For example, Between("age", 10, 30) generates: "age" BETWEEN 10 AND 30
func Between(col string, from, to interface{}) expr.Expr {
return expr.Between(col, from, to)
}
// NotBetween generates a NOT BETWEEN expression.
// For example, NotBetween("age", 10, 30) generates: "age" NOT BETWEEN 10 AND 30
func NotBetween(col string, from, to interface{}) expr.Expr {
return expr.NotBetween(col, from, to)
}
// Count generates a COUNT() expression
// For example:
// Count("id"): Count(id)
// Count("id","id_count"): Count(id) AS id_count
// Count("id","id_count","user"): Count(user.id) AS id_count
func Count(col string, alias_table ...string) expr.Expr {
return expr.Aggregate("COUNT", col, alias_table...)
}
// SUM generates a SUM() expression
func Sum(col string, alias_table ...string) expr.Expr {
return expr.Aggregate("SUM", col, alias_table...)
}
// Count generates a MAX() expression
func Max(col string, alias_table ...string) expr.Expr {
return expr.Aggregate("MAX", col, alias_table...)
}
// Min generates a MIN() expression
func Min(col string, alias_table ...string) expr.Expr {
return expr.Aggregate("MIN", col, alias_table...)
}
// Avg generates an AVG() expression
func Avg(col string, alias_table ...string) expr.Expr {
return expr.Aggregate("AVG", col, alias_table...)
} | stmt.go | 0.770119 | 0.510435 | stmt.go | starcoder |
package fb
// Trading halt type codes. The numeric values mirror the HaltType enum of
// the originating flatbuffers schema (see the ///< annotations carried over
// from that schema); EnumNamesHaltType provides the string name of each code.
// NOTE(review): this looks like generated code — regenerate from the schema
// rather than editing by hand.
const (
	///< No Halt Type
	HaltTypenone = 0
	///< Unspecified news-related halt
	HaltTypenews = 1
	///< Denotes a regulatory trading halt when relevant news influencing the security is being disseminated. Trading is suspended until the primary market determines that an adequate publication or disclosure of information has occurred.
	HaltTypenews_disseminated = 2
	///< Denotes a non-regulatory halt condition where there is a significant imbalance of buy or sell orders.
	HaltTypeorder_imbalance = 3
	///< Denotes a non-regulatory Trading Halt. The ability to trade a security by a Participant is temporarily inhibited due to a systems, equipment or communications facility problem or for other technical reasons.
	HaltTypeequipment_change = 4
	///< Unspecified halt requiring additional information before resuming trading.
	HaltTypepending_additional_info = 5
	///< A financial status designation used to denote the ability to create new shares of this Exchange Traded Product (ETP) has been temporarily suspended by the ETP Issuer. ETPs that are closed for Creations typically are allowed to continue trading on the listing market once the ETP Issuer publishes the press release.
	HaltTypesuspended = 6
	///< Denotes a regulatory Trading halt mandated by the SEC for this security.
	HaltTypesec = 7
	///< Unspecified halt.
	HaltTypenot_specified = 8
	///< Denotes a five-minute regulatory trading halt (pause) for an individual security that does not exit a Limit State within 15 seconds
	HaltTypeluld_pause = 9
	///< The level 1 market-wide circuit breaker (MWCB) has been triggered due to a 7%+ decline in S&P500 from last-session close. If before 3:25PM, all securities are halted for 15 min. If at or after 3:25PM trading continues unless there is a Level 3 MWCB.
	HaltTypemarketwide_halt_level1 = 10
	///< the level 2 market-wide circuit breaker (MWCB) has been triggered due to a 13% decline in S&P500 from last-session close. If before 3:25PM all securities are halted for 15 min. If after 3:25PM, trading continues unless there is a Level 3 MWCB.
	HaltTypemarketwide_halt_level2 = 11
	///< The level 3 market-wide circuit breaker (MWCB) has been triggered due to a 20% decline in S&P500 from last-session close. All equities are halted for the remainder of the day.
	HaltTypemarketwide_halt_level3 = 12
	///< Indicates the deactivation of a market-wide circuit breaker. This should only occur for level 1 and level 2 MWCB events.
	HaltTypemarketwide_halt_resume = 13
	///< Denotes a five-minute regulatory trading halt (pause) for an individual security that does not exit a Limit State within 15 seconds. The limit-state is calculated depending on the exchange. This is 5% for >$3.00 S&P 500, Russel 1000 securities, and certain ETPs, 10% for all other securities > $3.00. See: http://cdn.batstrading.com/resources/membership/BATS_US_Equities_Limit_Up_Limit_Down_FAQ.pdf
	HaltTypeluld_straddle = 14
	///< Halt due to unusual market activity. (Note: Find CTA Multicast equivalent)
	HaltTypeextraordinary_market_activity = 15
	///< Indicates an unspecified halt for an exchange traded product
	HaltTypeetf = 16
	///< Indicates a halt issued by an exchange for failure to meet listing or other unspecified regulatory requirements
	HaltTypenon_compliance = 17
	///< A regulatory halt issued for equities not meeting reporting requirements
	HaltTypefilings_not_current = 18
	///< Halt reason issued for exchange operations being impacted. For instance, a storm.
	HaltTypeoperations = 19
	///< Pseudo-halt generated for IPO not occurring at market open
	HaltTypeipo_pending = 20
	///< Halted due to an intra-day event like a split. Rare.
	HaltTypecorporate_action = 21
	///< Quotations have temporarily become unavailable for an unspecified reason.
	HaltTypequote_unavailable = 22
	///< Generic halt condition for a single stock
	HaltTypesingle_stock_pause = 23
	///< Generic resume condition for a single stock
	HaltTypesingle_stock_pause_resume = 24
)
var EnumNamesHaltType = map[int]string{
HaltTypenone: "none",
HaltTypenews: "news",
HaltTypenews_disseminated: "news_disseminated",
HaltTypeorder_imbalance: "order_imbalance",
HaltTypeequipment_change: "equipment_change",
HaltTypepending_additional_info: "pending_additional_info",
HaltTypesuspended: "suspended",
HaltTypesec: "sec",
HaltTypenot_specified: "not_specified",
HaltTypeluld_pause: "luld_pause",
HaltTypemarketwide_halt_level1: "marketwide_halt_level1",
HaltTypemarketwide_halt_level2: "marketwide_halt_level2",
HaltTypemarketwide_halt_level3: "marketwide_halt_level3",
HaltTypemarketwide_halt_resume: "marketwide_halt_resume",
HaltTypeluld_straddle: "luld_straddle",
HaltTypeextraordinary_market_activity: "extraordinary_market_activity",
HaltTypeetf: "etf",
HaltTypenon_compliance: "non_compliance",
HaltTypefilings_not_current: "filings_not_current",
HaltTypeoperations: "operations",
HaltTypeipo_pending: "ipo_pending",
HaltTypecorporate_action: "corporate_action",
HaltTypequote_unavailable: "quote_unavailable",
HaltTypesingle_stock_pause: "single_stock_pause",
HaltTypesingle_stock_pause_resume: "single_stock_pause_resume",
} | go/schemas/fb/HaltType.go | 0.670608 | 0.463869 | HaltType.go | starcoder |
package timezones
// data is the raw JSON timezone table, sourced from https://github.com/dmfilipenko/timezones.json
var data = `
[
{
"value": "Dateline Standard Time",
"abbr": "DST",
"offset": -12,
"isdst": false,
"text": "(UTC-12:00) International Date Line West",
"utc": [
"Etc/GMT+12"
]
},
{
"value": "UTC-11",
"abbr": "U",
"offset": -11,
"isdst": false,
"text": "(UTC-11:00) Coordinated Universal Time-11",
"utc": [
"Etc/GMT+11",
"Pacific/Midway",
"Pacific/Niue",
"Pacific/Pago_Pago"
]
},
{
"value": "Hawaiian Standard Time",
"abbr": "HST",
"offset": -10,
"isdst": false,
"text": "(UTC-10:00) Hawaii",
"utc": [
"Etc/GMT+10",
"Pacific/Honolulu",
"Pacific/Johnston",
"Pacific/Rarotonga",
"Pacific/Tahiti"
]
},
{
"value": "Alaskan Standard Time",
"abbr": "AKDT",
"offset": -8,
"isdst": true,
"text": "(UTC-09:00) Alaska",
"utc": [
"America/Anchorage",
"America/Juneau",
"America/Nome",
"America/Sitka",
"America/Yakutat"
]
},
{
"value": "Pacific Standard Time (Mexico)",
"abbr": "PDT",
"offset": -7,
"isdst": true,
"text": "(UTC-08:00) Baja California",
"utc": [
"America/Santa_Isabel"
]
},
{
"value": "Pacific Daylight Time",
"abbr": "PDT",
"offset": -7,
"isdst": true,
"text": "(UTC-07:00) Pacific Time (US & Canada)",
"utc": [
"America/Dawson",
"America/Los_Angeles",
"America/Tijuana",
"America/Vancouver",
"America/Whitehorse"
]
},
{
"value": "Pacific Standard Time",
"abbr": "PST",
"offset": -8,
"isdst": false,
"text": "(UTC-08:00) Pacific Time (US & Canada)",
"utc": [
"America/Dawson",
"America/Los_Angeles",
"America/Tijuana",
"America/Vancouver",
"America/Whitehorse",
"PST8PDT"
]
},
{
"value": "US Mountain Standard Time",
"abbr": "UMST",
"offset": -7,
"isdst": false,
"text": "(UTC-07:00) Arizona",
"utc": [
"America/Creston",
"America/Dawson_Creek",
"America/Hermosillo",
"America/Phoenix",
"Etc/GMT+7"
]
},
{
"value": "Mountain Standard Time (Mexico)",
"abbr": "MDT",
"offset": -6,
"isdst": true,
"text": "(UTC-07:00) Chihuahua, La Paz, Mazatlan",
"utc": [
"America/Chihuahua",
"America/Mazatlan"
]
},
{
"value": "Mountain Standard Time",
"abbr": "MDT",
"offset": -6,
"isdst": true,
"text": "(UTC-07:00) Mountain Time (US & Canada)",
"utc": [
"America/Boise",
"America/Cambridge_Bay",
"America/Denver",
"America/Edmonton",
"America/Inuvik",
"America/Ojinaga",
"America/Yellowknife",
"MST7MDT"
]
},
{
"value": "Central America Standard Time",
"abbr": "CAST",
"offset": -6,
"isdst": false,
"text": "(UTC-06:00) Central America",
"utc": [
"America/Belize",
"America/Costa_Rica",
"America/El_Salvador",
"America/Guatemala",
"America/Managua",
"America/Tegucigalpa",
"Etc/GMT+6",
"Pacific/Galapagos"
]
},
{
"value": "Central Standard Time",
"abbr": "CDT",
"offset": -5,
"isdst": true,
"text": "(UTC-06:00) Central Time (US & Canada)",
"utc": [
"America/Chicago",
"America/Indiana/Knox",
"America/Indiana/Tell_City",
"America/Matamoros",
"America/Menominee",
"America/North_Dakota/Beulah",
"America/North_Dakota/Center",
"America/North_Dakota/New_Salem",
"America/Rainy_River",
"America/Rankin_Inlet",
"America/Resolute",
"America/Winnipeg",
"CST6CDT"
]
},
{
"value": "Central Standard Time (Mexico)",
"abbr": "CDT",
"offset": -5,
"isdst": true,
"text": "(UTC-06:00) Guadalajara, Mexico City, Monterrey",
"utc": [
"America/Bahia_Banderas",
"America/Cancun",
"America/Merida",
"America/Mexico_City",
"America/Monterrey"
]
},
{
"value": "Canada Central Standard Time",
"abbr": "CCST",
"offset": -6,
"isdst": false,
"text": "(UTC-06:00) Saskatchewan",
"utc": [
"America/Regina",
"America/Swift_Current"
]
},
{
"value": "SA Pacific Standard Time",
"abbr": "SPST",
"offset": -5,
"isdst": false,
"text": "(UTC-05:00) Bogota, Lima, Quito",
"utc": [
"America/Bogota",
"America/Cayman",
"America/Coral_Harbour",
"America/Eirunepe",
"America/Guayaquil",
"America/Jamaica",
"America/Lima",
"America/Panama",
"America/Rio_Branco",
"Etc/GMT+5"
]
},
{
"value": "Eastern Standard Time",
"abbr": "EDT",
"offset": -4,
"isdst": true,
"text": "(UTC-05:00) Eastern Time (US & Canada)",
"utc": [
"America/Detroit",
"America/Havana",
"America/Indiana/Petersburg",
"America/Indiana/Vincennes",
"America/Indiana/Winamac",
"America/Iqaluit",
"America/Kentucky/Monticello",
"America/Louisville",
"America/Montreal",
"America/Nassau",
"America/New_York",
"America/Nipigon",
"America/Pangnirtung",
"America/Port-au-Prince",
"America/Thunder_Bay",
"America/Toronto",
"EST5EDT"
]
},
{
"value": "US Eastern Standard Time",
"abbr": "UEDT",
"offset": -4,
"isdst": true,
"text": "(UTC-05:00) Indiana (East)",
"utc": [
"America/Indiana/Marengo",
"America/Indiana/Vevay",
"America/Indianapolis"
]
},
{
"value": "Venezuela Standard Time",
"abbr": "VST",
"offset": -4.5,
"isdst": false,
"text": "(UTC-04:30) Caracas",
"utc": [
"America/Caracas"
]
},
{
"value": "Paraguay Standard Time",
"abbr": "PYT",
"offset": -4,
"isdst": false,
"text": "(UTC-04:00) Asuncion",
"utc": [
"America/Asuncion"
]
},
{
"value": "Atlantic Standard Time",
"abbr": "ADT",
"offset": -3,
"isdst": true,
"text": "(UTC-04:00) Atlantic Time (Canada)",
"utc": [
"America/Glace_Bay",
"America/Goose_Bay",
"America/Halifax",
"America/Moncton",
"America/Thule",
"Atlantic/Bermuda"
]
},
{
"value": "Central Brazilian Standard Time",
"abbr": "CBST",
"offset": -4,
"isdst": false,
"text": "(UTC-04:00) Cuiaba",
"utc": [
"America/Campo_Grande",
"America/Cuiaba"
]
},
{
"value": "SA Western Standard Time",
"abbr": "SWST",
"offset": -4,
"isdst": false,
"text": "(UTC-04:00) Georgetown, La Paz, Manaus, San Juan",
"utc": [
"America/Anguilla",
"America/Antigua",
"America/Aruba",
"America/Barbados",
"America/Blanc-Sablon",
"America/Boa_Vista",
"America/Curacao",
"America/Dominica",
"America/Grand_Turk",
"America/Grenada",
"America/Guadeloupe",
"America/Guyana",
"America/Kralendijk",
"America/La_Paz",
"America/Lower_Princes",
"America/Manaus",
"America/Marigot",
"America/Martinique",
"America/Montserrat",
"America/Port_of_Spain",
"America/Porto_Velho",
"America/Puerto_Rico",
"America/Santo_Domingo",
"America/St_Barthelemy",
"America/St_Kitts",
"America/St_Lucia",
"America/St_Thomas",
"America/St_Vincent",
"America/Tortola",
"Etc/GMT+4"
]
},
{
"value": "Pacific SA Standard Time",
"abbr": "PSST",
"offset": -4,
"isdst": false,
"text": "(UTC-04:00) Santiago",
"utc": [
"America/Santiago",
"Antarctica/Palmer"
]
},
{
"value": "Newfoundland Standard Time",
"abbr": "NDT",
"offset": -2.5,
"isdst": true,
"text": "(UTC-03:30) Newfoundland",
"utc": [
"America/St_Johns"
]
},
{
"value": "E. South America Standard Time",
"abbr": "ESAST",
"offset": -3,
"isdst": false,
"text": "(UTC-03:00) Brasilia",
"utc": [
"America/Sao_Paulo"
]
},
{
"value": "Argentina Standard Time",
"abbr": "AST",
"offset": -3,
"isdst": false,
"text": "(UTC-03:00) Buenos Aires",
"utc": [
"America/Argentina/La_Rioja",
"America/Argentina/Rio_Gallegos",
"America/Argentina/Salta",
"America/Argentina/San_Juan",
"America/Argentina/San_Luis",
"America/Argentina/Tucuman",
"America/Argentina/Ushuaia",
"America/Buenos_Aires",
"America/Catamarca",
"America/Cordoba",
"America/Jujuy",
"America/Mendoza"
]
},
{
"value": "SA Eastern Standard Time",
"abbr": "SEST",
"offset": -3,
"isdst": false,
"text": "(UTC-03:00) Cayenne, Fortaleza",
"utc": [
"America/Araguaina",
"America/Belem",
"America/Cayenne",
"America/Fortaleza",
"America/Maceio",
"America/Paramaribo",
"America/Recife",
"America/Santarem",
"Antarctica/Rothera",
"Atlantic/Stanley",
"Etc/GMT+3"
]
},
{
"value": "Greenland Standard Time",
"abbr": "GDT",
"offset": -3,
"isdst": true,
"text": "(UTC-03:00) Greenland",
"utc": [
"America/Godthab"
]
},
{
"value": "Montevideo Standard Time",
"abbr": "MST",
"offset": -3,
"isdst": false,
"text": "(UTC-03:00) Montevideo",
"utc": [
"America/Montevideo"
]
},
{
"value": "Bahia Standard Time",
"abbr": "BST",
"offset": -3,
"isdst": false,
"text": "(UTC-03:00) Salvador",
"utc": [
"America/Bahia"
]
},
{
"value": "UTC-02",
"abbr": "U",
"offset": -2,
"isdst": false,
"text": "(UTC-02:00) Coordinated Universal Time-02",
"utc": [
"America/Noronha",
"Atlantic/South_Georgia",
"Etc/GMT+2"
]
},
{
"value": "Mid-Atlantic Standard Time",
"abbr": "MDT",
"offset": -1,
"isdst": true,
"text": "(UTC-02:00) Mid-Atlantic - Old",
"utc": []
},
{
"value": "Azores Standard Time",
"abbr": "ADT",
"offset": 0,
"isdst": true,
"text": "(UTC-01:00) Azores",
"utc": [
"America/Scoresbysund",
"Atlantic/Azores"
]
},
{
"value": "Cape Verde Standard Time",
"abbr": "CVST",
"offset": -1,
"isdst": false,
"text": "(UTC-01:00) Cape Verde Is.",
"utc": [
"Atlantic/Cape_Verde",
"Etc/GMT+1"
]
},
{
"value": "Morocco Standard Time",
"abbr": "MDT",
"offset": 1,
"isdst": true,
"text": "(UTC) Casablanca",
"utc": [
"Africa/Casablanca",
"Africa/El_Aaiun"
]
},
{
"value": "UTC",
"abbr": "UTC",
"offset": 0,
"isdst": false,
"text": "(UTC) Coordinated Universal Time",
"utc": [
"America/Danmarkshavn",
"Etc/GMT"
]
},
{
"value": "GMT Standard Time",
"abbr": "GMT",
"offset": 0,
"isdst": false,
"text": "(UTC) Edinburgh, London",
"utc": [
"Europe/Isle_of_Man",
"Europe/Guernsey",
"Europe/Jersey",
"Europe/London"
]
},
{
"value": "British Summer Time",
"abbr": "BST",
"offset": 1,
"isdst": true,
"text": "(UTC+01:00) Edinburgh, London",
"utc": [
"Europe/Isle_of_Man",
"Europe/Guernsey",
"Europe/Jersey",
"Europe/London"
]
},
{
"value": "GMT Standard Time",
"abbr": "GDT",
"offset": 1,
"isdst": true,
"text": "(UTC) Dublin, Lisbon",
"utc": [
"Atlantic/Canary",
"Atlantic/Faeroe",
"Atlantic/Madeira",
"Europe/Dublin",
"Europe/Lisbon"
]
},
{
"value": "Greenwich Standard Time",
"abbr": "GST",
"offset": 0,
"isdst": false,
"text": "(UTC) Monrovia, Reykjavik",
"utc": [
"Africa/Abidjan",
"Africa/Accra",
"Africa/Bamako",
"Africa/Banjul",
"Africa/Bissau",
"Africa/Conakry",
"Africa/Dakar",
"Africa/Freetown",
"Africa/Lome",
"Africa/Monrovia",
"Africa/Nouakchott",
"Africa/Ouagadougou",
"Africa/Sao_Tome",
"Atlantic/Reykjavik",
"Atlantic/St_Helena"
]
},
{
"value": "W. Europe Standard Time",
"abbr": "WEDT",
"offset": 2,
"isdst": true,
"text": "(UTC+01:00) Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna",
"utc": [
"Arctic/Longyearbyen",
"Europe/Amsterdam",
"Europe/Andorra",
"Europe/Berlin",
"Europe/Busingen",
"Europe/Gibraltar",
"Europe/Luxembourg",
"Europe/Malta",
"Europe/Monaco",
"Europe/Oslo",
"Europe/Rome",
"Europe/San_Marino",
"Europe/Stockholm",
"Europe/Vaduz",
"Europe/Vatican",
"Europe/Vienna",
"Europe/Zurich"
]
},
{
"value": "Central Europe Standard Time",
"abbr": "CEDT",
"offset": 2,
"isdst": true,
"text": "(UTC+01:00) Belgrade, Bratislava, Budapest, Ljubljana, Prague",
"utc": [
"Europe/Belgrade",
"Europe/Bratislava",
"Europe/Budapest",
"Europe/Ljubljana",
"Europe/Podgorica",
"Europe/Prague",
"Europe/Tirane"
]
},
{
"value": "Romance Standard Time",
"abbr": "RDT",
"offset": 2,
"isdst": true,
"text": "(UTC+01:00) Brussels, Copenhagen, Madrid, Paris",
"utc": [
"Africa/Ceuta",
"Europe/Brussels",
"Europe/Copenhagen",
"Europe/Madrid",
"Europe/Paris"
]
},
{
"value": "Central European Standard Time",
"abbr": "CEDT",
"offset": 2,
"isdst": true,
"text": "(UTC+01:00) Sarajevo, Skopje, Warsaw, Zagreb",
"utc": [
"Europe/Sarajevo",
"Europe/Skopje",
"Europe/Warsaw",
"Europe/Zagreb"
]
},
{
"value": "W. Central Africa Standard Time",
"abbr": "WCAST",
"offset": 1,
"isdst": false,
"text": "(UTC+01:00) West Central Africa",
"utc": [
"Africa/Algiers",
"Africa/Bangui",
"Africa/Brazzaville",
"Africa/Douala",
"Africa/Kinshasa",
"Africa/Lagos",
"Africa/Libreville",
"Africa/Luanda",
"Africa/Malabo",
"Africa/Ndjamena",
"Africa/Niamey",
"Africa/Porto-Novo",
"Africa/Tunis",
"Etc/GMT-1"
]
},
{
"value": "Namibia Standard Time",
"abbr": "NST",
"offset": 1,
"isdst": false,
"text": "(UTC+01:00) Windhoek",
"utc": [
"Africa/Windhoek"
]
},
{
"value": "GTB Standard Time",
"abbr": "GDT",
"offset": 3,
"isdst": true,
"text": "(UTC+02:00) Athens, Bucharest",
"utc": [
"Asia/Nicosia",
"Europe/Athens",
"Europe/Bucharest",
"Europe/Chisinau"
]
},
{
"value": "Middle East Standard Time",
"abbr": "MEDT",
"offset": 3,
"isdst": true,
"text": "(UTC+02:00) Beirut",
"utc": [
"Asia/Beirut"
]
},
{
"value": "Egypt Standard Time",
"abbr": "EST",
"offset": 2,
"isdst": false,
"text": "(UTC+02:00) Cairo",
"utc": [
"Africa/Cairo"
]
},
{
"value": "Syria Standard Time",
"abbr": "SDT",
"offset": 3,
"isdst": true,
"text": "(UTC+02:00) Damascus",
"utc": [
"Asia/Damascus"
]
},
{
"value": "E. Europe Standard Time",
"abbr": "EEDT",
"offset": 3,
"isdst": true,
"text": "(UTC+02:00) E. Europe",
"utc": [
"Asia/Nicosia",
"Europe/Athens",
"Europe/Bucharest",
"Europe/Chisinau",
"Europe/Helsinki",
"Europe/Kiev",
"Europe/Mariehamn",
"Europe/Nicosia",
"Europe/Riga",
"Europe/Sofia",
"Europe/Tallinn",
"Europe/Uzhgorod",
"Europe/Vilnius",
"Europe/Zaporozhye"
]
},
{
"value": "South Africa Standard Time",
"abbr": "SAST",
"offset": 2,
"isdst": false,
"text": "(UTC+02:00) Harare, Pretoria",
"utc": [
"Africa/Blantyre",
"Africa/Bujumbura",
"Africa/Gaborone",
"Africa/Harare",
"Africa/Johannesburg",
"Africa/Kigali",
"Africa/Lubumbashi",
"Africa/Lusaka",
"Africa/Maputo",
"Africa/Maseru",
"Africa/Mbabane",
"Etc/GMT-2"
]
},
{
"value": "FLE Standard Time",
"abbr": "FDT",
"offset": 3,
"isdst": true,
"text": "(UTC+02:00) Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius",
"utc": [
"Europe/Helsinki",
"Europe/Kiev",
"Europe/Mariehamn",
"Europe/Riga",
"Europe/Sofia",
"Europe/Tallinn",
"Europe/Uzhgorod",
"Europe/Vilnius",
"Europe/Zaporozhye"
]
},
{
"value": "Turkey Standard Time",
"abbr": "TDT",
"offset": 3,
"isdst": false,
"text": "(UTC+03:00) Istanbul",
"utc": [
"Europe/Istanbul"
]
},
{
"value": "Israel Standard Time",
"abbr": "JDT",
"offset": 3,
"isdst": true,
"text": "(UTC+02:00) Jerusalem",
"utc": [
"Asia/Jerusalem"
]
},
{
"value": "Libya Standard Time",
"abbr": "LST",
"offset": 2,
"isdst": false,
"text": "(UTC+02:00) Tripoli",
"utc": [
"Africa/Tripoli"
]
},
{
"value": "Jordan Standard Time",
"abbr": "JST",
"offset": 3,
"isdst": false,
"text": "(UTC+03:00) Amman",
"utc": [
"Asia/Amman"
]
},
{
"value": "Arabic Standard Time",
"abbr": "AST",
"offset": 3,
"isdst": false,
"text": "(UTC+03:00) Baghdad",
"utc": [
"Asia/Baghdad"
]
},
{
"value": "Kaliningrad Standard Time",
"abbr": "KST",
"offset": 3,
"isdst": false,
"text": "(UTC+03:00) Kaliningrad, Minsk",
"utc": [
"Europe/Kaliningrad",
"Europe/Minsk"
]
},
{
"value": "Arab Standard Time",
"abbr": "AST",
"offset": 3,
"isdst": false,
"text": "(UTC+03:00) Kuwait, Riyadh",
"utc": [
"Asia/Aden",
"Asia/Bahrain",
"Asia/Kuwait",
"Asia/Qatar",
"Asia/Riyadh"
]
},
{
"value": "E. Africa Standard Time",
"abbr": "EAST",
"offset": 3,
"isdst": false,
"text": "(UTC+03:00) Nairobi",
"utc": [
"Africa/Addis_Ababa",
"Africa/Asmera",
"Africa/Dar_es_Salaam",
"Africa/Djibouti",
"Africa/Juba",
"Africa/Kampala",
"Africa/Khartoum",
"Africa/Mogadishu",
"Africa/Nairobi",
"Antarctica/Syowa",
"Etc/GMT-3",
"Indian/Antananarivo",
"Indian/Comoro",
"Indian/Mayotte"
]
},
{
"value": "Moscow Standard Time",
"abbr": "MSK",
"offset": 3,
"isdst": false,
"text": "(UTC+03:00) Moscow, St. Petersburg, Volgograd",
"utc": [
"Europe/Kirov",
"Europe/Moscow",
"Europe/Simferopol",
"Europe/Volgograd"
]
},
{
"value": "Samara Time",
"abbr": "SAMT",
"offset": 4,
"isdst": false,
"text": "(UTC+04:00) Samara, Ulyanovsk, Saratov",
"utc": [
"Europe/Astrakhan",
"Europe/Samara",
"Europe/Ulyanovsk"
]
},
{
"value": "Iran Standard Time",
"abbr": "IDT",
"offset": 4.5,
"isdst": true,
"text": "(UTC+03:30) Tehran",
"utc": [
"Asia/Tehran"
]
},
{
"value": "Arabian Standard Time",
"abbr": "AST",
"offset": 4,
"isdst": false,
"text": "(UTC+04:00) Abu Dhabi, Muscat",
"utc": [
"Asia/Dubai",
"Asia/Muscat",
"Etc/GMT-4"
]
},
{
"value": "Azerbaijan Standard Time",
"abbr": "ADT",
"offset": 5,
"isdst": true,
"text": "(UTC+04:00) Baku",
"utc": [
"Asia/Baku"
]
},
{
"value": "Mauritius Standard Time",
"abbr": "MST",
"offset": 4,
"isdst": false,
"text": "(UTC+04:00) Port Louis",
"utc": [
"Indian/Mahe",
"Indian/Mauritius",
"Indian/Reunion"
]
},
{
"value": "Georgian Standard Time",
"abbr": "GET",
"offset": 4,
"isdst": false,
"text": "(UTC+04:00) Tbilisi",
"utc": [
"Asia/Tbilisi"
]
},
{
"value": "Caucasus Standard Time",
"abbr": "CST",
"offset": 4,
"isdst": false,
"text": "(UTC+04:00) Yerevan",
"utc": [
"Asia/Yerevan"
]
},
{
"value": "Afghanistan Standard Time",
"abbr": "AST",
"offset": 4.5,
"isdst": false,
"text": "(UTC+04:30) Kabul",
"utc": [
"Asia/Kabul"
]
},
{
"value": "West Asia Standard Time",
"abbr": "WAST",
"offset": 5,
"isdst": false,
"text": "(UTC+05:00) Ashgabat, Tashkent",
"utc": [
"Antarctica/Mawson",
"Asia/Aqtau",
"Asia/Aqtobe",
"Asia/Ashgabat",
"Asia/Dushanbe",
"Asia/Oral",
"Asia/Samarkand",
"Asia/Tashkent",
"Etc/GMT-5",
"Indian/Kerguelen",
"Indian/Maldives"
]
},
{
"value": "Yekaterinburg Time",
"abbr": "YEKT",
"offset": 5,
"isdst": false,
"text": "(UTC+05:00) Yekaterinburg",
"utc": [
"Asia/Yekaterinburg"
]
},
{
"value": "Pakistan Standard Time",
"abbr": "PKT",
"offset": 5,
"isdst": false,
"text": "(UTC+05:00) Islamabad, Karachi",
"utc": [
"Asia/Karachi"
]
},
{
"value": "India Standard Time",
"abbr": "IST",
"offset": 5.5,
"isdst": false,
"text": "(UTC+05:30) Chennai, Kolkata, Mumbai, New Delhi",
"utc": [
"Asia/Kolkata"
]
},
{
"value": "Sri Lanka Standard Time",
"abbr": "SLST",
"offset": 5.5,
"isdst": false,
"text": "(UTC+05:30) Sri Jayawardenepura",
"utc": [
"Asia/Colombo"
]
},
{
"value": "Nepal Standard Time",
"abbr": "NST",
"offset": 5.75,
"isdst": false,
"text": "(UTC+05:45) Kathmandu",
"utc": [
"Asia/Kathmandu"
]
},
{
"value": "Central Asia Standard Time",
"abbr": "CAST",
"offset": 6,
"isdst": false,
"text": "(UTC+06:00) Astana",
"utc": [
"Antarctica/Vostok",
"Asia/Almaty",
"Asia/Bishkek",
"Asia/Qyzylorda",
"Asia/Urumqi",
"Etc/GMT-6",
"Indian/Chagos"
]
},
{
"value": "Bangladesh Standard Time",
"abbr": "BST",
"offset": 6,
"isdst": false,
"text": "(UTC+06:00) Dhaka",
"utc": [
"Asia/Dhaka",
"Asia/Thimphu"
]
},
{
"value": "Myanmar Standard Time",
"abbr": "MST",
"offset": 6.5,
"isdst": false,
"text": "(UTC+06:30) Yangon (Rangoon)",
"utc": [
"Asia/Rangoon",
"Indian/Cocos"
]
},
{
"value": "SE Asia Standard Time",
"abbr": "SAST",
"offset": 7,
"isdst": false,
"text": "(UTC+07:00) Bangkok, Hanoi, Jakarta",
"utc": [
"Antarctica/Davis",
"Asia/Bangkok",
"Asia/Hovd",
"Asia/Jakarta",
"Asia/Phnom_Penh",
"Asia/Pontianak",
"Asia/Saigon",
"Asia/Vientiane",
"Etc/GMT-7",
"Indian/Christmas"
]
},
{
"value": "N. Central Asia Standard Time",
"abbr": "NCAST",
"offset": 7,
"isdst": false,
"text": "(UTC+07:00) Novosibirsk",
"utc": [
"Asia/Novokuznetsk",
"Asia/Novosibirsk",
"Asia/Omsk"
]
},
{
"value": "China Standard Time",
"abbr": "CST",
"offset": 8,
"isdst": false,
"text": "(UTC+08:00) Beijing, Chongqing, Hong Kong, Urumqi",
"utc": [
"Asia/Hong_Kong",
"Asia/Macau",
"Asia/Shanghai"
]
},
{
"value": "North Asia Standard Time",
"abbr": "NAST",
"offset": 8,
"isdst": false,
"text": "(UTC+08:00) Krasnoyarsk",
"utc": [
"Asia/Krasnoyarsk"
]
},
{
"value": "Singapore Standard Time",
"abbr": "MPST",
"offset": 8,
"isdst": false,
"text": "(UTC+08:00) Kuala Lumpur, Singapore",
"utc": [
"Asia/Brunei",
"Asia/Kuala_Lumpur",
"Asia/Kuching",
"Asia/Makassar",
"Asia/Manila",
"Asia/Singapore",
"Etc/GMT-8"
]
},
{
"value": "W. Australia Standard Time",
"abbr": "WAST",
"offset": 8,
"isdst": false,
"text": "(UTC+08:00) Perth",
"utc": [
"Antarctica/Casey",
"Australia/Perth"
]
},
{
"value": "Taipei Standard Time",
"abbr": "TST",
"offset": 8,
"isdst": false,
"text": "(UTC+08:00) Taipei",
"utc": [
"Asia/Taipei"
]
},
{
"value": "Ulaanbaatar Standard Time",
"abbr": "UST",
"offset": 8,
"isdst": false,
"text": "(UTC+08:00) Ulaanbaatar",
"utc": [
"Asia/Choibalsan",
"Asia/Ulaanbaatar"
]
},
{
"value": "North Asia East Standard Time",
"abbr": "NAEST",
"offset": 8,
"isdst": false,
"text": "(UTC+08:00) Irkutsk",
"utc": [
"Asia/Irkutsk"
]
},
{
"value": "Japan Standard Time",
"abbr": "JST",
"offset": 9,
"isdst": false,
"text": "(UTC+09:00) Osaka, Sapporo, Tokyo",
"utc": [
"Asia/Dili",
"Asia/Jayapura",
"Asia/Tokyo",
"Etc/GMT-9",
"Pacific/Palau"
]
},
{
"value": "Korea Standard Time",
"abbr": "KST",
"offset": 9,
"isdst": false,
"text": "(UTC+09:00) Seoul",
"utc": [
"Asia/Pyongyang",
"Asia/Seoul"
]
},
{
"value": "Cen. Australia Standard Time",
"abbr": "CAST",
"offset": 9.5,
"isdst": false,
"text": "(UTC+09:30) Adelaide",
"utc": [
"Australia/Adelaide",
"Australia/Broken_Hill"
]
},
{
"value": "AUS Central Standard Time",
"abbr": "ACST",
"offset": 9.5,
"isdst": false,
"text": "(UTC+09:30) Darwin",
"utc": [
"Australia/Darwin"
]
},
{
"value": "E. Australia Standard Time",
"abbr": "EAST",
"offset": 10,
"isdst": false,
"text": "(UTC+10:00) Brisbane",
"utc": [
"Australia/Brisbane",
"Australia/Lindeman"
]
},
{
"value": "AUS Eastern Standard Time",
"abbr": "AEST",
"offset": 10,
"isdst": false,
"text": "(UTC+10:00) Canberra, Melbourne, Sydney",
"utc": [
"Australia/Melbourne",
"Australia/Sydney"
]
},
{
"value": "West Pacific Standard Time",
"abbr": "WPST",
"offset": 10,
"isdst": false,
"text": "(UTC+10:00) Guam, Port Moresby",
"utc": [
"Antarctica/DumontDUrville",
"Etc/GMT-10",
"Pacific/Guam",
"Pacific/Port_Moresby",
"Pacific/Saipan",
"Pacific/Truk"
]
},
{
"value": "Tasmania Standard Time",
"abbr": "TST",
"offset": 10,
"isdst": false,
"text": "(UTC+10:00) Hobart",
"utc": [
"Australia/Currie",
"Australia/Hobart"
]
},
{
"value": "Yakutsk Standard Time",
"abbr": "YST",
"offset": 9,
"isdst": false,
"text": "(UTC+09:00) Yakutsk",
"utc": [
"Asia/Chita",
"Asia/Khandyga",
"Asia/Yakutsk"
]
},
{
"value": "Central Pacific Standard Time",
"abbr": "CPST",
"offset": 11,
"isdst": false,
"text": "(UTC+11:00) Solomon Is., New Caledonia",
"utc": [
"Antarctica/Macquarie",
"Etc/GMT-11",
"Pacific/Efate",
"Pacific/Guadalcanal",
"Pacific/Kosrae",
"Pacific/Noumea",
"Pacific/Ponape"
]
},
{
"value": "Vladivostok Standard Time",
"abbr": "VST",
"offset": 11,
"isdst": false,
"text": "(UTC+11:00) Vladivostok",
"utc": [
"Asia/Sakhalin",
"Asia/Ust-Nera",
"Asia/Vladivostok"
]
},
{
"value": "New Zealand Standard Time",
"abbr": "NZST",
"offset": 12,
"isdst": false,
"text": "(UTC+12:00) Auckland, Wellington",
"utc": [
"Antarctica/McMurdo",
"Pacific/Auckland"
]
},
{
"value": "UTC+12",
"abbr": "U",
"offset": 12,
"isdst": false,
"text": "(UTC+12:00) Coordinated Universal Time+12",
"utc": [
"Etc/GMT-12",
"Pacific/Funafuti",
"Pacific/Kwajalein",
"Pacific/Majuro",
"Pacific/Nauru",
"Pacific/Tarawa",
"Pacific/Wake",
"Pacific/Wallis"
]
},
{
"value": "Fiji Standard Time",
"abbr": "FST",
"offset": 12,
"isdst": false,
"text": "(UTC+12:00) Fiji",
"utc": [
"Pacific/Fiji"
]
},
{
"value": "Magadan Standard Time",
"abbr": "MST",
"offset": 12,
"isdst": false,
"text": "(UTC+12:00) Magadan",
"utc": [
"Asia/Anadyr",
"Asia/Kamchatka",
"Asia/Magadan",
"Asia/Srednekolymsk"
]
},
{
"value": "Kamchatka Standard Time",
"abbr": "KDT",
"offset": 13,
"isdst": true,
"text": "(UTC+12:00) Petropavlovsk-Kamchatsky - Old",
"utc": [
"Asia/Kamchatka"
]
},
{
"value": "Tonga Standard Time",
"abbr": "TST",
"offset": 13,
"isdst": false,
"text": "(UTC+13:00) Nuku'alofa",
"utc": [
"Etc/GMT-13",
"Pacific/Enderbury",
"Pacific/Fakaofo",
"Pacific/Tongatapu"
]
},
{
"value": "Samoa Standard Time",
"abbr": "SST",
"offset": 13,
"isdst": false,
"text": "(UTC+13:00) Samoa",
"utc": [
"Pacific/Apia"
]
}
]
`
package cmd
import (
"github.com/spf13/cobra"
"go.borchero.com/cuckoo/ci"
"go.borchero.com/cuckoo/providers"
"go.borchero.com/typewriter"
)
// deployDescription is the long-form help text shown for "deploy --help".
// The string content is user-facing output; keep edits to it deliberate.
const deployDescription = `
The deploy command deploys a Helm chart to a Kubernetes cluster. A Helm chart may be defined in
multiple ways:
* Remote Charts: In this case, the --repo argument and the --chart argument must be given.
* Local Charts: In this case, only the --chart argument must be given. Although the 'template'
folder must exist, there is no need for a Chart.yaml file to exist. Dependencies should be put
in a 'dependencies.yaml' file in this case.
* Local Directories: In this case, only the --chart argument must be given and set to an arbitrary
directory. It serves as a "bundle" for multiple Kubernetes manifests which do not require
value files to be defined. This way, there is no need for an extra 'template' folder.
* Local Files: In this case, the --chart argument must be set to a particular file. Deploying a
single file as Helm chart serves as an alternative for 'kubectl apply' and provides additional
features such as rollbacks.
For (actual) local Helm charts, tag and image may be set, otherwise they are ignored. They
automatically override the values 'image.name' and 'image.tag' in the values.yaml file. Tags and
images may be templated in the same way as in the build command. Consult its documentation to read
about these template values.
Make sure to be authenticated for Kubernetes or run 'cuckoo auth' prior to calling this command to
write the kubeconfig file.
`
// deployArgs holds the parsed command-line flags of the deploy command.
// It is populated by the flag bindings registered in init below.
var deployArgs struct {
	repo      string   // Helm repository URL (remote charts only)
	chart     string   // chart reference: remote chart name, local dir, or single file
	version   string   // chart version (remote charts only)
	name      string   // Helm release name
	values    []string // paths to value files
	namespace string   // target Kubernetes namespace
	image     string   // image name template (local charts only)
	tag       string   // image tag template (local charts only)
	dryRun    bool     // render without applying
}
// init registers the "deploy" subcommand and binds its flags to deployArgs.
func init() {
	deployCommand := &cobra.Command{
		Use:   "deploy",
		Short: "Deploy a Helm chart to a Kubernetes cluster.",
		Long:  deployDescription,
		Args:  cobra.ExactArgs(0), // the command takes flags only, no positional args
		Run:   runDeploy,
	}
	deployCommand.Flags().StringVar(
		&deployArgs.repo, "repo", "",
		"The URL to a Helm repository when using a remote chart.",
	)
	deployCommand.Flags().StringVar(
		&deployArgs.chart, "chart", "./deploy/helm",
		"The chart to deploy.",
	)
	deployCommand.Flags().StringVar(
		&deployArgs.version, "version", "0.0.0",
		"The version of the chart to deploy. Only relevant for remote charts.",
	)
	// The release name defaults to the project slug derived from the CI environment.
	deployCommand.Flags().StringVar(
		&deployArgs.name, "name", env.Project.Slug,
		"The name of the Helm release.",
	)
	deployCommand.Flags().StringArrayVarP(
		&deployArgs.values, "values", "f", []string{},
		"A path to one or multiple value files to set values from.",
	)
	deployCommand.Flags().StringVarP(
		&deployArgs.namespace, "namespace", "n", "default",
		"The namespace for deployed resources.",
	)
	deployCommand.Flags().StringVar(
		&deployArgs.image, "image", "",
		"The path for the image to deploy.",
	)
	deployCommand.Flags().StringVarP(
		&deployArgs.tag, "tag", "t", "",
		"The tag of the image to use for deployment. Defines appVersion of local charts.",
	)
	deployCommand.Flags().BoolVar(
		&deployArgs.dryRun, "dry-run", false,
		"Whether to perform a dry-run (useful for testing the chart).",
	)
	rootCmd.AddCommand(deployCommand)
}
func runDeploy(cmd *cobra.Command, args []string) {
logger := typewriter.NewCLILogger()
manager := ci.NewManager(env)
// 1) Configure Helm release
release, err := providers.NewHelmRelease(
deployArgs.repo, deployArgs.chart, deployArgs.version,
deployArgs.name, deployArgs.namespace, logger,
)
if err != nil {
typewriter.Fail(logger, "Failed to prepare deployment", err)
}
// 2) Get image and tag for local charts
image := ""
tag := ""
if release.IsLocalChart() {
image, err = manager.ImageNameFromTemplate(deployArgs.image)
if err != nil {
typewriter.Fail(logger, "Cannot use the specified image", err)
}
tag, err = manager.TagFromTemplate(deployArgs.tag)
if err != nil {
typewriter.Fail(logger, "Cannot use the specified tag", err)
}
}
// 3) Run upgrade
err = release.Upgrade(deployArgs.values, image, tag, deployArgs.dryRun)
if err != nil {
typewriter.Fail(logger, "Failed to deploy", err)
}
logger.Success("Done 🎉")
} | source/cmd/deploy.go | 0.606964 | 0.408041 | deploy.go | starcoder |
// Package fp provides functional-style collection types. This file appears
// to be machine-generated boilerplate — one identical method per list type.
package fp

// TakeRight returns the last n elements of the list, preserving their order.
// All variants share the same implementation: reverse, take the first n,
// reverse again. NOTE(review): this is O(len) with two full copies per call;
// a slice-based implementation would be cheaper — confirm against the
// generator before changing.
func (l BoolList) TakeRight(n int) BoolList { return l.Reverse().Take(n).Reverse() }
func (l StringList) TakeRight(n int) StringList { return l.Reverse().Take(n).Reverse() }
func (l IntList) TakeRight(n int) IntList { return l.Reverse().Take(n).Reverse() }
func (l Int64List) TakeRight(n int) Int64List { return l.Reverse().Take(n).Reverse() }
func (l ByteList) TakeRight(n int) ByteList { return l.Reverse().Take(n).Reverse() }
func (l RuneList) TakeRight(n int) RuneList { return l.Reverse().Take(n).Reverse() }
func (l Float32List) TakeRight(n int) Float32List { return l.Reverse().Take(n).Reverse() }
func (l Float64List) TakeRight(n int) Float64List { return l.Reverse().Take(n).Reverse() }
func (l AnyList) TakeRight(n int) AnyList { return l.Reverse().Take(n).Reverse() }
func (l Tuple2List) TakeRight(n int) Tuple2List { return l.Reverse().Take(n).Reverse() }
func (l BoolArrayList) TakeRight(n int) BoolArrayList { return l.Reverse().Take(n).Reverse() }
func (l StringArrayList) TakeRight(n int) StringArrayList { return l.Reverse().Take(n).Reverse() }
func (l IntArrayList) TakeRight(n int) IntArrayList { return l.Reverse().Take(n).Reverse() }
func (l Int64ArrayList) TakeRight(n int) Int64ArrayList { return l.Reverse().Take(n).Reverse() }
func (l ByteArrayList) TakeRight(n int) ByteArrayList { return l.Reverse().Take(n).Reverse() }
func (l RuneArrayList) TakeRight(n int) RuneArrayList { return l.Reverse().Take(n).Reverse() }
func (l Float32ArrayList) TakeRight(n int) Float32ArrayList { return l.Reverse().Take(n).Reverse() }
func (l Float64ArrayList) TakeRight(n int) Float64ArrayList { return l.Reverse().Take(n).Reverse() }
func (l AnyArrayList) TakeRight(n int) AnyArrayList { return l.Reverse().Take(n).Reverse() }
func (l Tuple2ArrayList) TakeRight(n int) Tuple2ArrayList { return l.Reverse().Take(n).Reverse() }
func (l BoolOptionList) TakeRight(n int) BoolOptionList { return l.Reverse().Take(n).Reverse() }
func (l StringOptionList) TakeRight(n int) StringOptionList { return l.Reverse().Take(n).Reverse() }
func (l IntOptionList) TakeRight(n int) IntOptionList { return l.Reverse().Take(n).Reverse() }
func (l Int64OptionList) TakeRight(n int) Int64OptionList { return l.Reverse().Take(n).Reverse() }
func (l ByteOptionList) TakeRight(n int) ByteOptionList { return l.Reverse().Take(n).Reverse() }
func (l RuneOptionList) TakeRight(n int) RuneOptionList { return l.Reverse().Take(n).Reverse() }
func (l Float32OptionList) TakeRight(n int) Float32OptionList { return l.Reverse().Take(n).Reverse() }
func (l Float64OptionList) TakeRight(n int) Float64OptionList { return l.Reverse().Take(n).Reverse() }
func (l AnyOptionList) TakeRight(n int) AnyOptionList { return l.Reverse().Take(n).Reverse() }
func (l Tuple2OptionList) TakeRight(n int) Tuple2OptionList { return l.Reverse().Take(n).Reverse() }
func (l BoolListList) TakeRight(n int) BoolListList { return l.Reverse().Take(n).Reverse() }
func (l StringListList) TakeRight(n int) StringListList { return l.Reverse().Take(n).Reverse() }
func (l IntListList) TakeRight(n int) IntListList { return l.Reverse().Take(n).Reverse() }
func (l Int64ListList) TakeRight(n int) Int64ListList { return l.Reverse().Take(n).Reverse() }
func (l ByteListList) TakeRight(n int) ByteListList { return l.Reverse().Take(n).Reverse() }
func (l RuneListList) TakeRight(n int) RuneListList { return l.Reverse().Take(n).Reverse() }
func (l Float32ListList) TakeRight(n int) Float32ListList { return l.Reverse().Take(n).Reverse() }
func (l Float64ListList) TakeRight(n int) Float64ListList { return l.Reverse().Take(n).Reverse() }
func (l AnyListList) TakeRight(n int) AnyListList { return l.Reverse().Take(n).Reverse() }
func (l Tuple2ListList) TakeRight(n int) Tuple2ListList { return l.Reverse().Take(n).Reverse() } | fp/bootstrap_list_takeright.go | 0.693473 | 0.479869 | bootstrap_list_takeright.go | starcoder |
// Package queueimpl4 implements an unbounded, dynamically growing FIFO queue.
// Internally, queue store the values in fixed sized arrays that are linked using
// a singly linked list.
// This implementation tests the queue performance when controlling the length and
// current positions in the arrays using simple local variables instead of relying
// on the builtin len and append functions (i.e. use array instead of slice).
// Otherwise this is the same implementation as queueimpl3.
package queueimpl4
const (
	// internalArraySize holds the size of each internal array.
	internalArraySize = 128
	// internalArrayLastPosition holds the last valid index of the internal
	// array. It is derived from internalArraySize so the two constants can
	// never drift apart (the previous duplicated literal 127 could).
	internalArrayLastPosition = internalArraySize - 1
)
// Queueimpl4 represents an unbounded, dynamically growing FIFO queue.
// Values are stored in fixed-size arrays linked as a singly linked list;
// hp/tp are manual cursors into the head/tail arrays (this implementation
// deliberately avoids len/append — see the package comment).
type Queueimpl4 struct {
	// Head points to the first node of the linked list.
	head *Node
	// Tail points to the last node of the linked list.
	// In an empty queue, head and tail points to the same node.
	tail *Node
	// Hp is the index pointing to the current first element in the queue
	// (i.e. first element added in the current queue values). It indexes
	// into head.v.
	hp int
	// Tp is the index pointing one past the current last element in the
	// queue; it indexes into tail.v and is the next write position.
	tp int
	// Len holds the current queue values length.
	len int
}
// Node represents a queue node.
// Each node holds an array of user managed values.
type Node struct {
	// v holds the list of user added values in this node (fixed capacity,
	// internalArraySize entries).
	v [internalArraySize]interface{}
	// n points to the next node in the linked list, or nil for the tail.
	n *Node
}
// New returns an initialized, empty queue ready for use.
func New() *Queueimpl4 {
	var q Queueimpl4
	return q.Init()
}
// Init initializes or clears queue q and returns it, so it can be chained
// after allocation. A single empty node serves as both head and tail.
func (q *Queueimpl4) Init() *Queueimpl4 {
	first := newNode()
	// Overwriting the whole struct resets hp, tp and len to zero.
	*q = Queueimpl4{head: first, tail: first}
	return q
}
// Len reports how many values are currently stored in q.
// The complexity is O(1).
func (q *Queueimpl4) Len() int {
	return q.len
}
// Front returns the value at the front of the queue without removing it.
// The boolean result reports whether a valid value was returned; it is
// false when the queue is empty. The complexity is O(1).
func (q *Queueimpl4) Front() (interface{}, bool) {
	if q.len > 0 {
		return q.head.v[q.hp], true
	}
	return nil, false
}
// Push appends a value to the back of the queue.
// The complexity is O(1) (amortized: one node allocation every
// internalArraySize pushes).
func (q *Queueimpl4) Push(v interface{}) {
	// Grow by linking a fresh node once the tail array is full.
	if q.tp >= internalArraySize {
		next := newNode()
		q.tail.n = next
		q.tail = next
		q.tp = 0
	}
	q.tail.v[q.tp] = v
	q.tp++
	q.len++
}
// Pop retrieves and removes the next element from the queue.
// The second, bool result indicates whether a valid value was returned; if
// the queue is empty, false will be returned. The complexity is O(1).
//
// BUG FIX: previously, when hp reached the last position of the final node
// (head == tail, head.n == nil), head was set to nil while tail kept
// pointing at the old node. A subsequent Push would link new values behind
// a nil head and the next Pop/Front would dereference it and panic
// (reproduce: push 128, pop 128, push 1, pop). When the head node has no
// successor we now reuse it by resetting both cursors instead.
func (q *Queueimpl4) Pop() (interface{}, bool) {
	if q.len == 0 {
		return nil, false
	}
	v := q.head.v[q.hp]
	q.head.v[q.hp] = nil // Avoid memory leaks
	q.len--
	if q.hp >= internalArrayLastPosition {
		if q.head.n == nil {
			// head is also the tail and the queue is now empty (hp was at
			// the last slot, so tp must have been at capacity): reuse the
			// node rather than dropping it and orphaning tail.
			q.hp = 0
			q.tp = 0
		} else {
			n := q.head.n
			q.head.n = nil // Avoid memory leaks
			q.head = n
			q.hp = 0
		}
	} else {
		q.hp++
	}
	return v, true
}
// newNode returns an initialized node.
func newNode() *Node {
return &Node{}
} | queueimpl4/queueimpl4.go | 0.876621 | 0.598782 | queueimpl4.go | starcoder |
package docs
import (
"bytes"
"encoding/json"
"strings"
"text/template"
"github.com/swaggo/swag"
)
// doc is the Swagger/OpenAPI 2.0 specification as a Go text/template; the
// {{...}} actions are filled in from SwaggerInfo by ReadDoc. This looks
// like swag-generated output — prefer regenerating over hand-editing the
// JSON payload.
var doc = `{
    "schemes": {{ marshal .Schemes }},
    "swagger": "2.0",
    "info": {
        "description": "{{escape .Description}}",
        "title": "{{.Title}}",
        "termsOfService": "https://github.com/libsv/payd/blob/master/CODE_OF_CONDUCT.md",
        "contact": {},
        "license": {
            "name": "ISC",
            "url": "https://github.com/libsv/payd/blob/master/LICENSE"
        },
        "version": "{{.Version}}"
    },
    "host": "{{.Host}}",
    "basePath": "{{.BasePath}}",
    "paths": {
        "/api/v1/payment/{paymentID}": {
            "get": {
                "description": "Creates a payment request based on a payment id (the identifier for an invoice).",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Payment"
                ],
                "summary": "Request to pay an invoice and receive back outputs to use when constructing the payment transaction",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Payment ID",
                        "name": "paymentID",
                        "in": "path",
                        "required": true
                    }
                ],
                "responses": {
                    "201": {
                        "description": "contains outputs, merchant data and expiry information, used by the payee to construct a transaction",
                        "schema": {
                            "$ref": "#/definitions/payd.PaymentRequestResponse"
                        }
                    },
                    "400": {
                        "description": "returned if the user input is invalid, usually an issue with the paymentID",
                        "schema": {
                            "$ref": "#/definitions/payd.ClientError"
                        }
                    },
                    "404": {
                        "description": "returned if the paymentID has not been found",
                        "schema": {
                            "$ref": "#/definitions/payd.ClientError"
                        }
                    },
                    "500": {
                        "description": "returned if there is an unexpected internal error",
                        "schema": {
                            "type": "string"
                        }
                    }
                }
            }
        },
        "/v1/balance": {
            "get": {
                "description": "Returns current balance, which is a sum of unspent txos",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Balance"
                ],
                "summary": "Balance",
                "responses": {
                    "200": {
                        "description": ""
                    }
                }
            }
        },
        "/v1/destinations/{invoiceID}": {
            "get": {
                "description": "Given an invoiceID, a set of outputs and fees will be returned, if not found a 404 is returned.",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Destinations",
                    "Receive"
                ],
                "summary": "Given an invoiceID, a set of outputs and fees will be returned, if not found a 404 is returned.",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Invoice ID",
                        "name": "invoiceID",
                        "in": "path",
                        "required": true
                    }
                ],
                "responses": {
                    "200": {
                        "description": ""
                    },
                    "404": {
                        "description": "returned if the invoiceID has not been found",
                        "schema": {
                            "$ref": "#/definitions/payd.ClientError"
                        }
                    }
                }
            }
        },
        "/v1/invoices": {
            "get": {
                "description": "Returns all invoices currently stored",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Invoices"
                ],
                "summary": "Invoices",
                "responses": {
                    "200": {
                        "description": ""
                    }
                }
            },
            "post": {
                "description": "Creates an invoices with invoiceID and satoshis",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Invoices"
                ],
                "summary": "InvoiceCreate invoices",
                "parameters": [
                    {
                        "description": "Reference and Satoshis",
                        "name": "body",
                        "in": "body",
                        "required": true,
                        "schema": {
                            "$ref": "#/definitions/payd.InvoiceCreate"
                        }
                    }
                ],
                "responses": {
                    "201": {
                        "description": ""
                    }
                }
            }
        },
        "/v1/invoices/{invoiceID}": {
            "get": {
                "description": "Returns invoices by invoices id if exists",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Invoices"
                ],
                "summary": "Invoices",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Invoice ID",
                        "name": "invoiceID",
                        "in": "path",
                        "required": true
                    }
                ],
                "responses": {
                    "200": {
                        "description": ""
                    }
                }
            },
            "delete": {
                "description": "InvoiceDelete",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Invoices"
                ],
                "summary": "InvoiceDelete invoices",
                "parameters": [
                    {
                        "type": "string",
                        "description": "invoiceID we want to remove",
                        "name": "invoiceID",
                        "in": "path",
                        "required": true
                    }
                ],
                "responses": {
                    "204": {
                        "description": ""
                    },
                    "404": {
                        "description": "returned if the paymentID has not been found",
                        "schema": {
                            "$ref": "#/definitions/payd.ClientError"
                        }
                    }
                }
            }
        },
        "/v1/owner": {
            "get": {
                "description": "Returns information about the wallet owner",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Users"
                ],
                "summary": "Wallet owner information.",
                "responses": {
                    "200": {
                        "description": "Current wallet owner",
                        "schema": {
                            "$ref": "#/definitions/payd.User"
                        }
                    }
                }
            }
        },
        "/v1/pay": {
            "post": {
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Pay"
                ],
                "summary": "Make a payment",
                "parameters": [
                    {
                        "description": "Pay to url",
                        "name": "body",
                        "in": "body",
                        "required": true,
                        "schema": {
                            "$ref": "#/definitions/payd.PayRequest"
                        }
                    }
                ],
                "responses": {
                    "201": {
                        "description": ""
                    }
                }
            }
        },
        "/v1/payments/{invoiceID}": {
            "post": {
                "description": "Given an invoiceID, and an spvEnvelope, we will validate the payment and inputs used are valid and that it covers the invoice.",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Payments"
                ],
                "summary": "Validate and store a payment.",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Invoice ID",
                        "name": "invoiceID",
                        "in": "path",
                        "required": true
                    }
                ],
                "responses": {
                    "200": {
                        "description": ""
                    },
                    "400": {
                        "description": "returned if the invoiceID is empty or payment isn't valid",
                        "schema": {
                            "$ref": "#/definitions/payd.ClientError"
                        }
                    },
                    "404": {
                        "description": "returned if the invoiceID has not been found",
                        "schema": {
                            "$ref": "#/definitions/payd.ClientError"
                        }
                    }
                }
            }
        },
        "/v1/proofs/{txid}": {
            "post": {
                "description": "Creates a json envelope proof",
                "consumes": [
                    "application/json"
                ],
                "produces": [
                    "application/json"
                ],
                "tags": [
                    "Proofs"
                ],
                "summary": "InvoiceCreate proof",
                "parameters": [
                    {
                        "type": "string",
                        "description": "Transaction ID",
                        "name": "txid",
                        "in": "path",
                        "required": true
                    },
                    {
                        "description": "JSON Envelope",
                        "name": "body",
                        "in": "body",
                        "required": true,
                        "schema": {
                            "$ref": "#/definitions/envelope.JSONEnvelope"
                        }
                    }
                ],
                "responses": {
                    "201": {
                        "description": ""
                    }
                }
            }
        }
    },
    "definitions": {
        "bt.FeeQuote": {
            "type": "object"
        },
        "envelope.JSONEnvelope": {
            "type": "object",
            "properties": {
                "encoding": {
                    "type": "string"
                },
                "mimetype": {
                    "type": "string"
                },
                "payload": {
                    "type": "string"
                },
                "publicKey": {
                    "type": "string"
                },
                "signature": {
                    "type": "string"
                }
            }
        },
        "payd.ClientError": {
            "type": "object",
            "properties": {
                "code": {
                    "type": "string",
                    "example": "N01"
                },
                "id": {
                    "type": "string",
                    "example": "e97970bf-2a88-4bc8-90e6-2f597a80b93d"
                },
                "message": {
                    "type": "string",
                    "example": "unable to find foo when loading bar"
                },
                "title": {
                    "type": "string",
                    "example": "not found"
                }
            }
        },
        "payd.InvoiceCreate": {
            "type": "object",
            "properties": {
                "description": {
                    "description": "Description is an optional text field that can have some further info\nlike 'invoice for oranges'.\nMaxLength is 1024 characters.",
                    "type": "string"
                },
                "expiresAt": {
                    "description": "ExpiresAt is an optional param that can be passed to set an expiration\ndate on an invoice, after which, payments will not be accepted.",
                    "type": "string"
                },
                "reference": {
                    "description": "Reference is an identifier that can be used to link the\npayd invoice with an external system.\nMaxLength is 32 characters.",
                    "type": "string"
                },
                "satoshis": {
                    "description": "Satoshis is the total amount this invoice is to pay.",
                    "type": "integer"
                }
            }
        },
        "payd.P4Destination": {
            "type": "object",
            "properties": {
                "outputs": {
                    "type": "array",
                    "items": {
                        "$ref": "#/definitions/payd.P4Output"
                    }
                }
            }
        },
        "payd.P4Output": {
            "type": "object",
            "properties": {
                "amount": {
                    "type": "integer"
                },
                "description": {
                    "type": "string"
                },
                "script": {
                    "type": "string"
                }
            }
        },
        "payd.PayRequest": {
            "type": "object",
            "properties": {
                "payToURL": {
                    "type": "string"
                }
            }
        },
        "payd.PaymentRequestResponse": {
            "type": "object",
            "properties": {
                "creationTimestamp": {
                    "type": "string"
                },
                "destinations": {
                    "$ref": "#/definitions/payd.P4Destination"
                },
                "expirationTimestamp": {
                    "type": "string"
                },
                "fees": {
                    "$ref": "#/definitions/bt.FeeQuote"
                },
                "memo": {
                    "type": "string"
                },
                "merchantData": {
                    "$ref": "#/definitions/payd.User"
                },
                "network": {
                    "type": "string"
                },
                "paymentURL": {
                    "type": "string"
                },
                "spvRequired": {
                    "type": "boolean",
                    "example": true
                }
            }
        },
        "payd.User": {
            "type": "object",
            "properties": {
                "address": {
                    "type": "string"
                },
                "avatar": {
                    "type": "string"
                },
                "email": {
                    "type": "string"
                },
                "extendedData": {
                    "type": "object",
                    "additionalProperties": true
                },
                "id": {
                    "type": "integer"
                },
                "name": {
                    "type": "string"
                },
                "phoneNumber": {
                    "type": "string"
                }
            }
        }
    }
}`
// swaggerInfo holds the values substituted into the doc template when the
// spec is rendered by ReadDoc.
type swaggerInfo struct {
	Version     string   // fills {{.Version}}
	Host        string   // fills {{.Host}}
	BasePath    string   // fills {{.BasePath}}
	Schemes     []string // JSON-marshalled into "schemes"
	Title       string   // fills {{.Title}}
	Description string   // escaped and filled into info.description
}
// SwaggerInfo holds exported Swagger Info so clients can modify it.
// Changes take effect on the next call to ReadDoc, which reads a copy of
// this value each time it renders the spec.
var SwaggerInfo = swaggerInfo{
	Version:     "0.0.1",
	Host:        "localhost:8443",
	BasePath:    "/api",
	Schemes:     []string{},
	Title:       "Payd",
	Description: "Payd is a txo and key manager, with a common interface that can be implemented by wallets.",
}
type s struct{}
// ReadDoc renders the doc template with the current SwaggerInfo values and
// returns the resulting JSON document. On any template parse or execute
// error the raw template text is returned unchanged.
func (s *s) ReadDoc() string {
	info := SwaggerInfo
	info.Description = strings.ReplaceAll(info.Description, "\n", "\\n")

	funcs := template.FuncMap{
		"marshal": func(v interface{}) string {
			data, _ := json.Marshal(v)
			return string(data)
		},
		"escape": func(v interface{}) string {
			// escape tabs
			out := strings.ReplaceAll(v.(string), "\t", "\\t")
			// replace " with \", and if that results in \\", replace that with \\\"
			out = strings.ReplaceAll(out, "\"", "\\\"")
			return strings.ReplaceAll(out, "\\\\\"", "\\\\\\\"")
		},
	}

	tmpl, err := template.New("swagger_info").Funcs(funcs).Parse(doc)
	if err != nil {
		return doc
	}

	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, info); err != nil {
		return doc
	}
	return rendered.String()
}
func init() {
swag.Register("swagger", &s{})
} | docs/docs.go | 0.680348 | 0.405861 | docs.go | starcoder |
package byteutils
// Endian represents the endianness for conversion. It is a bool underneath,
// with the two named values below as the only meaningful states.
type Endian bool
const (
	// LittleEndian places the least significant byte at the end (right side) of
	// a byte sequence.
	LittleEndian Endian = false
	// BigEndian places the most significant byte at the end (right side) of a
	// byte sequence.
	// NOTE(review): this wording describes big endian unusually (the most
	// significant byte conventionally comes first) — confirm the intended
	// convention against callers of this package.
	BigEndian Endian = true
)
// ByteIteratorFunc takes a byte and the enumeration (count of calls to
// function). A pointer is passed so that the byte can be potentially
// modified. Declared as a type alias (=), so any function with this
// signature converts implicitly.
type ByteIteratorFunc = func(b *byte, enumeration int)
// IterateSmallestToLargest visits every byte of b from the smallest
// (least significant) to the largest, as defined by the endianness, calling
// f with a pointer to each byte and a zero-based call count.
func (e Endian) IterateSmallestToLargest(b Bytes, f ByteIteratorFunc) {
	first, last := e.byteRange(len(b))
	if e != BigEndian {
		// Little endian: the smallest byte sits at the highest index, so
		// walk downward while numbering calls from zero.
		for i := first; i >= last; i-- {
			f(&b[i], first-i)
		}
		return
	}
	for i := first; i <= last; i++ {
		f(&b[i], i)
	}
}
// IterateUint16 iterates over a uint16 as bytes, from smallest to largest.
// Endianness determines if iteration goes from the left-most byte to the
// right-most (big endian), or the right-most byte to the left-most (little
// endian). Thin wrapper over iterateNumber.
func (e Endian) IterateUint16(n uint16, f ByteIteratorFunc) {
	e.iterateNumber(n, f)
}
// IterateUint32 iterates over a uint32 as bytes, from smallest to largest.
// Endianness determines if iteration goes from the left-most byte to the
// right-most (big endian), or the right-most byte to the left-most (little
// endian). Thin wrapper over iterateNumber.
func (e Endian) IterateUint32(n uint32, f ByteIteratorFunc) {
	e.iterateNumber(n, f)
}
// iterateNumber iterates over a number as bytes, from smallest to largest,
// extracting each byte of value via mask-and-shift and passing it to f by
// pointer together with the call enumeration.
//
// NOTE(review): if n is neither uint16 nor uint32, the switch leaves
// smallest == largest == 0 and value == 0, so f is still invoked once with
// a zero byte — confirm this fall-through is intended.
func (e Endian) iterateNumber(n interface{}, f ByteIteratorFunc) {
	var smallest, largest int
	var value uint32
	switch v := n.(type) {
	case uint16:
		smallest, largest = e.byteRange(2)
		value = uint32(v)
	case uint32:
		smallest, largest = e.byteRange(4)
		value = v
	}
	if e == BigEndian {
		// Big endian: byteRange gives smallest=0, largest=count-1; the
		// shift (largest-i)*8 selects the most significant byte first.
		for i := smallest; i <= largest; i++ {
			shift := (largest - i) * 8
			var intersect uint32 = 0xFF << shift
			b := byte((value & intersect) >> shift)
			f(&b, i)
		}
	} else {
		// Little endian: byteRange gives smallest=count-1, largest=0, so
		// this loop runs i = 0..count-1; shift i*8 selects the least
		// significant byte first, and i doubles as the enumeration.
		for i := largest; i <= smallest; i++ {
			shift := i * 8
			var intersect uint32 = 0xFF << shift
			b := byte((value & intersect) >> shift)
			f(&b, i)
		}
	}
}
// ByteRange returns the index of the smallest and the largest bytes.
func (e Endian) byteRange(byteCount int) (smallest, largest int) {
if e == LittleEndian {
smallest = byteCount - 1
} else {
largest = byteCount - 1
}
return
} | endianness.go | 0.801897 | 0.69641 | endianness.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.