code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package blockchain
import (
"github.com/bloXroute-Labs/gateway/blockchain/network"
"github.com/bloXroute-Labs/gateway/types"
)
// NoOpBxBridge is a placeholder bridge that still operates as a Converter
type NoOpBxBridge struct {
	// Converter is embedded so type-conversion behavior remains available,
	// while every channel-based Bridge method on this type is a no-op.
	Converter
}
// NewNoOpBridge is a placeholder bridge implementation for starting the node without any blockchain connections, so that there's no blocking on channels
func NewNoOpBridge(converter Converter) Bridge {
	bridge := &NoOpBxBridge{Converter: converter}
	return bridge
}
// TransactionBlockchainToBDN is a no-op
func (n NoOpBxBridge) TransactionBlockchainToBDN(i interface{}) (*types.BxTransaction, error) {
return nil, nil
}
// TransactionBDNToBlockchain is a no-op
func (n NoOpBxBridge) TransactionBDNToBlockchain(transaction *types.BxTransaction) (interface{}, error) {
return nil, nil
}
// BlockBlockchainToBDN is a no-op
func (n NoOpBxBridge) BlockBlockchainToBDN(i interface{}) (*types.BxBlock, error) {
return nil, nil
}
// BlockBDNtoBlockchain is a no-op
func (n NoOpBxBridge) BlockBDNtoBlockchain(block *types.BxBlock) (interface{}, error) {
return nil, nil
}
// ReceiveNetworkConfigUpdates is a no-op
func (n NoOpBxBridge) ReceiveNetworkConfigUpdates() <-chan network.EthConfig {
return make(chan network.EthConfig)
}
// UpdateNetworkConfig is a no-op
func (n NoOpBxBridge) UpdateNetworkConfig(config network.EthConfig) error {
return nil
}
// AnnounceTransactionHashes is a no-op
func (n NoOpBxBridge) AnnounceTransactionHashes(s string, list types.SHA256HashList) error {
return nil
}
// SendTransactionsFromBDN is a no-op
func (n NoOpBxBridge) SendTransactionsFromBDN(transactions []*types.BxTransaction) error {
return nil
}
// SendTransactionsToBDN is a no-op
func (n NoOpBxBridge) SendTransactionsToBDN(txs []*types.BxTransaction, peerEndpoint types.NodeEndpoint) error {
return nil
}
// RequestTransactionsFromNode is a no-op
func (n NoOpBxBridge) RequestTransactionsFromNode(s string, list types.SHA256HashList) error {
return nil
}
// ReceiveNodeTransactions is a no-op
func (n NoOpBxBridge) ReceiveNodeTransactions() <-chan TransactionsFromNode {
return make(chan TransactionsFromNode)
}
// ReceiveBDNTransactions is a no-op
func (n NoOpBxBridge) ReceiveBDNTransactions() <-chan []*types.BxTransaction {
return make(chan []*types.BxTransaction)
}
// ReceiveTransactionHashesAnnouncement is a no-op
func (n NoOpBxBridge) ReceiveTransactionHashesAnnouncement() <-chan TransactionAnnouncement {
return make(chan TransactionAnnouncement)
}
// ReceiveTransactionHashesRequest is a no-op
func (n NoOpBxBridge) ReceiveTransactionHashesRequest() <-chan TransactionAnnouncement {
return make(chan TransactionAnnouncement)
}
// SendBlockToBDN is a no-op
func (n NoOpBxBridge) SendBlockToBDN(block *types.BxBlock, endpoint types.NodeEndpoint) error {
return nil
}
// SendBlockToNode is a no-op
func (n NoOpBxBridge) SendBlockToNode(block *types.BxBlock) error {
return nil
}
// ReceiveBlockFromBDN is a no-op
func (n NoOpBxBridge) ReceiveBlockFromBDN() <-chan *types.BxBlock {
return make(chan *types.BxBlock)
}
// ReceiveBlockFromNode is a no-op
func (n NoOpBxBridge) ReceiveBlockFromNode() <-chan BlockFromNode {
return make(chan BlockFromNode)
}
// ReceiveConfirmedBlockFromNode is a no-op. It returns an open channel that is
// never written to, matching every other no-op receiver on this type; the
// previous nil return also blocked forever on receive, but a real channel keeps
// the bridge's methods consistent.
func (n NoOpBxBridge) ReceiveConfirmedBlockFromNode() <-chan BlockFromNode {
	return make(chan BlockFromNode)
}
// ReceiveNoActiveBlockchainPeersAlert is a no-op
func (n NoOpBxBridge) ReceiveNoActiveBlockchainPeersAlert() <-chan NoActiveBlockchainPeersAlert {
return make(chan NoActiveBlockchainPeersAlert)
}
// SendNoActiveBlockchainPeersAlert is a no-op
func (n NoOpBxBridge) SendNoActiveBlockchainPeersAlert() error {
return nil
}
// SendConfirmedBlockToGateway is a no-op
func (n NoOpBxBridge) SendConfirmedBlockToGateway(block *types.BxBlock, peerEndpoint types.NodeEndpoint) error {
return nil
}
// SendBlockchainStatusRequest is a no-op
func (n NoOpBxBridge) SendBlockchainStatusRequest() error { return nil }
// ReceiveBlockchainStatusRequest is a no-op
func (n NoOpBxBridge) ReceiveBlockchainStatusRequest() <-chan struct{} { return make(chan struct{}) }
// SendBlockchainStatusResponse is a no-op
func (n NoOpBxBridge) SendBlockchainStatusResponse([]*types.NodeEndpoint) error { return nil }
// ReceiveBlockchainStatusResponse is a no-op
func (n NoOpBxBridge) ReceiveBlockchainStatusResponse() <-chan []*types.NodeEndpoint {
return make(chan []*types.NodeEndpoint)
} | blockchain/noopbridge.go | 0.704973 | 0.430267 | noopbridge.go | starcoder |
package base
import (
"fmt"
"github.com/arborlang/ArborGo/internal/parser/ast"
)
// VisitorHider is a simple way to set and hide the visitor
type VisitorHider interface {
	// SetVisitor gives the concrete visitor a back-reference to the
	// VisitorAdapter that wraps it.
	SetVisitor(v *VisitorAdapter)
}
// VisitorAdapter represents a top level VisitorAdapter that walks the tree but does nothing. Useful for doing analysis on the AST by other visitors
// For example, if I want to collect all of the function definitions of an arbor file, I would define a struct that is composed of this
// visitor and implements the VisitFunctionDefinitionNode function.
type VisitorAdapter struct {
	Visitor VisitorHider // visitor implements a visitor interface
	// ShouldCallVisitor gates delegation: when false, the Visit* methods skip
	// the wrapped visitor's hook for the current node and walk the children
	// themselves, resetting the flag back to true as they do so.
	ShouldCallVisitor bool
}
// New wraps the given visitor in a VisitorAdapter, wires the back-reference
// via SetVisitor, and returns the adapter with delegation enabled.
func New(visitor VisitorHider) *VisitorAdapter {
	adapter := &VisitorAdapter{
		Visitor:           visitor,
		ShouldCallVisitor: true,
	}
	visitor.SetVisitor(adapter)
	return adapter
}
// GetVisitor returns the wrapped visitor as an empty interface so callers can
// type-assert it back to its concrete type.
func (v *VisitorAdapter) GetVisitor() interface{} {
	return v.Visitor
}
// VisitAnyNode invokes the wrapped visitor's generic hook, if it implements
// ast.GenericVisitor and delegation is enabled. Returns (nil, nil) otherwise.
func (v *VisitorAdapter) VisitAnyNode(node ast.Node) (ast.Node, error) {
	if visitor, ok := v.Visitor.(ast.GenericVisitor); ok && v.ShouldCallVisitor {
		return visitor.VisitAnyNode(node)
	}
	return nil, nil
}
// VisitExtendsNode visits an extends node, delegating to the wrapped visitor
// when it implements ast.ExtendsNodeVisitor; otherwise it returns the node unchanged.
func (v *VisitorAdapter) VisitExtendsNode(extends *ast.ExtendsNode) (ast.Node, error) {
	v.VisitAnyNode(extends)
	if visitor, ok := v.Visitor.(ast.ExtendsNodeVisitor); ok && v.ShouldCallVisitor {
		return visitor.VisitExtendsNode(extends)
	}
	return extends, nil
}
// VisitAnnotatedNode visits an annotated node, delegating to the wrapped
// visitor when it implements ast.AnnotatedNodeVisitor.
func (v *VisitorAdapter) VisitAnnotatedNode(node *ast.AnnotatedNode) (ast.Node, error) {
	v.VisitAnyNode(node)
	// Gate on ShouldCallVisitor and reset it afterwards, matching the
	// delegation pattern used by every other Visit* method on this type.
	if vi, ok := v.Visitor.(ast.AnnotatedNodeVisitor); ok && v.ShouldCallVisitor {
		return vi.VisitAnnotatedNode(node)
	}
	v.ShouldCallVisitor = true
	return node, nil
}
// VisitProgram visits a compiler block.
//
// Unlike the other Visit* methods, the result of VisitAnyNode is honored here:
// a non-nil node or error from the generic hook short-circuits the walk.
// (The original called VisitAnyNode twice back-to-back and discarded the first
// result; the redundant call is removed.)
func (v *VisitorAdapter) VisitProgram(block *ast.Program) (ast.Node, error) {
	node, err := v.VisitAnyNode(block)
	if node != nil || err != nil {
		return node, err
	}
	if visitor, ok := v.Visitor.(ast.ProgramVisitor); ok && v.ShouldCallVisitor {
		return visitor.VisitProgram(block)
	}
	v.ShouldCallVisitor = true
	// Re-walk every top-level statement, replacing each with its visited form.
	statements := make([]ast.Node, 0, len(block.Nodes))
	for _, stmt := range block.Nodes {
		stmt, err := stmt.Accept(v)
		if err != nil {
			return nil, err
		}
		statements = append(statements, stmt)
	}
	block.Nodes = statements
	return block, nil
}
func (v *VisitorAdapter) VisitImplementsNode(implementsNode *ast.ImplementsNode) (ast.Node, error) {
v.VisitAnyNode(implementsNode)
if visitor, ok := v.Visitor.(ast.ImplementsNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitImplementsNode(implementsNode)
}
return implementsNode, nil
}
// VisitAssignment visits an assignment node
func (v *VisitorAdapter) VisitAssignmentNode(assignment *ast.AssignmentNode) (ast.Node, error) {
v.VisitAnyNode(assignment)
if visitor, ok := v.Visitor.(ast.AssignmentNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitAssignmentNode(assignment)
}
v.ShouldCallVisitor = true
assignTo, err := assignment.AssignTo.Accept(v)
if err != nil {
return assignment, err
}
value, err := assignment.Value.Accept(v)
if err != nil {
return assignment, err
}
assignment.AssignTo = assignTo
assignment.Value = value
return assignment, nil
}
// VisitBoolOp visits a boolean node
func (v *VisitorAdapter) VisitBoolOp(node *ast.BoolOp) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.BoolOpVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitBoolOp(node)
}
v.ShouldCallVisitor = true
leftOp, err := node.LeftSide.Accept(v)
if err != nil {
return nil, err
}
rightOP, err := node.RightSide.Accept(v)
if err != nil {
return nil, err
}
node.LeftSide = leftOp
node.RightSide = rightOP
return node, nil
}
// VisitComparison Visits a comparison node
func (v *VisitorAdapter) VisitComparison(node *ast.Comparison) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.ComparisonVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitComparison(node)
}
v.ShouldCallVisitor = true
comparison, err := node.LeftSide.Accept(v)
if err != nil {
return nil, err
}
right, err := node.RightSide.Accept(v)
if err != nil {
return nil, err
}
node.LeftSide = comparison
node.RightSide = right
return node, nil
}
// VisitConstant visits the constant object
func (v *VisitorAdapter) VisitConstant(node *ast.Constant) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.ConstantVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitConstant(node)
}
v.ShouldCallVisitor = true
return node, nil
}
// VisitFunctionDefinitionNode visits a function definition ndde
func (v *VisitorAdapter) VisitFunctionDefinitionNode(node *ast.FunctionDefinitionNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.FunctionDefinitionNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitFunctionDefinitionNode(node)
}
v.ShouldCallVisitor = true
args := []*ast.VarName{}
for _, arg := range node.Arguments {
argRes, err := arg.Accept(v)
if err != nil {
return nil, err
}
args = append(args, argRes.(*ast.VarName))
}
body, err := node.Body.Accept(v)
node.Arguments = args
node.Body = body
return node, err
}
// VisitFunctionCallNode visits a function call node
func (v *VisitorAdapter) VisitFunctionCallNode(node *ast.FunctionCallNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.FunctionCallNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitFunctionCallNode(node)
}
v.ShouldCallVisitor = true
def, err := node.Definition.Accept(v)
if err != nil {
return nil, err
}
node.Definition = def
args := []ast.Node{}
for _, arg := range node.Arguments {
arg, err := arg.Accept(v)
if err != nil {
return nil, err
}
args = append(args, arg)
}
node.Arguments = args
return node, nil
}
// VisitMathOpNode Visits a math op node
func (v *VisitorAdapter) VisitMathOpNode(node *ast.MathOpNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.MathOpNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitMathOpNode(node)
}
v.ShouldCallVisitor = true
left, err := node.LeftSide.Accept(v)
if err != nil {
return nil, err
}
right, err := node.RightSide.Accept(v)
if err != nil {
return nil, err
}
node.LeftSide = left
node.RightSide = right
return node, nil
}
// VisitReturnNode visits a return node
func (v *VisitorAdapter) VisitReturnNode(node *ast.ReturnNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.ReturnNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitReturnNode(node)
}
v.ShouldCallVisitor = true
if node.Expression != nil {
expr, err := node.Expression.Accept(v)
if err != nil {
return nil, err
}
node.Expression = expr
}
return node, nil
}
// VisitVarName visits a varname node
func (v *VisitorAdapter) VisitVarName(node *ast.VarName) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.VarNameVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitVarName(node)
}
v.ShouldCallVisitor = true
return node, nil
}
// VisitDeclNode visits the decl Node
func (v *VisitorAdapter) VisitDeclNode(node *ast.DeclNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.DeclNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitDeclNode(node)
}
v.ShouldCallVisitor = true
vName, err := node.Varname.Accept(v)
node.Varname = vName.(*ast.VarName)
return node, err
}
// VisitPipeNode visits the pipe node
func (v *VisitorAdapter) VisitPipeNode(node *ast.PipeNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.PipeNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitPipeNode(node)
}
v.ShouldCallVisitor = true
left, err := node.LeftSide.Accept(v)
if err != nil {
return nil, err
}
right, err := node.RightSide.Accept(v)
if err != nil {
return nil, err
}
node.LeftSide = left
node.RightSide = right
return node, nil
}
// VisitIfNode visits an if node
func (v *VisitorAdapter) VisitIfNode(node *ast.IfNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.IfNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitIfNode(node)
}
v.ShouldCallVisitor = true
condition, err := node.Condition.Accept(v)
if err != nil {
return nil, err
}
body, err := node.Body.Accept(v)
if err != nil {
return nil, err
}
elIfs := []*ast.IfNode{}
for _, elseIf := range node.ElseIfs {
elIf, err := elseIf.Accept(v)
if err != nil {
return nil, err
}
elIfs = append(elIfs, elIf.(*ast.IfNode))
}
els := node.Else
if els != nil {
els, err = els.Accept(v)
if err != nil {
return nil, err
}
}
node.Condition = condition
node.Body = body
node.ElseIfs = elIfs
node.Else = els
return node, nil
}
// VisitImportNode visits an import node
func (v *VisitorAdapter) VisitImportNode(node *ast.ImportNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.ImportNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitImportNode(node)
}
v.ShouldCallVisitor = true
return node, nil
}
// VisitTypeNode visits a type node
func (v *VisitorAdapter) VisitTypeNode(node *ast.TypeNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.TypeNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitTypeNode(node)
}
v.ShouldCallVisitor = true
return node, nil
}
// VisitIndexNode visits an index node
func (v *VisitorAdapter) VisitIndexNode(node *ast.IndexNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.IndexNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitIndexNode(node)
}
v.ShouldCallVisitor = true
return node, nil
}
// VisitSliceNode visits a slice node
func (v *VisitorAdapter) VisitSliceNode(node *ast.SliceNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.SliceNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitSliceNode(node)
}
v.ShouldCallVisitor = true
return node, nil
}
// VisitSliceNode visits a slice node
func (v *VisitorAdapter) VisitDecoratorNode(node *ast.DecoratorNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.DecoratorNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitDecoratorNode(node)
}
v.ShouldCallVisitor = true
name, err := node.Name.Accept(v)
if err != nil {
return nil, err
}
node.Name = name.(*ast.VarName)
decorates, err := node.Decorates.Accept(v)
if err != nil {
return nil, err
}
node.Decorates = decorates
return node, nil
}
func (v *VisitorAdapter) VisitDotNode(node *ast.DotNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.DotNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitDotNode(node)
}
v.ShouldCallVisitor = true
varName, err := node.VarName.Accept(v)
if err != nil {
return nil, err
}
node.VarName = varName
access, err := node.Access.Accept(v)
if err != nil {
return nil, err
}
node.Access = access
return node, nil
}
func (v *VisitorAdapter) VisitInstantiateNode(node *ast.InstantiateNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.InstantiateNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitInstantiateNode(node)
}
v.ShouldCallVisitor = true
callNode, err := node.FunctionCallNode.Accept(v)
node.FunctionCallNode = callNode.(*ast.FunctionCallNode)
return node, err
}
func (v *VisitorAdapter) VisitInternalNode(node *ast.InternalNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.InternalNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitInternalNode(node)
}
v.ShouldCallVisitor = true
expr, err := node.Expression.Accept(v)
node.Expression = expr
return node, err
}
func (v *VisitorAdapter) VisitMatchNode(node *ast.MatchNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.MatchNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitMatchNode(node)
}
v.ShouldCallVisitor = true
match, err := node.Match.Accept(v)
if err != nil {
return nil, err
}
node.Match = match
whens := []*ast.WhenNode{}
for _, clause := range node.WhenCases {
when, err := clause.Accept(v)
if err != nil {
return nil, err
}
whens = append(whens, when.(*ast.WhenNode))
}
node.WhenCases = whens
return node, nil
}
func (v *VisitorAdapter) VisitMethodDefinition(node *ast.MethodDefinition) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.MethodDefinitionVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitMethodDefinition(node)
}
v.ShouldCallVisitor = true
def, err := node.FuncDef.Accept(v)
if err != nil {
return nil, err
}
node.FuncDef = def.(*ast.FunctionDefinitionNode)
name, err := node.MethodName.Accept(v)
if err != nil {
return nil, err
}
node.MethodName = name.(*ast.VarName)
tpName, err := node.TypeName.Accept(v)
if err != nil {
return nil, err
}
node.TypeName = tpName.(*ast.VarName)
return node, nil
}
func (v *VisitorAdapter) VisitPackage(node *ast.Package) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.PackageVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitPackage(node)
}
v.ShouldCallVisitor = true
return node, nil
}
func (v *VisitorAdapter) VisitShapeNode(node *ast.ShapeNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.ShapeNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitShapeNode(node)
}
v.ShouldCallVisitor = true
fields := map[string]ast.Node{}
for name, field := range node.Fields {
field, err := field.Accept(v)
if err != nil {
return nil, err
}
fields[name] = field
}
node.Fields = fields
return node, nil
}
func (v *VisitorAdapter) VisitWhenNode(node *ast.WhenNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.WhenNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitWhenNode(node)
}
cas, err := node.Case.Accept(v)
if err != nil {
return nil, err
}
node.Case = cas
eval, err := node.Evaluate.Accept(v)
if err != nil {
return nil, err
}
node.Evaluate = eval
return node, nil
}
func (v *VisitorAdapter) VisitContinueNode(node *ast.ContinueNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.ContinueNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitContinueNode(node)
}
nd := node.WithValue
if nd != nil {
nd, err := nd.Accept(v)
if err != nil {
return nil, err
}
node.WithValue = nd
}
return node, nil
}
func (v *VisitorAdapter) VisitSignalNode(node *ast.SignalNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.SignalNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitSignalNode(node)
}
val, err := node.ValueToRaise.Accept(v)
node.ValueToRaise = val
return node, err
}
// VisitTryNode visits a try node: the tried body first, then each handle case.
// NOTE(review): unlike most siblings, this method does not reset
// v.ShouldCallVisitor = true before walking children — confirm whether that is
// intentional before changing it.
func (v *VisitorAdapter) VisitTryNode(node *ast.TryNode) (ast.Node, error) {
	v.VisitAnyNode(node)
	if visitor, ok := v.Visitor.(ast.TryNodeVisitor); ok && v.ShouldCallVisitor {
		return visitor.VisitTryNode(node)
	}
	tries, err := node.Tries.Accept(v)
	if err != nil {
		return nil, err
	}
	node.Tries = tries
	handleCases := make([]*ast.HandleCaseNode, 0, len(node.HandleCases))
	for _, hNode := range node.HandleCases {
		cs, err := hNode.Accept(v)
		if err != nil {
			return nil, err
		}
		hCs, ok := cs.(*ast.HandleCaseNode)
		if !ok {
			// Typo fix: "missunderstood" -> "misunderstood".
			return nil, fmt.Errorf("got back a misunderstood node")
		}
		handleCases = append(handleCases, hCs)
	}
	node.HandleCases = handleCases
	return node, nil
}
func (v *VisitorAdapter) VisitHandleCaseNode(node *ast.HandleCaseNode) (ast.Node, error) {
v.VisitAnyNode(node)
if visitor, ok := v.Visitor.(ast.HandleCaseNodeVisitor); ok && v.ShouldCallVisitor {
return visitor.VisitHandleCaseNode(node)
}
cs, err := node.Case.Accept(v)
if err != nil {
return nil, err
}
node.Case = cs
return node, nil
} | internal/parser/visitors/base/adapter.go | 0.728362 | 0.521471 | adapter.go | starcoder |
package dt
import (
"fmt"
"time"
)
var location *time.Location
type Datime struct {
value time.Time
}
// init captures the process's local time zone once so every Datime constructed
// by this package uses a consistent location. (A leftover debug Println of the
// location was removed — package init should not write to stdout.)
func init() {
	location = time.Now().Location()
}
func New() Datime {
return Datime{value: time.Now()}
}
func NewUnix(seconds int64) Datime {
return Datime{value: time.Unix(seconds, 0)}
}
func (dt Datime) Unix() int64 {
return dt.value.Unix()
}
func NewWithComponents(year, month, day, hour, min, sec int) Datime {
t := time.Date(year, time.Month(month), day, hour, min, sec, 0, location)
return Datime{value: t}
}
func (dt Datime) String() string {
year, month, day, hour, min, sec := dt.Components()
return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, min, sec)
}
// Components breaks the Datime into calendar parts:
// (year, month, day, hour, minute, second), with month as a plain int (1-12).
func (dt Datime) Components() (int, int, int, int, int, int) {
	year, month, day := dt.value.Date()
	hour := dt.value.Hour()
	min := dt.value.Minute()
	sec := dt.value.Second()
	return year, int(month), day, hour, min, sec
}
func (dt Datime) StartOfDay() Datime {
year, month, day := dt.value.Date()
t := time.Date(year, month, day, 0, 0, 0, 0, location)
return Datime{value: t}
}
func (dt Datime) EndOfDay() Datime {
year, month, day := dt.value.Date()
t := time.Date(year, month, day, 23, 59, 59, 0, location)
return Datime{value: t}
}
// FirstOfWeek returns the Datime moved back to the first day of its week.
// The offset table maps time.Weekday (Sunday == 0) so that Monday is day 0
// of the week, i.e. weeks run Monday through Sunday.
func (dt Datime) FirstOfWeek() Datime {
	offsets := []int{6, 0, 1, 2, 3, 4, 5}
	return dt.AddDay(-offsets[int(dt.value.Weekday())])
}
func (dt Datime) LastOfWeek() Datime {
imap := []int{6, 0, 1, 2, 3, 4, 5}
dayOfWeek := imap[int(dt.value.Weekday())]
return dt.AddDay(6 - dayOfWeek)
}
func (dt Datime) FirstOfMonth() Datime {
year, month, _ := dt.value.Date()
t := time.Date(year, month, 1, 0, 0, 0, 0, location)
return Datime{value: t}
}
// LastOfMonth returns the last day of dt's month, computed as the first of
// the month advanced by one month and then stepped back one day.
func (dt Datime) LastOfMonth() Datime {
	tm := dt.FirstOfMonth().value
	tm = tm.AddDate(0, 1, -1)
	return Datime{value: tm}
}
func (dt Datime) FirstOfYear() Datime {
year, _, _ := dt.value.Date()
t := time.Date(year, 1, 1, 0, 0, 0, 0, location)
return Datime{value: t}
}
func (dt Datime) LastOfYear() Datime {
year, _, _ := dt.value.Date()
t := time.Date(year, 12, 31, 0, 0, 0, 0, location)
return Datime{value: t}
}
func (dt Datime) AddYear(n int) Datime {
t := dt.value.AddDate(n, 0, 0)
return Datime{value: t}
}
func (dt Datime) AddMonth(n int) Datime {
t := dt.value.AddDate(0, n, 0)
return Datime{value: t}
}
func (dt Datime) AddDay(n int) Datime {
t := dt.value.AddDate(0, 0, n)
return Datime{value: t}
}
func (dt Datime) AddHour(n int) Datime {
t := dt.value.Add(time.Duration(n) * time.Hour)
return Datime{value: t}
}
func (dt Datime) AddMinute(n int) Datime {
t := dt.value.Add(time.Duration(n) * time.Minute)
return Datime{value: t}
}
func (dt Datime) AddSecond(n int) Datime {
t := dt.value.Add(time.Duration(n) * time.Second)
return Datime{value: t}
} | shared/dt/dt.go | 0.662469 | 0.606848 | dt.go | starcoder |
package tink
import (
"fmt"
tinkpb "github.com/google/tink/proto/tink_proto"
)
// Entry represents a single entry in the keyset. In addition to the actual primitive,
// it holds the identifier and status of the primitive.
type Entry struct {
primitive interface{}
identifier string
status tinkpb.KeyStatusType
outputPrefixType tinkpb.OutputPrefixType
}
// NewEntry creates a new instance of Entry using the given information.
func NewEntry(p interface{}, id string, stt tinkpb.KeyStatusType,
outputPrefixType tinkpb.OutputPrefixType) *Entry {
return &Entry{
primitive: p,
identifier: id,
status: stt,
outputPrefixType: outputPrefixType,
}
}
func (e *Entry) Primitive() interface{} {
return e.primitive
}
func (e *Entry) Status() tinkpb.KeyStatusType {
return e.status
}
func (e *Entry) Identifier() string {
return e.identifier
}
func (e *Entry) OutputPrefixType() tinkpb.OutputPrefixType {
return e.outputPrefixType
}
// PrimitiveSet is a container for a set of primitives (implementations of
// cryptographic primitives offered by Tink). It also holds additional
// properties for the primitives it contains; in particular, one primitive in
// the set can be distinguished as "the primary" one.
//
// PrimitiveSet is an auxiliary type used to support key rotation: primitives
// in a set correspond to keys in a keyset. Users usually work with primitive
// instances, which essentially wrap primitive sets. For example, an instance
// of an Aead primitive for a given keyset holds a set of Aead primitives
// corresponding to the keys in the keyset, and uses the set members to do the
// actual crypto operations: encryption uses the primary Aead primitive of the
// set, and on decryption the ciphertext's prefix selects the primitive to use.
//
// PrimitiveSet is a public type to allow its use in implementations of custom
// primitives.
type PrimitiveSet struct {
	// Primary entry
	primary *Entry
	// The primitives are stored in a map of
	// (ciphertext prefix, list of primitives sharing the prefix).
	// This allows quickly retrieving the primitives sharing some particular prefix.
	// Because all RAW keys are using an empty prefix, this also quickly allows retrieving them.
	primitives map[string][]*Entry
}
// NewPrimitiveSet returns an empty instance of PrimitiveSet.
func NewPrimitiveSet() *PrimitiveSet {
return &PrimitiveSet{
primary: nil,
primitives: make(map[string][]*Entry),
}
}
// GetRawPrimitives returns all primitives in the set that have RAW prefix.
func (ps *PrimitiveSet) GetRawPrimitives() ([]*Entry, error) {
return ps.GetPrimitivesWithStringIdentifier(RAW_PREFIX)
}
// GetPrimitivesWithKey returns all primitives in the set that have prefix equal
// to that of the given key.
func (ps *PrimitiveSet) GetPrimitivesWithKey(key *tinkpb.Keyset_Key) ([]*Entry, error) {
if key == nil {
return nil, fmt.Errorf("primitive_set: key must not be nil")
}
id, err := GetOutputPrefix(key)
if err != nil {
return nil, fmt.Errorf("primitive_set: %s", err)
}
return ps.GetPrimitivesWithStringIdentifier(id)
}
// GetPrimitivesWithByteIdentifier returns all primitives in the set that have
// the given prefix.
func (ps *PrimitiveSet) GetPrimitivesWithByteIdentifier(id []byte) ([]*Entry, error) {
return ps.GetPrimitivesWithStringIdentifier(string(id))
}
// GetPrimitivesWithStringIdentifier returns all primitives in the set that have
// the given prefix.
func (ps *PrimitiveSet) GetPrimitivesWithStringIdentifier(id string) ([]*Entry, error) {
result, found := ps.primitives[id]
if !found {
return []*Entry{}, nil
}
return result, nil
}
// Primitives returns all primitives of the set, keyed by ciphertext prefix.
func (ps *PrimitiveSet) Primitives() map[string][]*Entry {
	return ps.primitives
}
// Primary returns the entry with the primary primitive.
func (ps *PrimitiveSet) Primary() *Entry {
return ps.primary
}
// SetPrimary sets the primary entry of the set to the given entry.
func (ps *PrimitiveSet) SetPrimary(e *Entry) {
ps.primary = e
}
// AddPrimitive creates a new entry in the primitive set using the given information
// and returns the added entry.
func (ps *PrimitiveSet) AddPrimitive(primitive interface{},
key *tinkpb.Keyset_Key) (*Entry, error) {
if key == nil || primitive == nil {
return nil, fmt.Errorf("primitive_set: key and primitive must not be nil")
}
id, err := GetOutputPrefix(key)
if err != nil {
return nil, fmt.Errorf("primitive_set: %s", err)
}
e := NewEntry(primitive, id, key.Status, key.OutputPrefixType)
ps.primitives[id] = append(ps.primitives[id], e)
return e, nil
} | go/tink/primitive_set.go | 0.778565 | 0.424054 | primitive_set.go | starcoder |
package graph
import (
// "math"
// "image"
"image/color"
"image/draw"
"sort"
// "log"
)
type Point2D struct {
x,y int
}
type Triangle2D struct {
verts []Point2D
}
func (self *Triangle2D) SortByY() {
sort.SliceStable(self.verts, func(i, j int) bool {
return self.verts[i].y < self.verts[j].y
})
}
func (self *Triangle2D) GetVert( i int ) Point2D {
return self.verts[i]
}
func (self *Triangle2D) SetVert( i int, x,y int ) {
self.verts[i].x = x
self.verts[i].y = y
}
func NewTriangle(x1,y1,x2,y2,x3,y3 int ) Triangle2D {
return Triangle2D{ []Point2D { {x1,y1},{x2,y2},{x3,y3} } }
}
// fillBottomFlatTriangle rasterizes a triangle whose flat (horizontal) edge is
// at the bottom: (x1,y1) is the apex and (x2,y2)/(x3,y3) share the bottom
// scanline. It sweeps scanlines from the apex downward, advancing the two edge
// x-intersections by their inverse slopes each row.
// NOTE(review): assumes y2 != y1 and y3 != y1; a fully degenerate triangle
// (all vertices on one scanline) would produce Inf/NaN slopes — confirm
// callers never pass one.
func fillBottomFlatTriangle(dst draw.Image,
	x1, y1, x2, y2, x3, y3 int, color color.Color) {
	invslope1 := float64(x2-x1) / float64(y2-y1)
	invslope2 := float64(x3-x1) / float64(y3-y1)
	curx1 := float64(x1)
	curx2 := float64(x1)
	for scanlineY := y1; scanlineY <= y2; scanlineY++ {
		drawLineH(dst, int(curx1), int(curx2), scanlineY, color)
		curx1 += invslope1
		curx2 += invslope2
	}
}
// fillTopFlatTriangle rasterizes a triangle whose flat (horizontal) edge is at
// the top: (x1,y1)/(x2,y2) share the top scanline and (x3,y3) is the bottom
// apex. It sweeps scanlines from the apex upward; the loop condition
// `scanlineY > y1` stops just above the top edge (FillTriangle's bottom-flat
// half draws that shared scanline).
// NOTE(review): assumes y3 != y1 and y3 != y2 — confirm callers guarantee it.
func fillTopFlatTriangle(dst draw.Image,
	x1, y1, x2, y2, x3, y3 int, color color.Color) {
	invslope1 := float64(x3-x1) / float64(y3-y1)
	invslope2 := float64(x3-x2) / float64(y3-y2)
	curx1 := float64(x3)
	curx2 := float64(x3)
	for scanlineY := y3; scanlineY > y1; scanlineY-- {
		drawLineH(dst, int(curx1), int(curx2), scanlineY, color)
		curx1 -= invslope1
		curx2 -= invslope2
	}
}
// FillTriangle fills triangle on dst with color using scanline rasterization:
// sort the vertices by y, fill directly when one edge is already horizontal,
// otherwise split at the middle vertex's scanline into a bottom-flat and a
// top-flat half.
func FillTriangle(dst draw.Image, triangle Triangle2D, color color.Color) {
	/* at first sort the three vertices by y-coordinate ascending so v1 is the topmost vertice */
	triangle.SortByY()
	v1 := triangle.GetVert(0)
	v2 := triangle.GetVert(1)
	v3 := triangle.GetVert(2)
	/* here we know that y1 <= y2 <= y3 */
	/* check for trivial case of bottom-flat triangle */
	if v2.y == v3.y {
		fillBottomFlatTriangle(dst, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y, color)
	} else if v1.y == v2.y {
		/* check for trivial case of top-flat triangle */
		fillTopFlatTriangle(dst, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y, color)
	} else {
		/* general case - split the triangle in a topflat and bottom-flat one */
		// v4 is the point on edge v1-v3 at v2's scanline: the split point.
		v4 := Point2D{int(float64(v1.x) + (float64(v2.y-v1.y)/float64(v3.y-v1.y))*float64(v3.x-v1.x)), v2.y}
		fillBottomFlatTriangle(dst, v1.x, v1.y, v2.x, v2.y, v4.x, v4.y, color)
		fillTopFlatTriangle(dst, v2.x, v2.y, v4.x, v4.y, v3.x, v3.y, color)
	}
}
func DrawTriangle( dst draw.Image, triangle Triangle2D, color color.Color ) {
v1 := triangle.GetVert(0)
v2 := triangle.GetVert(1)
v3 := triangle.GetVert(2)
DrawLine( dst, v1.x,v1.y, v2.x,v2.y, color )
DrawLine( dst, v2.x,v2.y, v3.x,v3.y, color )
DrawLine( dst, v3.x,v3.y, v1.x,v1.y, color )
} | graph/triangle.go | 0.738103 | 0.512083 | triangle.go | starcoder |
package gart
import (
"image"
)
// Line is a set of two image.Points
type Line struct {
	// x1 and x2 are the segment's two endpoints (full points, despite the
	// coordinate-like names).
	x1, x2 image.Point
}
// Crosses returns true if the other line crosses line.
// Basically, line intersection but looking at end points. It delegates to the
// package-level Crosses with both segments' endpoints.
func (l Line) Crosses(other Line) bool {
	return Crosses(l.x1, l.x2, other.x1, other.x2)
}
// Code borrowed from C++ and https://bit.ly/3jyKGah
// onSegment reports whether q lies within the axis-aligned bounding box of
// segment pr. Callers check colinearity first, so this suffices to test that
// q lies on the segment itself.
func onSegment(p, q, r image.Point) bool {
	return q.X <= MaxInt(p.X, r.X) && q.X >= MinInt(p.X, r.X) &&
		q.Y <= MaxInt(p.Y, r.Y) && q.Y >= MinInt(p.Y, r.Y)
}
// To find orientation of ordered triplet (p, q, r).
// The function returns following values
// 0 --> p, q and r are colinear
// 1 --> Clockwise
// 2 --> Counterclockwise
// (The labels follow standard math axes; with image coordinates' y-down
// convention the visual sense is mirrored.)
func orientation(p, q, r image.Point) int {
	// Sign of the cross product of vectors pq and qr decides the turn.
	val := (q.Y-p.Y)*(r.X-q.X) - (q.X-p.X)*(r.Y-q.Y)
	if val == 0 {
		return 0 // colinear
	}
	if val > 0 {
		return 1 // clockwise
	}
	return 2 // counterclock wise
}
// Crosses returns true if line segment `p1`, `q1` and `p2`, `q2` crosses.
// Standard segment-intersection test: in the general case the segments
// straddle each other (opposite orientations); the special cases catch
// colinear endpoints that lie on the other segment.
func Crosses(p1, q1, p2, q2 image.Point) bool {
	// Find the four orientations needed for general and
	// special cases
	o1 := orientation(p1, q1, p2)
	o2 := orientation(p1, q1, q2)
	o3 := orientation(p2, q2, p1)
	o4 := orientation(p2, q2, q1)
	// General case
	if o1 != o2 && o3 != o4 {
		return true
	}
	// Special Cases
	// p1, q1 and p2 are colinear and p2 lies on segment p1q1
	if o1 == 0 && onSegment(p1, p2, q1) {
		return true
	}
	// p1, q1 and q2 are colinear and q2 lies on segment p1q1
	if o2 == 0 && onSegment(p1, q2, q1) {
		return true
	}
	// p2, q2 and p1 are colinear and p1 lies on segment p2q2
	if o3 == 0 && onSegment(p2, p1, q2) {
		return true
	}
	// p2, q2 and q1 are colinear and q1 lies on segment p2q2
	if o4 == 0 && onSegment(p2, q1, q2) {
		return true
	}
	return false // Doesn't fall in any of the above cases
}
// MinInt returns the smaller of a and b.
func MinInt(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// MaxInt return the max of a and b
func MaxInt(a, b int) int {
if a > b {
return a
}
return b
} | line.go | 0.851336 | 0.583233 | line.go | starcoder |
package main
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
)
// Parameter modes for intcode instruction arguments:
// * ParamModePosition -> Use the value found at slice[n]
// * ParamModeImmediate -> Use the value n
// * ParamModeRelative -> Use the value found at slice[BASE+n]
const (
	paramModePosition = iota
	paramModeImmediate
	paramModeRelative
)

// Intcode instruction opcodes.
const (
	opCodeAdd         = 1
	opCodeMultiply    = 2
	opCodeStore       = 3
	opCodeOutput      = 4
	opCodeJumpIfTrue  = 5
	opCodeJumpIfFalse = 6
	opCodeLessThan    = 7
	opCodeEquals      = 8
	opCodeAdjustBase  = 9
	opCodeHalt        = 99
)

// jumpMap holds each opcode's instruction width (opcode cell plus its
// arguments), used to advance the instruction pointer after execution.
// nolint: gochecknoglobals
var jumpMap = map[int]int{
	opCodeAdd:         4,
	opCodeMultiply:    4,
	opCodeStore:       2,
	opCodeOutput:      2,
	opCodeJumpIfTrue:  3,
	opCodeJumpIfFalse: 3,
	opCodeLessThan:    4,
	opCodeEquals:      4,
	opCodeAdjustBase:  2,
}
// computer is an intcode interpreter.
type computer struct {
	Input         int   // value consumed by opCodeStore
	Output        []int // values produced by opCodeOutput
	Pointer       int   // instruction pointer into Sequence
	Base          int   // relative base used by paramModeRelative
	Sequence      []int // program memory (grown on demand)
	Halted        bool  // set once opCodeHalt executes
	PauseAtOutput bool  // when true, process returns after each output
}

// gridSize is the width and height of the square exploration grid.
const (
	gridSize = 50
)

// Glyphs used to render grid cells.
const (
	wall      = "🚧"
	path      = "◾️"
	unknown   = "🧱"
	oxygen    = "💧"
	robotChar = "🤖"
)

// coordinate identifies a grid cell.
type coordinate struct {
	X int
	Y int
}

// direction is a movement command fed to the intcode computer.
type direction int

const (
	directionNorth = iota + 1
	directionSouth
	directionWest
	directionEast
)

// robot explores the maze by driving an intcode computer.
type robot struct {
	X             int
	Y             int
	Direction     direction
	Grid          map[coordinate]string // discovered tiles keyed by position
	Computer      computer
	oxygenPos     coordinate // where the oxygen tile was found
	oxygenStep    int        // steps from the start to the oxygen tile
	oxygenMaxStep int        // longest path from the oxygen tile (fill time)
}
// String returns the lowercase compass name of the direction, or
// "unknown" for any unrecognized value.
func (d direction) String() string {
	names := map[direction]string{
		directionNorth: "north",
		directionSouth: "south",
		directionWest:  "west",
		directionEast:  "east",
	}
	if name, ok := names[d]; ok {
		return name
	}
	return "unknown"
}
// newRobot places a robot at the center of the grid with a private copy
// of the intcode program, primed to move north and pause at each output.
func newRobot(sequence []int) *robot {
	program := make([]int, len(sequence))
	copy(program, sequence)
	return &robot{
		X:    gridSize / 2,
		Y:    gridSize / 2,
		Grid: map[coordinate]string{},
		Computer: computer{
			Sequence:      program,
			PauseAtOutput: true,
			Input:         directionNorth,
		},
	}
}
// main reads the intcode program named on the command line, explores the
// maze to find the oxygen system (part 1), then floods from the oxygen
// position to measure fill time (part 2).
func main() {
	// Bug fix: errors from ReadFile and Atoi were previously discarded,
	// silently running an all-zero program on a missing or malformed file.
	raw, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "reading input:", err)
		os.Exit(1)
	}
	stringSequence := strings.Split(string(raw), ",")
	sequence := make([]int, len(stringSequence))
	for i := range sequence {
		n, err := strconv.Atoi(strings.TrimSpace(stringSequence[i]))
		if err != nil {
			fmt.Fprintln(os.Stderr, "parsing input:", err)
			os.Exit(1)
		}
		sequence[i] = n
	}
	r1 := newRobot(sequence)
	r1.checkNext(sequence, 1)
	r1.show()
	// Put the robot at the oxygen position after completing the first part.
	r1.X, r1.Y = r1.oxygenPos.X, r1.oxygenPos.Y
	r1.oxygenTime(make(map[coordinate]struct{}), 1)
	fmt.Println("part 1: found oxygen after", r1.oxygenStep, "steps")
	fmt.Println("part 2: time to fill", r1.oxygenMaxStep)
}
// checkNext explores the maze depth-first from the robot's current cell.
// For each direction it restores the saved computer state, issues one move,
// records the reported tile (wall, path, or oxygen) in the grid, and
// recurses when the move succeeded. stepsFromStart counts moves from the
// origin and is recorded as the distance when the oxygen tile is found.
func (r *robot) checkNext(originalSequence []int, stepsFromStart int) {
	var (
		x   = r.X
		y   = r.Y
		seq = make([]int, len(originalSequence))
	)
	// Copy the original sequence passed to this function to sequence.
	copy(seq, originalSequence)
	// Mark the current position as a path.
	r.Grid[coordinate{X: x, Y: y}] = path
	for _, dir := range []direction{directionNorth, directionSouth, directionWest, directionEast} {
		// Copy the sequence passed so we can reset it for each direction.
		copy(r.Computer.Sequence, seq)
		// Reset X and Y for each direction based on where we started.
		r.X = x
		r.Y = y
		// Set the new direction (just for info) and set the direction as input
		// to the computer.
		r.Direction = dir
		r.Computer.Input = int(dir)
		// Get the new coordinates based of the direction we're looking. If
		// we've already been in that direction, move on.
		nextCoordinates := r.nextCoordiantes()
		if _, ok := r.Grid[nextCoordinates]; ok {
			continue
		}
		// Run one clock in the computer.
		r.Computer.process()
		// Fetch the output from the process.
		moveResult := r.Computer.Output[0]
		r.Computer.Output = []int{}
		// 0 = hit a wall, 1 = moved one step, 2 = moved onto the oxygen tile.
		switch moveResult {
		case 0:
			r.Grid[nextCoordinates] = wall
		case 1:
			r.Grid[nextCoordinates] = path
			r.X = nextCoordinates.X
			r.Y = nextCoordinates.Y
			// Keep going this direction
			r.checkNext(r.Computer.Sequence, stepsFromStart+1)
		case 2:
			r.oxygenPos = nextCoordinates
			r.oxygenStep = stepsFromStart
			r.Grid[nextCoordinates] = oxygen
		}
	}
}
// oxygenTime recursively walks every reachable path tile from the robot's
// current position (set to the oxygen tile by main) and records the largest
// step count seen in oxygenMaxStep — the number of minutes for oxygen to
// fill the whole maze. seen prevents revisiting tiles.
func (r *robot) oxygenTime(seen map[coordinate]struct{}, maxSteps int) {
	x, y := r.X, r.Y
	for _, dir := range []direction{directionNorth, directionSouth, directionWest, directionEast} {
		r.Direction = dir
		r.X, r.Y = x, y
		nextCoordinates := r.nextCoordiantes()
		// Skip tiles already flooded.
		if _, ok := seen[nextCoordinates]; ok {
			continue
		}
		typ, ok := r.Grid[nextCoordinates]
		if !ok {
			continue
		}
		// Oxygen only spreads through open path tiles.
		if typ != path {
			continue
		}
		if maxSteps > r.oxygenMaxStep {
			r.oxygenMaxStep = maxSteps
		}
		seen[nextCoordinates] = struct{}{}
		r.X = nextCoordinates.X
		r.Y = nextCoordinates.Y
		r.oxygenTime(seen, maxSteps+1)
	}
}
// show prints the explored grid to stdout, one glyph per cell, with the
// robot's current position drawn over whatever tile it occupies.
func (r *robot) show() {
	for x := 0; x < gridSize; x++ {
		for y := 0; y < gridSize; y++ {
			glyph := unknown
			if cell, ok := r.Grid[coordinate{X: x, Y: y}]; ok {
				glyph = cell
			}
			if x == r.X && y == r.Y {
				glyph = robotChar
			}
			fmt.Print(glyph)
		}
		fmt.Println("")
	}
}
// nextCoordiantes returns the cell one step from the robot's position in
// its current Direction (north/south move along X, west/east along Y).
// NOTE(review): name is a typo for "nextCoordinates"; kept for callers.
func (r *robot) nextCoordiantes() coordinate {
	next := coordinate{X: r.X, Y: r.Y}
	switch r.Direction {
	case directionNorth:
		next.X--
	case directionSouth:
		next.X++
	case directionWest:
		next.Y--
	case directionEast:
		next.Y++
	}
	return next
}
// process executes intcode instructions from the current instruction
// pointer, recursing after each one until the program halts or (when
// PauseAtOutput is set) an output value is produced.
func (c *computer) process() {
	// Low two decimal digits are the opcode; higher digits are parameter modes.
	opCode := c.Sequence[c.Pointer] % 100
	// Halt code found, stop processing.
	if opCode == opCodeHalt {
		c.Halted = true
		return
	}
	// getPointer resolves the memory address of the instruction's
	// argumentPosition-th argument (1-based) according to its parameter
	// mode, growing memory with zeroes if the address is out of range.
	getPointer := func(argumentPosition int) int {
		var (
			pointer       = 0
			modePositions = c.Sequence[c.Pointer] / 100
			// Decimal digit k of modePositions is the mode of argument k.
			positions = map[int]int{
				1: modePositions % 10,
				2: modePositions % 100 / 10,
				3: modePositions % 1000 / 100,
			}
		)
		switch positions[argumentPosition] {
		case paramModePosition:
			pointer = c.Sequence[c.Pointer+argumentPosition]
		case paramModeImmediate:
			pointer = c.Pointer + argumentPosition
		case paramModeRelative:
			pointer = c.Sequence[c.Pointer+argumentPosition] + c.Base
		}
		// Grow memory so the resolved address is always in range.
		if pointer >= len(c.Sequence) {
			c.Sequence = append(c.Sequence, make([]int, pointer-len(c.Sequence)+1)...)
		}
		return pointer
	}
	// sequenceFor reads the value of the pos-th argument.
	sequenceFor := func(pos int) int {
		return c.Sequence[getPointer(pos)]
	}
	switch opCode {
	case opCodeAdd:
		c.Sequence[getPointer(3)] = sequenceFor(1) + sequenceFor(2)
	case opCodeMultiply:
		c.Sequence[getPointer(3)] = sequenceFor(1) * sequenceFor(2)
	case opCodeStore:
		c.Sequence[getPointer(1)] = c.Input
	case opCodeOutput:
		c.Output = append(c.Output, sequenceFor(1))
		if c.PauseAtOutput {
			c.Pointer += jumpMap[opCodeOutput]
			return
		}
	case opCodeJumpIfTrue:
		// Jump targets are pre-adjusted so the unconditional
		// Pointer += jumpMap[opCode] below lands exactly on them.
		if c.Sequence[getPointer(1)] != 0 {
			c.Pointer = sequenceFor(2) - jumpMap[opCodeJumpIfTrue]
		}
	case opCodeJumpIfFalse:
		if c.Sequence[getPointer(1)] == 0 {
			c.Pointer = sequenceFor(2) - jumpMap[opCodeJumpIfFalse]
		}
	case opCodeLessThan:
		if c.Sequence[getPointer(1)] < c.Sequence[getPointer(2)] {
			c.Sequence[getPointer(3)] = 1
		} else {
			c.Sequence[getPointer(3)] = 0
		}
	case opCodeEquals:
		if c.Sequence[getPointer(1)] == c.Sequence[getPointer(2)] {
			c.Sequence[getPointer(3)] = 1
		} else {
			c.Sequence[getPointer(3)] = 0
		}
	case opCodeAdjustBase:
		c.Base += sequenceFor(1)
	default:
		panic(fmt.Sprintf("unknown instruction at position %d, opCode: %d", c.Pointer, opCode))
	}
	c.Pointer += jumpMap[opCode]
	c.process()
}
package types
// Value is a type of simple value.
type Value byte

// simple value types (wasm binary encodings).
// NOTE(review): the WebAssembly spec encodes f32 as 0x7D and f64 as 0x7C;
// the F32/F64 values below look swapped — confirm against the decoder.
const (
	I32 Value = 0x7F
	I64 Value = 0x7E
	F32 Value = 0x7C
	F64 Value = 0x7D
)

// FunctionSig is a signature of function.
type FunctionSig struct {
	Form    byte
	Params  []Value // parameter value types, in order
	Returns []Value // return value types, in order
}

// External is a type of external object.
type External byte

// External entries types
const (
	ExternalFunction External = 0
	ExternalTable    External = 1
	ExternalMemory   External = 2
	ExternalGlobal   External = 3
)

// Importable is an interface of object that we want to import.
type Importable interface{}

// Import is an imported object
type Import struct {
	Module string     // name of the module the object comes from
	Field  string     // name of the object within that module
	Type   Importable // one of ImportFunc/ImportTable/ImportMemory/ImportGlobalVar
}

// ImportFunc is an imported function.
type ImportFunc struct {
	Type uint32
}

// ImportTable is an imported table.
type ImportTable struct {
	Type Table
}

// ImportMemory is an imported memory
type ImportMemory struct {
	Type Memory
}

// ImportGlobalVar is an imported global var.
type ImportGlobalVar struct {
	Type GlobalVar
}

// ResizableLimits is a limit boundaries of other objects
type ResizableLimits struct {
	Flags   uint32 // 1 if the Maximum field is valid
	Initial uint32 // initial length (in units of table elements or wasm pages)
	Maximum uint32 // If flags is 1, it describes the maximum size of the table or memory
}

// Table represents a table.
type Table struct {
	ElementType byte
	Limits      ResizableLimits
}

// Memory represents a memory.
type Memory struct {
	Limits ResizableLimits
}

// GlobalVar represents a global var.
type GlobalVar struct {
	Type    Value // Type of the value stored by the variable
	Mutable bool  // Whether the value of the variable can be changed by the set_global operator
}

// GlobalEntry is an entry in globals
type GlobalEntry struct {
	Type *GlobalVar // Type holds information about the value type and mutability of the variable
	Init []byte     // Init is an initializer expression that computes the initial value of the variable
}

// LocalEntry is an entry in locals.
type LocalEntry struct {
	Count uint32 // The total number of local variables of the given Type used in the function body
	Type  Value  // The type of value stored by the variable
}

// ExportEntry is an export entry.
type ExportEntry struct {
	Name  string
	Kind  External
	Index uint32
}

// ElementSegment is an element.
type ElementSegment struct {
	Index  uint32 // The index into the global table space, should always be 0 in the MVP.
	Offset []byte // initializer expression for computing the offset for placing elements, should return an i32 value
	Elems  []uint32
}

// FunctionBody is an bytes and args of a function.
type FunctionBody struct {
	Locals []LocalEntry
	Code   []byte
}

// DataSegment an entry in data section.
type DataSegment struct {
	Index  uint32 // The index into the global linear memory space, should always be 0 in the MVP.
	Offset []byte // initializer expression for computing the offset for placing elements, should return an i32 value
	Data   []byte
}
package streams
import "constraints"
// zero returns the zero value of T.
func zero[T any]() T {
	var t T
	return t
}

// More wraps a value as a non-exhausted stream result.
func More[T any](t T) (T, bool) { return t, true }

// Done signals stream exhaustion with T's zero value.
func Done[T any]() (T, bool) { return zero[T](), false }

// Stream is a pull-based iterator: each call yields the next element and
// true, or the zero value and false once the stream is exhausted.
type Stream[T any] func() (T, bool)

// Elements yields the elements of s in index order.
func Elements[T any, Slice ~[]T](s Slice) Stream[T] {
	return Map(Indices[T](s), func(i int) T {
		return s[i]
	})
}

// Recieve yields values received from c until it is closed.
// NOTE(review): name is a typo for "Receive"; kept for compatibility.
func Recieve[T any, Chan ~chan T](c Chan) Stream[T] {
	return func() (T, bool) {
		val, has_val := <-c
		return val, has_val
	}
}
// Map returns a stream that lazily applies f to each element of in.
func Map[A, B any](in Stream[A], f func(A) B) Stream[B] {
	return func() (B, bool) {
		val, has_val := in()
		if !has_val {
			return Done[B]()
		}
		return More(f(val))
	}
}

// ForEach drains s, calling f on every element.
func ForEach[T any](s Stream[T], f func(T)) {
	for val, has_val := s(); has_val; val, has_val = s() {
		f(val)
	}
}

// Control tells ForEachControl whether to keep iterating.
type Control int

const (
	// Break stops the iteration early.
	Break Control = iota
	// Continue proceeds with the next element.
	Continue
)

// ForEachControl drains s, calling f on every element, stopping early
// when f returns Break.
func ForEachControl[T any](s Stream[T], f func(T) Control) {
	for val, has_val := s(); has_val; val, has_val = s() {
		cntl := f(val)
		if cntl == Break {
			break
		}
	}
}

// Reduce folds s into a single value, starting from init and combining
// the accumulator with each element via f.
func Reduce[A, B any](s Stream[A], init B, f func(B, A) B) B {
	ForEach(s, func(a A) {
		init = f(init, a)
	})
	return init
}
// Filter yields only the elements of s for which f reports true.
func Filter[T any](s Stream[T], f func(T) bool) Stream[T] {
	return func() (T, bool) {
		for v, ok := s(); ok; v, ok = s() {
			if f(v) {
				return More(v)
			}
		}
		return Done[T]()
	}
}

// Range yields the integers from a (inclusive) up to b (exclusive).
func Range[Int constraints.Integer](a, b Int) Stream[Int] {
	cur := a
	return func() (Int, bool) {
		if cur == b {
			return Done[Int]()
		}
		v := cur
		cur++
		return More(v)
	}
}

// Indices yields every valid index of s, in order.
func Indices[T any, Slice ~[]T](s Slice) Stream[int] {
	return Range(0, len(s))
}
// Iota yields 0, 1, 2, ... without end.
func Iota() Stream[int] {
	counter := 0
	return Infinite(func() int {
		v := counter
		counter++
		return v
	})
}

// Pair holds two values of possibly different types.
type Pair[A, B any] struct {
	First  A
	Second B
}

// Zip pairs elements of a and b positionally, ending as soon as either
// stream is exhausted. a is always pulled before b.
func Zip[A, B any](a Stream[A], b Stream[B]) Stream[Pair[A, B]] {
	return func() (Pair[A, B], bool) {
		av, aok := a()
		if !aok {
			return Done[Pair[A, B]]()
		}
		bv, bok := b()
		if !bok {
			return Done[Pair[A, B]]()
		}
		return More(Pair[A, B]{First: av, Second: bv})
	}
}
// Infinite turns a generator function into a never-ending stream.
func Infinite[T any](f func() T) Stream[T] {
	return func() (T, bool) {
		return f(), true
	}
}

// Collect drains s into a slice (non-nil even when s is empty).
func Collect[T any](s Stream[T]) []T {
	out := []T{}
	ForEach(s, func(v T) {
		out = append(out, v)
	})
	return out
}

// Take collects at most i elements from s into a slice.
func Take[T any](s Stream[T], i int) []T {
	out := []T{}
	ForEach(Zip(Range(0, i), s), func(p Pair[int, T]) {
		out = append(out, p.Second)
	})
	return out
}
// IndexedValue pairs an element with its index.
type IndexedValue[T any] struct {
	Index int
	Value T
}

// Enumerate yields each element of s together with its index.
func Enumerate[T any, Slice ~[]T](s Slice) Stream[IndexedValue[T]] {
	return Map(
		Zip(Indices[T](s), Elements[T](s)),
		func(p Pair[int, T]) IndexedValue[T] {
			return IndexedValue[T]{Index: p.First, Value: p.Second}
		})
}

// Chain concatenates streams, exhausting each one in turn.
func Chain[T any](streams ...Stream[T]) Stream[T] {
	i := 0
	return func() (T, bool) {
		for i < len(streams) {
			val, has_val := streams[i]()
			if !has_val {
				// Current stream exhausted; move to the next one.
				i++
				continue
			}
			return More(val)
		}
		return Done[T]()
	}
}
package hll
// divideBy8RoundUp returns i/8 rounded toward positive infinity, i.e.
// the number of bytes needed to hold i bits.
func divideBy8RoundUp(i int) int {
	quotient := i >> 3
	if i&0x7 != 0 {
		quotient++
	}
	return quotient
}
// readBits reads nBits from the provided address in the byte array and returns
// them as the LSB of a uint64. The address is the 0-indexed bit position where
// 0 equates to the MSB in the 0th byte, 7 is the LSB in the 0th byte, 8 is
// the MSB bit in the 1st byte, and so on.
func readBits(bytes []byte, addr int, nBits int) uint64 {
	idx := addr >> 3  /*divide by 8*/
	pos := addr & 0x7 /*mod 8*/
	value := uint64(0)
	bitsRequired := nBits
	for bitsRequired > 0 {
		// Read at most the bits remaining in the current byte.
		bitsAvailable := 8 - pos
		if bitsAvailable > bitsRequired {
			bitsAvailable = bitsRequired
		}
		// this is effectively a no-op on the first loop...zero will stay zero.
		// on subsequent loops, it shifts the accumulated value left to make
		// room for the bits read from this byte.
		value = value << uint(bitsAvailable)
		// Mask selecting bitsAvailable bits starting at bit offset pos.
		mask := ((byte(1) << uint(bitsAvailable)) - 1) << uint(8-pos-bitsAvailable)
		bits := bytes[idx] & mask
		// Right-align the extracted bits unless they already end at the LSB.
		if pos+bitsAvailable != 8 {
			bits = bits >> uint(8-(pos+bitsAvailable))
		}
		value |= uint64(bits)
		pos += bitsAvailable
		// advance to the next byte if required.
		if pos == 8 {
			idx++
			pos = 0
		}
		bitsRequired -= bitsAvailable
	}
	return value
}
// writeBits writes the nBits least significant bits of value to the provided
// address in the byte array. The address is the 0-indexed bit position where 0
// equates to the MSB in the 0th byte, 7 is the LSB in the 0th byte, 8 is
// the MSB bit in the 1st byte, and so on. Bits are ORed in, so the target
// region is assumed to be zero beforehand.
func writeBits(bytes []byte, addr int, value uint64, nBits int) {
	idx := addr >> 3  /*divide by 8*/
	pos := addr & 0x7 /*mod 8*/
	for nBits > 0 {
		// Write at most the bits remaining in the current byte.
		bitsToWrite := 8 - pos
		if bitsToWrite > nBits {
			bitsToWrite = nBits
		}
		// Take the highest bitsToWrite of the remaining value bits.
		mask := byte(1<<uint(bitsToWrite)) - 1
		partToWrite := mask & byte(value>>uint(nBits-bitsToWrite))
		// shift into position if required.
		if pos+bitsToWrite != 8 {
			partToWrite = partToWrite << uint(8-(pos+bitsToWrite))
		}
		bytes[idx] = bytes[idx] | partToWrite
		pos += bitsToWrite
		// Advance to the next byte once this one is full.
		if pos == 8 {
			idx++
			pos = 0
		}
		nBits -= bitsToWrite
	}
}
package search
import (
"github.com/chewxy/math32"
"github.com/zhenghaoz/gorse/base"
"github.com/zhenghaoz/gorse/base/floats"
"go.uber.org/zap"
"modernc.org/sortutil"
"reflect"
"sort"
)
// Vector is an item comparable by distance that carries a set of terms
// and may be flagged as hidden.
type Vector interface {
	Distance(vector Vector) float32
	Terms() []string
	IsHidden() bool
}

// DenseVector is a Vector backed by a dense float32 slice.
type DenseVector struct {
	data     []float32
	terms    []string
	isHidden bool
}

// NewDenseVector wraps the given data, terms, and hidden flag.
func NewDenseVector(data []float32, terms []string, isHidden bool) *DenseVector {
	return &DenseVector{
		data:     data,
		terms:    terms,
		isHidden: isHidden,
	}
}

// Distance returns the negated dot product of the two vectors, so a
// larger dot product means a smaller distance. Logs a fatal error if
// vector is not a *DenseVector.
func (v *DenseVector) Distance(vector Vector) float32 {
	feedbackVector, isFeedback := vector.(*DenseVector)
	if !isFeedback {
		base.Logger().Fatal("vector type mismatch",
			zap.String("expect", reflect.TypeOf(v).String()),
			zap.String("actual", reflect.TypeOf(vector).String()))
	}
	return -floats.Dot(v.data, feedbackVector.data)
}

// Terms returns the terms attached to this vector.
func (v *DenseVector) Terms() []string {
	return v.terms
}

// IsHidden reports the hidden flag set at construction.
func (v *DenseVector) IsHidden() bool {
	return v.isHidden
}
// DictionaryVector is a sparse vector: indices holds the positions of
// non-zero entries (kept sorted) and values is the dense table those
// positions index into.
type DictionaryVector struct {
	isHidden bool
	terms    []string
	indices  []int32
	values   []float32
	norm     float32
}

// NewDictionaryVector builds a DictionaryVector, sorting indices in place
// and precomputing the norm used by Distance.
// NOTE(review): norm is sqrt of the sum of values[i] (not values[i]^2),
// so this is not a Euclidean norm — confirm the weighting is intended.
func NewDictionaryVector(indices []int32, values []float32, terms []string, isHidden bool) *DictionaryVector {
	sort.Sort(sortutil.Int32Slice(indices))
	var norm float32
	for _, i := range indices {
		norm += values[i]
	}
	norm = math32.Sqrt(norm)
	return &DictionaryVector{
		isHidden: isHidden,
		terms:    terms,
		indices:  indices,
		values:   values,
		norm:     norm,
	}
}

// Dot merge-joins the two sorted index lists and returns the sum of this
// vector's values at the shared indices, plus the count of shared indices.
func (v *DictionaryVector) Dot(vector *DictionaryVector) (float32, float32) {
	i, j, sum, common := 0, 0, float32(0), float32(0)
	for i < len(v.indices) && j < len(vector.indices) {
		if v.indices[i] == vector.indices[j] {
			sum += v.values[v.indices[i]]
			common++
			i++
			j++
		} else if v.indices[i] < vector.indices[j] {
			i++
		} else if v.indices[i] > vector.indices[j] {
			j++
		}
	}
	return sum, common
}
// similarityShrink dampens similarities supported by few common terms:
// the raw score is scaled by common/(common+similarityShrink).
const similarityShrink = 100

// Distance returns a negated, shrunk similarity (more negative = more
// similar), or 0 when the vectors share nothing positive. Logs a fatal
// error if vector is not a *DictionaryVector.
func (v *DictionaryVector) Distance(vector Vector) float32 {
	var score float32
	if dictVec, isDictVec := vector.(*DictionaryVector); !isDictVec {
		base.Logger().Fatal("vector type mismatch",
			zap.String("expect", reflect.TypeOf(v).String()),
			zap.String("actual", reflect.TypeOf(vector).String()))
	} else {
		dot, common := v.Dot(dictVec)
		if dot > 0 {
			score = -dot / v.norm / dictVec.norm * common / (common + similarityShrink)
		}
	}
	return score
}

// Terms returns the terms attached to this vector.
func (v *DictionaryVector) Terms() []string {
	return v.terms
}

// IsHidden reports the hidden flag set at construction.
func (v *DictionaryVector) IsHidden() bool {
	return v.isHidden
}

// VectorIndex is a buildable, searchable index over vectors.
type VectorIndex interface {
	Build()
	Search(q Vector, n int, prune0 bool) ([]int32, []float32)
	MultiSearch(q Vector, terms []string, n int, prune0 bool) (map[string][]int32, map[string][]float32)
}
package matrix
import (
"encoding/json"
"errors"
"fmt"
"math/rand"
"time"
)
// MappingFunction transforms one matrix element, given its current value
// and its (row i, column j) position.
type MappingFunction func(val float64, i, j int) float64

// Matrix is a dense rows x cols matrix of float64 values.
type Matrix struct {
	rows, cols int
	data       [][]float64 // row-major: data[row][col]
}

// New allocates a zero-initialized rows x cols matrix.
func New(rows, cols int) *Matrix {
	mat := &Matrix{rows: rows,
		cols: cols,
	}
	a := make([][]float64, rows)
	for i := range a {
		a[i] = make([]float64, cols)
	}
	mat.data = a
	return mat
}
// FromArray builds a column vector (len(arr) rows, 1 column) from arr.
func FromArray(arr []float64) *Matrix {
	mat := New(len(arr), 1)
	// cols is 1, so the column index is always 0 and the row index alone
	// addresses arr. (Previously written as arr[i+j], which only worked
	// because j was always 0 — misleading.)
	mat.Map(func(_ float64, i, _ int) float64 {
		return arr[i]
	})
	return mat
}
// Transpose returns a new matrix whose element (i, j) is m's (j, i).
func Transpose(m *Matrix) *Matrix {
	t := New(m.cols, m.rows)
	t.Map(func(_ float64, i, j int) float64 {
		return m.data[j][i]
	})
	return t
}

// DotMultiply computes the matrix product a*b. It returns an error when
// the inner dimensions do not match.
func DotMultiply(a, b *Matrix) (*Matrix, error) {
	if a.cols != b.rows {
		return nil, errors.New("columns of A must match rows of B")
	}
	out := New(a.rows, b.cols)
	out.Map(func(_ float64, row, col int) float64 {
		// Dot product of a's row with b's column.
		acc := 0.0
		for k := 0; k < a.cols; k++ {
			acc += a.data[row][k] * b.data[k][col]
		}
		return acc
	})
	return out, nil
}
// MapStatic returns a new matrix built by applying fn to every element
// of m; m itself is left untouched.
func MapStatic(m *Matrix, fn MappingFunction) *Matrix {
	out := New(m.rows, m.cols)
	out.Map(func(_ float64, i, j int) float64 {
		return fn(m.data[i][j], i, j)
	})
	return out
}
// Deserialize decodes a matrix from the JSON produced by Serialize.
func Deserialize(data []byte) (*Matrix, error) {
	// Bug fix: json.Unmarshal ignores unexported fields, so decoding
	// directly into Matrix always produced an empty matrix. Decode into
	// an exported view and copy into the unexported fields instead.
	var dto struct {
		Rows int         `json:"rows"`
		Cols int         `json:"cols"`
		Data [][]float64 `json:"data"`
	}
	if err := json.Unmarshal(data, &dto); err != nil {
		return nil, err
	}
	return &Matrix{rows: dto.Rows, cols: dto.Cols, data: dto.Data}, nil
}
// ToArray flattens the matrix into a row-major []float64 of length
// rows*cols.
func (m *Matrix) ToArray() []float64 {
	// Bug fix: the slice was created with make([]float64, rows*cols) and
	// then appended to, yielding a result of twice the expected length
	// with a zero-filled prefix. Allocate capacity only and append.
	arr := make([]float64, 0, m.rows*m.cols)
	for i := 0; i < m.rows; i++ {
		for j := 0; j < m.cols; j++ {
			arr = append(arr, m.data[i][j])
		}
	}
	return arr
}
// Randomize fills the matrix with pseudo-random values in [0, 1) and
// returns it.
func (m *Matrix) Randomize() *Matrix {
	// Bug fix: the previous version created a fresh source seeded with
	// time.Now().UnixNano() for every element, so elements generated
	// within the same nanosecond received identical values. Seed one
	// generator and reuse it for the whole matrix.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return m.Map(func(_ float64, _, _ int) float64 {
		return rng.Float64()
	})
}
// Copy returns a deep copy of the matrix.
func (m *Matrix) Copy() *Matrix {
	dup := New(m.rows, m.cols)
	dup.Map(func(_ float64, i, j int) float64 {
		return m.data[i][j]
	})
	return dup
}

// Map applies fn to every element in place and returns the matrix to
// allow chaining.
func (m *Matrix) Map(fn MappingFunction) *Matrix {
	for i := range m.data {
		for j := range m.data[i] {
			m.data[i][j] = fn(m.data[i][j], i, j)
		}
	}
	return m
}

// MultiplyScalar scales every element by scalar, in place.
func (m *Matrix) MultiplyScalar(scalar float64) *Matrix {
	return m.Map(func(v float64, _, _ int) float64 {
		return v * scalar
	})
}
// MultiplyMatrix performs an element-wise (Hadamard) product with mat,
// in place. Returns an error when the dimensions differ.
func (m *Matrix) MultiplyMatrix(mat *Matrix) (*Matrix, error) {
	if m.rows != mat.rows || m.cols != mat.cols {
		return nil, errors.New("columns and rows must match")
	}
	// hadamard product
	return m.Map(func(v float64, i, j int) float64 {
		return v * mat.data[i][j]
	}), nil
}

// Subtract returns a new matrix holding a - b, element-wise. Returns an
// error when the dimensions differ.
func Subtract(a, b *Matrix) (*Matrix, error) {
	if a.rows != b.rows || a.cols != b.cols {
		return nil, errors.New("columns and rows of A and B must match")
	}
	diff := New(a.rows, a.cols)
	diff.Map(func(_ float64, i, j int) float64 {
		return a.data[i][j] - b.data[i][j]
	})
	return diff, nil
}

// Add adds the scalar n to every element, in place.
func (m *Matrix) Add(n float64) *Matrix {
	return m.Map(func(v float64, _, _ int) float64 {
		return v + n
	})
}

// AddMatrix adds mat element-wise, in place. Returns an error when the
// dimensions differ.
func (m *Matrix) AddMatrix(mat *Matrix) (*Matrix, error) {
	if m.rows != mat.rows || m.cols != mat.cols {
		return nil, errors.New("columns and rows of matrices must match")
	}
	return m.Map(func(v float64, i, j int) float64 {
		return v + mat.data[i][j]
	}), nil
}
// Print writes the raw backing data of the matrix to stdout.
func (m *Matrix) Print() {
	fmt.Println(m.data)
}
//Serialize ...
func (m *Matrix) Serialize() ([]byte, error) {
return json.Marshal(m)
} | internal/matrix/matrix.go | 0.631026 | 0.446133 | matrix.go | starcoder |
package netfilter
import (
"encoding/binary"
"fmt"
"github.com/mdlayher/netlink"
)
// NewAttributeDecoder instantiates a new netlink.AttributeDecoder
// configured with a Big Endian byte order.
func NewAttributeDecoder(b []byte) (*netlink.AttributeDecoder, error) {
	ad, err := netlink.NewAttributeDecoder(b)
	if err != nil {
		return nil, err
	}
	// All Netfilter attribute payloads are big-endian. (network byte order)
	ad.ByteOrder = binary.BigEndian
	return ad, nil
}

// NewAttributeEncoder instantiates a new netlink.AttributeEncoder
// configured with a Big Endian byte order.
func NewAttributeEncoder() *netlink.AttributeEncoder {
	ae := netlink.NewAttributeEncoder()
	// All Netfilter attribute payloads are big-endian. (network byte order)
	ae.ByteOrder = binary.BigEndian
	return ae
}
// An Attribute is a copy of a netlink.Attribute that can be nested.
type Attribute struct {
	// The type of this Attribute, typically matched to a constant.
	Type uint16
	// An arbitrary payload which is specified by Type.
	Data []byte
	// Whether the attribute's data contains nested attributes.
	Nested   bool
	Children []Attribute
	// Whether the attribute's data is in network (true) or native (false) byte order.
	NetByteOrder bool
}

// String renders the attribute for debugging; nested attributes render
// their children recursively instead of their raw payload.
func (a Attribute) String() string {
	if a.Nested {
		return fmt.Sprintf("<Length %d, Type %d, Nested %t, %d Children (%v)>", len(a.Data), a.Type, a.Nested, len(a.Children), a.Children)
	}
	return fmt.Sprintf("<Length %d, Type %d, Nested %t, NetByteOrder %t, %v>", len(a.Data), a.Type, a.Nested, a.NetByteOrder, a.Data)
}
// Uint16 interprets a non-nested Netfilter attribute in network byte order as a uint16.
// Panics when the attribute is nested or its payload is not exactly 2 bytes.
func (a Attribute) Uint16() uint16 {
	if a.Nested {
		panic("Uint16: unexpected Nested attribute")
	}
	if l := len(a.Data); l != 2 {
		panic(fmt.Sprintf("Uint16: unexpected byte slice length: %d", l))
	}
	return binary.BigEndian.Uint16(a.Data)
}

// PutUint16 sets the Attribute's data field to a Uint16 encoded in net byte order.
// The existing Data slice is reused when it is already exactly 2 bytes long.
func (a *Attribute) PutUint16(v uint16) {
	if len(a.Data) != 2 {
		a.Data = make([]byte, 2)
	}
	binary.BigEndian.PutUint16(a.Data, v)
}
// Uint32 interprets a non-nested Netfilter attribute in network byte order as a uint32.
// Panics when the attribute is nested or its payload is not exactly 4 bytes.
func (a Attribute) Uint32() uint32 {
	if a.Nested {
		panic("Uint32: unexpected Nested attribute")
	}
	if l := len(a.Data); l != 4 {
		panic(fmt.Sprintf("Uint32: unexpected byte slice length: %d", l))
	}
	return binary.BigEndian.Uint32(a.Data)
}

// PutUint32 sets the Attribute's data field to a Uint32 encoded in net byte order.
// The existing Data slice is reused when it is already exactly 4 bytes long.
func (a *Attribute) PutUint32(v uint32) {
	if len(a.Data) != 4 {
		a.Data = make([]byte, 4)
	}
	binary.BigEndian.PutUint32(a.Data, v)
}

// Int32 converts the result of Uint32() to an int32.
func (a Attribute) Int32() int32 {
	return int32(a.Uint32())
}
// Uint64 interprets a non-nested Netfilter attribute in network byte order as a uint64.
// Panics when the attribute is nested or its payload is not exactly 8 bytes.
func (a Attribute) Uint64() uint64 {
	if a.Nested {
		panic("Uint64: unexpected Nested attribute")
	}
	if l := len(a.Data); l != 8 {
		panic(fmt.Sprintf("Uint64: unexpected byte slice length: %d", l))
	}
	return binary.BigEndian.Uint64(a.Data)
}

// PutUint64 sets the Attribute's data field to a Uint64 encoded in net byte order.
// The existing Data slice is reused when it is already exactly 8 bytes long.
func (a *Attribute) PutUint64(v uint64) {
	if len(a.Data) != 8 {
		a.Data = make([]byte, 8)
	}
	binary.BigEndian.PutUint64(a.Data, v)
}

// Int64 converts the result of Uint64() to an int64.
func (a Attribute) Int64() int64 {
	return int64(a.Uint64())
}
// Uint16Bytes gets the big-endian 2-byte representation of a uint16.
func Uint16Bytes(u uint16) []byte {
	return []byte{byte(u >> 8), byte(u)}
}

// Uint32Bytes gets the big-endian 4-byte representation of a uint32.
func Uint32Bytes(u uint32) []byte {
	return []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
}

// Uint64Bytes gets the big-endian 8-byte representation of a uint64.
func Uint64Bytes(u uint64) []byte {
	out := make([]byte, 8)
	for i := 0; i < 8; i++ {
		out[i] = byte(u >> uint(56-8*i))
	}
	return out
}
// decode fills the Attribute's Children field with Attributes
// obtained by exhausting ad, decoding nested attributes recursively.
// It returns an error when an attribute carries both the Nested and
// NetByteOrder flags, or when the decoder itself failed.
func (a *Attribute) decode(ad *netlink.AttributeDecoder) error {
	for ad.Next() {
		// Copy the netlink attribute's fields into the netfilter attribute.
		nfa := Attribute{
			// Only consider the rightmost 14 bits for Type.
			// ad.Type implicitly masks the Nested and NetByteOrder bits.
			Type: ad.Type(),
			Data: ad.Bytes(),
		}
		// Boolean flags extracted from the two leftmost bits of Type.
		nfa.Nested = ad.TypeFlags()&netlink.Nested != 0
		nfa.NetByteOrder = ad.TypeFlags()&netlink.NetByteOrder != 0
		// The two flags are mutually exclusive.
		if nfa.NetByteOrder && nfa.Nested {
			return errInvalidAttributeFlags
		}
		// Unmarshal recursively if the netlink Nested flag is set.
		if nfa.Nested {
			ad.Nested(nfa.decode)
		}
		a.Children = append(a.Children, nfa)
	}
	return ad.Err()
}
// encode returns a function that takes an AttributeEncoder and returns error.
// This function can be passed to AttributeEncoder.Nested for recursively
// encoding Attributes. Attributes carrying both the Nested and
// NetByteOrder flags cause an error.
func (a *Attribute) encode(attrs []Attribute) func(*netlink.AttributeEncoder) error {
	return func(ae *netlink.AttributeEncoder) error {
		for _, nfa := range attrs {
			// The two flags are mutually exclusive.
			if nfa.NetByteOrder && nfa.Nested {
				return errInvalidAttributeFlags
			}
			if nfa.Nested {
				ae.Nested(nfa.Type, nfa.encode(nfa.Children))
				continue
			}
			// Manually set the NetByteOrder flag, since ae.Bytes() can't.
			if nfa.NetByteOrder {
				nfa.Type |= netlink.NetByteOrder
			}
			ae.Bytes(nfa.Type, nfa.Data)
		}
		return nil
	}
}
// decodeAttributes returns an array of netfilter.Attributes decoded from
// a byte array. This byte array should be taken from the netlink.Message's
// Data payload after the nfHeaderLen offset. The result is nil when the
// decoder holds no attributes.
func decodeAttributes(ad *netlink.AttributeDecoder) ([]Attribute, error) {
	// Use the Children element of the Attribute to decode into.
	// Attribute already has nested decoding implemented on the type.
	var a Attribute
	// Pre-allocate backing array when there are netlink attributes to decode.
	if ad.Len() != 0 {
		a.Children = make([]Attribute, 0, ad.Len())
	}
	// Catch any errors encountered parsing netfilter structures.
	if err := a.decode(ad); err != nil {
		return nil, err
	}
	return a.Children, nil
}
// encodeAttributes encodes a list of Attributes into the given
// netlink.AttributeEncoder, delegating to Attribute.encode via a
// throwaway root attribute. Returns an error for a nil encoder.
func encodeAttributes(ae *netlink.AttributeEncoder, attrs []Attribute) error {
	if ae == nil {
		return errNilAttributeEncoder
	}
	var root Attribute
	return root.encode(attrs)(ae)
}
// MarshalAttributes marshals a nested attribute structure into a byte slice.
// This byte slice can then be copied into a netlink.Message's Data field after
// the nfHeaderLen offset.
func MarshalAttributes(attrs []Attribute) ([]byte, error) {
ae := NewAttributeEncoder()
if err := encodeAttributes(ae, attrs); err != nil {
return nil, err
}
b, err := ae.Encode()
if err != nil {
return nil, err
}
return b, nil
}
// UnmarshalAttributes unmarshals a byte slice into a list of Attributes.
func UnmarshalAttributes(b []byte) ([]Attribute, error) {
ad, err := NewAttributeDecoder(b)
if err != nil {
return nil, err
}
return decodeAttributes(ad)
} | vendor/github.com/ti-mo/netfilter/attribute.go | 0.832373 | 0.449393 | attribute.go | starcoder |
package identifiers
import (
"errors"
"go.dedis.ch/onet/v3/log"
"regexp"
"strconv"
)
/*
Defines and manipulates the identifiers that are meant to be encrypted by Unlynx for the purpose of answering queries.
Convention: 64 bits integers.
Genomic variant:
1 bit (2): flag genomic variant (1)
5 bits (32): chromosome id
28 bits (268'435'456): start position of the mutation (1-based coordinate system)
3 bits (8): length in # bases of the reference allele
12 bits (4'096): reference allele (6 bases)
3 bits (8): length in # bases of the alternative allele (mutated)
12 bits (4'096): alternative allele (6 bases)
*/
// IDBitSize size in bits of the identifier.
const IDBitSize = 64

// Breakdown of the size in bits. GetVariantID checks that
// TypeFlag + Chr + Pos + 2*(AllelesBaseLength + Alleles) == IDBitSize.
const (
	TypeFlagBitSize          = 1
	ChrBitSize               = 5
	PosBitSize               = 28
	AllelesBaseLengthBitSize = 3
	AllelesBitSize           = 12
)

// Regex expressions validating the textual inputs of GetVariantID.
const (
	/*
		Valid values for the chromosome id:
		Number from 1 to 23 inclusive, or
		X, Y, or M
		-> Range from 1 to 26 inclusive, 2^5 = 32 ==> 5 bits storage
	*/
	ChromosomeIDRegex = "^([XYM]|[1-9]|(1[0-9])|(2[0-3]))$"
	/*
		Valid values for the alleles.
		Either nothing ("-") or a certain number of bases ({A, T, G, C}).
		Each [A, T, G, C] base is encoded on 2 bits.
		The maximum number of bases supported is 6 -> 12bits and an additional 3 bits are used to encode the length.
	*/
	AllelesRegex = "^([ATCG]{1,6}|-)$"
)

// Mapping to encode non-numeric chromosome ids (numeric ids use 1-23).
const (
	ChromosomeXintID = int64(24)
	ChromosomeYintID = int64(25)
	ChromosomeMintID = int64(26)
)

// TypeFlagGenomicVariant encodes the type of id.
const TypeFlagGenomicVariant = int64(1)

/*
	Possible range of positions values (position in 1-based coordinate system, minimum is 1).
	Result is encoded into bits so the range is rounded to the nearest power of 2.
	According to https://en.wikipedia.org/wiki/Human_genome#Molecular_organization_and_gene_content,
	the chromosome with the higher number of base is #1 with 248'956'422 bases. 2^28 = 268'435'456.
	==> 28 bits storage
*/
const (
	PositionMin = int64(1)
	PositionMax = int64(1) << PosBitSize
)
// AlleleMaping maps a single base ("A", "T", "G", "C") to its 2-bit code
// (0-3); any other input yields -1 and an error.
// NOTE(review): name is a typo for "AlleleMapping"; kept for callers.
func AlleleMaping(allele string) (int64, error) {
	codes := map[string]int64{
		"A": 0,
		"T": 1,
		"G": 2,
		"C": 3,
	}
	if code, ok := codes[allele]; ok {
		return code, nil
	}
	return int64(-1), errors.New("wrong allele format")
}
// checkRegex returns nil when input matches expression, otherwise an
// error carrying errorMessage. Panics (via MustCompile) on an invalid
// expression.
func checkRegex(input, expression, errorMessage string) error {
	if regexp.MustCompile(expression).MatchString(input) {
		return nil
	}
	return errors.New(errorMessage)
}
// GetVariantID encodes a genomic variant ID to be encrypted, according to the
// specifications: [type flag | chromosome | position | ref length | ref bases |
// alt length | alt bases] packed MSB-first into a 64-bit integer.
// Returns -1 and an error on any invalid input.
func GetVariantID(chromosomeID string, startPosition int64, refAlleles, altAlleles string) (int64, error) {
	// validate input (formats via regex, position range, and that the
	// declared bit-field widths sum to IDBitSize)
	if checkRegex(chromosomeID, ChromosomeIDRegex, "Invalid Chromosome ID") != nil ||
		checkRegex(refAlleles, AllelesRegex, "Invalid reference allele") != nil || checkRegex(altAlleles, AllelesRegex, "Invalid alternate allele") != nil ||
		startPosition < PositionMin || startPosition > PositionMax || TypeFlagBitSize+ChrBitSize+PosBitSize+2*(AllelesBaseLengthBitSize+AllelesBitSize) != IDBitSize {
		return int64(-1), errors.New("Invalid input: chr=" + chromosomeID + ", pos=" + strconv.FormatInt(startPosition, 10) + ", ref=" + refAlleles + ", alt=" + altAlleles)
	}
	// interpret chromosome id (content validated by regex)
	chromosomeIntID, err := strconv.ParseInt(chromosomeID, 10, 64)
	if err != nil {
		// Non-numeric ids (X, Y, M) map to dedicated integer codes.
		switch chromosomeID {
		case "X":
			chromosomeIntID = ChromosomeXintID
			break
		case "Y":
			chromosomeIntID = ChromosomeYintID
			break
		case "M":
			chromosomeIntID = ChromosomeMintID
			break
		default:
			log.Fatal("Invalid Chromosome ID")
			return int64(-1), err
		}
	}
	// alleles: "-" denotes an empty allele
	if refAlleles == "-" {
		refAlleles = ""
	}
	if altAlleles == "-" {
		altAlleles = ""
	}
	refAllelesBaseLength := int64(len(refAlleles))
	altAllelesBaseLength := int64(len(altAlleles))
	// generate the variant by pushing each field onto the id, MSB-first
	id := int64(0)
	id = PushBitsFromRight(id, TypeFlagBitSize, TypeFlagGenomicVariant)
	id = PushBitsFromRight(id, ChrBitSize, chromosomeIntID)
	id = PushBitsFromRight(id, PosBitSize, startPosition)
	id = PushBitsFromRight(id, AllelesBaseLengthBitSize, refAllelesBaseLength)
	id = PushBitsFromRight(id, AllelesBitSize, EncodeAlleles(refAlleles))
	id = PushBitsFromRight(id, AllelesBaseLengthBitSize, altAllelesBaseLength)
	id = PushBitsFromRight(id, AllelesBitSize, EncodeAlleles(altAlleles))
	return id, nil
}
// EncodeAlleles encodes a string containing alleles.
// Each base is packed as 2 bits (see AlleleMaping), then the result is
// left-aligned within the AllelesBitSize field by zero-padding on the right.
func EncodeAlleles(alleles string) int64 {
	encodedAlleles := int64(0)
	for i := 0; i < len(alleles); i++ {
		mapV, err := AlleleMaping(alleles[i : i+1])
		if err != nil {
			// NOTE(review): log.Fatal exits the whole process on a bad base;
			// callers are presumably expected to validate via AllelesRegex
			// first — confirm whether returning an error would be safer.
			log.Fatal(err)
		}
		encodedAlleles = PushBitsFromRight(encodedAlleles, 2, mapV)
	}
	//padding: shift the encoded bases to the most significant end of the field
	encodedAlleles = PushBitsFromRight(encodedAlleles, AllelesBitSize-len(alleles)*2, int64(0))
	return encodedAlleles
}
// PushBitsFromRight takes the nbBits rightmost bits of bitsToPush, and push them to the right of origBits.
func PushBitsFromRight(origBits int64, nbBits int, bitsToPush int64) int64 {
	// Make room on the right, then OR in the masked payload bits.
	shifted := origBits << uint(nbBits)
	return shifted | (bitsToPush & GetMask(nbBits))
}
// GetMask generates a bit mask (support pushing bits): the nbBits lowest
// bits set. Non-positive widths yield 0; widths of 64 or more yield -1
// (all bits set), matching the behavior of the bit-by-bit construction.
func GetMask(nbBits int) int64 {
	if nbBits <= 0 {
		return 0
	}
	// For nbBits >= 64 the shift evaluates to 0, so the result is -1.
	return (int64(1) << uint(nbBits)) - 1
}
package core
import (
"math"
)
// calculateLinearRegressionCoefficients fits y = a*x + b to the points by
// ordinary least squares and returns (a, b). If every X equals the mean X
// the slope is a division by zero (NaN/Inf), as in the original.
func calculateLinearRegressionCoefficients(points []Point) (float64, float64) {
	mean := calculateAveragePoint(points)
	var num, den float64
	for _, p := range points {
		dx := p.X - mean.X
		num += dx * (p.Y - mean.Y)
		den += dx * dx
	}
	slope := num / den
	intercept := mean.Y - slope*mean.X
	return slope, intercept
}
// calculateSSEForBucket returns the sum of squared errors of the points
// against their own least-squares regression line.
func calculateSSEForBucket(points []Point) float64 {
	slope, intercept := calculateLinearRegressionCoefficients(points)
	var sse float64
	for _, p := range points {
		residual := p.Y - (slope*p.X + intercept)
		sse += residual * residual
	}
	return sse
}
// calculateSSEForBuckets computes the SSE of every interior bucket, each
// extended with one point from either neighbor (per the LTD paper). The
// first bucket gets no entry (it holds a single point) and a trailing 0 is
// appended so the result is indexable like buckets.
func calculateSSEForBuckets(buckets [][]Point) []float64 {
	sse := make([]float64, len(buckets)-2)
	// We skip the first and last buckets since they only contain one data point
	for i := 1; i < len(buckets)-1; i++ {
		prevBucket := buckets[i-1]
		currBucket := buckets[i]
		nextBucket := buckets[i+1]
		// Bug fix: the original copy loop ran i from 1 to len(currBucket)-1
		// (shadowing the outer i), so the LAST point of currBucket was never
		// copied and a zero-valued Point was fed into the regression.
		extended := make([]Point, len(currBucket)+2)
		extended[0] = prevBucket[len(prevBucket)-1]
		copy(extended[1:len(extended)-1], currBucket)
		extended[len(extended)-1] = nextBucket[0]
		sse[i-1] = calculateSSEForBucket(extended)
	}
	sse = append(sse, 0)
	return sse
}
// findLowestSSEAdjacentBucketsIndex returns the index i of the adjacent
// pair (i, i+1) with the smallest SSE sum, skipping any pair touching
// ignoreIndex. Returns -1 when no eligible pair exists.
func findLowestSSEAdjacentBucketsIndex(sse []float64, ignoreIndex int) int {
	best := float64(math.MaxInt64)
	bestIdx := -1
	for i := 1; i < len(sse)-2; i++ {
		// The pair must not contain the bucket being split.
		if i == ignoreIndex || i+1 == ignoreIndex {
			continue
		}
		if sum := sse[i] + sse[i+1]; sum < best {
			best = sum
			bestIdx = i
		}
	}
	return bestIdx
}
// findHighestSSEBucketIndex returns the interior bucket with the largest
// SSE among buckets that can still be split (more than one point), or -1.
func findHighestSSEBucketIndex(buckets [][]Point, sse []float64) int {
	bestIdx := -1
	best := 0.0
	for i := 1; i < len(sse)-1; i++ {
		// Single-point buckets cannot be split, so they are never candidates.
		if len(buckets[i]) < 2 {
			continue
		}
		if sse[i] > best {
			best = sse[i]
			bestIdx = i
		}
	}
	return bestIdx
}
// splitBucketAt splits the bucket at index into two roughly equal halves;
// when the size is odd the first half gets the extra point. Out-of-range
// indices and buckets with fewer than two points are returned unchanged.
func splitBucketAt(buckets [][]Point, index int) [][]Point {
	if index < 0 || index >= len(buckets) {
		return buckets
	}
	bucket := buckets[index]
	bucketSize := len(bucket)
	if bucketSize < 2 {
		return buckets
	}
	// Bug fix: the original computed int(math.Ceil(float64(bucketSize / 2)))
	// — the integer division truncates before Ceil runs, so Ceil was a no-op
	// — and then sliced bucket[0:n+1] / bucket[n:], which DUPLICATED the
	// boundary point so the two halves held bucketSize+1 points in total.
	half := (bucketSize + 1) / 2
	// Full slice expression caps bucketA so a later append to it cannot
	// overwrite bucketB through the shared backing array.
	bucketA := bucket[:half:half]
	bucketB := bucket[half:]
	var newBuckets [][]Point
	newBuckets = append(newBuckets, buckets[0:index]...)
	newBuckets = append(newBuckets, bucketA, bucketB)
	newBuckets = append(newBuckets, buckets[index+1:]...)
	return newBuckets
}
// mergeBucketAt merges the buckets at index and index+1 into one bucket.
// Out-of-range indices are returned unchanged.
func mergeBucketAt(buckets [][]Point, index int) [][]Point {
	if index < 0 || index >= len(buckets)-1 {
		return buckets
	}
	// Bug fix: the original appended directly to buckets[index]. When that
	// slice had spare capacity (e.g. a sub-slice produced by splitBucketAt),
	// the append wrote into the shared backing array and corrupted the
	// neighboring bucket's data. Build the merged bucket in fresh storage.
	a, b := buckets[index], buckets[index+1]
	merged := make([]Point, 0, len(a)+len(b))
	merged = append(merged, a...)
	merged = append(merged, b...)
	newBuckets := make([][]Point, 0, len(buckets)-1)
	newBuckets = append(newBuckets, buckets[0:index]...)
	newBuckets = append(newBuckets, merged)
	newBuckets = append(newBuckets, buckets[index+2:]...)
	return newBuckets
}
// LTD implements the Largest-Triangle-Dynamic (LTD) data downsampling algorithm.
// - Require: data . The original data
// - Require: threshold . Number of data points to be returned
func LTD(data []Point, threshold int) []Point {
	if threshold >= len(data) || threshold == 0 {
		return data // Nothing to do
	}
	// 1: Split the data into equal number of buckets as the threshold but have the first
	// bucket only containing the first data point and the last bucket containing only
	// the last data point . First and last buckets are then excluded in the bucket
	// resizing
	// 2: Calculate the SSE for the buckets accordingly . With one point in adjacent
	// buckets overlapping
	// 3: while halting condition is not met do . For example, using formula 4.2
	// 4: Find the bucket F with the highest SSE
	// 5: Find the pair of adjacent buckets A and B with the lowest SSE sum . The
	// pair should not contain F
	// 6: Split bucket F into roughly two equal buckets . If bucket F contains an odd
	// number of points then one bucket will contain one more point than the other
	// 7: Merge the buckets A and B
	// 8: Calculate the SSE of the newly split up and merged buckets
	// 9: end while.
	// 10: Use the Largest-Triangle-Three-Buckets algorithm on the resulting bucket configuration
	// to select one point per buckets
	//1: Split the data into equal number of buckets as the threshold.
	buckets := splitDataBucket(data, threshold)
	// Halting condition: a fixed iteration budget proportional to the
	// downsampling ratio (not the paper's formula 4.2).
	numIterations := len(data) * 10 / threshold
	for iter := 0; iter < numIterations; iter++ {
		// 2: Calculate the SSE for the buckets accordingly.
		sseForBuckets := calculateSSEForBuckets(buckets)
		// 4: Find the bucket F with the highest SSE
		highestSSEBucketIndex := findHighestSSEBucketIndex(buckets, sseForBuckets)
		if highestSSEBucketIndex < 0 {
			// No splittable bucket remains; stop early.
			break
		}
		// 5: Find the pair of adjacent buckets A and B with the lowest SSE sum .
		lowestSSEAdajacentBucketIndex := findLowestSSEAdjacentBucketsIndex(sseForBuckets, highestSSEBucketIndex)
		if lowestSSEAdajacentBucketIndex < 0 {
			break
		}
		// 6: Split bucket F into roughly two equal buckets . If bucket F contains an odd
		// number of points then one bucket will contain one more point than the other
		buckets = splitBucketAt(buckets, highestSSEBucketIndex)
		// 7: Merge the buckets A and B
		// The split above inserted one bucket, so every index after the split
		// point has shifted right by one; adjust the merge index accordingly.
		if lowestSSEAdajacentBucketIndex > highestSSEBucketIndex {
			lowestSSEAdajacentBucketIndex++
		}
		buckets = mergeBucketAt(buckets, lowestSSEAdajacentBucketIndex)
	}
	// 10: Use the Largest-Triangle-Three-Buckets algorithm on the resulting bucket
	return LTTBForBuckets(buckets)
}
package limiters
import (
"fmt"
"time"
"github.com/garyburd/redigo/redis"
)
// RateRedisCounter represents redis-based sharded counter.
// Each time slice of width `resolution` gets its own redis key (a bucket);
// the counter's total is the sum of all buckets within the last `period`.
type RateRedisCounter struct {
	timer        timer         // time source (defaultTimer in production)
	period       time.Duration // total window the counter covers
	resolution   time.Duration // width of one bucket
	bucketsCount int           // period / resolution
	prefix       string        // buckets are keyed "cnt:<prefix>:<shard>"
	conn         redis.Conn    // connection used for all redis commands
}
// NewRateRedisCounter initializes new Redis-based sharded counter.
// prefix: a custom string prefix that will be added to the each bucket key
// period: defines the maximum period for which we count, e.g. 5m, 1h
// resolution: defines the resolution of a counter, i.e. the minimum time period of a counter bucket.
// counter value is calculated as a sum of all bucket values within the last period
func NewRateRedisCounter(conn redis.Conn, prefix string, period time.Duration, resolution time.Duration) *RateRedisCounter {
	// Validates the period/resolution pair (see assertPeriodAndResolutionCorrect).
	assertPeriodAndResolutionCorrect(period, resolution)
	counter := &RateRedisCounter{
		timer:        defaultTimer,
		conn:         conn,
		prefix:       prefix,
		period:       period,
		resolution:   resolution,
		bucketsCount: int(period / resolution),
	}
	return counter
}
// IncrBy adds the given value to this counter.
// The increment lands in the bucket for the current time shard; the bucket's
// TTL is refreshed so it expires one second after it falls out of the
// counting period (it must stay readable for Total() until then).
func (c *RateRedisCounter) IncrBy(val int64) error {
	curTimeShard := c.timer.UnixNano() / int64(c.resolution)
	key := c.makeKey(curTimeShard)
	// Send buffers commands client-side; errors were previously ignored,
	// which could silently drop the whole pipeline.
	if err := c.conn.Send("MULTI"); err != nil {
		return err
	}
	if err := c.conn.Send("INCRBY", key, val); err != nil {
		return err
	}
	// TTL is the full period plus 1s of slack (not "resolution + 1s" as the
	// old comment claimed).
	if err := c.conn.Send("EXPIRE", key, int(c.period/time.Second)+1); err != nil {
		return err
	}
	_, err := c.conn.Do("EXEC")
	return err
}
// makeKey builds the redis key of the bucket covering the given time shard.
func (c *RateRedisCounter) makeKey(timeShard int64) string {
	return fmt.Sprintf("cnt:%s:%d", c.prefix, timeShard)
}
// Total returns the total value of this counter by summing every bucket
// within the last period (bucketsCount buckets, newest first). Buckets
// that were never written or have already expired count as zero.
func (c *RateRedisCounter) Total() (int64, error) {
	var total int64
	curTimeShard := c.timer.UnixNano() / int64(c.resolution)
	for i := 0; i < c.bucketsCount; i++ {
		key := c.makeKey(curTimeShard - int64(i))
		val, err := redis.Int64(c.conn.Do("GET", key))
		if err == redis.ErrNil {
			// Missing bucket: no increments in that time slice.
			continue
		} else if err != nil {
			return 0, err
		}
		total += val
	}
	// NOTE(review): this issues bucketsCount sequential GETs; a single MGET
	// would cut round trips — confirm before changing the wire behavior.
	return total, nil
}
package main
import (
"fmt"
"math"
"os"
"github.com/unixpickle/essentials"
"github.com/unixpickle/model3d/model3d"
"github.com/unixpickle/model3d/render3d"
)
// main builds a Cornell-box-style scene (two spheres, a glass diamond, a
// room, and an area light), renders it with a recursive path tracer, and
// writes the result to output.png.
func main() {
	// Join all the objects into a mega-object.
	object := render3d.JoinedObject{
		// Mirror ball.
		&render3d.ColliderObject{
			Collider: &model3d.Sphere{
				Center: model3d.XYZ(2, 7, 0),
				Radius: 2,
			},
			Material: &render3d.PhongMaterial{
				Alpha:         400.0,
				SpecularColor: render3d.NewColor(1),
			},
		},
		// Red ball.
		&render3d.ColliderObject{
			Collider: &model3d.Sphere{
				Center: model3d.XYZ(-2, 5.5, -1),
				Radius: 1,
			},
			Material: &render3d.PhongMaterial{
				Alpha:         10.0,
				SpecularColor: render3d.NewColor(0.1),
				DiffuseColor:  render3d.NewColorRGB(0.95, 0.2, 0.2).Scale(0.5),
			},
		},
		// Glass diamond.
		&render3d.ColliderObject{
			Collider: LoadDiamond(),
			Material: &render3d.JoinedMaterial{
				Materials: []render3d.Material{
					&render3d.RefractMaterial{
						IndexOfRefraction: 1.3,
						RefractColor:      render3d.NewColor(0.9),
					},
					&render3d.PhongMaterial{
						Alpha:         50.0,
						SpecularColor: render3d.NewColor(0.1),
					},
				},
				// Mostly refraction with a small specular highlight.
				Probs: []float64{0.9, 0.1},
			},
		},
		// Room walls.
		&render3d.ColliderObject{
			Collider: model3d.MeshToCollider(
				model3d.NewMeshRect(
					model3d.XYZ(-5, -10, -2),
					model3d.XYZ(5, 10, 7),
				).MapCoords(model3d.XYZ(-1, 1, 1).Mul),
			),
			Material: &render3d.LambertMaterial{
				DiffuseColor: render3d.NewColor(0.4),
			},
		},
		// Ceiling light.
		&render3d.ColliderObject{
			Collider: model3d.MeshToCollider(
				model3d.NewMeshRect(
					model3d.XYZ(-2, 5, 6.8),
					model3d.XYZ(2, 7, 7),
				),
			),
			Material: &render3d.LambertMaterial{
				// Make it really bright so it lights the scene
				// adequately.
				EmissionColor: render3d.NewColor(25),
			},
		},
	}
	renderer := render3d.RecursiveRayTracer{
		Camera: render3d.NewCameraAt(model3d.Coord3D{Y: -7, Z: 2.5},
			model3d.Coord3D{Y: 10, Z: 2.5}, math.Pi/3.6),
		// Focus reflections towards the light source
		// to lower variance (i.e. grain).
		FocusPoints: []render3d.FocusPoint{
			&render3d.PhongFocusPoint{
				Target: model3d.XYZ(0, 6, 6.9),
				Alpha:  40.0,
				MaterialFilter: func(m render3d.Material) bool {
					if _, ok := m.(*render3d.LambertMaterial); ok {
						return true
					} else if phong, ok := m.(*render3d.PhongMaterial); ok {
						return phong.DiffuseColor.Sum() > 0
					} else {
						// Don't focus sharp materials like refraction
						// and specular-only phong materials.
						return false
					}
				},
			},
		},
		FocusPointProbs: []float64{0.3},
		MaxDepth:        5,
		NumSamples:      400,
		Antialias:       1.0,
		Cutoff:          1e-4,
		LogFunc: func(p, samples float64) {
			fmt.Printf("\rRendering %.1f%%...", p*100)
		},
	}
	// Report the estimated per-ray variance before committing to the render.
	fmt.Println("Ray variance:", renderer.RayVariance(object, 200, 200, 5))
	img := render3d.NewImage(200, 200)
	renderer.Render(img, object)
	fmt.Println()
	img.Save("output.png")
}
func LoadDiamond() model3d.Collider {
r, err := os.Open("diamond.stl")
essentials.Must(err)
triangles, err := model3d.ReadSTL(r)
essentials.Must(err)
mesh := model3d.NewMeshTriangles(triangles)
// Put the diamond on its side.
mesh = mesh.Rotate(model3d.Y(1), 0.5*math.Pi+math.Atan(1/1.2))
mesh = mesh.Translate(model3d.YZ(4, -(2 + mesh.Min().Z)))
return model3d.MeshToCollider(mesh)
} | examples/renderings/cornell_box/main.go | 0.559771 | 0.404625 | main.go | starcoder |
package expect
import (
"github.com/tinyhubs/et/et"
"testing"
)
// Equal is used to check if exp equals to got.
func Equal(t *testing.T, exp, got interface{}) {
	et.ExpectInner(t, "", &et.Equal{exp, got}, 2)
}
// Equali is used to check if exp equals to got, reporting message on failure.
func Equali(t *testing.T, message string, exp, got interface{}) {
	et.ExpectInner(t, message, &et.Equal{exp, got}, 2)
}
// NotEqual is used to check if exp is not equals to got
func NotEqual(t *testing.T, exp, got interface{}) {
	et.ExpectInner(t, "", &et.NotEqual{exp, got}, 2)
}
// NotEquali is used to check if exp is not equals to got, reporting message on failure.
func NotEquali(t *testing.T, message string, exp, got interface{}) {
	et.ExpectInner(t, message, &et.NotEqual{exp, got}, 2)
}
// True is used to check the got be true.
func True(t *testing.T, got bool) {
	et.ExpectInner(t, "", &et.True{got}, 2)
}
// Truei is used to check the got be true, reporting message on failure.
func Truei(t *testing.T, message string, got bool) {
	et.ExpectInner(t, message, &et.True{got}, 2)
}
// False is used to check the got be false.
func False(t *testing.T, got bool) {
	et.ExpectInner(t, "", &et.False{got}, 2)
}
// Falsei is used to check the got be false, reporting message on failure.
func Falsei(t *testing.T, message string, got bool) {
	et.ExpectInner(t, message, &et.False{got}, 2)
}
// Panic is used to check the fn should give a panic.
func Panic(t *testing.T, fn func()) {
	et.ExpectInner(t, "", &et.Panic{fn}, 2)
}
// Panici is used to check the fn should give a panic, reporting message on failure.
func Panici(t *testing.T, message string, fn func()) {
	et.ExpectInner(t, message, &et.Panic{fn}, 2)
}
// NoPanic is used to check the fn should not give a panic.
func NoPanic(t *testing.T, fn func()) {
	et.ExpectInner(t, "", &et.NoPanic{fn}, 2)
}
// NoPanici is used to check the fn should not give a panic, reporting message on failure.
func NoPanici(t *testing.T, message string, fn func()) {
	et.ExpectInner(t, message, &et.NoPanic{fn}, 2)
}
// Match is used to check the got is match to the regular expression of exp.
func Match(t *testing.T, regex string, got string) {
	et.ExpectInner(t, "", &et.Match{regex, got}, 2)
}
// Matchi is used to check the got is match to the regular expression of exp, reporting message on failure.
func Matchi(t *testing.T, message string, regex string, got string) {
	et.ExpectInner(t, message, &et.Match{regex, got}, 2)
}
// NotMatch is used to check the got does not match the regular expression.
func NotMatch(t *testing.T, regex string, got string) {
	et.ExpectInner(t, "", &et.NotMatch{regex, got}, 2)
}
// NotMatchi is used to check the got does not match the regular expression, reporting message on failure.
func NotMatchi(t *testing.T, message string, regex string, got string) {
	et.ExpectInner(t, message, &et.NotMatch{regex, got}, 2)
}
// Nil is used to check the got be nil.
func Nil(t *testing.T, got interface{}) {
	et.ExpectInner(t, "", &et.Nil{got}, 2)
}
// Nili is used to check the got be nil, reporting message on failure.
func Nili(t *testing.T, message string, got interface{}) {
	et.ExpectInner(t, message, &et.Nil{got}, 2)
}
// NotNil is used to check the got be not nil.
func NotNil(t *testing.T, got interface{}) {
	et.ExpectInner(t, "", &et.NotNil{got}, 2)
}
// NotNili is used to check the got be not nil, reporting message on failure.
func NotNili(t *testing.T, message string, got interface{}) {
	et.ExpectInner(t, message, &et.NotNil{got}, 2)
}
package pgbatch
import (
"fmt"
)
// Command describes one statement in a batch of sql commands.
// Query is the sql query to execute (required).
// ArgsFunc is called before execution for query arguments (optional).
// Args are query parameters (optional). Ignored if ArgsFunc is non-nil.
// ScanOnce is the scan function for reading at most one row (optional).
// Scan is the scan function for reading each row (optional).
// If ScanOnce is non-nil, Scan is ignored.
// Affect is the number of rows that should be affected.
// If Affect is zero (default), it is not checked (the command is run as a
// query instead of an exec).
// If Affect is negative, no rows should be affected.
// If Affect is positive, that should be the number of affected rows.
type Command struct {
	Query    string
	ArgsFunc func() []interface{}
	Args     []interface{}
	ScanOnce func(fn func(...interface{}) error) error
	Scan     func(fn func(...interface{}) error) error
	Affect   int64
}
// Batch executes a batch of commands in a single transaction.
// If any error occurs, the transaction will be rolled back and the error
// returned; otherwise the transaction is committed and any commit error
// is returned to the caller.
func (handler *PostgresHandler) Batch(commands []Command) error {
	tx, err := handler.Pool.Begin()
	if err != nil {
		return err
	}
	// Rollback after a successful Commit is a no-op error we deliberately ignore.
	defer tx.Rollback()
	for _, command := range commands {
		args := command.Args
		if command.ArgsFunc != nil {
			// ArgsFunc takes precedence over the static Args.
			args = command.ArgsFunc()
		}
		if command.Affect != 0 {
			result, err := tx.Exec(command.Query, args...)
			if err != nil {
				return err
			}
			affected, err := result.RowsAffected()
			if err != nil {
				return err
			}
			// Negative Affect means "no rows should be affected".
			expected := command.Affect
			if expected < 0 {
				expected = 0
			}
			if expected != affected {
				return fmt.Errorf(expectedDifferentAffect, expected, affected, command.Query)
			}
		} else {
			rows, err := tx.Query(command.Query, args...)
			if err != nil {
				return err
			}
			if command.ScanOnce != nil {
				// Read at most one row.
				if rows.Next() {
					if err := command.ScanOnce(rows.Scan); err != nil {
						// Bug fix: rows were previously leaked on scan errors.
						rows.Close()
						return err
					}
				}
			} else if command.Scan != nil {
				for rows.Next() {
					if err := command.Scan(rows.Scan); err != nil {
						// Bug fix: rows were previously leaked on scan errors.
						rows.Close()
						return err
					}
				}
			}
			if err := rows.Err(); err != nil {
				rows.Close()
				return err
			}
			if err := rows.Close(); err != nil {
				return err
			}
		}
	}
	// Bug fix: the Commit error was previously discarded, so callers could
	// believe a failed batch had been persisted.
	return tx.Commit()
}
const expectedDifferentAffect = "Expected to affect %v rows, but %v rows affected for query: `%v`" | batch.go | 0.538741 | 0.439687 | batch.go | starcoder |
package english
import (
"sort"
"strings"
)
// Words returns sorted list of all the English words defined by this
// package.
//
// NOTE(review): the returned slice is the package-level backing slice, so a
// caller mutating it would affect every other caller — confirm whether a
// defensive copy is warranted.
func Words() []string { return _words }
// splitWords collects every distinct whitespace-separated word across all
// of the package's word-list constants and returns them sorted.
func splitWords() []string {
	// A set suffices here; the original counted occurrences into a
	// map[string]int but never read the counts.
	seen := make(map[string]struct{})
	for _, words := range []string{
		SinglePrepositionWords,
		HowAdverbWords,
		WhenAdverbWords,
		WhereAdverbWords,
		WhatExtentAdverbWords,
		VerbWords,
		NounWords,
		PersonalPronounWords,
		PossessivePronounWords,
		IndependentPossessivePronounWords,
		ObjectPronounWords,
		IndefinitePronounWords,
		ReflexivePronounWords,
		DemonstrativePronounWords,
		InterrogativePronounWords,
		RelativePronounWords,
		ArchaicPronounWords,
		AdjectiveWords,
		QuestionWords,
		ConjunctionWords,
	} {
		for _, word := range strings.Fields(words) {
			seen[word] = struct{}{}
		}
	}
	// Preallocate: the final length is exactly the set size.
	res := make([]string, 0, len(seen))
	for word := range seen {
		res = append(res, word)
	}
	sort.Strings(res)
	return res
}
// _words caches the sorted, de-duplicated word list; built once at startup.
var _words []string
func init() {
	_words = splitWords()
}
// Exported word-list constants. Each aliases the unexported backing string
// defined below so the raw data stays in one place.
const (
	WordCount = 1411 // All distinct words
	// The words in each constant are sorted.
	SinglePrepositionWords = _SinglePrepositionWords
	HowAdverbWords = _HowAdverbWords
	WhenAdverbWords = _WhenAdverbWords
	WhereAdverbWords = _WhereAdverbWords
	WhatExtentAdverbWords = _WhatExtentAdverbWords
	VerbWords = _VerbWords
	NounWords = _NounWords
	PersonalPronounWords = _PersonalPronounWords
	PossessivePronounWords = _PossessivePronounWords
	IndependentPossessivePronounWords = _IndependentPossessivePronounWords
	ObjectPronounWords = _ObjectPronounWords
	IndefinitePronounWords = _IndefinitePronounWords
	ReflexivePronounWords = _ReflexivePronounWords // also same as intensive pronouns
	DemonstrativePronounWords = _DemonstrativePronounWords
	InterrogativePronounWords = _InterrogativePronounWords
	RelativePronounWords = _RelativePronounWords
	ArchaicPronounWords = _ArchaicPronounWords
	AdjectiveWords = _AdjectiveWords
	QuestionWords = _QuestionWords
	ConjunctionWords = _ConjunctionWords
)
const (
_SinglePrepositionWords = `about beside near to above between of
towards across beyond off under after by on underneath against despite
onto unlike along down opposite until among during out up around
except outside upon as for over via at from past with before in round
within behind inside since without below into than beneath like
through`
_HowAdverbWords = `absentmindedly adoringly awkwardly beatifully
briskly brutally carefully cheerfully competitively eagerly
effortlessly extravagantly girlishly gracefully grimly happily
halfheartedly hungrily lazily lifelessly loyally quickly quitely
quizzically really recklessly remorsefully ruthlessly savagely
sloppily so stylishly unabashedly unevenly urgently well wishfully
worriedly`
_WhenAdverbWords = `after afterwards annually before daily never
now soon still then today tomorrow weekly when yesterday`
_WhereAdverbWords = `abroad anywhere away down everywhere here
home in inside out outside somewhere there underground upstairs`
_WhatExtentAdverbWords = `extremely not quite rather really
terribly too very`
_VerbWords = `accept accuse achieve acknowledge acquire adapt add
adjust admire admit adopt adore advise afford agree aim allow announce
anticipate apologize appear apply appreciate approach approve argue
arise arrange arrive ask assume assure astonish attach attempt attend
attract avoid awake bake bathe be bear beat become beg begin behave
believe belong bend bet bind bite blow boil borrow bounce bow break
breed bring broadcast build burn burst buy calculate can could care
carry catch celebrate change choose chop claim climb cling come commit
communicate compare compete complain complete concern confirm consent
consider consist consult contain continue convince cook cost count
crawl create creep criticize cry cut dance dare deal decide defer
delay deliver demand deny depend describe deserve desire destroy
determine develop differ disagree discover discuss dislike distribute
dive do doubt drag dream drill drink drive drop dry earn eat emphasize
enable encourage engage enhance enjoy ensure entail enter establish
examine exist expand expect experiment explain explore extend fail
fall feed feel fight find finish fit fly fold follow forbid forget
forgive freeze fry generate get give go grind grow hang happen hate
have hear hesitate hide hit hold hop hope hug hurry hurt identify
ignore illustrate imagine imply impress improve include incorporate
indicate inform insist install intend introduce invest investigate
involve iron jog jump justify keep kick kiss kneel knit know lack
laugh lay lead lean leap learn leave lend lie lift light lie like
listen look lose love maintain make manage matter may mean measure
meet melt mention might mind miss mix mow must need neglect negotiate
observe obtain occur offer open operate order organize ought overcome
overtake owe own paint participate pay peel perform persuade pinch
plan play point possess postpone pour practice prefer prepare pretend
prevent proceed promise propose protect prove pull punch pursue push
put qualify quit react read realize recall receive recollect recommend
reduce refer reflect refuse regret relate relax relieve rely remain
remember remind repair replace represent require resent resist retain
retire rid ride ring rise risk roast run sanction satisfy say scrub
see seem sell send serve set settle sew shake shall shed shine shoot
should show shrink shut sing sink sit ski sleep slice slide slip smell
snore solve sow speak specify spell spend spill spit spread squat
stack stand start steal stick sting stink stir stop stretch strike
struggle study submit succeed suffer suggest supply suppose surprise
survive swear sweep swell swim swing take talk taste teach tear tell
tend think threaten throw tiptoe tolerate translate try understand
vacuum value vary volunteer wait wake walk want warn wash watch wave
wear weep weigh whip will win wish would write`
_NounWords = `account act adjustment advertisement agreement air
amount amusement angle animal answer ant apparatus apple approval arch
argument arm army art attack attempt attention attraction authority
baby back bag balance ball band base basin basket bath bed bee
behavior belief bell berry bird birth bit bite blade blood blow board
boat body bone book boot bottle box boy brain brake branch brass bread
breath brick bridge brother brush bucket building bulb burn burst
business butter button cake camera canvas card care carriage cart cat
cause chain chalk chance change cheese chess chin church circle clock
cloth cloud coal coat collar color comb comfort committee company
comparison competition condition connection control cook copper copy
copy cord cork cough country cover cow crack credit crime crush cry
cup current curtain curve cushion damage danger daughter day death
debt decision degree design desire destruction detail development
digestion direction discovery discussion disease disgust distance
distribution division dog door doubt drain drawer dress drink driving
drop dust ear earth edge education effect egg end engine error event
example exchange existence expansion experience expert eye face fact
fall family farm father fear feather feeling fiction field fight
finger fire fish flag flame flight floor flower fly fold food foot
force fork form fowl frame friend front fruit garden girl glass glove
goat gold government grain grass grip group growth guide gun hair
hammer hand harbor harmony hat hate head hearing heart heat help
history hole hook hope horn horse hospital hour house humor ice idea
impulse increase industry ink insect instrument insurance interest
invention iron island jelly jewel join journey judge jump kettle key
kick kiss knee knife knot knowledge land language laugh lead leaf
learning leather leg letter level library lift light limit line linen
lip liquid list lock look loss love low machine man manager map mark
market mass match meal measure meat meeting memory metal middle milk
mind mine minute mist money monkey month moon morning mother motion
mountain mouth move muscle music nail name nation neck need needle
nerve net news night noise nose note number nut observation offer
office oil operation opinion orange order organization ornament oven
owner page pain paint paper parcel part paste payment peace pen pencil
person picture pig pin pipe place plane plant plate play pleasure
plough pocket point poison polish porter position pot potato powder
power price print prison process produce profit property prose protest
pull pump punishment purpose push quality question rail rain range rat
rate ray reaction reading reason receipt record regret relation
religion representative request respect rest reward rhythm rice ring
river road rod roll roof room root rub rule run sail salt sand scale
school science scissors screw sea seat secretary seed selection self
sense servant sex shade shake shame sheep shelf ship shirt shock shoe
side sign silk silver sister size skin skirt sky sleep slip slope
smash smell smile smoke snake sneeze snow soap society sock son song
sort sound soup space spade sponge spoon spring square stage stamp
star start statement station steam steel stem step stick stitch
stocking stomach stone stop store story street stretch structure
substance sugar suggestion summer sun support surprise swim system
table tail talk taste tax teaching tendency test theory thing thought
thread throat thumb thunder ticket time tin toe tongue tooth top touch
town trade train transport tray tree trick trouble trousers turn twist
umbrella unit use value verse vessel view voice walk wall war wash
waste watch water wave wax way weather week weight wheel whip whistle
wind window wine wing winter wire woman wood wool word work worm wound
writing year`
_PersonalPronounWords = `I you he she it we they`
_PossessivePronounWords = `my our your his her its their`
_IndependentPossessivePronounWords = `mine ours yours his hers its
theirs`
_ObjectPronounWords = `me you her him it us them`
_IndefinitePronounWords = `all another any anybody anyone anything
both each either everybody everyone everything few many most
neither nobody none nothing one other others several some somebody
someone something such`
_ReflexivePronounWords = `myself yourself herself himself itself
ourselves yourselves themselves`
_DemonstrativePronounWords = `such that these this those`
_InterrogativePronounWords = `what whatever which whichever who
whoever whom whomever whose`
_RelativePronounWords = `as that what whatever which whichever
who whoever whom whomever whose`
_ArchaicPronounWords = `thou thee thy thine ye`
_AdjectiveWords = `adorable adventurous aggressive agreeable
alert alive amused angry annoyed annoying anxious arrogant ashamed
attractive average awful bad beautiful better bewildered black bloody
blue blue-eyed blushing bored brainy brave breakable bright busy calm
careful cautious charming cheerful clean clear clever cloudy clumsy
colorful combative comfortable concerned condemned confused
cooperative courageous crazy creepy crowded cruel curious cute
dangerous dark dead defeated defiant delightful depressed determined
different difficult disgusted distinct disturbed dizzy doubtful drab
dull eager easy elated elegant embarrassed enchanting encouraging
energetic enthusiastic envious evil excited expensive exuberant fair
faithful famous fancy fantastic fierce filthy fine foolish fragile
frail frantic friendly frightened funny gentle gifted glamorous
gleaming glorious good gorgeous graceful grieving grotesque grumpy
handsome happy healthy helpful helpless hilarious homeless homely
horrible hungry hurt ill important impossible inexpensive innocent
inquisitive itchy jealous jittery jolly joyous kind light lively
lonely long lovely lucky magnificent misty modern motionless muddy
mushy mysterious nasty naughty nervous nice nutty obedient obnoxious
odd old-fashioned open outrageous outstanding panicky perfect plain
pleasant poised poor powerful precious prickly proud putrid puzzled
quaint real relieved repulsive rich scary selfish shiny shy silly
sleepy smiling smoggy sore sparkling splendid spotless stormy strange
stupid successful super talented tame tasty tender tense terrible
thankful thoughtful thoughtless tired tough troubled ugliest ugly
uninterested unsightly unusual upset uptight vast victorious vivacious
wandering weary wicked wide-eyed wild witty worried worrisome wrong
zany zealous`
_QuestionWords = `how what when where which who whom whose why`
_ConjunctionWords = `and that but or as if when than because
while where after so though since until whether before although
nor like once unless now except`
) | words.go | 0.651909 | 0.424173 | words.go | starcoder |
package fake
import (
"fmt"
"strconv"
"strings"
)
// OsdLsOutput returns JSON output from 'ceph osd ls' that can be used for unit tests. It
// returns output for a Ceph cluster with the number of OSDs given as input starting with ID 0.
// example: numOSDs = 5 => return: "[0,1,2,3,4]"
func OsdLsOutput(numOSDs int) string {
	// Render every OSD ID (0..numOSDs-1) and join into a JSON array.
	ids := make([]string, numOSDs)
	for i := range ids {
		ids[i] = strconv.Itoa(i)
	}
	return fmt.Sprintf("[%s]", strings.Join(ids, ","))
}
// OsdTreeOutput returns JSON output from 'ceph osd tree' that can be used for unit tests.
// It returns output for a Ceph cluster with the given number of nodes and the given number of OSDs
// per node with no complex configuration. This should work even for 0 nodes.
// OSD IDs are striped across nodes: node n hosts OSDs n, n+numNodes, n+2*numNodes, ...
// example: OsdTreeOutput(3, 3) // returns JSON output for the Ceph cluster below
// node0: node1: node2:
// - osd0 - osd1 - osd2
// - osd3 - osd4 - osd5
// - osd6 - osd7 - osd8
func OsdTreeOutput(numNodes, numOSDsPerNode int) string {
	// JSON output taken from Ceph Pacific
	rootFormat := ` {
    "id": -1,
    "name": "default",
    "type": "root",
    "type_id": 11,
    "children": [%s]
  }` // format: negative node IDs as comma-delimited string (e.g., "-3,-4,-5")
	nodeFormat := ` {
    "id": %d,
    "name": "%s",
    "type": "host",
    "type_id": 1,
    "pool_weights": {},
    "children": [%s]
  }` // format: negative node ID, node name, OSD IDs as comma-delimited string (e.g., "0,3,6")
	osdFormat := ` {
    "id": %d,
    "device_class": "hdd",
    "name": "osd.%d",
    "type": "osd",
    "type_id": 0,
    "crush_weight": 0.009796142578125,
    "depth": 2,
    "pool_weights": {},
    "exists": 1,
    "status": "up",
    "reweight": 1,
    "primary_affinity": 1
  }` // format: OSD ID, OSD ID
	wrapperFormat := `{
  "nodes": [
%s
  ],
  "stray": []
}` // format: <rendered root JSON, rendered nodes, rendered osds - with commas in between>
	nodesJSON := []string{}
	osdsJSON := []string{}
	nodes := []string{}
	for n := 0; n < numNodes; n++ {
		osds := []string{}
		nodeName := fmt.Sprintf("node%d", n)
		nodeID := -3 - n
		nodes = append(nodes, strconv.Itoa(nodeID))
		for i := 0; i < numOSDsPerNode; i++ {
			// Bug fix: the stride was hard-coded as 3 (osdID := n + 3*i),
			// which only produced correct, collision-free IDs for exactly
			// 3 nodes. Stripe by the actual node count instead.
			osdID := n + numNodes*i
			osds = append(osds, strconv.Itoa(osdID))
			osdsJSON = append(osdsJSON, fmt.Sprintf(osdFormat, osdID, osdID))
		}
		nodesJSON = append(nodesJSON, fmt.Sprintf(nodeFormat, nodeID, nodeName, strings.Join(osds, ",")))
	}
	rootJSON := fmt.Sprintf(rootFormat, strings.Join(nodes, ","))
	fullJSON := append(append([]string{rootJSON}, nodesJSON...), osdsJSON...)
	rendered := fmt.Sprintf(wrapperFormat, strings.Join(fullJSON, ",\n"))
	return rendered
}
// OsdOkToStopOutput returns JSON output from 'ceph osd ok-to-stop' that can be used for unit tests.
// queriedID is the ID that was passed to the 'osd ok-to-stop <id> [--max=N]' command; it is echoed
// back in NOT ok-to-stop results.
// An empty returnOsdIds yields a NOT ok-to-stop result; otherwise an ok-to-stop result is rendered.
// returnOsdIds should include queriedID if the result should be successful.
// useCephPacificPlusOutput selects output for Ceph Pacific (v16) and above; Octopus (v15) and below
// write nothing to stdout and signal success/failure via the return code only.
func OsdOkToStopOutput(queriedID int, returnOsdIds []int, useCephPacificPlusOutput bool) string {
	// Ceph Octopus and below don't return anything on stdout, only success/failure via retcode.
	if !useCephPacificPlusOutput {
		return ""
	}
	// Pacific+, NOT ok-to-stop: the queried ID is reported back.
	if len(returnOsdIds) == 0 {
		notOkTemplate := `{"ok_to_stop":false,"osds":[%d],"num_ok_pgs":161,"num_not_ok_pgs":50,"bad_become_inactive":["1.0","1.3","1.a"],"ok_become_degraded":["1.2","1.4","1.5"]}`
		return fmt.Sprintf(notOkTemplate, queriedID)
	}
	// Pacific+, ok-to-stop: render the comma-delimited OSD ID list.
	ids := make([]string, 0, len(returnOsdIds))
	for _, id := range returnOsdIds {
		ids = append(ids, strconv.Itoa(id))
	}
	okTemplate := `{"ok_to_stop":true,"osds":[%s],"num_ok_pgs":132,"num_not_ok_pgs":0,"ok_become_degraded":["1.0","1.2","1.3"]}`
	return fmt.Sprintf(okTemplate, strings.Join(ids, ","))
}
// OSDDeviceClassOutput returns JSON output from 'ceph osd crush get-device-class' that can be used for unit tests.
// osdId is the OSD ID to look up in the CRUSH map. If osdId is empty, a fake CLI error string is
// returned instead of JSON, simulating a failed 'ceph' invocation.
func OSDDeviceClassOutput(osdId string) string {
	if osdId == "" {
		// Simulated failure path: callers can assert on this fixed error text.
		return "ERR: fake error from ceph cli"
	}
	// This fake always reports the device class as "hdd". osdId is spliced in
	// unquoted, matching the numeric "osd" field of the real CLI output.
	okTemplate := `[{"osd":%s,"device_class":"hdd"}]`
	return fmt.Sprintf(okTemplate, osdId)
} | pkg/daemon/ceph/client/fake/osd.go | 0.694821 | 0.404272 | osd.go | starcoder |
package aeadcrypter
import (
"crypto/cipher"
"fmt"
)
const (
// TagSize is the tag size in bytes for AES-128-GCM-SHA256,
// AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256.
TagSize = 16
// NonceSize is the size of the nonce in number of bytes for
// AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256.
NonceSize = 12
// SHA256DigestSize is the digest size of sha256 in bytes.
SHA256DigestSize = 32
// SHA384DigestSize is the digest size of sha384 in bytes.
SHA384DigestSize = 48
)
// sliceForAppend takes a slice and a requested number of bytes. It returns a
// slice with the contents of the given slice followed by that many bytes and a
// second slice that aliases into it and contains only the extra bytes. If the
// original slice has sufficient capacity then no allocation is performed.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
if total := len(in) + n; cap(in) >= total {
head = in[:total]
} else {
head = make([]byte, total)
copy(head, in)
}
tail = head[len(in):]
return head, tail
}
// encrypt is the encryption function for an AEAD crypter. aead determines
// the type of AEAD crypter. dst can contain bytes at the beginning of the
// ciphertext that will not be encrypted but will be authenticated. If dst has
// enough capacity to hold these bytes, the ciphertext and the tag, no
// allocation and copy operations will be performed. dst and plaintext may
// fully overlap or not at all.
func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) {
if len(nonce) != NonceSize {
return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce))
}
// If we need to allocate an output buffer, we want to include space for
// the tag to avoid forcing the caller to reallocate as well.
dlen := len(dst)
dst, out := sliceForAppend(dst, len(plaintext)+TagSize)
data := out[:len(plaintext)]
copy(data, plaintext) // data may fully overlap plaintext
// Seal appends the ciphertext and the tag to its first argument and
// returns the updated slice. However, sliceForAppend above ensures that
// dst has enough capacity to avoid a reallocation and copy due to the
// append.
dst = aead.Seal(dst[:dlen], nonce, data, aad)
return dst, nil
}
// decrypt is the decryption function for an AEAD crypter, where aead determines
// the type of AEAD crypter, and dst the destination bytes for the decrypted
// plaintext. The dst buffer may fully overlap with plaintext or not at all.
// The nonce must be exactly NonceSize bytes; any failure from Open (including
// authentication failure) is returned as a wrapped error.
func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) {
	if len(nonce) != NonceSize {
		return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce))
	}
	// If dst is equal to ciphertext[:0], ciphertext storage is reused.
	plaintext, err := aead.Open(dst, nonce, ciphertext, aad)
	if err != nil {
		return nil, fmt.Errorf("message auth failed: %v", err)
	}
	return plaintext, nil
} | internal/record/internal/aeadcrypter/common.go | 0.702836 | 0.416619 | common.go | starcoder |
package main
import (
"strconv"
"strings"
)
// Compatible reports whether a value of type a can be used where type b is
// expected. Equality (checked in both directions) always implies
// compatibility. Beyond that: an untyped int literal fits float literals and
// any concrete numeric type; an untyped float literal mixes with either kind
// of literal; and a concrete numeric type accepts an int literal. Note the
// relation is otherwise directional (a -> b), not symmetric.
func Compatible(a, b Type) bool {
	if a.Equals(b) || b.Equals(a) {
		return true
	}
	switch a.(type) {
	case IntLitType:
		// Untyped int literal: usable anywhere numeric.
		switch b.(type) {
		case IntLitType, FloatLitType, NumericType:
			return true
		}
	case FloatLitType:
		// Untyped float literal: compatible with literals only, not with an
		// arbitrary concrete numeric type.
		switch b.(type) {
		case IntLitType, FloatLitType:
			return true
		}
	case NumericType:
		// Concrete numeric type accepts an untyped int literal.
		switch b.(type) {
		case IntLitType:
			return true
		}
	}
	return false
}
// Type is implemented by every type in the language, both abstract
// (untyped literals, namespaces) and concrete (storable in memory).
type Type interface {
	// Equals reports whether other denotes the same type.
	Equals(other Type) bool
	// IsConcrete reports whether the type can be stored in memory as-is.
	IsConcrete() bool
	// Concrete returns the concrete type a value of this type defaults to.
	Concrete() ConcreteType
	// Format renders the type for display at the given indent level.
	Format(indent int) string
}
// ConcreteType is a Type that has a memory representation and can be lowered
// to the IR (QBE) type system.
type ConcreteType interface {
	Type
	// Metrics returns the size/alignment of the type.
	Metrics() TypeMetrics
	// The QBE name of the base, extended or aggregate type corresponding to this type
	IRTypeName(c *Compiler) string
	// The QBE name of the base type closest to this type, if any
	IRBaseTypeName() byte
	// Generate code to zero a value of the type
	GenZero(c *Compiler, loc Operand)
}
// TypeMetrics stores the size and alignment of a type. If a type's metrics are zero, a value of that type cannot be created.
type TypeMetrics struct {
	Size, Align int
}
// NumericType is a ConcreteType that additionally knows its signedness
// (implemented by PrimitiveType and PointerType).
type NumericType interface {
	ConcreteType
	Signed() bool
}
// Namespace is a pseudo-type holding named variables and types. It is never a
// value; all value-oriented Type methods panic.
type Namespace struct {
	Name string
	Vars map[string]Type
	Typs map[string]*ConcreteType
}
func (ns Namespace) IsConcrete() bool { return false }
func (a Namespace) Equals(other Type) bool { panic("Namespace used as value") }
func (ns Namespace) Concrete() ConcreteType { panic("Namespace used as value") }
func (ns Namespace) Format(indent int) string { panic("Namespace used as value") }
// The type of integral numeric literals
type IntLitType struct{}
func (_ IntLitType) Equals(other Type) bool {
	_, ok := other.(IntLitType)
	return ok
}
func (_ IntLitType) IsConcrete() bool {
	return false
}
// Concrete: an untyped int literal defaults to I64.
func (_ IntLitType) Concrete() ConcreteType {
	return TypeI64
}
func (_ IntLitType) Format(indent int) string {
	return "integer literal"
}
// The type of decimal numeric literals
type FloatLitType struct{}
func (_ FloatLitType) Equals(other Type) bool {
	_, ok := other.(FloatLitType)
	return ok
}
func (_ FloatLitType) IsConcrete() bool {
	return false
}
// Concrete: an untyped float literal defaults to F64.
func (_ FloatLitType) Concrete() ConcreteType {
	return TypeF64
}
func (_ FloatLitType) Format(indent int) string {
	return "float literal"
}
// PrimitiveType enumerates the built-in scalar types (see the const block
// below); the underlying int is the enum value, so equality is plain ==.
type PrimitiveType int
func (a PrimitiveType) Equals(other Type) bool {
	b, ok := other.(PrimitiveType)
	return ok && a == b
}
// Signed reports the signedness of the primitive. Floats are treated as
// signed; Bool is treated as unsigned.
func (p PrimitiveType) Signed() bool {
	switch p {
	case TypeI64, TypeI32, TypeI16, TypeI8:
		return true
	case TypeU64, TypeU32, TypeU16, TypeU8:
		return false
	case TypeF64, TypeF32:
		return true
	case TypeBool:
		return false
	}
	panic("Invalid primitive type")
}
func (t PrimitiveType) IsConcrete() bool {
	return true
}
func (t PrimitiveType) Concrete() ConcreteType {
	return t
}
// Metrics: primitives are naturally aligned (alignment equals size).
func (p PrimitiveType) Metrics() TypeMetrics {
	switch p {
	case TypeI64, TypeU64:
		return TypeMetrics{8, 8}
	case TypeI32, TypeU32:
		return TypeMetrics{4, 4}
	case TypeI16, TypeU16:
		return TypeMetrics{2, 2}
	case TypeI8, TypeU8, TypeBool:
		return TypeMetrics{1, 1}
	case TypeF64:
		return TypeMetrics{8, 8}
	case TypeF32:
		return TypeMetrics{4, 4}
	}
	panic("Invalid primitive type")
}
func (p PrimitiveType) Format(indent int) string {
	switch p {
	case TypeI64:
		return "I64"
	case TypeI32:
		return "I32"
	case TypeI16:
		return "I16"
	case TypeI8:
		return "I8"
	case TypeU64:
		return "U64"
	case TypeU32:
		return "U32"
	case TypeU16:
		return "U16"
	case TypeU8:
		return "U8"
	case TypeF64:
		return "F64"
	case TypeF32:
		return "F32"
	case TypeBool:
		return "Bool"
	}
	panic("Invalid primitive type")
}
// IRTypeName maps the primitive onto QBE's extended type names:
// l/w/h/b for 64/32/16/8-bit integers, d/s for double/single floats.
func (p PrimitiveType) IRTypeName(c *Compiler) string {
	switch p {
	case TypeI64, TypeU64:
		return "l"
	case TypeI32, TypeU32:
		return "w"
	case TypeI16, TypeU16:
		return "h"
	case TypeI8, TypeU8, TypeBool:
		return "b"
	case TypeF64:
		return "d"
	case TypeF32:
		return "s"
	}
	panic("Invalid primitive type")
}
// IRBaseTypeName maps onto QBE's base types only (sub-word integers widen
// to 'w', since QBE computes on words).
func (p PrimitiveType) IRBaseTypeName() byte {
	switch p {
	case TypeI64, TypeU64:
		return 'l'
	case TypeI32, TypeU32, TypeI16, TypeU16, TypeI8, TypeU8, TypeBool:
		return 'w'
	case TypeF64:
		return 'd'
	case TypeF32:
		return 's'
	}
	panic("Invalid primitive type")
}
// The primitive type enum values. The iota order matters: it is the value of
// each PrimitiveType constant used throughout the switches above.
const (
	TypeI64 PrimitiveType = iota
	TypeI32
	TypeI16
	TypeI8
	TypeU64
	TypeU32
	TypeU16
	TypeU8
	TypeF64
	TypeF32
	TypeBool
)
// PointerType is a pointer to a concrete type. A nil To denotes a generic
// pointer, which compares equal to any pointer type.
type PointerType struct {
	To ConcreteType
}
func (a PointerType) Equals(other Type) bool {
	// A named pointer type is unwrapped so it can match a generic pointer.
	if b, ok := other.(NamedType); ok {
		if b, ok := b.ConcreteType.(PointerType); ok {
			return a.To == nil || b.To == nil
		}
	}
	b, ok := other.(PointerType)
	// nil To means generic pointer, which is compatible with every pointer type
	return ok && (a.To == nil || b.To == nil || a.To.Equals(b.To))
}
func (_ PointerType) Signed() bool {
	return false
}
func (_ PointerType) IsConcrete() bool {
	return true
}
func (p PointerType) Concrete() ConcreteType {
	return p
}
// Pointers are 8 bytes, 8-aligned (64-bit target).
func (_ PointerType) Metrics() TypeMetrics {
	return TypeMetrics{8, 8}
}
// Format renders as "[T]"; a generic pointer renders as "[]".
func (p PointerType) Format(indent int) string {
	var t string
	if p.To != nil {
		t = p.To.Format(indent)
	}
	return "[" + t + "]"
}
func (_ PointerType) IRTypeName(c *Compiler) string {
	return "l"
}
func (_ PointerType) IRBaseTypeName() byte {
	return 'l'
}
// ArrayType is a fixed-length array of N elements of type Ty. It is not a
// first-class value type: Equals panics if an array is used as one.
type ArrayType struct {
	Ty ConcreteType
	N int
}
func (_ ArrayType) Equals(_ Type) bool { panic("Use of array type") }
func (_ ArrayType) IsConcrete() bool { return true }
func (a ArrayType) Concrete() ConcreteType { return a }
func (a ArrayType) IRBaseTypeName() byte { return 0 }
// ptr returns the pointer type an array decays to.
func (a ArrayType) ptr() PointerType {
	return PointerType{a.Ty}
}
// Metrics: N times the element size; alignment is the element alignment.
func (a ArrayType) Metrics() TypeMetrics {
	m := a.Ty.Metrics()
	m.Size *= a.N
	return m
}
func (a ArrayType) Format(indent int) string {
	return "[" + a.Ty.Format(indent) + " " + strconv.Itoa(a.N) + "]"
}
// IRTypeName emits an aggregate type of N repetitions of the element type.
func (a ArrayType) IRTypeName(c *Compiler) string {
	return c.CompositeType(CompositeLayout{{a.Ty.IRTypeName(c), a.N}})
}
// FuncType describes a function signature: parameter types, an optional
// return type (nil means no return value), and whether it is C-varargs.
type FuncType struct {
	Var bool // true if the function uses C-style varags
	Param []ConcreteType
	Ret ConcreteType
}
// Equals reports whether two function types are structurally equal: same
// return type (nil meaning "no return value") and pairwise-equal parameters.
//
// NOTE(review): the C-varargs flag (Var) is not compared, so a variadic and a
// non-variadic signature with identical parameters are considered equal —
// confirm this is intentional before relying on it.
func (a FuncType) Equals(other Type) bool {
	b, ok := other.(FuncType)
	if !ok {
		return false
	}
	// Compare return types, treating a nil return type as equal only to
	// another nil return type. The previous expression called a.Ret.Equals
	// when exactly one side had no return type, which panicked on the nil
	// interface instead of returning false.
	if a.Ret != b.Ret {
		if a.Ret == nil || b.Ret == nil {
			return false
		}
		if !a.Ret.Equals(b.Ret) {
			return false
		}
	}
	if len(a.Param) != len(b.Param) {
		return false
	}
	for i := range a.Param {
		if !a.Param[i].Equals(b.Param[i]) {
			return false
		}
	}
	return true
}
func (_ FuncType) IsConcrete() bool {
	return true
}
func (f FuncType) Concrete() ConcreteType {
	return f
}
// Metrics is zero: function values cannot be created (only called or
// referenced by name).
func (f FuncType) Metrics() TypeMetrics {
	return TypeMetrics{}
}
// Format renders as "fn(P1, P2) R", omitting " R" when there is no return.
func (f FuncType) Format(indent int) string {
	params := make([]string, len(f.Param))
	for i, param := range f.Param {
		params[i] = param.Format(indent)
	}
	ret := ""
	if f.Ret != nil {
		ret = " " + f.Ret.Format(indent)
	}
	return "fn(" + strings.Join(params, ", ") + ")" + ret
}
// Function types have no IR type name of their own.
func (_ FuncType) IRTypeName(c *Compiler) string {
	return ""
}
func (_ FuncType) IRBaseTypeName() byte {
	return 0
}
// NamedType wraps a concrete type with a user-declared name. Equality is by
// name, except that named pointer types delegate to PointerType.Equals so the
// generic-pointer wildcard still applies.
type NamedType struct {
	ConcreteType
	Name string
}
func (a NamedType) Equals(other Type) bool {
	if ap, ok := a.ConcreteType.(PointerType); ok {
		return ap.Equals(other)
	}
	b, ok := other.(NamedType)
	return ok && a.Name == b.Name
}
func (a NamedType) Format(indent int) string {
	return a.Name
}
// Field is a single named member of a struct or union.
type Field struct {
	Name string
	Ty ConcreteType
}
// compositeType is the shared field list underlying structs and unions.
type compositeType []Field
type StructType struct{ compositeType }
type UnionType struct{ compositeType }
// CompositeType is implemented by StructType and UnionType: field lookup by
// name plus the byte offset of a field.
type CompositeType interface {
	Field(name string) ConcreteType
	Offset(name string) int
}
// equals compares field lists pairwise by type only; field names are ignored.
func (a compositeType) equals(b compositeType) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !a[i].Ty.Equals(b[i].Ty) {
			return false
		}
	}
	return true
}
func (comp compositeType) IsConcrete() bool {
	return true
}
// format renders the shared "{ name Type ... }" body used by both struct and
// union Format methods.
func (comp compositeType) format(indent int) string {
	b := &strings.Builder{}
	b.WriteString("{\n")
	for _, field := range comp {
		b.WriteByte('\t')
		b.WriteString(field.Name)
		b.WriteByte(' ')
		b.WriteString(field.Ty.Format(indent))
		b.WriteByte('\n')
	}
	b.WriteByte('}')
	return b.String()
}
func (comp compositeType) IRBaseTypeName() byte {
	return 0
}
// Field returns the type of the named field, or nil if there is none.
func (comp compositeType) Field(name string) ConcreteType {
	for _, field := range comp {
		if field.Name == name {
			return field.Ty
		}
	}
	return nil
}
func (a StructType) Equals(other Type) bool {
	b, ok := other.(StructType)
	return ok && a.equals(b.compositeType)
}
func (s StructType) Concrete() ConcreteType {
	return s
}
func (s StructType) Metrics() (m TypeMetrics) {
	m.Size, m.Align = s.metrics("")
	return
}
func (s StructType) Format(indent int) string {
	return "struct " + s.format(indent)
}
func (s StructType) IRTypeName(c *Compiler) string {
	return c.CompositeType(s.layout(c))
}
// layout builds the QBE aggregate layout, run-length-encoding consecutive
// fields that share the same IR type name.
func (s StructType) layout(c *Compiler) CompositeLayout {
	var ent CompositeEntry
	var layout CompositeLayout
	for _, field := range s.compositeType {
		ty := field.Ty.IRTypeName(c)
		if ent.N > 0 && ent.Ty != ty {
			layout = append(layout, ent)
			ent.N = 0
		}
		ent.Ty = ty
		ent.N++
	}
	if ent.N > 0 {
		layout = append(layout, ent)
	}
	return layout
}
// Offset returns the byte offset of the named field, or -1 when name is
// empty (or, via metrics, when the field does not exist).
func (s StructType) Offset(name string) int {
	if name == "" {
		return -1
	} else {
		off, _ := s.metrics(name)
		return off
	}
}
func (s StructType) metrics(name string) (off int, align int) {
	// This is the internal function behind both Metrics and Offset
	// name == "" -> Metrics
	// name != "" -> Offset
	for _, field := range s.compositeType {
		m := field.Ty.Metrics()
		// -(-off & -m.Align) rounds off up to the next multiple of
		// m.Align (valid because alignments are powers of two).
		off = -(-off & -m.Align) // Align upwards
		if field.Name == name {
			return
		}
		off += m.Size
		if m.Align > align {
			align = m.Align
		}
	}
	if name == "" {
		off = -(-off & -align) // Align struct size to max alignment for arrays
		return
	} else {
		return -1, -1
	}
}
func (a UnionType) Equals(other Type) bool {
	b, ok := other.(UnionType)
	return ok && a.equals(b.compositeType)
}
func (u UnionType) Concrete() ConcreteType {
	return u
}
// Metrics: a union takes the metrics of its largest member.
func (u UnionType) Metrics() TypeMetrics {
	return u.largest().Ty.Metrics()
}
func (u UnionType) Format(indent int) string {
	return "union " + u.format(indent)
}
func (u UnionType) IRTypeName(c *Compiler) string {
	return c.CompositeType(u.layout(c))
}
// layout emits a single-entry aggregate: one instance of the largest member.
func (u UnionType) layout(c *Compiler) CompositeLayout {
	return CompositeLayout{{u.largest().Ty.IRTypeName(c), 1}}
}
// largest returns the member with the greatest size (zero Field if empty).
func (u UnionType) largest() (f Field) {
	fs := 0
	for _, field := range u.compositeType {
		fsiz := field.Ty.Metrics().Size
		if fsiz > fs {
			f = field
			fs = fsiz
		}
	}
	return
}
// Offset: every union member starts at offset 0.
func (_ UnionType) Offset(name string) int {
	return 0
} | types.go | 0.654453 | 0.472318 | types.go | starcoder |
package game
import "tipsy/tools"
const (
	//BoardSize the size of the board
	BoardSize = 7
)
//Board : the board of tipsy game, represented as a graph: Nodes are the
//playable cells (plus exits) and Edges are legal one-step moves between them.
type Board struct {
	Nodes []Node
	Edges []Edge
}
//NewBoard initialize an empty board with obstacles and exits
func NewBoard() Board {
	var board Board
	initNodes(&board)
	initEdges(&board)
	return board
}
//initNodes populates board.Nodes with every cell of the 7x7 grid that is not
//an obstacle, then appends the four exit nodes. Exit coordinates lie just
//outside the 0..6 grid (-1 or 7 on one axis) and are flagged with Exit: true.
func initNodes(board *Board) {
	// Fixed obstacle cells of the tipsy board.
	obstacles := [][2]int{
		{0, 3}, {1, 1}, {1, 5}, {2, 2},
		{2, 4}, {3, 0}, {3, 6}, {4, 2},
		{4, 4}, {5, 1}, {5, 5}, {6, 3}}
	exits := [][2]int{
		{1, -1}, {7, 1}, {-1, 5}, {5, 7}}
	for i := 0; i < 7; i++ {
		for j := 0; j < 7; j++ {
			if !tools.ArrayContains(obstacles, []int{i, j}) {
				(*board).Nodes = append((*board).Nodes, Node{Position: [2]int{i, j}})
			}
		}
	}
	for _, exit := range exits {
		(*board).Nodes = append((*board).Nodes, Node{Position: [2]int{exit[0], exit[1]}, Exit: true})
	}
}
//initEdges links every node to its existing four-neighbours with directed
//edges labelled LEFT/RIGHT/UP/DOWN. Neighbours that are obstacles (absent
//from board.Nodes) simply get no edge.
func initEdges(board *Board) {
	for _, node := range board.Nodes {
		var rightPosition = [2]int{node.Position[0] + 1, node.Position[1]}
		var leftPosition = [2]int{node.Position[0] - 1, node.Position[1]}
		var upPosition = [2]int{node.Position[0], node.Position[1] - 1}
		var downPosition = [2]int{node.Position[0], node.Position[1] + 1}
		addEdge(node, leftPosition, LEFT, board)
		addEdge(node, rightPosition, RIGHT, board)
		addEdge(node, upPosition, UP, board)
		addEdge(node, downPosition, DOWN, board)
	}
}
//addEdge appends a directed edge from `from` to the node at position `to`
//(labelled with the direction `value`), but only when that node exists.
func addEdge(from Node, to [2]int, value string, board *Board) {
	if Contains(to, board) {
		var to = getNode(to, board)
		(*board).Edges = append(board.Edges, Edge{From: from, To: to, Value: value})
	}
}
//getNode returns the node at the given position, or the zero Node when no
//node occupies that position.
func getNode(position [2]int, board *Board) Node {
	for _, node := range board.Nodes {
		if node.Position[0] == position[0] && node.Position[1] == position[1] {
			return node
		}
	}
	return Node{}
}
//getNodeTo follows the outgoing edge of `node` labelled with `direction` and
//returns its destination, or the zero Node when no such edge exists.
func getNodeTo(node Node, board *Board, direction string) Node {
	for _, edge := range board.Edges {
		if edge.From == node && edge.Value == direction {
			return edge.To
		}
	}
	return Node{}
}
// Contains return true if board contains a Node at a given position, and false otherwise.
func Contains(position [2]int, board *Board) bool {
	// The loop variable was previously named "Node", shadowing the Node type
	// inside the loop body; renamed to avoid the shadowing.
	for _, n := range board.Nodes {
		if n.Position[0] == position[0] && n.Position[1] == position[1] {
			return true
		}
	}
	return false
}
//GetNeighbor returns the neighbour of the cell at `position` in the given
//direction, or the zero Node if the move is not possible (wall/obstacle).
func GetNeighbor(position [2]int, board *Board, direction string) Node {
	puckNode := getNode(position, board)
	return getNodeTo(puckNode, board, direction)
}
//isAPuck reports whether a puck currently occupies the given node.
//gamePucks is keyed by the string form of a position (see tools.GetPositionFromKey).
func isAPuck(node Node, gamePucks map[string]Puck) bool {
	for key := range gamePucks {
		if tools.GetPositionFromKey(key) == node.Position {
			return true
		}
	}
	return false
}
//getPuck returns the puck sitting on the given node. It panics when the node
//is empty, so callers must check isAPuck first.
func getPuck(node Node, gamePucks map[string]Puck) Puck {
	for key, puck := range gamePucks {
		if tools.GetPositionFromKey(key) == node.Position {
			return puck
		}
	}
	panic("No Puck on this node")
}
//isAWall reports whether the node does not exist on the board (the zero Node
//returned by getNode means "no cell here").
func isAWall(node Node, board *Board) bool {
	return (Node{}) == getNode(node.Position, board)
}
//getNextFreeCell walks recursively from `position` in `direction` and returns
//the last free cell before a puck or a wall (possibly `position` itself).
func getNextFreeCell(position [2]int, gamePucks map[string]Puck, board *Board, direction string) [2]int {
	neighbor := GetNeighbor(position, board, direction)
	if isAPuck(neighbor, gamePucks) || isAWall(neighbor, board) {
		return position
	}
	return getNextFreeCell(neighbor.Position, gamePucks, board, direction)
}
//isExit reports whether the cell at `position` is an exit node.
func isExit(position [2]int, board *Board) bool {
	node := getNode(position, board)
	return node.Exit
}
//movePuckTo slides the puck at puckKey in the given direction, pushing any
//contiguous chain of pucks ahead of it. It returns two maps keyed by cell
//position: the pucks that ended on a new cell, and the pucks that slid onto
//an exit (fell off the board). inputGamePucks is not mutated; a clone tracks
//intermediate positions while the chain is resolved.
func movePuckTo(puckKey string, currentPuck Puck,
	inputGamePucks map[string]Puck, board *Board, direction string) (map[string]Puck, map[string]Puck) {
	gamePucks := CloneMap(inputGamePucks)
	// Collect the unbroken run of occupied cells in front of the moving puck.
	neighbors := GetNeighbor(tools.GetPositionFromKey(puckKey), board, direction)
	var nodesWithPuck []Node
	for isAPuck(neighbors, gamePucks) {
		nodesWithPuck = append(nodesWithPuck, neighbors)
		neighbors = GetNeighbor(neighbors.Position, board, direction)
	}
	pucks := make(map[string]Puck)
	fallenPucks := make(map[string]Puck)
	// Move the farthest puck first so each nearer puck can slide into the
	// space freed ahead of it.
	for i := len(nodesWithPuck) - 1; i >= 0; i-- {
		nodeWithPuck := nodesWithPuck[i]
		nextFreeCell := getNextFreeCell(nodeWithPuck.Position, gamePucks, board, direction)
		puck := getPuck(nodeWithPuck, gamePucks)
		nextFreeCellKey := tools.GetKeyFromPosition(nextFreeCell)
		if nextFreeCell != nodeWithPuck.Position {
			if isExit(nextFreeCell, board) {
				// Fallen pucks are keyed by the cell they fell FROM.
				fallenPucks[tools.GetKeyFromPosition(nodeWithPuck.Position)] = puck
			} else {
				pucks[nextFreeCellKey] = puck
				gamePucks[nextFreeCellKey] = puck
			}
			delete(gamePucks, tools.GetKeyFromPosition(nodeWithPuck.Position))
		}
	}
	// Finally move the pushed puck itself into whatever space remains.
	nextFreeCell := getNextFreeCell(tools.GetPositionFromKey(puckKey), gamePucks, board, direction)
	if isExit(nextFreeCell, board) {
		fallenPucks[puckKey] = currentPuck
	} else {
		pucks[tools.GetKeyFromPosition(nextFreeCell)] = currentPuck
	}
	return pucks, fallenPucks
}
//Tilt the game in a given direction: every puck slides as far as it can go.
//Pucks that slide onto an exit cell are appended to FallenPucks (with
//Position set to the key of the cell they fell from) and removed from the
//active pucks. The input game is not mutated; a clone is returned.
func Tilt(currentGame Game, board *Board, direction string) Game {
	resultGame := CloneGame(currentGame)
	// Each puck is moved against the ORIGINAL layout (resultGame.Pucks is not
	// mutated inside the loop); moved positions accumulate in gamePucks and
	// are swapped in afterwards.
	gamePucks := make(map[string]Puck)
	for key, puck := range resultGame.Pucks {
		movedPucks, fallenPucks := movePuckTo(key, puck, resultGame.Pucks, board, direction)
		for movedKey, movedPuck := range movedPucks {
			gamePucks[movedKey] = movedPuck
		}
		for fallenKey, fallenPuck := range fallenPucks {
			fallenPuck.Position = fallenKey
			resultGame.FallenPucks = append(resultGame.FallenPucks, fallenPuck)
		}
	}
	resultGame.Pucks = gamePucks
	// The previous version also iterated over a local "gameFallenPucks" map
	// that was never written to; that dead code has been removed.
	return resultGame
}
//CloneMap returns a shallow copy of the given puck map (Puck values are
//copied; a new map is allocated so the original is never mutated).
func CloneMap(original map[string]Puck) map[string]Puck {
	target := make(map[string]Puck)
	for key, value := range original {
		target[key] = value
	}
	return target
} | src/tipsy/game/board.go | 0.579638 | 0.683538 | board.go | starcoder |
package common
// MinI returns the smaller of two ints.
func MinI(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// MaxI returns the larger of two ints.
func MaxI(a, b int) int {
	if b > a {
		return b
	}
	return a
}

// AbsI returns the absolute value of an int.
func AbsI(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
// DecimalDigits returns a slice of digits representing the different decimal
// positions from most significant to least significant digit
// DecimalDigits(123456) == [1, 2, 3, 4, 5, 6]
func DecimalDigits(value int) []int {
// Turns out doing it in reverse first is easier
digits := DecimalDigitsReverse(value)
// Need to reverse the digits now
reversed := make([]int, len(digits))
for i, digit := range digits {
reversed[len(digits)-i-1] = digit
}
return reversed
}
// DecimalDigitsReverse returns a slice of digits representing the different
// decimal positions from least significant to most significant digit
// DecimalDigitsReverse(123456) == [6, 5, 4, 3, 2, 1]
func DecimalDigitsReverse(value int) []int {
digits := make([]int, 0)
for value > 0 {
digits = append(digits, int(value)%10)
value = value / 10
}
return digits
}
// DecimalDigitsStr converts a string of digits to an int slice of digits,
// preserving order. Any character outside '0'..'9' triggers PanicIf with the
// message below (PanicIf is a project helper; presumably it panics when the
// condition is true — confirm against the common package).
func DecimalDigitsStr(str string) []int {
	digits := make([]int, 0)
	for _, char := range str {
		PanicIf(char < '0' || char > '9', "Character string should only contain integers")
		// '0'..'9' maps to 0..9 by subtracting the rune '0'.
		digits = append(digits, int(char-'0'))
	}
	return digits
}
// GCD finds the greatest common divisor using the Euclidean algorithm.
func GCD(a, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

// LCM finds the Least Common Multiple of many integers.
func LCM(a, b int, ints ...int) int {
	// For a, b ∈ ℕ, a*b = LCM(a, b)*GCD(a, b).
	result := a * b / GCD(a, b)
	// Fold any further arguments in pairwise.
	for _, v := range ints {
		result = LCM(result, v)
	}
	return result
}
// CeilDiv divides the numerator by the denominator and returns the ceiling.
// (Go's integer division truncates toward zero, so bumping the quotient only
// when the remainder is strictly positive yields the ceiling for negative
// numerators as well.)
func CeilDiv(num, denom int) int {
	quotient, remainder := num/denom, num%denom
	if remainder > 0 {
		quotient++
	}
	return quotient
}
// SumInts returns the sum of all elements of ints (0 for an empty or nil
// slice).
func SumInts(ints []int) int {
	sum := 0
	for _, val := range ints {
		sum += val
	}
	return sum
} | cmd/common/math.go | 0.829768 | 0.493775 | math.go | starcoder |
package turbot
import (
"context"
"fmt"
"strconv"
"github.com/turbot/steampipe-plugin-sdk/grpc/proto"
"github.com/turbot/steampipe-plugin-sdk/plugin"
"github.com/turbot/steampipe-plugin-sdk/plugin/transform"
)
// tableTurbotPolicyValue defines the steampipe table "turbot_policy_value":
// its list hydrate function, optional key-column quals (pushed down as Turbot
// filters), and the column-to-GraphQL-field transforms.
func tableTurbotPolicyValue(ctx context.Context) *plugin.Table {
	return &plugin.Table{
		Name: "turbot_policy_value",
		// Fixed typos in the user-facing description ("Policy value define
		// the value of policy known to Turbot.").
		Description: "Policy values define the values of policies known to Turbot.",
		List: &plugin.ListConfig{
			Hydrate: listPolicyValue,
			KeyColumns: []*plugin.KeyColumn{
				{Name: "state", Require: plugin.Optional},
				{Name: "policy_type_id", Require: plugin.Optional},
				{Name: "resource_id", Require: plugin.Optional},
				{Name: "resource_type_id", Require: plugin.Optional},
				{Name: "filter", Require: plugin.Optional},
			},
		},
		Columns: []*plugin.Column{
			// Top columns
			{Name: "id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.ID"), Description: "Unique identifier of the policy value."},
			{Name: "policy_type_title", Type: proto.ColumnType_STRING, Transform: transform.FromField("Type.Title"), Description: "Title of the policy type."},
			// NOTE(review): "poliy_type_trunk_title" is misspelled but is the
			// published column name; renaming it would break existing queries,
			// so the typo is preserved deliberately.
			{Name: "poliy_type_trunk_title", Type: proto.ColumnType_STRING, Transform: transform.FromField("Type.Trunk.Title"), Description: "Title with full path of the policy type."},
			{Name: "is_default", Type: proto.ColumnType_BOOL, Transform: transform.FromField("Default"), Description: "If true this value is derived from the default value of the type."},
			{Name: "is_calculated", Type: proto.ColumnType_BOOL, Description: "If true this value is derived from calculated setting inputs e.g. templateInput and template."},
			{Name: "precedence", Type: proto.ColumnType_STRING, Description: "Precedence of the setting: REQUIRED or RECOMMENDED."},
			{Name: "resource_id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.ResourceId"), Description: "ID of the resource for the policy value."},
			{Name: "resource_trunk_title", Type: proto.ColumnType_STRING, Transform: transform.FromField("Resource.Trunk.Title"), Description: "Full title (including ancestor trunk) of the resource."},
			{Name: "resource_type_id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.ResourceTypeID"), Description: "ID of the resource type for this policy setting."},
			{Name: "state", Type: proto.ColumnType_STRING, Description: "State of the policy value."},
			// Fixed typo in description ("Secrect").
			{Name: "secret_value", Type: proto.ColumnType_STRING, Transform: transform.FromField("SecretValue").Transform(convToString), Description: "Secret value of the policy value."},
			{Name: "value", Type: proto.ColumnType_STRING, Transform: transform.FromField("Value").Transform(convToString), Description: "Value of the policy value."},
			{Name: "type_mod_uri", Type: proto.ColumnType_STRING, Transform: transform.FromField("Type.ModURI"), Description: "URI of the mod that contains the policy value."},
			// Other columns
			{Name: "filter", Type: proto.ColumnType_STRING, Transform: transform.FromQual("filter"), Description: "Filter used for this policy value list."},
			{Name: "policy_type_id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.PolicyTypeId"), Description: "ID of the policy type for this policy value."},
			{Name: "policy_type_default_template", Type: proto.ColumnType_STRING, Transform: transform.FromField("Type.DefaultTemplate"), Description: "Default template used to calculate template-based policy values. Should be a Jinja based YAML string."},
			{Name: "setting_id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.SettingId").Transform(transform.NullIfZeroValue), Description: "Policy setting Id for the policy value."},
			{Name: "dependent_controls", Type: proto.ColumnType_JSON, Description: "The controls that depends on this policy value."},
			{Name: "dependent_policy_values", Type: proto.ColumnType_JSON, Description: "The policy values that depends on this policy value."},
			{Name: "create_timestamp", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("Turbot.CreateTimestamp"), Description: "When the policy value was first set by Turbot. (It may have been created earlier.)"},
			{Name: "timestamp", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("Turbot.Timestamp"), Description: "Timestamp when the policy value was last modified (created, updated or deleted)."},
			{Name: "update_timestamp", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("Turbot.UpdateTimestamp"), Description: "When the policy value was last updated in Turbot."},
			{Name: "version_id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.VersionID"), Description: "Unique identifier for this version of the policy value."},
			{Name: "workspace", Type: proto.ColumnType_STRING, Hydrate: plugin.HydrateFunc(getTurbotWorkspace).WithCache(), Transform: transform.FromValue(), Description: "Specifies the workspace URL."},
		},
	}
}
const (
	// queryPolicyValueList is the GraphQL query used by listPolicyValue:
	// $filter carries the quals translated into Turbot filter syntax
	// (including the "limit:" page size) and $next_token the paging cursor.
	queryPolicyValueList = `
	query MyQuery($filter: [String!], $next_token: String) {
	policyValues(filter: $filter, paging: $next_token) {
	items {
	default
	value
	state
	reason
	details
	secretValue
	isCalculated
	precedence
	type {
	modUri
	defaultTemplate
	title
	trunk {
	title
	}
	}
	resource {
	trunk {
	title
	}
	}
	turbot {
	id
	policyTypeId
	resourceId
	resourceTypeId
	settingId
	createTimestamp
	deleteTimestamp
	timestamp
	updateTimestamp
	versionId
	}
	dependentControls {
	items {
	turbot {
	controlTypeId
	controlTypePath
	controlCategoryId
	controlCategoryPath
	id
	resourceId
	resourceTypeId
	}
	type {
	modUri
	title
	trunk {
	title
	}
	}
	}
	}
	dependentPolicyValues {
	items {
	type {
	modUri
	uri
	title
	trunk {
	title
	}
	turbot {
	id
	title
	}
	}
	}
	}
	}
	paging {
	next
	}
	}
	}
	`
)
// listPolicyValue is the list hydrate function for turbot_policy_value. It
// translates the optional quals into Turbot filter strings, pages through the
// policyValues GraphQL query, and streams each item to steampipe.
func listPolicyValue(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
	conn, err := connect(ctx, d)
	if err != nil {
		plugin.Logger(ctx).Error("turbot_policy_type.listPolicyType", "connection_error", err)
		return nil, err
	}
	filters := []string{}
	quals := d.KeyColumnQuals
	filter := ""
	// A raw "filter" qual is passed through verbatim.
	if quals["filter"] != nil {
		filter = quals["filter"].GetStringValue()
		filters = append(filters, filter)
	}
	// Additional filters
	if quals["state"] != nil {
		filters = append(filters, fmt.Sprintf("state:%s ", getQualListValues(ctx, quals, "state", "string")))
	}
	if quals["policy_type_id"] != nil {
		filters = append(filters, fmt.Sprintf("policyTypeId:%s policyTypeLevel:self", getQualListValues(ctx, quals, "policy_type_id", "int64")))
	}
	if quals["resource_id"] != nil {
		filters = append(filters, fmt.Sprintf("resourceId:%s resourceTypeLevel:self", getQualListValues(ctx, quals, "resource_id", "int64")))
	}
	if quals["resource_type_id"] != nil {
		filters = append(filters, fmt.Sprintf("resourceTypeId:%s resourceTypeLevel:self", getQualListValues(ctx, quals, "resource_type_id", "int64")))
	}
	// Setting a high limit and page all results
	var pageLimit int64 = 5000
	// Adjust page limit, if less than default value
	limit := d.QueryContext.Limit
	if d.QueryContext.Limit != nil {
		if *limit < pageLimit {
			pageLimit = *limit
		}
	}
	// Setting page limit
	filters = append(filters, fmt.Sprintf("limit:%s", strconv.Itoa(int(pageLimit))))
	// Page through results until the server stops returning a next cursor.
	nextToken := ""
	for {
		result := &PolicyValuesResponse{}
		err = conn.DoRequest(queryPolicyValueList, map[string]interface{}{"filter": filters, "next_token": nextToken}, result)
		if err != nil {
			plugin.Logger(ctx).Error("turbot_policy_value.listPolicyValue", "query_error", err)
			return nil, err
		}
		for _, r := range result.PolicyValues.Items {
			d.StreamListItem(ctx, r)
			// Context can be cancelled due to manual cancellation or the limit has been hit
			if d.QueryStatus.RowsRemaining(ctx) == 0 {
				return nil, nil
			}
		}
		if result.PolicyValues.Paging.Next == "" {
			break
		}
		nextToken = result.PolicyValues.Paging.Next
	}
	return nil, nil
} | turbot/table_turbot_policy_value.go | 0.63443 | 0.400222 | table_turbot_policy_value.go | starcoder |
package block
import (
"fmt"
"sort"
"strings"
"time"
"github.com/m3db/m3/src/query/models"
)
// Metadata is metadata for a block, describing size and common tags across
// constituent series.
type Metadata struct {
	// Bounds represents the time bounds for all series in the block.
	Bounds models.Bounds
	// Tags contains any tags common across all series in the block.
	Tags models.Tags
	// ResultMetadata contains metadata from any database access operations during
	// fetching block details.
	ResultMetadata ResultMetadata
}
// Equals returns a boolean reporting whether the compared metadata has equal
// fields. Only Tags and Bounds are compared; ResultMetadata is not part of
// the comparison.
func (m Metadata) Equals(other Metadata) bool {
	return m.Tags.Equals(other.Tags) && m.Bounds.Equals(other.Bounds)
}
// String returns a string representation of metadata.
func (m Metadata) String() string {
	return fmt.Sprintf("Bounds: %v, Tags: %v", m.Bounds, m.Tags)
}
// Warnings is a slice of warnings.
type Warnings []Warning
// ResultMetricMetadata describes metadata on a per metric-name basis.
type ResultMetricMetadata struct {
// NoSamples is the total number of series that were fetched to compute
// this result but had no samples.
NoSamples int
// WithSamples is the total number of series that were fetched to compute
// this result and had samples.
WithSamples int
// Aggregated is the total number of aggregated series that were fetched to
// compute this result.
Aggregated int
// Unaggregated is the total number of unaggregated series that were fetched to
// compute this result.
Unaggregated int
}
// Equals determines if two result metric metadatas are equal, i.e. every
// per-metric counter matches.
func (m ResultMetricMetadata) Equals(other ResultMetricMetadata) bool {
	return m.NoSamples == other.NoSamples &&
		m.WithSamples == other.WithSamples &&
		m.Aggregated == other.Aggregated &&
		m.Unaggregated == other.Unaggregated
}
// Merge takes another ResultMetricMetadata and merges it into this one by
// summing every per-metric counter.
func (m *ResultMetricMetadata) Merge(other ResultMetricMetadata) {
	m.NoSamples += other.NoSamples
	m.WithSamples += other.WithSamples
	m.Aggregated += other.Aggregated
	m.Unaggregated += other.Unaggregated
}
// mergeMetricMetadataMaps merges src into dst: counters are accumulated for
// names present in both maps, and missing names are inserted into dst.
func mergeMetricMetadataMaps(dst, src map[string]*ResultMetricMetadata) {
	for name, other := range src {
		if existing, ok := dst[name]; ok {
			existing.Merge(*other)
			continue
		}
		// Bug fix: the original stored src's pointer directly into dst, so a
		// later Merge on the dst entry would silently mutate src's value (and
		// vice versa). Insert a copy so the two maps stay independent.
		cp := *other
		dst[name] = &cp
	}
}
// ResultMetadata describes metadata common to each type of query results,
// indicating any additional information about the result.
type ResultMetadata struct {
	// Namespaces are the set of namespaces queried.
	// External users must access via `AddNamespace`; the map may be nil until
	// the first AddNamespace call.
	namespaces map[string]struct{}
	// FetchedResponses is the number of M3 RPC fetch responses received.
	FetchedResponses int
	// FetchedBytesEstimate is the estimated number of bytes fetched.
	FetchedBytesEstimate int
	// LocalOnly indicates that this query was executed only on the local store.
	LocalOnly bool
	// Exhaustive indicates whether the underlying data set presents a full
	// collection of retrieved data.
	Exhaustive bool
	// Warnings is a list of warnings that indicate potentially partial or
	// incomplete results.
	Warnings Warnings
	// Resolutions is a list of resolutions for series obtained by this query.
	Resolutions []time.Duration
	// KeepNaNs indicates if NaNs should be kept when returning results.
	KeepNaNs bool
	// WaitedIndex counts how many times index querying had to wait for permits.
	WaitedIndex int
	// WaitedSeriesRead counts how many times series being read had to wait for permits.
	WaitedSeriesRead int
	// FetchedSeriesCount is the total number of series that were fetched to compute
	// this result.
	FetchedSeriesCount int
	// FetchedMetadataCount is the total amount of metadata that was fetched to compute
	// this result.
	FetchedMetadataCount int
	// MetricNames is the set of unique metric tag name values across all series in this result.
	// External users must access via `ByName(name)`; may be nil until first use.
	metadataByName map[string]*ResultMetricMetadata
}
// AddNamespace records a namespace in the namespace set, lazily allocating
// the underlying map on first use.
func (m *ResultMetadata) AddNamespace(namespace string) {
	if m.namespaces == nil {
		m.namespaces = map[string]struct{}{namespace: {}}
		return
	}
	m.namespaces[namespace] = struct{}{}
}
// GetNamespaces returns the namespaces added via AddNamespace as a sorted
// slice; the result is always non-nil (empty when no namespaces were added).
func (m ResultMetadata) GetNamespaces() []string {
	out := make([]string, 0, len(m.namespaces))
	for ns := range m.namespaces {
		out = append(out, ns)
	}
	sort.Strings(out)
	return out
}
// ByName returns the ResultMetricMetadata for a given metric name, creating
// (and caching) a zeroed entry on first access.
func (m *ResultMetadata) ByName(nameTag []byte) *ResultMetricMetadata {
	if m.metadataByName == nil {
		m.metadataByName = make(map[string]*ResultMetricMetadata)
	}
	key := string(nameTag)
	if existing, ok := m.metadataByName[key]; ok {
		return existing
	}
	fresh := &ResultMetricMetadata{}
	m.metadataByName[key] = fresh
	return fresh
}
// MetadataByNameMerged folds every per-metric metadata value into a single
// aggregate ResultMetricMetadata.
func (m ResultMetadata) MetadataByNameMerged() ResultMetricMetadata {
	var merged ResultMetricMetadata
	for _, mm := range m.metadataByName {
		merged.Merge(*mm)
	}
	return merged
}
// TopMetadataByName returns the top `max` ResultMetricMetadatas ranked by the
// sum of their contained counters (descending). When there are at most `max`
// entries, the internal map is returned directly (callers must not mutate it).
func (m ResultMetadata) TopMetadataByName(max int) map[string]*ResultMetricMetadata {
	if len(m.metadataByName) <= max {
		return m.metadataByName
	}
	keys := make([]string, 0, len(m.metadataByName))
	for k := range m.metadataByName {
		keys = append(keys, k)
	}
	// total is the ranking key for one entry.
	// Fix: the original comparator declared a local `m` that shadowed the
	// method receiver `m` — confusing and bug-prone; use distinct names.
	total := func(r *ResultMetricMetadata) int {
		return r.Aggregated + r.Unaggregated + r.NoSamples + r.WithSamples
	}
	sort.SliceStable(keys, func(i, j int) bool {
		// Sort in descending order.
		return total(m.metadataByName[keys[i]]) > total(m.metadataByName[keys[j]])
	})
	top := make(map[string]*ResultMetricMetadata, max)
	for i := 0; i < max; i++ {
		k := keys[i]
		top[k] = m.metadataByName[k]
	}
	return top
}
// NewResultMetadata creates a new result metadata with optimistic defaults:
// the result is assumed local-only and exhaustive until combined with
// metadata that says otherwise.
func NewResultMetadata() ResultMetadata {
	return ResultMetadata{
		LocalOnly: true,
		Exhaustive: true,
	}
}
func combineResolutions(a, b []time.Duration) []time.Duration {
if len(a) == 0 {
if len(b) != 0 {
return b
}
} else {
if len(b) == 0 {
return a
}
combined := make([]time.Duration, 0, len(a)+len(b))
combined = append(combined, a...)
combined = append(combined, b...)
return combined
}
return nil
}
// combineWarnings merges two warning lists, deduplicating via addWarnings.
// Mirrors combineResolutions: nil when both are empty, and the non-empty
// input is returned unchanged when the other is empty.
func combineWarnings(a, b Warnings) Warnings {
	switch {
	case len(a) == 0 && len(b) == 0:
		return nil
	case len(a) == 0:
		return b
	case len(b) == 0:
		return a
	}
	merged := make(Warnings, 0, len(a)+len(b))
	merged = append(merged, a...)
	return merged.addWarnings(b...)
}
// combineNamespaces returns the union of two namespace sets. When either
// input is nil the other is returned directly (no copy); otherwise a fresh
// map containing the union is built.
func combineNamespaces(a, b map[string]struct{}) map[string]struct{} {
	if a == nil {
		return b
	}
	if b == nil {
		return a
	}
	union := make(map[string]struct{}, len(a)+len(b))
	for _, src := range []map[string]struct{}{a, b} {
		for ns := range src {
			union[ns] = struct{}{}
		}
	}
	return union
}
// combineMetricMetadata merges two per-metric metadata maps into a fresh map,
// returning nil when both inputs are nil (preserving "no data" semantics).
// Entries present in only one input are carried over by
// mergeMetricMetadataMaps; see that helper for the exact copy semantics.
func combineMetricMetadata(a, b map[string]*ResultMetricMetadata) map[string]*ResultMetricMetadata {
	if a == nil && b == nil {
		return nil
	}
	merged := make(map[string]*ResultMetricMetadata)
	mergeMetricMetadataMaps(merged, a)
	mergeMetricMetadataMaps(merged, b)
	return merged
}
// Equals determines if two result metadatas are equal.
//
// NOTE(review): FetchedResponses, FetchedBytesEstimate, KeepNaNs and the
// namespace set are not part of the comparison — confirm that is intended.
func (m ResultMetadata) Equals(n ResultMetadata) bool {
	if m.Exhaustive != n.Exhaustive {
		return false
	}
	if m.LocalOnly != n.LocalOnly {
		return false
	}
	if len(m.Resolutions) != len(n.Resolutions) {
		return false
	}
	for i, mRes := range m.Resolutions {
		if n.Resolutions[i] != mRes {
			return false
		}
	}
	// Bug fix: compare warning-list lengths before indexing. The original
	// indexed n.Warnings with m's indices, which panics when n has fewer
	// warnings and wrongly reports equality when n has extra warnings
	// beyond m's prefix.
	if len(m.Warnings) != len(n.Warnings) {
		return false
	}
	for i, mWarn := range m.Warnings {
		if !n.Warnings[i].equals(mWarn) {
			return false
		}
	}
	if m.WaitedIndex != n.WaitedIndex {
		return false
	}
	if m.WaitedSeriesRead != n.WaitedSeriesRead {
		return false
	}
	if m.FetchedSeriesCount != n.FetchedSeriesCount {
		return false
	}
	if !m.MetadataByNameMerged().Equals(n.MetadataByNameMerged()) {
		return false
	}
	return m.FetchedMetadataCount == n.FetchedMetadataCount
}
// CombineMetadata combines two result metadatas into a new value: counters
// are summed, boolean flags are AND-ed (a combined result is only LocalOnly /
// Exhaustive when both halves are), and set-like fields are unioned via the
// combine helpers. Note KeepNaNs is NOT propagated and defaults to false.
func (m ResultMetadata) CombineMetadata(other ResultMetadata) ResultMetadata {
	return ResultMetadata{
		namespaces:           combineNamespaces(m.namespaces, other.namespaces),
		FetchedResponses:     m.FetchedResponses + other.FetchedResponses,
		FetchedBytesEstimate: m.FetchedBytesEstimate + other.FetchedBytesEstimate,
		LocalOnly:            m.LocalOnly && other.LocalOnly,
		Exhaustive:           m.Exhaustive && other.Exhaustive,
		Warnings:             combineWarnings(m.Warnings, other.Warnings),
		Resolutions:          combineResolutions(m.Resolutions, other.Resolutions),
		WaitedIndex:          m.WaitedIndex + other.WaitedIndex,
		WaitedSeriesRead:     m.WaitedSeriesRead + other.WaitedSeriesRead,
		FetchedSeriesCount:   m.FetchedSeriesCount + other.FetchedSeriesCount,
		metadataByName:       combineMetricMetadata(m.metadataByName, other.metadataByName),
		FetchedMetadataCount: m.FetchedMetadataCount + other.FetchedMetadataCount,
	}
}
// IsDefault returns true if this result metadata matches the unchanged default.
// Only Exhaustive, LocalOnly and Warnings are consulted; counters such as
// FetchedSeriesCount are ignored — NOTE(review): confirm this is intended.
func (m ResultMetadata) IsDefault() bool {
	return m.Exhaustive && m.LocalOnly && len(m.Warnings) == 0
}
// VerifyTemporalRange checks every seen resolution against the given step
// size and attaches a single warning listing all resolutions that exceed it.
func (m *ResultMetadata) VerifyTemporalRange(step time.Duration) {
	// NB: this map is unlikely to have more than 2 elements in real
	// execution, since entries correspond to namespace count.
	tooLarge := make(map[time.Duration]struct{}, 10)
	for _, res := range m.Resolutions {
		if res > step {
			tooLarge[res] = struct{}{}
		}
	}
	if len(tooLarge) == 0 {
		return
	}
	formatted := make([]string, 0, len(tooLarge))
	for res := range tooLarge {
		formatted = append(formatted, fmt.Sprintf("%v", res))
	}
	sort.Strings(formatted)
	m.AddWarning("resolution larger than query range",
		fmt.Sprintf("range: %v, resolutions: %s", step, strings.Join(formatted, ", ")))
}
// AddWarning adds a warning to the result metadata (deduplicated by
// addWarnings).
// NB: warnings are expected to be small in general, so it's better to iterate
// over the array rather than introduce a map.
func (m *ResultMetadata) AddWarning(name string, message string) {
	m.Warnings = m.Warnings.addWarnings(Warning{
		Name:    name,
		Message: message,
	})
}
// AddWarnings adds several warnings to the result metadata, skipping any that
// are already present.
func (m *ResultMetadata) AddWarnings(warnings ...Warning) {
	m.Warnings = m.Warnings.addWarnings(warnings...)
}
// addWarnings appends each candidate warning that is not already present.
// NB: quadratic, but warning lists are expected to hold at most a handful of
// entries, so a map would cost more than it saves.
func (w Warnings) addWarnings(warnings ...Warning) Warnings {
	for _, cand := range warnings {
		duplicate := false
		for i := range w {
			if w[i].equals(cand) {
				duplicate = true
				break
			}
		}
		if !duplicate {
			w = append(w, cand)
		}
	}
	return w
}
// WarningStrings renders all warnings as header strings for presentation,
// appending a fixed "not exhaustive" message when the result was truncated.
func (m ResultMetadata) WarningStrings() []string {
	capacity := len(m.Warnings)
	if !m.Exhaustive {
		capacity++
	}
	out := make([]string, 0, capacity)
	for i := range m.Warnings {
		out = append(out, m.Warnings[i].Header())
	}
	if !m.Exhaustive {
		out = append(out, "m3db exceeded query limit: results not exhaustive")
	}
	return out
}
// Warning is a message that indicates potential partial or incomplete results.
type Warning struct {
	// Name is the name of the store originating the warning.
	Name string
	// Message is the content of the warning message.
	Message string
}
// Header formats the warning as "name_message" for use in a response header.
func (w Warning) Header() string {
	return w.Name + "_" + w.Message
}
// equals reports whether two warnings carry the same name and message.
func (w Warning) equals(warning Warning) bool {
	return w.Name == warning.Name && w.Message == warning.Message
}
package indicators
// SMA computes the simple moving average over a rolling window of `period`
// values, producing one result per complete window. Inputs shorter than
// `period` yield a nil result.
func (slice mfloat) SMA(period int) []float64 {
	var averages []float64
	for end := period; end <= len(slice); end++ {
		window := slice[end-period : end]
		averages = append(averages, Sum(window)/float64(period))
	}
	return averages
}
// EMA calculates the exponential moving average of a slice for a certain
// number of time periods, using smoothing factor k = 2/(period+1) and seeding
// the series with the first input value.
// NOTE(review): panics on an empty slice (reads slice[0]) — confirm callers
// guard against that.
func (slice mfloat) EMA(period int) []float64 {
	var emaSlice []float64
	ak := period + 1
	k := float64(2) / float64(ak)
	emaSlice = append(emaSlice, slice[0])
	for i := 1; i < len(slice); i++ {
		emaSlice = append(emaSlice, (slice[i]*float64(k)) + (emaSlice[i-1]*float64(1-k)))
	}
	return emaSlice
}
// BollingerBands returns the middle band (SMA), upper band and lower band of
// a slice, in that order.
// NOTE(review): the original comment advertised (upper, lower, SMA), which
// did not match the actual return order (middle, upper, lower).
// NOTE(review): the deviation is computed once over the entire SMA series
// rather than per rolling window, which differs from the textbook Bollinger
// Band definition — confirm this is intentional.
func BollingerBands(slice mfloat, period int, nStd float64) ([]float64, []float64, []float64) {
	var upperBand, lowerBand, middleBand mfloat
	middleBand = slice.SMA(period)
	std := Std(middleBand)
	upperBand = middleBand.AddToAll(std * nStd)
	lowerBand = middleBand.AddToAll(-1.0 * std * nStd)
	return middleBand, upperBand, lowerBand
}
// MACD (moving average convergence divergence) returns the MACD line (fast
// EMA minus slow EMA) and its signal line (EMA of the MACD line).
// Optional ema periods are (fast, slow, signal); defaults are (12, 26, 9).
// NOTE(review): if fewer than three periods are supplied, ALL supplied
// values are discarded and the defaults are used — partial overrides are
// silently ignored.
func MACD(data mfloat, ema ...int) ([]float64, []float64) {
	var macd, ema1, ema2, ema3 mfloat
	if len(ema) < 3 {
		ema = []int{12, 26, 9}
	}
	ema1 = data.EMA(ema[0])
	ema2 = data.EMA(ema[1])
	macd = SubSlices(ema1, ema2)
	ema3 = macd.EMA(ema[2])
	return macd, ema3
}
// OBV computes On-Balance Volume: a running total that adds the period's
// volume when price closes up, subtracts it when price closes down, and is
// unchanged otherwise. priceData and volumeData are assumed to be the same
// length and non-empty (index 0 seeds the series).
func OBV(priceData, volumeData mfloat) []float64 {
	obv := []float64{volumeData[0]}
	// Bug fix: the original iterated `for i, vol := range volumeData[1:]`,
	// so i started at 0 and priceData[i-1]/obv[i-1] indexed -1, panicking on
	// the first iteration. Iterate from 1 so i always has a predecessor.
	for i := 1; i < len(volumeData); i++ {
		vol := volumeData[i]
		switch {
		case priceData[i] > priceData[i-1]:
			obv = append(obv, obv[i-1]+vol)
		case priceData[i] < priceData[i-1]:
			obv = append(obv, obv[i-1]-vol)
		default:
			obv = append(obv, obv[i-1])
		}
	}
	return obv
}
// IchimokuCloud computes the five Ichimoku lines: conversion (tenkan-sen),
// base (kijun-sen), leading span A, leading span B and the lagging span.
// NOTE(review): the configs parameter is accepted but never used — the
// periods 9/26/52 are hard-coded.
// NOTE(review): the lagSpan slice panics when len(priceData) < 26, and the
// series combined via AddSlices have different lengths (SMA(9) vs SMA(26)) —
// verify the slice-helper semantics before relying on this output.
func IchimokuCloud(priceData, lowData, highData mfloat, configs []int) ([]float64, []float64, []float64,[]float64, []float64) {
	var conversionLine, baseLine, leadSpanA, leadSpanB, lagSpan []float64
	conversionLine = DivSlice(SubSlices(highData.SMA(9), lowData.SMA(9)),2)
	baseLine = DivSlice(SubSlices(highData.SMA(26), lowData.SMA(26)),2)
	leadSpanA = DivSlice(AddSlices(conversionLine, baseLine),2)
	leadSpanB = DivSlice(SubSlices(highData.SMA(52), lowData.SMA(52)),2)
	lagSpan = priceData[0:len(priceData)-26]
	return conversionLine, baseLine, leadSpanA, leadSpanB, lagSpan
}
package cpu
import (
"os"
log "pajalic.go.emulator/packages/logger"
)
/*
PC - Program counter: the memory address the processor reads the next
	 instruction from.
SP - Stack pointer (as in the 8086, the main "stack register"): tracks stack
	 operations and stores the address of the last program request.
F - 8-bit register holding flags that indicate the outcome of the last operation:
--- z zero flag, set if the result of the operation is zero
--- n subtraction flag, set if the last operation was a subtraction
--- h half-carry flag, carrying result of the lower 4 bits
--- c carry flag, set when: the result of an 8-bit addition is higher than $FF;
	the result of a 16-bit addition is higher than $FFFF;
	the result of a subtraction or comparison is lower than zero (like in Z80
	and 80x86 CPUs, but unlike in 65XX and ARM CPUs);
	a rotate/shift operation shifts out a "1" bit.
A - 8-bit accumulator; can be combined with F to form a 16-bit pair. Holds
	operands and results: the number you want to add should be in A (and
	another register), and the result lands in A. Can be shifted,
	complemented, adjusted and negated with single-byte instructions.
B - 8-bit register; pairs with C. Generally used as a counter when moving
	data; can also be used for operations.
C - 8-bit register; pairs with B. Generally used as a counter when moving
	data; can also be used for operations.
D - 8-bit register; pairs with E to store a 16-bit destination address when
	moving data. Can be used for other operations.
E - 8-bit register; pairs with D to store a 16-bit destination address when
	moving data. Can be used for other operations.
H - 8-bit register; pairs with L. Special: HL is used for indirect
	addressing — instead of specifying an address in an operation you can
	use HL as the destination.
L - 8-bit register; pairs with H, as above.
*/
// CpuRegisters mirrors the register file described above.
type CpuRegisters struct {
	a byte // accumulator
	f byte // flags: bit7=z, bit6=n, bit5=h, bit4=c
	b byte
	c byte
	d byte
	e byte
	h byte
	l byte
	pc uint16 // program counter
	sp uint16 // stack pointer
}
// CpuContext bundles the full execution state of the CPU core.
type CpuContext struct {
	Regs CpuRegisters
	//Current fetch
	FetchedData uint16 // operand value fetched for the current instruction
	MemDest uint16 // memory address to write to when DestIsMem is set
	DestIsMem bool
	CurOpCode byte // opcode byte of the instruction being executed
	currentInst *Instruction // decoded instruction; nil for unknown opcodes
	Halted bool // set by HALT; cleared when an interrupt is pending
	Stepping bool
	IntMasterEnabled bool // IME: master interrupt enable
	enablingIme bool // delayed-EI latch: IME is enabled one step later
	IERegister byte // interrupt enable register (0xFFFF)
	IntFlags byte // interrupt request flags (0xFF0F)
}
// CpuCtx is the package-wide CPU singleton operated on by CpuStep.
var CpuCtx CpuContext
// CpuInit resets the CPU to its power-on state. The register values
// (AF=0x01B0, BC=0x0013, DE=0x00D8, HL=0x014D, PC=0x0100, SP=0xFFFE)
// presumably match the post-boot-ROM state of the original Game Boy (DMG) —
// NOTE(review): verify against the target hardware model.
func CpuInit() {
	CpuCtx.Regs.pc = 0x100
	CpuCtx.Halted = false
	CpuCtx.Regs.sp = 0xFFFE
	CpuCtx.Regs.f = 0xB0
	CpuCtx.Regs.a = 0x01
	CpuCtx.Regs.c = 0x13
	CpuCtx.Regs.b = 0x00
	CpuCtx.Regs.e = 0xD8
	CpuCtx.Regs.d = 0x00
	CpuCtx.Regs.l = 0x4D
	CpuCtx.Regs.h = 0x01
	CpuCtx.IERegister = 0
	CpuCtx.IntFlags = 0
	CpuCtx.IntMasterEnabled = false
	CpuCtx.enablingIme = false
	GetTimerContext().div = 0xABCC
	InitProcessors()
}
// fetchInstruction reads the opcode at PC, advances PC, and resolves the
// opcode to its instruction-table entry (nil when the opcode is unknown).
func fetchInstruction() {
	opcode := BusRead(CpuCtx.Regs.pc)
	CpuCtx.Regs.pc++
	CpuCtx.CurOpCode = opcode
	CpuCtx.currentInst = instructionByOpcode(opcode)
}
// execute dispatches the current instruction to its registered processor,
// warning (and doing nothing) when no processor is registered for the type.
func execute() {
	proc := InstGetProccessor(CpuCtx.currentInst.Type)
	if proc == nil {
		log.Warn("No processor for this execution!")
		return
	}
	proc(&CpuCtx)
}
// CpuStep executes one instruction (or one halted cycle), services pending
// interrupts, and returns false when the debugger requests a stop.
// NOTE(review): the nil check on currentInst happens only AFTER FetchData and
// instToStr have run; if either dereferences a nil instruction for an unknown
// opcode this will crash before the check — consider checking right after
// fetchInstruction.
// NOTE(review): the halted-wakeup test compares the whole IntFlags byte to 1,
// which only matches when exactly bit 0 (VBlank) is set; `IntFlags != 0` is
// probably intended — confirm.
func CpuStep() bool {
	if !CpuCtx.Halted {
		pc := CpuCtx.Regs.pc
		fetchInstruction()
		EmuCycles(1)
		FetchData()
		// Render the F register as the conventional "ZNHC" trace string.
		var z = "-"
		var n = "-"
		var h = "-"
		var c = "-"
		if (CpuCtx.Regs.f & (1 << 7)) >= 1 {
			z = "Z"
		}
		if CpuCtx.Regs.f&(1<<6) >= 1 {
			n = "N"
		}
		if CpuCtx.Regs.f&(1<<5) >= 1 {
			h = "H"
		}
		if CpuCtx.Regs.f&(1<<4) >= 1 {
			c = "C"
		}
		var inst string
		instToStr(&CpuCtx, &inst)
		temp := GetEmuContext().Ticks
		// Per-instruction execution trace: tick, PC, disassembly, raw bytes,
		// and the full register file.
		log.Info("%08X - %04X: %-12s (%02X %02X %02X) A: %02X F: %s%s%s%s BC: %02X%02X DE: %02X%02X HL: %02X%02X\n",
			temp,
			pc, inst, CpuCtx.CurOpCode,
			BusRead(pc+1), BusRead(pc+2), CpuCtx.Regs.a, z, n, h, c, CpuCtx.Regs.b, CpuCtx.Regs.c,
			CpuCtx.Regs.d, CpuCtx.Regs.e, CpuCtx.Regs.h, CpuCtx.Regs.l)
		if CpuCtx.currentInst == nil {
			log.Warn("Unknown instruction! %02X\n", CpuCtx.CurOpCode)
			os.Exit(1)
		}
		DbgUpdate()
		if !DbgPrint() {
			return false
		}
		execute()
	} else {
		// Halted: burn one cycle and wake when an interrupt is requested.
		EmuCycles(1)
		if CpuCtx.IntFlags == 1 {
			CpuCtx.Halted = false
		}
	}
	// Service interrupts, then apply the one-instruction EI delay:
	// enablingIme set by EI becomes IME on the following step.
	if CpuCtx.IntMasterEnabled {
		CpuHandleInterrupts(&CpuCtx)
		CpuCtx.enablingIme = false
	}
	if CpuCtx.enablingIme {
		CpuCtx.IntMasterEnabled = true
	}
	return true
}
// CpuGetIERegister returns the interrupt enable register (0xFFFF).
func CpuGetIERegister() byte {
	return CpuCtx.IERegister
}
// CpuSetIERegister writes the interrupt enable register (0xFFFF).
func CpuSetIERegister(n byte) {
	CpuCtx.IERegister = n
}
// CpuRequestInterrupt raises the request flag bit for the given interrupt
// type; it is serviced on a later step when IME and IE permit.
func CpuRequestInterrupt(t InterruptType) {
	CpuCtx.IntFlags |= byte(t)
}
package adapter
import (
"time"
rpc "github.com/googleapis/googleapis/google/rpc"
)
type (
	// QuotasAspect handles quotas and rate limits within Mixer.
	QuotasAspect interface {
		Aspect
		// Alloc allocates the specified amount or fails when not available.
		Alloc(QuotaArgsLegacy) (QuotaResultLegacy, error)
		// AllocBestEffort allocates from 0 to the specified amount, based on availability.
		AllocBestEffort(QuotaArgsLegacy) (QuotaResultLegacy, error)
		// ReleaseBestEffort releases from 0 to the specified amount, based on current usage.
		ReleaseBestEffort(QuotaArgsLegacy) (int64, error)
	}

	// QuotasBuilder builds new instances of the Quota aspect.
	QuotasBuilder interface {
		Builder

		// NewQuotasAspect returns a new instance of the Quota aspect.
		NewQuotasAspect(env Env, c Config, quotas map[string]*QuotaDefinition) (QuotasAspect, error)
	}

	// QuotaDefinition is used to describe an individual quota that the aspect will encounter at runtime.
	QuotaDefinition struct {
		// Name of this quota definition.
		Name string
		// DisplayName is an optional user-friendly name for this quota.
		DisplayName string
		// Description is an optional user-friendly description for this quota.
		Description string
		// MaxAmount defines the upper limit for the quota.
		MaxAmount int64
		// Expiration determines the size of rolling window. A value of 0 means no rolling window,
		// allocated quota remains allocated until explicitly released.
		Expiration time.Duration
		// Labels are the names of keys for dimensional data that will
		// be generated at runtime and passed along with quota values.
		Labels map[string]LabelType
	}

	// QuotaArgsLegacy supplies the arguments for quota operations on the
	// legacy aspect interface (see QuotaArgs for the handler-model variant).
	QuotaArgsLegacy struct {
		// The metadata describing the quota.
		Definition *QuotaDefinition
		// DeduplicationID is used for deduplicating quota allocation/free calls in the case of
		// failed RPCs and retries. This should be a UUID per call, where the same
		// UUID is used for retries of the same quota allocation or release call.
		DeduplicationID string
		// The amount of quota being allocated or released.
		QuotaAmount int64
		// Labels determine the identity of the quota cell.
		Labels map[string]interface{}
	}

	// QuotaArgs supplies the arguments for quota operations.
	QuotaArgs struct {
		// DeduplicationID is used for deduplicating quota allocation/free calls in the case of
		// failed RPCs and retries. This should be a UUID per call, where the same
		// UUID is used for retries of the same quota allocation or release call.
		DeduplicationID string
		// The amount of quota being allocated or released.
		QuotaAmount int64
		// If true, allows a response to return less quota than requested. When
		// false, the exact requested amount is returned or 0 if not enough quota
		// was available.
		BestEffort bool
	}

	// QuotaResultLegacy provides return values from quota allocation calls
	// on the legacy aspect interface.
	QuotaResultLegacy struct {
		// The amount of time until which the returned quota expires, this is 0 for non-expiring quotas.
		Expiration time.Duration
		// The total amount of quota returned, may be less than requested.
		Amount int64
	}

	// QuotaResult provides return values from quota allocation calls on the handler.
	QuotaResult struct {
		// The outcome status of the operation.
		Status rpc.Status
		// The amount of time until which the returned quota expires, this is 0 for non-expiring quotas.
		ValidDuration time.Duration
		// The total amount of quota returned, may be less than requested.
		Amount int64
	}
)
// GetStatus returns the operation status embedded in the result.
func (r QuotaResult) GetStatus() rpc.Status {
	return r.Status
}
// SetStatus embeds status in result.
func (r *QuotaResult) SetStatus(s rpc.Status) { r.Status = s } | mixer/pkg/adapter/quotas.go | 0.748812 | 0.500854 | quotas.go | starcoder |
package mahjong
import sort2 "sort"
// Tile is one physical tile: its type plus a copy index Id that
// distinguishes the identical copies of that type in a set
// (SortTiles assumes Id fits in 0-3).
type Tile struct {
	TileType
	Id int8
}
// IsRed reports whether this tile is a red five (akadora): copy Id 0 of the
// five of dots, bamboo or characters.
// NOTE(review): relies on the convention that copy 0 of each five is the red
// tile — confirm against tile-set construction.
func (tile Tile) IsRed() bool {
	return tile.Id == 0 && (tile.TileType == Dots5 || tile.TileType == Bamboo5 || tile.TileType == Characters5)
}
// IsSuit reports whether the tile is a numbered suit tile (dots, bamboo or
// characters).
func (tileType TileType) IsSuit() bool {
	return tileType >= Dots1 && tileType <= Characters9
}
// IsDots reports whether the tile belongs to the dots (pin) suit.
func (tileType TileType) IsDots() bool {
	return tileType >= Dots1 && tileType <= Dots9
}
// IsBamboo reports whether the tile belongs to the bamboo (sou) suit.
func (tileType TileType) IsBamboo() bool {
	return tileType >= Bamboo1 && tileType <= Bamboo9
}
// IsCharacter reports whether the tile belongs to the characters (man) suit.
func (tileType TileType) IsCharacter() bool {
	return tileType >= Characters1 && tileType <= Characters9
}
// Suit maps a suited tile to its Suit: (type-1)/9 yields 0 (dots), 1 (bamboo)
// or 2 (characters). Only meaningful when IsSuit() is true; honors yield
// out-of-range values (e.g. East -> 3), so callers must check IsSuit first.
func (tileType TileType) Suit() Suit {
	return Suit(tileType-1) / 9
}
// SameSuit reports whether the receiver is a suited tile in the same suit as
// other. A non-suit receiver always yields false.
func (tileType TileType) SameSuit(other TileType) bool {
	return tileType.IsSuit() && tileType.Suit() == other.Suit()
}
// IsHonor reports whether the tile is an honor (a wind or a dragon).
func (tileType TileType) IsHonor() bool {
	return tileType >= East && tileType <= Red
}
// IsWind reports whether the tile is one of the four wind tiles.
func (tileType TileType) IsWind() bool {
	return tileType >= East && tileType <= North
}
// IsActiveWind reports whether the tile is the wind tile matching the given
// field wind; non-wind tiles always yield false.
func (tileType TileType) IsActiveWind(w1 FieldWind) bool {
	return tileType.IsWind() && tileType.Wind() == w1
}
// Wind converts a wind tile to its FieldWind (East maps to 0).
// Only meaningful when IsWind() is true.
func (tileType TileType) Wind() FieldWind {
	return FieldWind(tileType - East)
}
// IsDragon reports whether the tile is one of the three dragon tiles.
func (tileType TileType) IsDragon() bool {
	return tileType >= White && tileType <= Red
}
// IsTerminals reports whether the tile is a terminal (a 1 or 9 of any suit).
func (tileType TileType) IsTerminals() bool {
	switch tileType {
	case Dots1, Dots9, Bamboo1, Bamboo9, Characters1, Characters9:
		return true
	}
	return false
}
// IsGreen reports whether the tile counts for "all green" (ryuuiisou):
// bamboo 2, 3, 4, 6, 8 or the green dragon.
func (tileType TileType) IsGreen() bool {
	switch tileType {
	case Bamboo2, Bamboo3, Bamboo4, Bamboo6, Bamboo8, Green:
		return true
	}
	return false
}
// Number returns the 1-9 rank of a suited tile, computed modulo 9 (so the
// nines, whose remainder is 0, map back to 9). For non-suit values the same
// modular arithmetic applies.
func (tileType TileType) Number() int8 {
	if r := int8(tileType % 9); r != 0 {
		return r
	}
	return 9
}
// IsYaochu reports whether the tile is a yaochuu (terminal or honor) tile:
// anything that is not a 2-8 suited tile.
// NOTE(review): this also returns true for None and the flower/season values,
// since they fall outside every 2-8 range — confirm callers never pass those.
func (tileType TileType) IsYaochu() bool {
	return !((tileType > Dots1 && tileType < Dots9) ||
		(tileType > Bamboo1 && tileType < Bamboo9) ||
		(tileType > Characters1 && tileType < Characters9))
}
// SortTileTypes sorts tile types ascending in place and returns the slice.
func SortTileTypes(tileTypes []TileType) []TileType {
	less := func(i, j int) bool { return tileTypes[i] < tileTypes[j] }
	sort2.Slice(tileTypes, less)
	return tileTypes
}
// SortTiles sorts tiles in place by tile type, breaking ties by copy Id.
// The sort key is type*4+id; it fits in uint8 for all defined tile values
// (max 42*4+3 = 171), assuming Id is 0-3.
func SortTiles(tiles []Tile) []Tile {
	sort2.Slice(tiles, func(i, j int) bool {
		return uint8(tiles[i].Id)+uint8(tiles[i].TileType)*4 < uint8(tiles[j].Id)+uint8(tiles[j].TileType)*4
	})
	return tiles
}
// split3Suits partitions tiles into [dots, bamboo, characters], dropping any
// tile that belongs to no suit (honors, flowers, seasons).
func split3Suits(tiles []Tile) [3][]Tile {
	groups := [3][]Tile{make([]Tile, 0), make([]Tile, 0), make([]Tile, 0)}
	for _, t := range tiles {
		switch {
		case t.IsDots():
			groups[0] = append(groups[0], t)
		case t.IsBamboo():
			groups[1] = append(groups[1], t)
		case t.IsCharacter():
			groups[2] = append(groups[2], t)
		}
	}
	return groups
}
// split3SuitsType partitions tile types into [dots, bamboo, characters],
// dropping any non-suit type.
func split3SuitsType(tileTypes []TileType) [3][]TileType {
	groups := [3][]TileType{make([]TileType, 0), make([]TileType, 0), make([]TileType, 0)}
	for _, tt := range tileTypes {
		switch {
		case tt.IsDots():
			groups[0] = append(groups[0], tt)
		case tt.IsBamboo():
			groups[1] = append(groups[1], tt)
		case tt.IsCharacter():
			groups[2] = append(groups[2], tt)
		}
	}
	return groups
}
// for2Tile enumerates every unordered pair of distinct positions in tiles,
// preserving input order within each pair.
func for2Tile(tiles []Tile) []TilesXY {
	pairs := make([]TilesXY, 0)
	for i, first := range tiles {
		for _, second := range tiles[i+1:] {
			pairs = append(pairs, TilesXY{first, second})
		}
	}
	return pairs
}
// toTileTypes projects a slice of tiles onto their tile types.
func toTileTypes(tiles []Tile) []TileType {
	out := make([]TileType, 0, len(tiles))
	for _, t := range tiles {
		out = append(out, t.TileType)
	}
	return out
}
// toSampleTiles builds placeholder tiles (copy Id 0) from the given types.
func toSampleTiles(tileTypes []TileType) []Tile {
	out := make([]Tile, 0, len(tileTypes))
	for _, tt := range tileTypes {
		out = append(out, Tile{TileType: tt})
	}
	return out
}
// ToSampleTiles is the exported wrapper around toSampleTiles: it builds
// placeholder tiles (copy Id 0) from the given tile types.
func ToSampleTiles(tileTypes []TileType) []Tile {
	return toSampleTiles(tileTypes)
}
// findTileType returns the index of tileType within the triple xyz together
// with the matched value, or (-1, None) when it is absent.
func findTileType(tileType TileType, xyz [3]TileType) (int, TileType) {
	for i, t := range xyz {
		if t == tileType {
			return i, t
		}
	}
	return -1, None
}
// Fixed-size tile groupings: XX/XXX are same-tile groups (pair/triplet
// candidates), XY/XYZ are ordered combinations.
type TilesXX [2]Tile
type TilesXXX [3]Tile
type TilesXY [2]Tile
type TilesXYZ [3]Tile
// Sequential is a run of three tiles (chi/shuntsu), with Concealed marking
// whether it was formed without calling.
type Sequential struct {
	TilesXYZ
	Concealed bool
}
// ToTileType projects the three tiles of the run onto their tile types.
func (sequential *Sequential) ToTileType() []TileType {
	tt := make([]TileType, len(sequential.TilesXYZ))
	for i, tile := range sequential.TilesXYZ {
		tt[i] = tile.TileType
	}
	return tt
}
// Triplet is a set of three identical tiles (pon/koutsu).
type Triplet struct {
	TilesXXX
	Concealed bool
}
// ToTileType projects the three tiles of the triplet onto their tile types.
func (triplet *Triplet) ToTileType() []TileType {
	tt := make([]TileType, len(triplet.TilesXXX))
	for i, tile := range triplet.TilesXXX {
		tt[i] = tile.TileType
	}
	return tt
}
// Quad is a set of four identical tiles (kan), recorded by type only.
// Jun is embedded from elsewhere in the package — presumably the turn on
// which the call was made (NOTE(review): confirm).
type Quad struct {
	TilesXXXX TileType
	Concealed bool
	Jun
}
// DiscardTile records one discarded tile, the turn it was discarded, and
// whether it was discarded straight from the draw (tsumogiri).
type DiscardTile struct {
	Tile
	Jun
	TsumoGiri bool
}
// FuuroType classifies an open/closed meld call.
type FuuroType int8
const (
	MinKo FuuroType = iota // open triplet
	AnKo // concealed triplet
	MinKan // open quad
	AnKan // concealed quad
	ShunTsu // sequence
)
// Suit identifies one of the three numbered suits.
type Suit int8
const (
	DotsSuit Suit = iota
	BambooSuit
	CharacterSuit
)
// TileType enumerates every tile kind: None (zero value), the 27 suited
// tiles, the honors (winds then dragons), and the flower/season bonus tiles.
type TileType int8
// Yaochu lists all terminal and honor tile types.
var Yaochu = []TileType{Dots1, Dots9, Bamboo1, Bamboo9, Characters1, Characters9, East, South, West, North, White, Green, Red}
const (
	None TileType = iota // zero value: no tile
	Dots1
	Dots2
	Dots3
	Dots4
	Dots5
	Dots6
	Dots7
	Dots8
	Dots9
	Bamboo1
	Bamboo2
	Bamboo3
	Bamboo4
	Bamboo5
	Bamboo6
	Bamboo7
	Bamboo8
	Bamboo9
	Characters1
	Characters2
	Characters3
	Characters4
	Characters5
	Characters6
	Characters7
	Characters8
	Characters9
	East // winds
	South
	West
	North
	White // dragons
	Green
	Red
	PlumBlossom // flowers
	Orchid
	Chrysanthemum
	Bamboo
	Spring // seasons
	Summer
	Autumn
	Winter
)
// TilesName maps each tile type to its compact notation: rank plus suit
// letter (p = dots/pin, s = bamboo/sou, m = characters/man, z = honors).
// The trailing comments show the corresponding Unicode mahjong glyphs.
var TilesName = map[TileType]string{
	Dots1:       "1p", //🀙
	Dots2:       "2p", //🀚
	Dots3:       "3p", //🀛
	Dots4:       "4p", //🀜
	Dots5:       "5p", //🀝
	Dots6:       "6p", //🀞
	Dots7:       "7p", //🀟
	Dots8:       "8p", //🀠
	Dots9:       "9p", //🀡
	Bamboo1:     "1s", //🀐
	Bamboo2:     "2s", //🀑
	Bamboo3:     "3s", //🀒
	Bamboo4:     "4s", //🀓
	Bamboo5:     "5s", //🀔
	Bamboo6:     "6s", //🀕
	Bamboo7:     "7s", //🀖
	Bamboo8:     "8s", //🀗
	Bamboo9:     "9s", //🀘
	Characters1: "1m", //🀇
	Characters2: "2m", //🀈
	Characters3: "3m", //🀉
	Characters4: "4m", //🀊
	Characters5: "5m", //🀋
	Characters6: "6m", //🀌
	Characters7: "7m", //🀍
	Characters8: "8m", //🀎
	Characters9: "9m", //🀏
	East:        "1z", //🀀
	South:       "2z", //🀁
	West:        "3z", //🀂
	North:       "4z", //🀃
	White:       "5z", //🀆
	Green:       "6z", //🀅
	Red:         "7z", //🀄
}
package lib
// heuristics.go provides a number of heuristics to order the edges by. The goal is to potentially speed up the
// computation of hypergraph decompositions
import (
"math"
"math/rand"
"sort"
"time"
)
// GetMSCOrder produces a Maximal Cardinality Search ordering: starting from a
// random edge, it repeatedly selects an unchosen edge with the largest number
// of neighbours among the already-chosen edges, breaking ties randomly.
// Implementation is based on det-k-decomp of Samer and Gottlob '09.
// NOTE(review): rand.Seed is deprecated in modern Go (1.20+) — consider a
// local rand.Rand if the toolchain is upgraded.
func GetMSCOrder(edges Edges) Edges {
	rand.Seed(time.Now().UTC().UnixNano())
	if edges.Len() <= 1 {
		return edges
	}
	var selected []Edge
	chosen := make([]bool, edges.Len())
	//randomly select last edge in the ordering
	i := rand.Intn(edges.Len())
	chosen[i] = true
	selected = append(selected, edges.Slice()[i])
	for len(selected) < edges.Len() {
		var candidates []int
		maxcard := 0
		// Find all unchosen edges with maximal connectivity to the chosen set.
		for current := range edges.Slice() {
			currentCard := edges.Slice()[current].numNeighboursOrder(edges, chosen)
			if !chosen[current] && currentCard >= maxcard {
				if currentCard > maxcard {
					candidates = []int{}
					maxcard = currentCard
				}
				candidates = append(candidates, current)
			}
		}
		//randomly select one of the edges with equal connectivity
		nextInOrder := candidates[rand.Intn(len(candidates))]
		selected = append(selected, edges.Slice()[nextInOrder])
		chosen[nextInOrder] = true
	}
	return NewEdges(selected)
}
// GetMaxSepOrder orders the edges by how much they increase shortest paths
// within the hypergraph, using basic Floyd-Warshall on the primal graph.
func GetMaxSepOrder(edges Edges) Edges {
	if edges.Len() <= 1 {
		return edges
	}
	vertices := edges.Vertices()
	weights := make([]int, edges.Len())
	initialDiff, order := getMinDistances(vertices, edges)
	// weights[i] scores the edge at ORIGINAL position i.
	for i, e := range edges.Slice() {
		edgesWihoutE := diffEdges(edges, e)
		newDiff, _ := getMinDistances(vertices, edgesWihoutE)
		newDiffPrep := addEdgeDistances(order, newDiff, e)
		weights[i] = diffDistances(initialDiff, newDiffPrep)
	}
	// Bug fix: the original called sort.Slice on edges.Slice() with a
	// comparator reading weights[i] — but weights was indexed by the
	// original edge positions and was never permuted alongside the slice,
	// so comparisons used stale weights and the resulting order was
	// essentially arbitrary. Sort a permutation of indices instead, then
	// apply it to the slice in place.
	perm := make([]int, edges.Len())
	for i := range perm {
		perm[i] = i
	}
	sort.Slice(perm, func(i, j int) bool { return weights[perm[i]] > weights[perm[j]] })
	slice := edges.Slice()
	snapshot := append(slice[:0:0], slice...)
	for pos, idx := range perm {
		slice[pos] = snapshot[idx]
	}
	return edges
}
// order returns its two arguments sorted ascending.
func order(a, b int) (int, int) {
	if a > b {
		a, b = b, a
	}
	return a, b
}
// isInf reports whether a distance value represents "unreachable"
// (encoded as math.MaxInt64 in the distance matrices).
func isInf(a int) bool {
	return a == math.MaxInt64
}
// addEdgeDistances sets the pairwise distance to 1 between every pair of
// distinct vertices of edge e, using order to map vertex ids to matrix
// indices; the mutated matrix is also returned for convenience.
// NOTE(review): the parameter `order` shadows the package-level function of
// the same name — consider renaming for clarity.
func addEdgeDistances(order map[int]int, output [][]int, e Edge) [][]int {
	for _, n := range e.Vertices {
		for _, m := range e.Vertices {
			nIndex, _ := order[n]
			mIndex, _ := order[m]
			if nIndex != mIndex {
				output[nIndex][mIndex] = 1
			}
		}
	}
	return output
}
// getMinDistances computes all-pairs shortest distances in the primal graph
// of edges (vertices adjacent iff they share an edge), returning the distance
// matrix plus the vertex-id -> matrix-index mapping used to build it.
// Unreachable pairs stay at math.MaxInt64 (see isInf).
func getMinDistances(vertices []int, edges Edges) ([][]int, map[int]int) {
	var output [][]int
	order := make(map[int]int)
	for i, n := range vertices {
		order[n] = i
	}
	// Initialize every distance to "infinity" by copying a template row.
	row := make([]int, len(vertices))
	for j := 0; j < len(vertices); j++ {
		row[j] = math.MaxInt64
	}
	for j := 0; j < len(vertices); j++ {
		newRow := make([]int, len(vertices))
		copy(newRow, row)
		output = append(output, newRow)
	}
	// Seed direct distances (1) between vertices sharing an edge.
	for _, e := range edges.Slice() {
		output = addEdgeDistances(order, output, e)
	}
	// Floyd-Warshall-style relaxation, repeated until a fixpoint (at most
	// edges.Len() rounds). The isInf guards skip unreachable entries and
	// prevent MaxInt64 overflow in the sum below.
	for j := 0; j < edges.Len(); j++ {
		changed := false
		for k := range vertices {
			for l := range vertices {
				if isInf(output[k][l]) {
					continue
				}
				for m := range vertices {
					if isInf(output[l][m]) {
						continue
					}
					newdist := output[k][l] + output[l][m]
					if output[k][m] > newdist {
						output[k][m] = newdist
						changed = true
					}
				}
			}
		}
		if !changed {
			break
		}
	}
	return output, order
}
// diffDistances scores an edge by comparing the distance matrix with the edge
// (old) against the matrix without it (new):
// weight of each edge = (sum of paths disconnected)*SepWeight + (sum of each path made longer * diff).
// NOTE(review): the "disconnected a path" branch fires when OLD is
// unreachable and NEW is reachable; since removing an edge can only lose
// paths given how the caller builds the matrices, this condition looks
// inverted. Likewise `old - new` is <= 0 for lengthened paths, so important
// edges DECREASE the score — verify both against the intended heuristic.
func diffDistances(old, new [][]int) int {
	var output int
	SepWeight := len(old) * len(old)
	for j := 0; j < len(old); j++ {
		for i := 0; i < len(old[j]); i++ {
			if isInf(old[j][i]) && !isInf(new[j][i]) { // disconnected a path
				output = output + SepWeight
			} else if !isInf(old[j][i]) && !isInf(new[j][i]) { // check if path shortened
				diff := old[j][i] - new[j][i]
				output = output + diff
			}
		}
	}
	return output
}
// GetDegreeOrder orders the edges (descending, in place) by the sum of their
// vertex degrees.
func GetDegreeOrder(edges Edges) Edges {
	if edges.Len() <= 1 {
		return edges
	}
	slice := edges.Slice()
	// Perf fix: the original recomputed edgeVertexDegree — which scans the
	// whole edge set per vertex — inside the sort comparator, i.e. on every
	// comparison. Compute each edge's degree once up front instead.
	degrees := make([]int, len(slice))
	for i := range slice {
		degrees[i] = edgeVertexDegree(edges, slice[i])
	}
	// Sort a permutation of indices (degrees is indexed by original
	// position), then apply it to the slice in place.
	perm := make([]int, len(slice))
	for i := range perm {
		perm[i] = i
	}
	sort.Slice(perm, func(i, j int) bool { return degrees[perm[i]] > degrees[perm[j]] })
	snapshot := append(slice[:0:0], slice...)
	for pos, idx := range perm {
		slice[pos] = snapshot[idx]
	}
	return edges
}
// edgeVertexDegree sums the degrees of all vertices of edge, then subtracts
// one per vertex so the edge's own contribution to each degree is excluded.
func edgeVertexDegree(edges Edges, edge Edge) int {
	var output int
	for _, v := range edge.Vertices {
		output = output + getDegree(edges, v)
	}
	return output - len(edge.Vertices)
}
// GetEdgeDegreeOrder orders the edges (descending, in place) based on their
// edge degrees, i.e. the number of neighbouring edges.
// NOTE(review): edgeDegree scans the whole edge set and is re-evaluated on
// every comparison, making this O(n^2 log n) scans — consider precomputing
// the degrees once if this shows up in profiles.
func GetEdgeDegreeOrder(edges Edges) Edges {
	if edges.Len() <= 1 {
		return edges
	}
	sort.Slice(edges.Slice(), func(i, j int) bool {
		return edgeDegree(edges, edges.Slice()[i]) > edgeDegree(edges, edges.Slice()[j])
	})
	return edges
}
func edgeDegree(edges Edges, edge Edge) int {
output := 0
for i := range edges.Slice() {
if edges.Slice()[i].areNeighbours(edge) {
output++
}
}
return output
} | lib/heuristics.go | 0.673406 | 0.519399 | heuristics.go | starcoder |
// Package bulletproof implements the zero knowledge protocol bulletproofs as defined in https://eprint.iacr.org/2017/1066.pdf
package bulletproof
import (
"github.com/gtank/merlin"
"github.com/pkg/errors"
"github.com/coinbase/kryptology/pkg/core/curves"
)
// InnerProductProver is the struct used to create InnerProductProofs
// It specifies which curve to use and holds precomputed generators
// See NewInnerProductProver() for prover initialization
type InnerProductProver struct {
	curve      curves.Curve
	generators ippGenerators
}

// InnerProductProof contains necessary output for the inner product proof
// a and b are the final input vectors of scalars, they should be of length 1
// Ls and Rs are calculated per recursion of the IPP and are necessary for verification
// See section 3.1 on pg 15 of https://eprint.iacr.org/2017/1066.pdf
type InnerProductProof struct {
	a, b         curves.Scalar
	capLs, capRs []curves.Point
	curve        *curves.Curve
}

// ippRecursion is the same as IPP but tracks recursive a', b', g', h' and Ls and Rs
// It should only be used internally by InnerProductProver.Prove()
// See L35 on pg 16 of https://eprint.iacr.org/2017/1066.pdf
type ippRecursion struct {
	a, b []curves.Scalar
	c    curves.Scalar
	// Running lists of L/R commitments, one pair appended per recursion level.
	capLs, capRs []curves.Point
	// Current generator vectors; halved at each recursion level.
	g, h []curves.Point
	u, capP curves.Point
	// Fiat-Shamir transcript shared across all recursion levels.
	transcript *merlin.Transcript
}
// NewInnerProductProver initializes a new prover
// It uses the specified domain to generate generators for vectors of at most maxVectorLength
// A prover can be used to construct inner product proofs for vectors of length less than or equal to maxVectorLength
// A prover is defined by an explicit curve
func NewInnerProductProver(maxVectorLength int, domain []byte, curve curves.Curve) (*InnerProductProver, error) {
	gens, err := getGeneratorPoints(maxVectorLength, domain, curve)
	if err != nil {
		return nil, errors.Wrap(err, "ipp getGenerators")
	}
	prover := &InnerProductProver{
		curve:      curve,
		generators: *gens,
	}
	return prover, nil
}
// NewInnerProductProof initializes a new InnerProductProof for a specified curve
// This should be used in tandem with UnmarshalBinary() to convert a marshaled proof into the struct
func NewInnerProductProof(curve *curves.Curve) *InnerProductProof {
	// capLs and capRs are intentionally left nil (their zero value);
	// UnmarshalBinary populates them. The previous explicit nil-variable
	// declarations were redundant.
	return &InnerProductProof{
		a:     curve.NewScalar(),
		b:     curve.NewScalar(),
		curve: curve,
	}
}
// rangeToIPP takes the output of a range proof and converts it into an inner product proof
// See section 4.2 on pg 20
// The conversion specifies generators to use (g and hPrime), as well as the two vectors l, r of which the inner product is tHat
// Additionally, note that the P used for the IPP is in fact P*h^-mu from the range proof
func (prover *InnerProductProver) rangeToIPP(proofG, proofH []curves.Point, l, r []curves.Scalar, tHat curves.Scalar, capPhmuinv, u curves.Point, transcript *merlin.Transcript) (*InnerProductProof, error) {
	// Note that P as a witness is only g^l * h^r
	// P needs to be in the form of g^l * h^r * u^<l,r>
	// Calculate the final P including the u^<l,r> term
	utHat := u.Mul(tHat)
	capP := capPhmuinv.Add(utHat)
	// Use params to prove inner product
	// The transcript is passed through so that IPP challenges stay bound to
	// the preceding range-proof transcript state.
	recursionParams := &ippRecursion{
		a:          l,
		b:          r,
		capLs:      []curves.Point{},
		capRs:      []curves.Point{},
		c:          tHat,
		g:          proofG,
		h:          proofH,
		capP:       capP,
		u:          u,
		transcript: transcript,
	}
	return prover.proveRecursive(recursionParams)
}
// getP returns the initial P value given two scalars a,b and point u
// This method should only be used for testing
// See (3) on page 13 of https://eprint.iacr.org/2017/1066.pdf
func (prover *InnerProductProver) getP(a, b []curves.Scalar, u curves.Point) (curves.Point, error) {
	// Vectors must have length power of two
	if !isPowerOfTwo(len(a)) {
		return nil, errors.New("ipp vector length must be power of two")
	}
	// Generator vectors must be same length
	if len(prover.generators.G) != len(prover.generators.H) {
		return nil, errors.New("ipp generator lengths of g and h must be equal")
	}
	// Inner product requires len(a) == len(b) else error is returned
	c, err := innerProduct(a, b)
	if err != nil {
		return nil, errors.Wrap(err, "ipp getInnerProduct")
	}
	// Length of vectors must not exceed the number of precomputed generators.
	// This mirrors the identical guard in Prove; without it the slicing below
	// would panic for oversized inputs.
	if len(a) > len(prover.generators.G) {
		return nil, errors.New("ipp vector length must be less than maxVectorLength")
	}
	// In case where len(a) is less than number of generators precomputed by prover, trim to length
	proofG := prover.generators.G[0:len(a)]
	proofH := prover.generators.H[0:len(b)]
	// initial P = g^a * h^b * u^(a dot b) (See (3) on page 13 of https://eprint.iacr.org/2017/1066.pdf)
	ga := prover.curve.NewGeneratorPoint().SumOfProducts(proofG, a)
	hb := prover.curve.NewGeneratorPoint().SumOfProducts(proofH, b)
	uadotb := u.Mul(c)
	capP := ga.Add(hb).Add(uadotb)
	return capP, nil
}
// Prove executes the prover protocol on pg 16 of https://eprint.iacr.org/2017/1066.pdf
// It generates an inner product proof for vectors a and b, using u to blind the inner product in P
// A transcript is used for the Fiat Shamir heuristic
func (prover *InnerProductProver) Prove(a, b []curves.Scalar, u curves.Point, transcript *merlin.Transcript) (*InnerProductProof, error) {
	// Vectors must have length power of two
	if !isPowerOfTwo(len(a)) {
		return nil, errors.New("ipp vector length must be power of two")
	}
	// Generator vectors must be same length
	if len(prover.generators.G) != len(prover.generators.H) {
		return nil, errors.New("ipp generator lengths of g and h must be equal")
	}
	// Inner product requires len(a) == len(b) else error is returned
	c, err := innerProduct(a, b)
	if err != nil {
		return nil, errors.Wrap(err, "ipp getInnerProduct")
	}
	// Length of vectors must be less than the number of generators generated
	if len(a) > len(prover.generators.G) {
		return nil, errors.New("ipp vector length must be less than maxVectorLength")
	}
	// In case where len(a) is less than number of generators precomputed by prover, trim to length
	proofG := prover.generators.G[0:len(a)]
	proofH := prover.generators.H[0:len(b)]
	// initial P = g^a * h^b * u^(a dot b) (See (3) on page 13 of https://eprint.iacr.org/2017/1066.pdf)
	ga := prover.curve.NewGeneratorPoint().SumOfProducts(proofG, a)
	hb := prover.curve.NewGeneratorPoint().SumOfProducts(proofH, b)
	uadotb := u.Mul(c)
	capP := ga.Add(hb).Add(uadotb)
	// Seed the recursion with the full-length vectors and empty L/R lists;
	// proveRecursive halves the vectors until the length-1 base case.
	recursionParams := &ippRecursion{
		a:          a,
		b:          b,
		capLs:      []curves.Point{},
		capRs:      []curves.Point{},
		c:          c,
		g:          proofG,
		h:          proofH,
		capP:       capP,
		u:          u,
		transcript: transcript,
	}
	return prover.proveRecursive(recursionParams)
}
// proveRecursive executes the recursion on pg 16 of https://eprint.iacr.org/2017/1066.pdf
// Each level halves the vectors, emits one (L, R) commitment pair, and derives
// the Fiat-Shamir challenge x from the shared transcript. The recursion depth
// is therefore log2(len(a)).
func (prover *InnerProductProver) proveRecursive(recursionParams *ippRecursion) (*InnerProductProof, error) {
	// length checks
	if len(recursionParams.a) != len(recursionParams.b) {
		return nil, errors.New("ipp proveRecursive a and b different lengths")
	}
	if len(recursionParams.g) != len(recursionParams.h) {
		return nil, errors.New("ipp proveRecursive g and h different lengths")
	}
	if len(recursionParams.a) != len(recursionParams.g) {
		return nil, errors.New("ipp proveRecursive scalar and point vectors different lengths")
	}
	// Base case (L14, pg16 of https://eprint.iacr.org/2017/1066.pdf)
	if len(recursionParams.a) == 1 {
		proof := &InnerProductProof{
			a:     recursionParams.a[0],
			b:     recursionParams.b[0],
			capLs: recursionParams.capLs,
			capRs: recursionParams.capRs,
			curve: &prover.curve,
		}
		return proof, nil
	}
	// Split current state into low (first half) vs high (second half) vectors
	aLo, aHi, err := splitScalarVector(recursionParams.a)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams splitScalarVector")
	}
	bLo, bHi, err := splitScalarVector(recursionParams.b)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams splitScalarVector")
	}
	gLo, gHi, err := splitPointVector(recursionParams.g)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams splitPointVector")
	}
	hLo, hHi, err := splitPointVector(recursionParams.h)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams splitPointVector")
	}
	// c_l, c_r (L21,22, pg16 of https://eprint.iacr.org/2017/1066.pdf)
	cL, err := innerProduct(aLo, bHi)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams innerProduct")
	}
	cR, err := innerProduct(aHi, bLo)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams innerProduct")
	}
	// L, R (L23,24, pg16 of https://eprint.iacr.org/2017/1066.pdf)
	lga := prover.curve.Point.SumOfProducts(gHi, aLo)
	lhb := prover.curve.Point.SumOfProducts(hLo, bHi)
	ucL := recursionParams.u.Mul(cL)
	capL := lga.Add(lhb).Add(ucL)
	rga := prover.curve.Point.SumOfProducts(gLo, aHi)
	rhb := prover.curve.Point.SumOfProducts(hHi, bLo)
	ucR := recursionParams.u.Mul(cR)
	capR := rga.Add(rhb).Add(ucR)
	// Add L,R for verifier to use to calculate final g, h
	newL := append(recursionParams.capLs, capL)
	newR := append(recursionParams.capRs, capR)
	// Get x from L, R for non-interactive (See section 4.4 pg22 of https://eprint.iacr.org/2017/1066.pdf)
	// Note this replaces the interactive model, i.e. L36-28 of pg16 of https://eprint.iacr.org/2017/1066.pdf
	// calcx mutates the transcript, so it must happen exactly once per level,
	// after L and R are fixed.
	x, err := prover.calcx(capL, capR, recursionParams.transcript)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams calcx")
	}
	// Calculate recursive inputs
	xInv, err := x.Invert()
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams x.Invert")
	}
	// g', h' (L29,30, pg16 of https://eprint.iacr.org/2017/1066.pdf)
	gLoxInverse := multiplyScalarToPointVector(xInv, gLo)
	gHix := multiplyScalarToPointVector(x, gHi)
	gPrime, err := multiplyPairwisePointVectors(gLoxInverse, gHix)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams multiplyPairwisePointVectors")
	}
	hLox := multiplyScalarToPointVector(x, hLo)
	hHixInv := multiplyScalarToPointVector(xInv, hHi)
	hPrime, err := multiplyPairwisePointVectors(hLox, hHixInv)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams multiplyPairwisePointVectors")
	}
	// P' (L31, pg16 of https://eprint.iacr.org/2017/1066.pdf)
	xSquare := x.Square()
	xInvSquare := xInv.Square()
	LxSquare := capL.Mul(xSquare)
	RxInvSquare := capR.Mul(xInvSquare)
	PPrime := LxSquare.Add(recursionParams.capP).Add(RxInvSquare)
	// a', b' (L33, 34, pg16 of https://eprint.iacr.org/2017/1066.pdf)
	aLox := multiplyScalarToScalarVector(x, aLo)
	aHixIn := multiplyScalarToScalarVector(xInv, aHi)
	aPrime, err := addPairwiseScalarVectors(aLox, aHixIn)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams addPairwiseScalarVectors")
	}
	bLoxInv := multiplyScalarToScalarVector(xInv, bLo)
	bHix := multiplyScalarToScalarVector(x, bHi)
	bPrime, err := addPairwiseScalarVectors(bLoxInv, bHix)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams addPairwiseScalarVectors")
	}
	// c'
	cPrime, err := innerProduct(aPrime, bPrime)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams innerProduct")
	}
	// Make recursive call (L35, pg16 of https://eprint.iacr.org/2017/1066.pdf)
	recursiveIPP := &ippRecursion{
		a:          aPrime,
		b:          bPrime,
		capLs:      newL,
		capRs:      newR,
		c:          cPrime,
		g:          gPrime,
		h:          hPrime,
		capP:       PPrime,
		u:          recursionParams.u,
		transcript: recursionParams.transcript,
	}
	out, err := prover.proveRecursive(recursiveIPP)
	if err != nil {
		return nil, errors.Wrap(err, "recursionParams proveRecursive")
	}
	return out, nil
}
// calcx uses a merlin transcript for Fiat Shamir
// For each recursion, it takes the current state of the transcript and appends the newly calculated L and R values
// A new scalar is then read from the transcript
// See section 4.4 pg22 of https://eprint.iacr.org/2017/1066.pdf
func (prover *InnerProductProver) calcx(L, R curves.Point, transcript *merlin.Transcript) (curves.Scalar, error) {
	// Commit the newest L and R values, then derive the challenge scalar from
	// 64 transcript bytes via wide reduction.
	transcript.AppendMessage([]byte("addRecursiveL"), L.ToAffineUncompressed())
	transcript.AppendMessage([]byte("addRecursiveR"), R.ToAffineUncompressed())
	challengeBytes := transcript.ExtractBytes([]byte("getx"), 64)
	x, err := prover.curve.NewScalar().SetBytesWide(challengeBytes)
	if err != nil {
		return nil, errors.Wrap(err, "calcx NewScalar SetBytesWide")
	}
	return x, nil
}
// MarshalBinary takes an inner product proof and marshals into bytes.
// Layout: a || b || L0 || R0 || L1 || R1 || ... using the curve's fixed-width
// scalar and compressed-point encodings.
func (proof *InnerProductProof) MarshalBinary() []byte {
	out := append([]byte{}, proof.a.Bytes()...)
	out = append(out, proof.b.Bytes()...)
	// capLs and capRs are emitted pairwise, in recursion order.
	for i := range proof.capLs {
		out = append(out, proof.capLs[i].ToAffineCompressed()...)
		out = append(out, proof.capRs[i].ToAffineCompressed()...)
	}
	return out
}
// UnmarshalBinary takes bytes of a marshaled proof and writes them into an inner product proof
// The inner product proof used should be from the output of NewInnerProductProof()
func (proof *InnerProductProof) UnmarshalBinary(data []byte) error {
scalarLen := len(proof.curve.NewScalar().Bytes())
pointLen := len(proof.curve.NewGeneratorPoint().ToAffineCompressed())
ptr := 0
// Get scalars
a, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
if err != nil {
return errors.New("InnerProductProof UnmarshalBinary SetBytes")
}
proof.a = a
ptr += scalarLen
b, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
if err != nil {
return errors.New("InnerProductProof UnmarshalBinary SetBytes")
}
proof.b = b
ptr += scalarLen
// Get points
var capLs, capRs []curves.Point
for ptr < len(data) {
capLElem, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
if err != nil {
return errors.New("InnerProductProof UnmarshalBinary FromAffineCompressed")
}
capLs = append(capLs, capLElem)
ptr += pointLen
capRElem, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
if err != nil {
return errors.New("InnerProductProof UnmarshalBinary FromAffineCompressed")
}
capRs = append(capRs, capRElem)
ptr += pointLen
}
proof.capLs = capLs
proof.capRs = capRs
return nil
} | pkg/bulletproof/ipp_prover.go | 0.855218 | 0.47658 | ipp_prover.go | starcoder |
package urts
import (
"time"
"github.com/iotaledger/hive.go/app"
)
// ParametersTipsel contains the definition of the parameters used by Tipselection.
type ParametersTipsel struct {
	// NonLazy is the config group used for the non-lazy tip-pool.
	NonLazy struct {
		// Defines the maximum amount of current tips for which "CfgTipSelMaxReferencedTipAge"
		// and "CfgTipSelMaxChildren" are checked. if the amount of tips exceeds this limit,
		// referenced tips get removed directly to reduce the amount of tips in the network.
		RetentionRulesTipsLimit int `default:"100" usage:"the maximum number of current tips for which the retention rules are checked (non-lazy)"`
		// Defines the maximum time a tip remains in the tip pool
		// after it was referenced by the first message.
		MaxReferencedTipAge time.Duration `default:"3s" usage:"the maximum time a tip remains in the tip pool after it was referenced by the first message (non-lazy)"`
		// Defines the maximum amount of references by other messages
		// before the tip is removed from the tip pool.
		MaxChildren uint32 `default:"30" usage:"the maximum amount of references by other messages before the tip is removed from the tip pool (non-lazy)"`
		// Defines the maximum amount of tips in the non-lazy tip-pool before the
		// spammer tries to reduce these (0 = always reduce, per the usage tag).
		// This is used to support the network if someone attacks the tangle by spamming a lot of tips.
		SpammerTipsThreshold int `default:"0" usage:"the maximum amount of tips in a tip-pool (non-lazy) before the spammer tries to reduce these (0 = always)"`
	}
	// SemiLazy is the config group used for the semi-lazy tip-pool.
	SemiLazy struct {
		// Defines the maximum amount of current tips for which "CfgTipSelMaxReferencedTipAge"
		// and "CfgTipSelMaxChildren" are checked. if the amount of tips exceeds this limit,
		// referenced tips get removed directly to reduce the amount of tips in the network.
		RetentionRulesTipsLimit int `default:"20" usage:"the maximum number of current tips for which the retention rules are checked (semi-lazy)"`
		// Defines the maximum time a tip remains in the tip pool
		// after it was referenced by the first message.
		MaxReferencedTipAge time.Duration `default:"3s" usage:"the maximum time a tip remains in the tip pool after it was referenced by the first message (semi-lazy)"`
		// Defines the maximum amount of references by other messages
		// before the tip is removed from the tip pool.
		MaxChildren uint32 `default:"2" usage:"the maximum amount of references by other messages before the tip is removed from the tip pool (semi-lazy)"`
		// Defines the maximum amount of tips in the semi-lazy tip-pool before the
		// spammer tries to reduce these (0 = disabled, per the usage tag).
		// This is used to support the network if someone attacks the tangle by spamming a lot of tips.
		SpammerTipsThreshold int `default:"30" usage:"the maximum amount of tips in a tip-pool (semi-lazy) before the spammer tries to reduce these (0 = disable)"`
	}
}

// ParamsTipsel is the singleton parameters instance for tip selection.
var ParamsTipsel = &ParametersTipsel{}

// params registers the tip selection parameters under the "tipsel" config namespace.
var params = &app.ComponentParams{
	Params: map[string]any{
		"tipsel": ParamsTipsel,
	},
	Masked: nil,
}
package hipathsys
import (
"fmt"
"strings"
)
// UCUMSystemURI is the system URI for UCUM units of measure.
var UCUMSystemURI = NewString("http://unitsofmeasure.org")

// QuantityTypeSpec is the type specification of the Quantity type.
var QuantityTypeSpec = newAnyTypeSpec("Quantity")

// quantityType implements QuantityAccessor: a decimal value with an
// optional unit (unit may be nil for unit-less quantities).
type quantityType struct {
	baseAnyType
	value DecimalAccessor
	unit StringAccessor
}

// QuantityAccessor is the accessor interface for quantity values; it combines
// comparison, stringification, arithmetic, negation and unit conversion.
type QuantityAccessor interface {
	AnyAccessor
	Comparator
	Stringifier
	DecimalValueAccessor
	Negator
	ArithmeticApplier

	// Unit returns the quantity's unit (may be nil).
	Unit() StringAccessor
	// ToUnit converts the quantity to the given unit, nil if not convertible.
	ToUnit(unit StringAccessor) QuantityAccessor
}
// NewQuantity creates a quantity with no source information.
func NewQuantity(value DecimalAccessor, unit StringAccessor) QuantityAccessor {
	return NewQuantityWithSource(value, unit, nil)
}

// NewQuantityWithSource creates a quantity with optional source information.
// It panics when value is nil; unit may be nil for a unit-less quantity.
func NewQuantityWithSource(value DecimalAccessor, unit StringAccessor, source interface{}) QuantityAccessor {
	if value == nil {
		panic("value must not be nil")
	}
	return &quantityType{
		baseAnyType: baseAnyType{
			source: source,
		},
		value: value,
		unit:  unit,
	}
}
// DataType returns the quantity data type constant.
func (t *quantityType) DataType() DataTypes {
	return QuantityDataType
}

// Value returns the numeric value of the quantity.
func (t *quantityType) Value() DecimalAccessor {
	return t.value
}

// Unit returns the unit of the quantity (may be nil).
func (t *quantityType) Unit() StringAccessor {
	return t.unit
}

// WithValue returns a new quantity with the given numeric value and this
// quantity's unit. A non-decimal number is converted to a decimal first.
// Returns nil when node is nil.
func (t *quantityType) WithValue(node NumberAccessor) DecimalValueAccessor {
	var value DecimalAccessor
	if node == nil {
		return nil
	} else if node.DataType() == DecimalDataType {
		value = node.(DecimalAccessor)
	} else {
		value = NewDecimal(node.Decimal())
	}

	return NewQuantity(value, t.Unit())
}

// ArithmeticOpSupported reports whether op is one of the four basic
// arithmetic operations supported by quantities.
func (t *quantityType) ArithmeticOpSupported(op ArithmeticOps) bool {
	return op == AdditionOp ||
		op == SubtractionOp ||
		op == MultiplicationOp ||
		op == DivisionOp
}

// Negate returns a new quantity with the negated value and the same unit.
func (t *quantityType) Negate() AnyAccessor {
	return NewQuantity(t.value.Negate().(DecimalAccessor), t.unit)
}
// TypeSpec returns the type specification of the Quantity type.
// Receiver renamed from "e" to "t" for consistency with every other
// quantityType method.
func (t *quantityType) TypeSpec() TypeSpecAccessor {
	return QuantityTypeSpec
}
// Equal delegates to quantityValueEqual with equivalent=false (strict mode).
func (t *quantityType) Equal(node interface{}) bool {
	return quantityValueEqual(t, node, false)
}

// Equivalent delegates to quantityValueEqual with equivalent=true.
func (t *quantityType) Equivalent(node interface{}) bool {
	return quantityValueEqual(t, node, true)
}
// quantityValueEqual compares a quantity against node.
// For a quantity operand: equal units compare values directly; otherwise both
// sides are converted to a common base unit (conversion only possible when the
// unit exponents match). For a plain decimal operand only the values are
// compared. The equivalent flag is forwarded (inverted) to ConvertUnitToBase.
func quantityValueEqual(t QuantityAccessor, node interface{}, equivalent bool) bool {
	if q, ok := node.(QuantityAccessor); ok {
		if Equal(t.Unit(), q.Unit()) {
			return Equal(t.Value(), q.Value())
		}

		u1, exp1 := QuantityUnitWithNameString(t.Unit())
		u2, exp2 := QuantityUnitWithNameString(q.Unit())
		// Different exponents (e.g. m vs m2) can never be equal.
		if exp1 != exp2 {
			return false
		}

		v1, v2, u := ConvertUnitToBase(t.Value(), u1, exp1, q.Value(), u2, exp2, !equivalent)
		if u != nil {
			return Equal(v1, v2)
		}
	} else if d, ok := node.(DecimalValueAccessor); ok {
		v1 := t.Value()
		v2 := d.Value()

		if equivalent {
			return Equivalent(v1, v2)
		}
		return Equal(v1, v2)
	}

	return false
}
// Compare orders this quantity against comparator.
// Quantities with differing units are converted to a common base unit before
// their values are compared; when the exponents differ or no common base
// exists, (-1, Empty) is returned. Non-quantity comparators fall through to a
// plain decimal value comparison.
func (t *quantityType) Compare(comparator Comparator) (int, OperatorStatus) {
	if q, ok := comparator.(QuantityAccessor); ok {
		if !Equal(t.Unit(), q.Unit()) {
			u1, exp1 := QuantityUnitWithNameString(t.Unit())
			u2, exp2 := QuantityUnitWithNameString(q.Unit())
			if exp1 == exp2 {
				v1, v2, u := ConvertUnitToBase(t.Value(), u1, exp1, q.Value(), u2, exp2, true)
				if u != nil {
					return decimalValueCompare(v1, v2)
				}
			}
			return -1, Empty
		}
	}
	return decimalValueCompare(t.value, comparator)
}
// ToUnit converts this quantity to the requested unit.
// Returns the receiver unchanged when the units are already equal, and nil
// when either unit is unknown, the exponents differ, or no common base unit
// exists. The converted value is scaled by the factor ratio of both units
// relative to the common base.
func (t *quantityType) ToUnit(unit StringAccessor) QuantityAccessor {
	u2, exp2 := QuantityUnitWithNameString(unit)
	if u2 == nil {
		return nil
	}
	u1, exp1 := QuantityUnitWithNameString(t.Unit())
	if u1 == nil || exp1 != exp2 {
		return nil
	}

	if u1.Equal(u2) {
		return t
	}
	u := u1.CommonBase(u2, true)
	if u == nil {
		return nil
	}

	f1, f2 := u1.Factor(u, exp1), u2.Factor(u, exp2)
	v, _ := t.Value().Calc(f1, MultiplicationOp)
	v, _ = v.Value().Calc(f2, DivisionOp)

	val := v.Value()
	return NewQuantity(val, u2.NameWithExp(val, exp2))
}
// String renders the quantity as "<value> '<unit>'"; the unit portion is
// omitted when the quantity has no unit.
func (t *quantityType) String() string {
	var sb strings.Builder
	sb.Grow(32)
	sb.WriteString(t.value.String())
	if t.unit != nil {
		sb.WriteString(" '")
		sb.WriteString(t.unit.String())
		sb.WriteByte('\'')
	}
	return sb.String()
}
// Calc applies the arithmetic operation op between this quantity and operand.
// Returns (nil, nil) when either value is nil and an error when op is not
// supported by both sides. A quantity operand is unit-merged first (see
// mergeQuantityUnits); a plain decimal operand keeps this quantity's unit.
func (t *quantityType) Calc(operand DecimalValueAccessor, op ArithmeticOps) (DecimalValueAccessor, error) {
	if t.value == nil || operand == nil {
		return nil, nil
	}
	if !t.ArithmeticOpSupported(op) || !operand.ArithmeticOpSupported(op) {
		return nil, fmt.Errorf("arithmetic operator not supported: %c", op)
	}

	var valLeft, varRight DecimalAccessor
	var unit QuantityUnitAccessor
	var exp int
	if q, ok := operand.(QuantityAccessor); !ok {
		valLeft, varRight = t.Value(), operand.Value()
		unit, exp = QuantityUnitWithNameString(t.Unit())
	} else {
		var err error
		valLeft, varRight, unit, exp, err = mergeQuantityUnits(t, q, op)
		if err != nil {
			return nil, err
		}
	}

	value, _ := valLeft.Calc(varRight, op)
	return NewQuantity(value.Value(), unit.NameWithExp(value.Value(), exp)), nil
}
// mergeQuantityUnits reconciles the units of two quantities for the given
// arithmetic operation. It returns the (possibly converted) left and right
// values, the common unit and the resulting unit exponent.
// Addition/subtraction require equal exponents; multiplication adds and
// division subtracts them, and the result must stay within 1..3.
// On error the returned exponent is 1 and the other results are nil.
func mergeQuantityUnits(l QuantityAccessor, r QuantityAccessor, op ArithmeticOps) (DecimalAccessor, DecimalAccessor, QuantityUnitAccessor, int, error) {
	leftVal, rightVal := l.Value(), r.Value()
	leftUnit, leftExp := QuantityUnitWithNameString(l.Unit())
	rightUnit, rightExp := QuantityUnitWithNameString(r.Unit())

	var unit QuantityUnitAccessor
	// Two unit-less quantities need no conversion.
	if leftUnit == nil && rightUnit == nil {
		return leftVal, rightVal, EmptyQuantityUnit, 1, nil
	}
	if leftUnit != nil && leftUnit.Equal(rightUnit) {
		unit = leftUnit
	} else {
		// Convert both sides to the most granular common unit.
		leftVal, rightVal, unit = ConvertUnitToMostGranular(
			leftVal, leftUnit, leftExp, rightVal, rightUnit, rightExp, true)
		if unit == nil {
			return nil, nil, nil, 1, fmt.Errorf("units are not equal: %s != %s",
				leftUnit, rightUnit)
		}
	}

	exp := leftExp
	switch op {
	case AdditionOp, SubtractionOp:
		if leftExp != rightExp {
			return nil, nil, nil, 1, fmt.Errorf("units exponents are not equal: %d != %d",
				leftExp, rightExp)
		}
	case MultiplicationOp:
		exp = leftExp + rightExp
	case DivisionOp:
		exp = leftExp - rightExp
	}
	if exp < 1 || exp > 3 {
		return nil, nil, nil, 1, fmt.Errorf("resulting unit exponent is invalid (must be between 1 and 3): %d", exp)
	}

	return leftVal, rightVal, unit, exp, nil
}
// Abs returns a new quantity with the absolute value and the same unit.
func (t *quantityType) Abs() DecimalValueAccessor {
	return NewQuantity(t.Value().Abs().(DecimalAccessor), t.Unit())
}
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// isLeapYear reports whether year is a Gregorian leap year
// (divisible by 400, or by 4 but not by 100).
func isLeapYear(year int) bool {
	if year%400 == 0 {
		return true
	}
	return year%4 == 0 && year%100 != 0
}
// fixDay re-anchors a 1-based day-of-year: days after the end of February get
// 60 subtracted, earlier days get 305 added.
// NOTE(review): this appears to shift the year start to around March 1 —
// confirm the intended epoch of the card calendar.
func fixDay(year int, day int) int {
	leap := leapYearInt(year)
	if day > 60-leap {
		return day - 60
	}
	return day + 305
}
// fixYear returns the absolute distance of year from the epoch year 1790.
func fixYear(year int) int {
	delta := year - 1790
	if delta < 0 {
		return -delta
	}
	return delta
}
// suitYear maps a year to a suit index: 13-year blocks cycle through 4 suits.
func suitYear(year int) int {
	return (fixYear(year) / 13) % 4
}

// cardYear maps a year to its card index within the 13-year block.
func cardYear(year int) int {
	return fixYear(year) % 13
}
// leapYearInt returns 1 for a leap year and 0 otherwise, for use in
// day-counting arithmetic.
func leapYearInt(year int) int {
	if !isLeapYear(year) {
		return 0
	}
	return 1
}
// seasons maps a day-of-year to a season index (1, 2, 3 or 0), with leap-year
// adjusted thresholds. Everything beyond the last threshold wraps back to 1.
// The original had an unreachable duplicate tail (`if day <= 367-leap return 1`
// followed by `return 1`), collapsed here into the default case — behavior is
// unchanged for every input.
func seasons(day int, year int) int {
	leap := leapYearInt(year)
	switch {
	case day <= 62-leap:
		return 1
	case day <= 154-leap:
		return 2
	case day <= 247-leap:
		return 3
	case day <= 338-leap:
		return 0
	default:
		return 1
	}
}
// cardMonth maps a (fixed) day-of-year to its month card index; months are
// 28-day blocks cycling through 13 cards.
func cardMonth(day int) int {
	return (day / 28) % 13
}

// suitWeek maps a day to the suit index of its week (13-week suit cycle).
func suitWeek(day int) int {
	week := day / 7
	return (week / 13) % 4
}

// cardWeek maps a day to the card index of its week.
func cardWeek(day int) int {
	week := day / 7
	return week % 13
}

// suitDay maps a day to its suit index; day 0 is the special index 4
// (the empty suit in the callers' suit tables).
func suitDay(day int) int {
	if day != 0 {
		return ((day - 1) / 13) % 4
	}
	return 4
}

// cardDay maps a day to its card index; day 0 is the special index 13
// (the joker in the callers' card tables).
func cardDay(day int) int {
	if day != 0 {
		return (day - 1) % 13
	}
	return 13
}
// feb reports whether day is a valid day of February in the given year.
func feb(day int, year int) bool {
	return day <= 28+leapYearInt(year)
}

// validDate reports whether day/month/year form a valid Gregorian date.
// Year 0 is rejected; any other year (including negative) is accepted.
func validDate(day int, month int, year int) bool {
	if day < 1 || day > 31 || year == 0 || month < 1 || month > 12 {
		return false
	}
	switch month {
	case 1, 3, 5, 7, 8, 10, 12:
		return true
	case 4, 6, 9, 11:
		return day <= 30
	case 2:
		return feb(day, year)
	}
	return false
}
// dayOfYear returns the 1-based day-of-year for a valid date, or 0 when the
// date is invalid.
func dayOfYear(day int, month int, year int) int {
	if !validDate(day, month, year) {
		return 0
	}
	return countDays(day, month, year)
}
// countDays converts a day/month pair into a 1-based day-of-year, adding one
// day for leap years from March onward. Returns 0 for an out-of-range month.
func countDays(day int, month int, year int) int {
	if month < 1 || month > 12 {
		return 0
	}
	// Cumulative days before each month in a non-leap year.
	offsets := [12]int{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334}
	total := day + offsets[month-1]
	if month > 2 {
		total += leapYearInt(year)
	}
	return total
}
// ShortVersion returns the abbreviated card-calendar representation, e.g.
// "1O1O1P2P" (day card+suit, week card+suit, month card+season suit,
// year card+suit). Returns "" for an invalid date.
// NOTE(review): sMonth is built from seasons(day, year) where day is the day
// of month (1-31), but the thresholds inside seasons look like day-of-year
// values — confirm whether seasons(days, year) was intended.
func ShortVersion(day int, month int, year int) string {
	if !validDate(day, month, year) {
		return ""
	}
	cards := [...]string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "Jo", "Jd"}
	suits := [...]string{"O", "P", "C", "E", ""}
	days := fixDay(year, dayOfYear(day, month, year))
	sDay := cards[cardDay(days)] + suits[suitDay(days)]
	sWeek := cards[cardWeek(days)] + suits[suitWeek(days)]
	sMonth := cards[cardMonth(days)] + suits[seasons(day, year)]
	sYear := cards[cardYear(year)] + suits[suitYear(year)]
	output := sDay + sWeek + sMonth + sYear
	return output
}
// LongVersion returns the verbose card-calendar representation in Portuguese
// (output strings are user-facing and intentionally not translated).
// Returns "" for an invalid date.
// NOTE(review): as in ShortVersion, seasons(day, year) is passed the day of
// month rather than the computed day-of-year — confirm intent.
func LongVersion(day int, month int, year int) string {
	if !validDate(day, month, year) {
		return ""
	}
	cards := [...]string{"As", "Dois", "Tres", "Quatro", "Cinco",
		"Seis", "Sete", "Oito", "Nove", "Dez",
		"Valete", "Dama", "Rei", "do Curinga"}
	suites := [...]string{" de ouros", " de paus", " de copas", " de espadas"}
	days := fixDay(year, dayOfYear(day, month, year))
	output := "\n\tDia " + cards[cardDay(days)] + suites[suitDay(days)]
	output += "\n\tSemana " + cards[cardWeek(days)] + suites[suitWeek(days)]
	output += "\n\tMes " + cards[cardMonth(days)] + " estacao" + suites[seasons(day, year)]
	output += "\n\tAno " + cards[cardYear(year)] + suites[suitYear(year)]
	output += "\n\t" + strconv.Itoa(day) + "/" + strconv.Itoa(month) + "/" + strconv.Itoa(year) + " e dia numero " + strconv.Itoa(days)
	return output
}
// header prints the usage instructions for the interactive prompt.
func header() {
	fmt.Println("Entre com dia mes e ano (separados por enter) e precione ctrl-c")
	fmt.Println("\n\tEntre com dia mes e ano (separados por espaco):")
}

// readUserInput reads one line (including the trailing newline) from stdin.
// The read error is deliberately ignored; on failure the partial/empty string
// is returned.
func readUserInput() string {
	reader := bufio.NewReader(os.Stdin)
	text, _ := reader.ReadString('\n')
	return text
}

// showOutput prints both the long and the short card-calendar representations
// of the given date.
func showOutput(day int, month int, year int) {
	fmt.Println("\n\tCalendario de Paciencia de LongVersion")
	fmt.Println("\t---------------------------------")
	fmt.Println(LongVersion(day, month, year))
	fmt.Println("\n\tSimples -- " + ShortVersion(day, month, year))
}
// simpleDate bundles a parsed day/month/year triple.
type simpleDate struct {
	day int
	month int
	year int
}

// clearInput parses "day month year" out of one raw input line.
// Missing or non-numeric fields come back as zero.
func clearInput(input string) simpleDate {
	normalized := strings.Replace(input, "\n", " ", -1)
	fields := strings.Split(normalized, " ")
	if len(fields) < 3 {
		return simpleDate{0, 0, 0}
	}
	d, _ := strconv.Atoi(fields[0])
	m, _ := strconv.Atoi(fields[1])
	y, _ := strconv.Atoi(fields[2])
	return simpleDate{d, m, y}
}
// main reads one date from stdin and prints its card-calendar representation.
func main() {
	header()
	input := readUserInput()
	cleanInput := clearInput(input)
	showOutput(cleanInput.day, cleanInput.month, cleanInput.year)
}
package main
import (
"fmt"
)
// calcFunc is the signature shared by all price calculators.
type calcFunc func(float64) float64

// calcWithTax returns price plus a 20% tax.
func calcWithTax(price float64) float64 {
	tax := price * 0.2
	return price + tax
}

// calcWithoutTax returns the price unchanged.
func calcWithoutTax(price float64) float64 {
	return price
}

// selectCalculator demonstrates functions as return values: prices above 100
// get the taxed calculator, everything else the untaxed one.
func selectCalculator(price float64) calcFunc {
	if price <= 100 {
		return calcWithoutTax
	}
	return calcWithTax
}

// selectCalculator2 behaves exactly like selectCalculator but returns
// anonymous function literals instead of named functions.
func selectCalculator2(price float64) calcFunc {
	if price <= 100 {
		return func(p float64) float64 {
			return p
		}
	}
	return func(p float64) float64 {
		return p + p*0.2
	}
}
// Functions as function arguments
// printPrice prints the product name and its price after applying calculator.
func printPrice(product string, price float64, calculator calcFunc) {
	finalPrice := calculator(price)
	fmt.Println("Product:", product, "Price:", finalPrice)
}
// examplesWithBasicFunctionTypes demonstrates selecting a function value and
// passing it as an argument. Map iteration order is random, so the print
// order varies between runs.
func examplesWithBasicFunctionTypes() {
	products := map[string]float64{
		"Kayak":      275,
		"Lifejacket": 48.95,
	}

	for product, price := range products {
		// Note: This is a variable with a "function type" that will hold a function
		// A function type is known as a **function signature**
		// var calcFunc func(float64) float64
		myCalcFunc := selectCalculator2(price)
		printPrice(product, price, myCalcFunc)
	}
}
// helloWorld returns the fixed greeting "Hello World".
func helloWorld() string {
	const greeting = "Hello World"
	return greeting
}
// examplesWithComparison shows that an unassigned function variable is nil;
// function values may only be compared against nil.
func examplesWithComparison() {
	var myFunc func() string
	fmt.Println(myFunc == nil) // true
	myFunc = helloWorld
	fmt.Println(myFunc == nil) // false
}
// counterFactory returns a closure over a private counter seeded with
// initialValue; each call increments the counter and returns it. The closure
// captures the parameter variable itself, so the state lives as long as the
// returned function.
func counterFactory(initialValue int) func() int {
	return func() int {
		initialValue++
		return initialValue
	}
}
// myFactory returns two mode setters ("a" and "b") and a getter, all closing
// over one shared mode variable initialised to initialMode.
func myFactory(initialMode string) (func(), func(), func() string) {
	mode := initialMode
	// setter builds a closure that assigns the given value to the shared mode.
	setter := func(m string) func() {
		return func() {
			mode = m
		}
	}
	getMode := func() string {
		return mode
	}
	return setter("a"), setter("b"), getMode
}
// myFactory2 returns a closure that reports the value currently behind the
// mode pointer. Because the pointer is captured, writes made after the
// factory call are observed on each invocation.
func myFactory2(mode *string) func() string {
	report := func() string {
		return fmt.Sprintf("Current mode is %q", *mode)
	}
	return report
}
/**
 * Functions defined literally can reference variables from their scope through
 * a feature called **closure**. This demo shows counters, shared-state
 * setters/getters, and a closure over a pointer whose target is mutated
 * between calls.
 */
func examplesWithFunctionClosure() {
	counter := counterFactory(0)
	fmt.Println(counter()) // 1
	fmt.Println(counter()) // 2
	fmt.Println(counter()) // 3

	// Closed variables are evaluated at each invokation of the closure
	changeToA, changeToB, getMode := myFactory("b")
	fmt.Println(getMode()) // b
	changeToA()
	fmt.Println(getMode()) // a
	changeToB()
	fmt.Println(getMode()) // b

	// Closure with pointers
	mode := "a"
	getMode2 := myFactory2(&mode)
	fmt.Println(getMode2()) // Current mode is "a"
	mode = "b"
	fmt.Println(getMode2()) // Current mode is "b"
	mode = "c"
	fmt.Println(getMode2()) // Current mode is "c"
}
// main runs the closure examples; the other demos are kept commented out for
// reference.
func main() {
	// examplesWithBasicFunctionTypes()
	// examplesWithComparison()
	examplesWithFunctionClosure()
}
package simulation
import "github.com/pointlesssoft/godevs/pkg/modeling"
// Simulator wraps a single Atomic model and drives its DEVS simulation cycle.
type Simulator struct {
	AbstractSimulator // It complies the AbstractSimulator interface.
	model modeling.Atomic // Atomic Model associated to the Simulator.
}

// NewSimulator returns a pointer to a new Simulator bound to the given clock
// and atomic model.
func NewSimulator(clock Clock, model modeling.Atomic) *Simulator {
	s := Simulator{NewAbstractSimulator(clock), model}
	return &s
}
// Initialize initializes the atomic Model and sets last time and next time to their initial values:
// tL becomes the current simulation clock time and tN is tL plus the model's
// time advance.
func (s *Simulator) Initialize() {
	s.model.Initialize()
	s.SetTL(s.GetClock().GetTime())
	s.SetTN(s.GetTL() + s.model.TA())
}
// Exit calls to the atomic Model Exit function.
func (s *Simulator) Exit() {
	s.model.Exit()
}

// TA returns the Atomic Model time advance.
func (s *Simulator) TA() float64 {
	return s.model.TA()
}
// Transition checks if the Model has to trigger one of its transition functions and call to the corresponding one.
// A transition fires either when input arrived or when the clock reached tN;
// afterwards tL/tN are advanced using the model's new time advance.
func (s *Simulator) Transition() {
	t := s.GetClock().GetTime()
	isInputEmpty := s.model.IsInputEmpty()
	if !isInputEmpty || t == s.GetTN() {
		// CASE 1: atomic model timed out and no messages were received -> internal delta
		if isInputEmpty {
			s.model.DeltInt()
		} else {
			// e is the time elapsed since the last transition.
			e := t - s.GetTL()
			// CASE 2: both atomic model timed out and messages were received -> confluent delta
			if t == s.GetTN() {
				s.model.DeltCon(e)
				// CASE 3: only messages were received -> external delta
			} else {
				s.model.DeltExt(e)
			}
		}
		s.SetTL(t)
		s.SetTN(t + s.model.TA())
	}
}
// Clear empties every input and output port of the wrapped Atomic model.
func (s *Simulator) Clear() {
	for _, port := range s.model.GetInPorts() {
		port.Clear()
	}
	for _, port := range s.model.GetOutPorts() {
		port.Clear()
	}
}
// Collect invokes the model's output function (Lambda), but only when the
// simulation clock has reached the model's next scheduled event time.
func (s *Simulator) Collect() {
	if s.GetClock().GetTime() == s.GetTN() {
		s.model.Lambda()
	}
}
// GetModel returns the wrapped atomic model as a generic Component.
func (s *Simulator) GetModel() modeling.Component {
	return s.model
}
package packed
// Efficient sequential read/write of packed integers.
// BulkOperationPacked7 specializes the generic BulkOperationPacked codec
// for streams packed at exactly 7 bits per value.
type BulkOperationPacked7 struct {
	*BulkOperationPacked
}
// newBulkOperationPacked7 returns the 7-bits-per-value bulk codec.
func newBulkOperationPacked7() BulkOperation {
	return &BulkOperationPacked7{newBulkOperationPacked(7)}
}
// decodeLongToInt unpacks 7-bit values stored MSB-first across 64-bit
// blocks into int32 values. Each iteration consumes 7 blocks (448 bits)
// and produces exactly 64 values, matching the unrolled generated form.
// NOTE(review): casing differs from the exported DecodeByteToInt — confirm
// against the BulkOperation interface.
func (op *BulkOperationPacked7) decodeLongToInt(blocks []int64, values []int32, iterations int) {
	const bitsPerValue = 7
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		var acc uint64 // low accBits bits are unconsumed data from the last block read
		accBits := 0
		for j := 0; j < 64; j++ {
			if accBits >= bitsPerValue {
				// Whole value is available in the accumulator.
				accBits -= bitsPerValue
				values[valuesOffset] = int32((acc >> uint(accBits)) & 127)
			} else {
				// Value straddles a block boundary: combine the leftover
				// low bits with the top bits of the next block.
				need := bitsPerValue - accBits
				block := uint64(blocks[blocksOffset])
				blocksOffset++
				hi := (acc & ((1 << uint(accBits)) - 1)) << uint(need)
				values[valuesOffset] = int32(hi | (block >> uint(64-need)))
				acc = block
				accBits = 64 - need
			}
			valuesOffset++
		}
	}
}
// DecodeByteToInt unpacks 7-bit values stored MSB-first across bytes into
// int32 values. Each iteration consumes 7 bytes and yields 8 values.
func (op *BulkOperationPacked7) DecodeByteToInt(blocks []byte, values []int32, iterations int) {
	const bitsPerValue = 7
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		var acc uint64 // low accBits bits are unconsumed data from the last byte read
		accBits := 0
		for j := 0; j < 8; j++ {
			if accBits >= bitsPerValue {
				accBits -= bitsPerValue
				values[valuesOffset] = int32((acc >> uint(accBits)) & 127)
			} else {
				// Value straddles a byte boundary.
				need := bitsPerValue - accBits
				b := uint64(blocks[blocksOffset])
				blocksOffset++
				hi := (acc & ((1 << uint(accBits)) - 1)) << uint(need)
				values[valuesOffset] = int32(hi | (b >> uint(8-need)))
				acc = b
				accBits = 8 - need
			}
			valuesOffset++
		}
	}
}
// DecodeLongToLong unpacks 7-bit values stored MSB-first across 64-bit
// blocks into int64 values. Each iteration consumes 7 blocks (448 bits)
// and produces exactly 64 values, matching the unrolled generated form.
func (op *BulkOperationPacked7) DecodeLongToLong(blocks []int64, values []int64, iterations int) {
	const bitsPerValue = 7
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		var acc uint64 // low accBits bits are unconsumed data from the last block read
		accBits := 0
		for j := 0; j < 64; j++ {
			if accBits >= bitsPerValue {
				accBits -= bitsPerValue
				values[valuesOffset] = int64((acc >> uint(accBits)) & 127)
			} else {
				// Value straddles a block boundary.
				need := bitsPerValue - accBits
				block := uint64(blocks[blocksOffset])
				blocksOffset++
				hi := (acc & ((1 << uint(accBits)) - 1)) << uint(need)
				values[valuesOffset] = int64(hi | (block >> uint(64-need)))
				acc = block
				accBits = 64 - need
			}
			valuesOffset++
		}
	}
}
func (op *BulkOperationPacked7) decodeByteToLong(blocks []byte, values []int64, iterations int) {
blocksOffset, valuesOffset := 0, 0
for i := 0; i < iterations; i++ {
byte0 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64(int64(uint8(byte0) >> 1))
valuesOffset++
byte1 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte0&1) << 6) | int64(uint8(byte1)>>2))
valuesOffset++
byte2 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte1&3) << 5) | int64(uint8(byte2)>>3))
valuesOffset++
byte3 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte2&7) << 4) | int64(uint8(byte3)>>4))
valuesOffset++
byte4 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte3&15) << 3) | int64(uint8(byte4)>>5))
valuesOffset++
byte5 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte4&31) << 2) | int64(uint8(byte5)>>6))
valuesOffset++
byte6 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte5&63) << 1) | int64(uint8(byte6)>>7))
valuesOffset++
values[valuesOffset] = int64(int64(byte6) & 127)
valuesOffset++
}
} | core/util/packed/bulkOperation7.go | 0.582135 | 0.6419 | bulkOperation7.go | starcoder |
package input
import (
"bufio"
"io"
"strconv"
"strings"
)
// ToInt is used to convert a stream into an integer. This function takes a stream
// of type io.Reader as input. It returns an integer and nil or 0 and an error if
// one occurred.
func ToInt(stream io.Reader) (int, error) {
scanner := bufio.NewScanner(stream)
scanner.Split(bufio.ScanLines)
var result int
for scanner.Scan() {
value, err := strconv.Atoi(scanner.Text())
if err != nil {
return 0, err
}
result = value
}
return result, nil
}
// ToString is used to convert a stream into a string. This function takes a
// stream of type io.Reader as input. It returns a string and nil or an empty
// string and an error if one occurred.
func ToString(stream io.Reader) (string, error) {
scanner := bufio.NewScanner(stream)
scanner.Split(bufio.ScanLines)
var result string
for scanner.Scan() {
result = scanner.Text()
}
return result, nil
}
// ToIntSlice is used to convert a stream into a slice of integers. This function
// takes a stream of type io.Reader as input. It returns a slice of integers
// and nil or nil and an error if one occurred.
func ToIntSlice(stream io.Reader) ([]int, error) {
scanner := bufio.NewScanner(stream)
scanner.Split(bufio.ScanLines)
var result []int
for scanner.Scan() {
value, err := strconv.Atoi(scanner.Text())
if err != nil {
return nil, err
}
result = append(result, value)
}
return result, scanner.Err()
}
// ToStringSlice is used to convert a stream into a slice of strings. This function
// takes a stream of type io.Reader as input. It returns a slice of strings and
// nil or nil and an error if one occurred.
func ToStringSlice(stream io.Reader) ([]string, error) {
scanner := bufio.NewScanner(stream)
scanner.Split(bufio.ScanLines)
var result []string
for scanner.Scan() {
result = append(result, scanner.Text())
}
return result, scanner.Err()
}
// ToGroupedStringSlice is used to convert a stream into a slice of grouped strings.
// The groups must be separated by empty lines. This function takes a stream of type
// io.Reader as input. It returns a slice of grouped strings and nil or nil and an
// error if one occurred.
func ToGroupedStringSlice(stream io.Reader) ([]string, error) {
scanner := bufio.NewScanner(stream)
scanner.Split(bufio.ScanLines)
var result []string
var group string
for scanner.Scan() {
line := scanner.Text()
if line == "" {
result = append(result, group)
group = ""
} else {
group = strings.TrimSpace(group + " " + line)
}
}
if group != "" {
result = append(result, group)
}
return result, scanner.Err()
}
// ToTileMap is used to convert a stream into a 2d slice of runes. This function
// takes a stream of type io.Reader as input. It returns a 2d slice of runes and
// nil or nil and an error if one occurred.
func ToTileMap(stream io.Reader) ([][]rune, error) {
scanner := bufio.NewScanner(stream)
scanner.Split(bufio.ScanLines)
var result [][]rune
for scanner.Scan() {
result = append(result, []rune(scanner.Text()))
}
return result, scanner.Err()
} | pkg/input/parse.go | 0.816187 | 0.427397 | parse.go | starcoder |
package leaf
import (
"encoding/json"
"math"
"time"
)
// model is the ebisu Beta-distribution prior on recall probability:
// Alpha/Beta are the Beta parameters at elapsed time T (hours).
type model struct {
	Alpha float64
	Beta float64
	T float64
}
// Ebisu implements ebisu SSR algorithm.
type Ebisu struct {
	LastReviewedAt time.Time // When the card was last reviewed.
	Alpha float64 // Beta-prior alpha parameter.
	Beta float64 // Beta-prior beta parameter.
	Interval float64 // Current review interval in hours.
	Historical []IntervalSnapshot // Past intervals, appended on each Advance.
}
// NewEbisu constructs an Ebisu instance with the default prior
// (alpha = beta = 3, 24h interval) whose review is immediately due.
func NewEbisu() *Ebisu {
	return &Ebisu{
		LastReviewedAt: time.Now().Add(-24 * time.Hour),
		Alpha:          3,
		Beta:           3,
		Interval:       24,
		Historical:     make([]IntervalSnapshot, 0),
	}
}
// NextReviewAt returns the next review timestamp for a card: the last
// review time plus the current interval (hours).
func (eb *Ebisu) NextReviewAt() time.Time {
	return eb.LastReviewedAt.Add(time.Duration(eb.Interval) * time.Hour)
}
// Less defines card order for the review: cards with HIGHER predicted
// recall sort first.
// NOTE(review): the type assertion panics if other is not *Ebisu — confirm
// callers only mix identical algorithm types.
func (eb *Ebisu) Less(other SRSAlgorithm) bool {
	return eb.predictRecall() > other.(*Ebisu).predictRecall()
}
// Advance updates the ebisu state after a review with the given rating,
// snapshots the previous interval into Historical, and returns the new
// interval (hours). A rating at or above ratingSuccess counts as a
// successful recall.
func (eb *Ebisu) Advance(rating float64) (interval float64) {
	model := &model{eb.Alpha, eb.Beta, eb.Interval}
	// Hours elapsed since the last review.
	elapsed := float64(time.Since(eb.LastReviewedAt)) / float64(time.Hour)
	proposed := updateRecall(model, rating >= ratingSuccess, float64(elapsed), true, eb.Interval)
	eb.Historical = append(
		eb.Historical,
		IntervalSnapshot{time.Now().Unix(), eb.Interval, 0},
	)
	eb.Alpha = proposed.Alpha
	eb.Beta = proposed.Beta
	eb.Interval = proposed.T
	eb.LastReviewedAt = time.Now()
	return eb.Interval
}
// MarshalJSON implements json.Marshaller for Ebisu by copying the exported
// fields into an anonymous struct (avoids recursing into MarshalJSON).
func (eb *Ebisu) MarshalJSON() ([]byte, error) {
	return json.Marshal(&struct {
		LastReviewedAt time.Time
		Alpha float64
		Beta float64
		Interval float64
		Historical []IntervalSnapshot
	}{eb.LastReviewedAt, eb.Alpha, eb.Beta, eb.Interval, eb.Historical})
}
// UnmarshalJSON implements json.Unmarshaller for Ebisu by decoding into an
// anonymous struct mirror and copying the fields back.
func (eb *Ebisu) UnmarshalJSON(b []byte) error {
	payload := &struct {
		LastReviewedAt time.Time
		Alpha float64
		Beta float64
		Interval float64
		Historical []IntervalSnapshot
	}{}
	if err := json.Unmarshal(b, payload); err != nil {
		return err
	}
	eb.LastReviewedAt = payload.LastReviewedAt
	eb.Alpha = payload.Alpha
	eb.Beta = payload.Beta
	eb.Interval = payload.Interval
	eb.Historical = payload.Historical
	return nil
}
// predictRecall returns the model's expected current recall probability:
// exp(ln B(alpha+delta, beta) - ln B(alpha, beta)) where delta is the
// elapsed time expressed in units of the current interval.
func (eb *Ebisu) predictRecall() float64 {
	elapsed := float64(time.Since(eb.LastReviewedAt)) / float64(time.Hour)
	delta := elapsed / eb.Interval
	return math.Exp(betaln(eb.Alpha+delta, eb.Beta) - betaln(eb.Alpha, eb.Beta))
}
// rebalanceModel re-centers a heavily skewed posterior: when alpha and
// beta differ by more than 2x, the update is redone at the model's rough
// half-life so the parameters stay numerically balanced.
func rebalanceModel(prior *model, result bool, tnow float64, proposed *model) *model {
	skewed := proposed.Alpha > 2*proposed.Beta || proposed.Beta > 2*proposed.Alpha
	if !skewed {
		return proposed
	}
	roughHalflife := modelToPercentileDecay(proposed, 0.5)
	return updateRecall(prior, result, tnow, false, roughHalflife)
}
// updateRecall computes the posterior recall model after a review at
// elapsed time tnow, re-expressed at time horizon tback. It works in
// log-space via betaln to stay stable, matching the reference ebisu
// moment-matching update. When rebalance is true the result may be
// recomputed at the model half-life (see rebalanceModel).
func updateRecall(prior *model, result bool, tnow float64, rebalance bool, tback float64) *model {
	dt := tnow / prior.T
	et := tnow / tback
	var sig2, mean float64
	if result {
		if tback == prior.T {
			// Success at the prior's own horizon has a closed form:
			// simply increment alpha by dt.
			proposed := &model{prior.Alpha + dt, prior.Beta, prior.T}
			if rebalance {
				return rebalanceModel(prior, result, tnow, proposed)
			}
			return proposed
		}
		// First and second log-moments of the success posterior.
		logDenominator := betaln(prior.Alpha+dt, prior.Beta)
		logmean := betaln(prior.Alpha+dt/et*(1+et), prior.Beta) - logDenominator
		logm2 := betaln(prior.Alpha+dt/et*(2+et), prior.Beta) - logDenominator
		mean = math.Exp(logmean)
		// Variance = m2 - mean^2, computed stably in log-space.
		sig2 = subexp(logm2, 2*logmean)
	} else {
		// Failure branch: the normalizer is B(a,b) - B(a+dt,b).
		logDenominator := logsumexp(
			[2]float64{betaln(prior.Alpha, prior.Beta), betaln(prior.Alpha+dt, prior.Beta)},
			[2]float64{1, -1},
		)
		mean = subexp(
			betaln(prior.Alpha+dt/et, prior.Beta)-logDenominator,
			betaln(prior.Alpha+(dt/et)*(et+1), prior.Beta)-logDenominator,
		)
		m2 := subexp(
			betaln(prior.Alpha+2*dt/et, prior.Beta)-logDenominator,
			betaln(prior.Alpha+dt/et*(et+2), prior.Beta)-logDenominator,
		)
		if m2 <= 0 {
			panic("invalid second moment found")
		}
		sig2 = m2 - math.Pow(mean, 2)
	}
	// Guard against numerically degenerate posteriors before refitting.
	if mean <= 0 {
		panic("invalid mean found")
	}
	if sig2 <= 0 {
		panic("invalid variance found")
	}
	// Method-of-moments fit back to a Beta distribution at horizon tback.
	newAlpha, newBeta := meanVarToBeta(mean, sig2)
	proposed := &model{newAlpha, newBeta, tback}
	if rebalance {
		return rebalanceModel(prior, result, tnow, proposed)
	}
	return proposed
}
// modelToPercentileDecay returns the time at which the model's predicted
// recall drops to the given percentile (e.g. 0.5 gives the half-life).
// It brackets the root of log(recall) - log(percentile) in log-time and
// returns the bracket midpoint scaled by the model horizon.
func modelToPercentileDecay(model *model, percentile float64) float64 {
	if percentile < 0 || percentile > 1 {
		panic("percentiles must be between (0, 1) exclusive")
	}
	alpha := model.Alpha
	beta := model.Beta
	t0 := model.T
	logBab := betaln(alpha, beta)
	logPercentile := math.Log(percentile)
	// f is monotonically decreasing in lndelta; its root is the decay time.
	f := func(lndelta float64) float64 {
		logMean := betaln(alpha+math.Exp(lndelta), beta) - logBab
		return logMean - logPercentile
	}
	bracketWidth := 1.0
	blow := -bracketWidth / 2.0
	bhigh := bracketWidth / 2.0
	flow := f(blow)
	fhigh := f(bhigh)
	for {
		if flow < 0 || fhigh < 0 {
			break
		}
		// Move the bracket up.
		blow = bhigh
		flow = fhigh
		bhigh += bracketWidth
		fhigh = f(bhigh)
	}
	for {
		if flow > 0 || fhigh > 0 {
			break
		}
		// Move the bracket down.
		bhigh = blow
		fhigh = flow
		blow -= bracketWidth
		flow = f(blow)
	}
	if !(flow > 0 && fhigh < 0) {
		panic("failed to bracket")
	}
	// Midpoint of the (exponentiated) bracket, rescaled by the horizon.
	return (math.Exp(blow) + math.Exp(bhigh)) / 2 * t0
}
// meanVarToBeta converts a mean and variance into the alpha and beta
// parameters of the matching Beta distribution (method of moments).
func meanVarToBeta(mean, v float64) (alpha, beta float64) {
	common := mean*(1-mean)/v - 1
	alpha = mean * common
	beta = (1 - mean) * common
	return alpha, beta
}
func subexp(x, y float64) float64 {
maxval := math.Max(x, y)
return math.Exp(maxval) * (math.Exp(x-maxval) - math.Exp(y-maxval))
}
func logsumexp(a, b [2]float64) float64 {
aMax := math.Max(a[0], a[1])
sum := b[0] * math.Exp(a[0]-aMax)
sum += b[1] * math.Exp(a[1]-aMax)
return math.Log(sum) + aMax
}
// betaln returns natural logarithm of the Beta function.
func betaln(a, b float64) float64 {
// B(x,y) = Γ(x)Γ(y) / Γ(x+y)
// Therefore log(B(x,y)) = log(Γ(x)) + log(Γ(y)) - log(Γ(x+y))
la, _ := math.Lgamma(a)
lb, _ := math.Lgamma(b)
lab, _ := math.Lgamma(a + b)
return la + lb - lab
} | ebisu.go | 0.860765 | 0.4436 | ebisu.go | starcoder |
package adaptivetable
// AdaptiveTable keeps an ascending-sorted collection of unique uint64
// records with a bounded size. An accepted insert whose sorted position
// falls beyond the acceptance threshold, or that overflows maxSize,
// evicts the current largest value.
type AdaptiveTable struct {
	values             []uint64
	initSize           int
	maxSize            int
	threshold          int
	relativePercentage bool
}

// NewAdaptiveTable builds a table whose initial size, maximum size and
// acceptance threshold all equal initSize.
func NewAdaptiveTable(initSize int) AdaptiveTable {
	return AdaptiveTable{
		initSize:  initSize,
		maxSize:   initSize,
		threshold: initSize,
	}
}

// NewAdaptiveTableComplete builds a table with every knob set explicitly.
// When relativePercentage is true, threshold is interpreted as a percent
// of the current table size instead of an absolute rank.
func NewAdaptiveTableComplete(initSize, maxSize, threshold int, relativePercentage bool) AdaptiveTable {
	return AdaptiveTable{
		initSize:           initSize,
		maxSize:            maxSize,
		threshold:          threshold,
		relativePercentage: relativePercentage,
	}
}

// Size reports how many records the table currently holds.
func (at *AdaptiveTable) Size() int {
	return len(at.values)
}

// IsEmpty reports whether the table holds no records.
func (at *AdaptiveTable) IsEmpty() bool {
	return at.Size() == 0
}

// Min returns the smallest record; it panics on an empty table.
func (at *AdaptiveTable) Min() uint64 {
	return at.values[0]
}

// Max returns the largest record; it panics on an empty table.
func (at *AdaptiveTable) Max() uint64 {
	return at.values[len(at.values)-1]
}

// Pop removes and returns the largest record.
func (at *AdaptiveTable) Pop() uint64 {
	top := at.Max()
	at.values = at.values[:len(at.values)-1]
	return top
}

// Contains reports whether value is recorded, scanning from the largest
// entry downwards and bailing out early thanks to the sort order.
func (at *AdaptiveTable) Contains(value uint64) bool {
	for i := len(at.values) - 1; i >= 0; i-- {
		switch {
		case at.values[i] == value:
			return true
		case at.values[i] < value:
			return false
		}
	}
	return false
}

// IsNewRecord reports whether value would be accepted: it must be absent,
// and either there is room below maxSize or it beats the current maximum.
func (at *AdaptiveTable) IsNewRecord(value uint64) bool {
	hasRoom := len(at.values) < at.maxSize
	return (hasRoom || at.Max() > value) && !at.Contains(value)
}

// currentThreshold resolves the acceptance rank: the absolute threshold,
// or threshold percent of the current size when relativePercentage is set.
func (at *AdaptiveTable) currentThreshold() int {
	if at.relativePercentage {
		return int(float32(len(at.values)*at.threshold) / 100.00)
	}
	return at.threshold
}

// Insert adds value at its sorted position and returns that index, or -1
// when the value is rejected. Accepting a value past the threshold or
// over maxSize evicts the current maximum.
func (at *AdaptiveTable) Insert(value uint64) int {
	if !at.IsNewRecord(value) {
		return -1
	}
	// Append, then bubble the new value down to its sorted slot.
	at.values = append(at.values, value)
	index := len(at.values) - 1
	for index > 0 && at.values[index-1] > at.values[index] {
		at.values[index], at.values[index-1] = at.values[index-1], at.values[index]
		index--
	}
	if index > at.currentThreshold() || at.Size() > at.maxSize {
		at.Pop()
	}
	return index
}

// Values exposes the underlying sorted slice (not a copy).
func (at *AdaptiveTable) Values() []uint64 {
	return at.values
}
package plaid
import (
"encoding/json"
)
// WalletTransactionCounterpartyNumbers The counterparty's bank account numbers
type WalletTransactionCounterpartyNumbers struct {
	Bacs WalletTransactionCounterpartyBACS `json:"bacs"` // UK BACS account numbers (required).
}
// NewWalletTransactionCounterpartyNumbers instantiates a new
// WalletTransactionCounterpartyNumbers with the required bacs numbers set.
func NewWalletTransactionCounterpartyNumbers(bacs WalletTransactionCounterpartyBACS) *WalletTransactionCounterpartyNumbers {
	return &WalletTransactionCounterpartyNumbers{Bacs: bacs}
}
// NewWalletTransactionCounterpartyNumbersWithDefaults instantiates a new
// WalletTransactionCounterpartyNumbers with only defaulted properties set;
// required API fields are left at their zero values.
func NewWalletTransactionCounterpartyNumbersWithDefaults() *WalletTransactionCounterpartyNumbers {
	return &WalletTransactionCounterpartyNumbers{}
}
// GetBacs returns the Bacs field value; a nil receiver yields the zero
// value.
func (o *WalletTransactionCounterpartyNumbers) GetBacs() WalletTransactionCounterpartyBACS {
	if o != nil {
		return o.Bacs
	}
	var zero WalletTransactionCounterpartyBACS
	return zero
}
// GetBacsOk returns a pointer to the Bacs field value and a boolean that
// is false only for a nil receiver.
func (o *WalletTransactionCounterpartyNumbers) GetBacsOk() (*WalletTransactionCounterpartyBACS, bool) {
	if o != nil {
		return &o.Bacs, true
	}
	return nil, false
}
// SetBacs sets the Bacs field value.
func (o *WalletTransactionCounterpartyNumbers) SetBacs(v WalletTransactionCounterpartyBACS) {
	o.Bacs = v
}
// MarshalJSON encodes the object as {"bacs": ...}.
func (o WalletTransactionCounterpartyNumbers) MarshalJSON() ([]byte, error) {
	payload := map[string]interface{}{
		"bacs": o.Bacs,
	}
	return json.Marshal(payload)
}
type NullableWalletTransactionCounterpartyNumbers struct {
value *WalletTransactionCounterpartyNumbers
isSet bool
}
func (v NullableWalletTransactionCounterpartyNumbers) Get() *WalletTransactionCounterpartyNumbers {
return v.value
}
func (v *NullableWalletTransactionCounterpartyNumbers) Set(val *WalletTransactionCounterpartyNumbers) {
v.value = val
v.isSet = true
}
func (v NullableWalletTransactionCounterpartyNumbers) IsSet() bool {
return v.isSet
}
func (v *NullableWalletTransactionCounterpartyNumbers) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableWalletTransactionCounterpartyNumbers(val *WalletTransactionCounterpartyNumbers) *NullableWalletTransactionCounterpartyNumbers {
return &NullableWalletTransactionCounterpartyNumbers{value: val, isSet: true}
}
func (v NullableWalletTransactionCounterpartyNumbers) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableWalletTransactionCounterpartyNumbers) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_wallet_transaction_counterparty_numbers.go | 0.642769 | 0.408572 | model_wallet_transaction_counterparty_numbers.go | starcoder |
package graphs
import (
"errors"
"fmt"
"math"
"github.com/TectusDreamlab/go-common-utils/datastructure/shared"
"github.com/TectusDreamlab/go-common-utils/datastructure/trees"
)
// DirectedWeightedGraph defines a directed weighted graph built on top of
// DirectedGraph, adding per-vertex weighted adjacency lists and scratch
// state (weightTo/edgeTo) used by shortest-path queries.
type DirectedWeightedGraph struct {
	DirectedGraph
	adjacentEdges [][]DirectedWeightedEdge // Outgoing weighted edges per vertex.
	weightTo []float64 // Best-known distance per vertex (Dijkstra scratch).
	edgeTo []*DirectedWeightedEdge // Last edge on the best-known path (Dijkstra scratch).
}
// NewDirectedWeightedGraph initialises a new directed weighted graph with
// vertexCount vertices and empty adjacency lists.
// NOTE(review): the positional literals below depend on the exact field
// order of DirectedGraph/UnDirectedGraph — confirm when those change.
func NewDirectedWeightedGraph(vertexCount int) *DirectedWeightedGraph {
	return &DirectedWeightedGraph{
		DirectedGraph{
			UnDirectedGraph{vertexCount: vertexCount, adjacentVertices: make([][]int, vertexCount)},
			make([]int, vertexCount), nil, nil, nil,
		},
		make([][]DirectedWeightedEdge, vertexCount),
		make([]float64, vertexCount),
		make([]*DirectedWeightedEdge, vertexCount),
	}
}
// GetAdjacentVertices gets all adjacent vertices for a given vertex,
// delegating to the embedded DirectedGraph.
func (d *DirectedWeightedGraph) GetAdjacentVertices(vertex int) ([]int, error) {
	return d.DirectedGraph.GetAdjacentVertices(vertex)
}
// GetAdjacentEdges returns the outgoing weighted edges of vertex, or an
// error when the vertex does not exist.
func (d *DirectedWeightedGraph) GetAdjacentEdges(vertex int) ([]DirectedWeightedEdge, error) {
	if d.isVertexValid(vertex) {
		return d.adjacentEdges[vertex], nil
	}
	return nil, errors.New("vertex not found")
}
// AddEdge inserts a weighted directed edge from -> to, recording it both
// in the embedded unweighted graph and in the weighted adjacency list. It
// returns an error when either endpoint is not a valid vertex.
func (d *DirectedWeightedGraph) AddEdge(from, to int, weight float64) error {
	if !d.isVertexValid(from) || !d.isVertexValid(to) {
		return errors.New("vertex not found")
	}
	d.DirectedGraph.AddEdge(from, to)
	edge := DirectedWeightedEdge{from, to, weight}
	d.adjacentEdges[from] = append(d.adjacentEdges[from], edge)
	return nil
}
// GetEdges returns every edge in the graph, grouped by source vertex.
func (d *DirectedWeightedGraph) GetEdges() []DirectedWeightedEdge {
	all := make([]DirectedWeightedEdge, 0)
	for _, edges := range d.adjacentEdges {
		all = append(all, edges...)
	}
	return all
}
// Reverse returns a new graph with the direction of every edge flipped,
// preserving weights.
func (d *DirectedWeightedGraph) Reverse() *DirectedWeightedGraph {
	rev := NewDirectedWeightedGraph(d.vertexCount)
	for _, edges := range d.adjacentEdges {
		for _, e := range edges {
			rev.AddEdge(e.GetTo(), e.GetFrom(), e.GetWeight())
		}
	}
	return rev
}
// Print renders the graph as a human-readable multi-line string: a header
// with the vertex/edge counts followed by one adjacency line per vertex.
func (d *DirectedWeightedGraph) Print() string {
	out := fmt.Sprintf("Vertex Count: %d, Edge Count: %d\n", d.vertexCount, d.edgeCount)
	for vertex, edges := range d.adjacentEdges {
		out += fmt.Sprintf("Vertex %d: %v\n", vertex, edges)
	}
	return out
}
// DijkstraShortestPath gets the shortest path (mimimum weight) from a start vertex to an end vertex
func (d *DirectedWeightedGraph) DijkstraShortestPath(s, v int) (path []DirectedWeightedEdge, distance float64, err error) {
if !d.isVertexValid(s) || !d.isVertexValid(v) {
err = errors.New("vertext not found")
return
}
d.edgeTo = make([]*DirectedWeightedEdge, d.vertexCount)
d.weightTo = make([]float64, d.vertexCount)
for i := range d.weightTo {
d.weightTo[i] = math.MaxFloat64
}
d.weightTo[s] = float64(0)
indexedPriorityQueue := trees.NewIndexedPriorityQueue(d.vertexCount, trees.HeapTypeMin, shared.Float64Comparator)
indexedPriorityQueue.Insert(s, d.weightTo[s])
for !indexedPriorityQueue.IsEmpty() {
var targetV int
targetV, _, err = indexedPriorityQueue.Pop()
if err != nil {
return
}
var adjs []DirectedWeightedEdge
adjs, err = d.GetAdjacentEdges(targetV)
if err != nil {
return
}
// relax all the edges of this vertex
for _, e := range adjs {
// s -> targetV -> w weight
w := e.GetTo()
weight := d.weightTo[targetV] + e.GetWeight()
if d.weightTo[w] > weight {
d.weightTo[w] = weight
d.edgeTo[w] = &e
if indexedPriorityQueue.Contains(w) {
indexedPriorityQueue.ChangeValue(w, weight)
} else {
indexedPriorityQueue.Insert(w, weight)
}
}
}
}
if d.weightTo[v] == math.MaxFloat64 {
err = errors.New("path not found")
return
}
distance = d.weightTo[v]
e := d.edgeTo[v]
for e != nil {
path = append([]DirectedWeightedEdge{*e}, path...)
e = d.edgeTo[e.GetFrom()]
}
return
} | datastructure/graphs/directed_weighted_graph.go | 0.707203 | 0.678681 | directed_weighted_graph.go | starcoder |
package test
import (
"testing"
"time"
"github.com/libp2p/go-libp2p-core/peer"
pstore "github.com/libp2p/go-libp2p-core/peerstore"
ma "github.com/multiformats/go-multiaddr"
"github.com/textileio/go-textile-core/thread"
tstore "github.com/textileio/go-textile-core/threadstore"
)
// addressBookSuite maps a human-readable test name to the factory that
// builds its test function for a given AddrBook implementation.
// NOTE(review): "ThreadsWithAddresses" points at testThreadsFromddrs,
// whose name looks like a typo for testThreadsFromAddrs.
var addressBookSuite = map[string]func(book tstore.AddrBook) func(*testing.T){
	"AddAddress": testAddAddress,
	"Clear": testClearWorks,
	"SetNegativeTTLClears": testSetNegativeTTLClears,
	"UpdateTTLs": testUpdateTTLs,
	"NilAddrsDontBreak": testNilAddrsDontBreak,
	"AddressesExpire": testAddressesExpire,
	"ClearWithIter": testClearWithIterator,
	"LogsWithAddresses": testLogsWithAddrs,
	"ThreadsWithAddresses": testThreadsFromddrs,
}
// AddrBookFactory creates a fresh AddrBook for a test run along with an
// optional cleanup function (may be nil).
type AddrBookFactory func() (tstore.AddrBook, func())
// AddrBookTest runs the full address-book suite against books produced
// by the given factory: a fresh book is created per suite entry, and the
// cleanup function (when provided) is invoked after the entry finishes.
func AddrBookTest(t *testing.T, factory AddrBookFactory) {
	for name, makeTest := range addressBookSuite {
		// Fresh book for this entry.
		book, cleanup := factory()
		t.Run(name, makeTest(book))
		if cleanup != nil {
			cleanup()
		}
	}
}
// testAddAddress exercises AddAddr/AddAddrs semantics: idempotence, and
// the rule that re-adding an address may extend but never shorten its TTL.
func testAddAddress(ab tstore.AddrBook) func(*testing.T) {
	return func(t *testing.T) {
		tid := thread.NewIDV1(thread.Raw, 24)
		t.Run("add a single address", func(t *testing.T) {
			id := GeneratePeerIDs(1)[0]
			addrs := GenerateAddrs(1)
			check(t, ab.AddAddr(tid, id, addrs[0], time.Hour))
			AssertAddressesEqual(t, addrs, checkedAddrs(t, ab, tid, id))
		})
		t.Run("idempotent add single address", func(t *testing.T) {
			id := GeneratePeerIDs(1)[0]
			addrs := GenerateAddrs(1)
			check(t, ab.AddAddr(tid, id, addrs[0], time.Hour))
			check(t, ab.AddAddr(tid, id, addrs[0], time.Hour))
			AssertAddressesEqual(t, addrs, checkedAddrs(t, ab, tid, id))
		})
		t.Run("add multiple addresses", func(t *testing.T) {
			id := GeneratePeerIDs(1)[0]
			addrs := GenerateAddrs(3)
			check(t, ab.AddAddrs(tid, id, addrs, time.Hour))
			AssertAddressesEqual(t, addrs, checkedAddrs(t, ab, tid, id))
		})
		t.Run("idempotent add multiple addresses", func(t *testing.T) {
			id := GeneratePeerIDs(1)[0]
			addrs := GenerateAddrs(3)
			check(t, ab.AddAddrs(tid, id, addrs, time.Hour))
			check(t, ab.AddAddrs(tid, id, addrs, time.Hour))
			AssertAddressesEqual(t, addrs, checkedAddrs(t, ab, tid, id))
		})
		t.Run("adding an existing address with a later expiration extends its ttl", func(t *testing.T) {
			id := GeneratePeerIDs(1)[0]
			addrs := GenerateAddrs(3)
			check(t, ab.AddAddrs(tid, id, addrs, time.Second))
			// same address as before but with a higher TTL
			check(t, ab.AddAddrs(tid, id, addrs[2:], time.Hour))
			// after the initial TTL has expired, check that only the third address is present.
			time.Sleep(1200 * time.Millisecond)
			AssertAddressesEqual(t, addrs[2:], checkedAddrs(t, ab, tid, id))
			// make sure we actually set the TTL
			check(t, ab.UpdateAddrs(tid, id, time.Hour, 0))
			AssertAddressesEqual(t, nil, checkedAddrs(t, ab, tid, id))
		})
		t.Run("adding an existing address with an earlier expiration never reduces the expiration", func(t *testing.T) {
			id := GeneratePeerIDs(1)[0]
			addrs := GenerateAddrs(3)
			check(t, ab.AddAddrs(tid, id, addrs, time.Hour))
			// same address as before but with a lower TTL
			check(t, ab.AddAddrs(tid, id, addrs[2:], time.Second))
			// after the initial TTL has expired, check that all three addresses are still present (i.e. the TTL on
			// the modified one was not shortened).
			time.Sleep(2100 * time.Millisecond)
			AssertAddressesEqual(t, addrs, checkedAddrs(t, ab, tid, id))
		})
		t.Run("adding an existing address with an earlier expiration never reduces the TTL", func(t *testing.T) {
			id := GeneratePeerIDs(1)[0]
			addrs := GenerateAddrs(1)
			check(t, ab.AddAddrs(tid, id, addrs, 4*time.Second))
			// 4 seconds left
			time.Sleep(3 * time.Second)
			// 1 second left
			check(t, ab.AddAddrs(tid, id, addrs, 3*time.Second))
			// 3 seconds left
			// BUG FIX: the original called time.Sleep(2), i.e. 2 nanoseconds,
			// which skipped the intended 2-second wait entirely.
			time.Sleep(2 * time.Second)
			// 1 second left.
			// We still have the address.
			AssertAddressesEqual(t, addrs, checkedAddrs(t, ab, tid, id))
			// The TTL wasn't reduced
			check(t, ab.UpdateAddrs(tid, id, 4*time.Second, 0))
			AssertAddressesEqual(t, nil, checkedAddrs(t, ab, tid, id))
		})
	}
}
// testClearWorks verifies that ClearAddrs removes exactly the addresses
// of the targeted log and leaves other logs untouched.
func testClearWorks(ab tstore.AddrBook) func(t *testing.T) {
	return func(t *testing.T) {
		tid := thread.NewIDV1(thread.Raw, 24)
		peers := GeneratePeerIDs(2)
		all := GenerateAddrs(5)
		// Split five addresses across the two logs (3 + 2).
		first, second := all[0:3], all[3:]
		check(t, ab.AddAddrs(tid, peers[0], first, time.Hour))
		check(t, ab.AddAddrs(tid, peers[1], second, time.Hour))
		AssertAddressesEqual(t, first, checkedAddrs(t, ab, tid, peers[0]))
		AssertAddressesEqual(t, second, checkedAddrs(t, ab, tid, peers[1]))
		// Clearing the first log must not disturb the second.
		check(t, ab.ClearAddrs(tid, peers[0]))
		AssertAddressesEqual(t, nil, checkedAddrs(t, ab, tid, peers[0]))
		AssertAddressesEqual(t, second, checkedAddrs(t, ab, tid, peers[1]))
		// Clearing the second log empties the book.
		check(t, ab.ClearAddrs(tid, peers[1]))
		AssertAddressesEqual(t, nil, checkedAddrs(t, ab, tid, peers[0]))
		AssertAddressesEqual(t, nil, checkedAddrs(t, ab, tid, peers[1]))
	}
}
// testSetNegativeTTLClears verifies that setting an address with a
// negative TTL removes it from the book.
func testSetNegativeTTLClears(ab tstore.AddrBook) func(t *testing.T) {
	return func(t *testing.T) {
		tid := thread.NewIDV1(thread.Raw, 24)
		id := GeneratePeerIDs(1)[0]
		addrs := GenerateAddrs(100)
		check(t, ab.SetAddrs(tid, id, addrs, time.Hour))
		AssertAddressesEqual(t, addrs, checkedAddrs(t, ab, tid, id))
		// remove two addresses.
		check(t, ab.SetAddr(tid, id, addrs[50], -1))
		check(t, ab.SetAddr(tid, id, addrs[75], -1))
		// calculate the survivors
		// NOTE: these appends shift over the shared backing array of addrs,
		// mutating it; that is safe here only because addrs is not read again.
		// After removing index 50, the element originally at index 75 sits at
		// index 74, hence the boundaries of the second removal below.
		survivors := append(addrs[0:50], addrs[51:]...)
		survivors = append(survivors[0:74], survivors[75:]...)
		AssertAddressesEqual(t, survivors, checkedAddrs(t, ab, tid, id))
	}
}
// testUpdateTTLs verifies UpdateAddrs: updating a log with no addresses
// is a harmless no-op, and updating only rewrites the TTL of addresses
// whose current TTL matches the given old TTL.
func testUpdateTTLs(ab tstore.AddrBook) func(t *testing.T) {
	return func(t *testing.T) {
		tid := thread.NewIDV1(thread.Raw, 24)
		t.Run("update ttl of log with no addrs", func(t *testing.T) {
			id := GeneratePeerIDs(1)[0]
			// Shouldn't panic.
			check(t, ab.UpdateAddrs(tid, id, time.Hour, time.Minute))
		})
		t.Run("update ttls successfully", func(t *testing.T) {
			ids := GeneratePeerIDs(2)
			addrs1, addrs2 := GenerateAddrs(2), GenerateAddrs(2)
			// set two keys with different ttls for each log.
			check(t, ab.SetAddr(tid, ids[0], addrs1[0], time.Hour))
			check(t, ab.SetAddr(tid, ids[0], addrs1[1], time.Minute))
			check(t, ab.SetAddr(tid, ids[1], addrs2[0], time.Hour))
			check(t, ab.SetAddr(tid, ids[1], addrs2[1], time.Minute))
			// Sanity check.
			AssertAddressesEqual(t, addrs1, checkedAddrs(t, ab, tid, ids[0]))
			AssertAddressesEqual(t, addrs2, checkedAddrs(t, ab, tid, ids[1]))
			// Will only affect addrs1[0] (the only entry with a 1h TTL in log 1).
			// Badger does not support subsecond TTLs.
			// https://github.com/dgraph-io/badger/issues/339
			check(t, ab.UpdateAddrs(tid, ids[0], time.Hour, 1*time.Second))
			// No immediate effect.
			AssertAddressesEqual(t, addrs1, checkedAddrs(t, ab, tid, ids[0]))
			AssertAddressesEqual(t, addrs2, checkedAddrs(t, ab, tid, ids[1]))
			// After a wait, addrs[0] is gone.
			time.Sleep(1500 * time.Millisecond)
			AssertAddressesEqual(t, addrs1[1:2], checkedAddrs(t, ab, tid, ids[0]))
			AssertAddressesEqual(t, addrs2, checkedAddrs(t, ab, tid, ids[1]))
			// Will only affect addrs2[0].
			check(t, ab.UpdateAddrs(tid, ids[1], time.Hour, 1*time.Second))
			// No immediate effect.
			AssertAddressesEqual(t, addrs1[1:2], checkedAddrs(t, ab, tid, ids[0]))
			AssertAddressesEqual(t, addrs2, checkedAddrs(t, ab, tid, ids[1]))
			time.Sleep(1500 * time.Millisecond)
			// First addrs is gone in both.
			AssertAddressesEqual(t, addrs1[1:], checkedAddrs(t, ab, tid, ids[0]))
			AssertAddressesEqual(t, addrs2[1:], checkedAddrs(t, ab, tid, ids[1]))
		})
	}
}
// testNilAddrsDontBreak verifies that passing a nil multiaddr to SetAddr
// and AddAddr neither errors nor panics.
func testNilAddrsDontBreak(ab tstore.AddrBook) func(t *testing.T) {
	return func(t *testing.T) {
		tid := thread.NewIDV1(thread.Raw, 24)
		p := GeneratePeerIDs(1)[0]
		check(t, ab.SetAddr(tid, p, nil, time.Hour))
		check(t, ab.AddAddr(tid, p, nil, time.Hour))
	}
}
// testAddressesExpire removes addresses one at a time by re-setting them
// with a tiny TTL, verifying after each expiry that only the intended
// address disappeared and the other log is untouched.
func testAddressesExpire(ab tstore.AddrBook) func(t *testing.T) {
	return func(t *testing.T) {
		tid := thread.NewIDV1(thread.Raw, 24)
		ids := GeneratePeerIDs(2)
		addrs1 := GenerateAddrs(3)
		addrs2 := GenerateAddrs(2)
		check(t, ab.AddAddrs(tid, ids[0], addrs1, time.Hour))
		check(t, ab.AddAddrs(tid, ids[1], addrs2, time.Hour))
		AssertAddressesEqual(t, addrs1, checkedAddrs(t, ab, tid, ids[0]))
		AssertAddressesEqual(t, addrs2, checkedAddrs(t, ab, tid, ids[1]))
		// Re-adding with a longer TTL must keep everything present.
		check(t, ab.AddAddrs(tid, ids[0], addrs1, 2*time.Hour))
		check(t, ab.AddAddrs(tid, ids[1], addrs2, 2*time.Hour))
		AssertAddressesEqual(t, addrs1, checkedAddrs(t, ab, tid, ids[0]))
		AssertAddressesEqual(t, addrs2, checkedAddrs(t, ab, tid, ids[1]))
		// Expire addrs1[0] only.
		check(t, ab.SetAddr(tid, ids[0], addrs1[0], 100*time.Microsecond))
		<-time.After(100 * time.Millisecond)
		AssertAddressesEqual(t, addrs1[1:3], checkedAddrs(t, ab, tid, ids[0]))
		AssertAddressesEqual(t, addrs2, checkedAddrs(t, ab, tid, ids[1]))
		// Expire addrs1[2] only.
		check(t, ab.SetAddr(tid, ids[0], addrs1[2], 100*time.Microsecond))
		<-time.After(100 * time.Millisecond)
		AssertAddressesEqual(t, addrs1[1:2], checkedAddrs(t, ab, tid, ids[0]))
		AssertAddressesEqual(t, addrs2, checkedAddrs(t, ab, tid, ids[1]))
		// Expire addrs2[0] only.
		check(t, ab.SetAddr(tid, ids[1], addrs2[0], 100*time.Microsecond))
		<-time.After(100 * time.Millisecond)
		AssertAddressesEqual(t, addrs1[1:2], checkedAddrs(t, ab, tid, ids[0]))
		AssertAddressesEqual(t, addrs2[1:], checkedAddrs(t, ab, tid, ids[1]))
		// Expire addrs2[1]; log 2 is now empty.
		check(t, ab.SetAddr(tid, ids[1], addrs2[1], 100*time.Microsecond))
		<-time.After(100 * time.Millisecond)
		AssertAddressesEqual(t, addrs1[1:2], checkedAddrs(t, ab, tid, ids[0]))
		AssertAddressesEqual(t, nil, checkedAddrs(t, ab, tid, ids[1]))
		// Expire the last remaining address; both logs are now empty.
		check(t, ab.SetAddr(tid, ids[0], addrs1[1], 100*time.Microsecond))
		<-time.After(100 * time.Millisecond)
		AssertAddressesEqual(t, nil, checkedAddrs(t, ab, tid, ids[0]))
		AssertAddressesEqual(t, nil, checkedAddrs(t, ab, tid, ids[1]))
	}
}
// testClearWithIterator verifies ClearAddrs on logs that were written but
// never fetched (and therefore may not be cached), by counting the total
// number of stored addresses after each clear.
func testClearWithIterator(ab tstore.AddrBook) func(t *testing.T) {
	return func(t *testing.T) {
		tid := thread.NewIDV1(thread.Raw, 24)
		ids := GeneratePeerIDs(2)
		addrs := GenerateAddrs(100)
		// Split the 100 addresses across two logs, 50 each.
		check(t, ab.AddAddrs(tid, ids[0], addrs[:50], pstore.PermanentAddrTTL))
		check(t, ab.AddAddrs(tid, ids[1], addrs[50:], pstore.PermanentAddrTTL))
		total := func() int {
			return len(checkedAddrs(t, ab, tid, ids[0])) + len(checkedAddrs(t, ab, tid, ids[1]))
		}
		if total() != 100 {
			t.Fatal("expected tstore to contain both logs with all their maddrs")
		}
		// Since we don't fetch these logs, they won't be present in cache.
		check(t, ab.ClearAddrs(tid, ids[0]))
		if total() != 50 {
			t.Fatal("expected tstore to contain only addrs of log 2")
		}
		check(t, ab.ClearAddrs(tid, ids[1]))
		if total() != 0 {
			t.Fatal("expected tstore to contain no addresses")
		}
	}
}
// testLogsWithAddrs verifies LogsWithAddrs on an empty book (no logs) and
// after populating two logs (exactly two logs reported).
func testLogsWithAddrs(ab tstore.AddrBook) func(t *testing.T) {
	return func(t *testing.T) {
		tid := thread.NewIDV1(thread.Raw, 24)
		// cannot run in parallel as the store is modified.
		// go runs sequentially in the specified order
		// see https://blog.golang.org/subtests
		t.Run("empty addrbook", func(t *testing.T) {
			if logs, err := ab.LogsWithAddrs(tid); err != nil || len(logs) != 0 {
				t.Fatal("expected to find no logs without errors")
			}
		})
		t.Run("non-empty addrbook", func(t *testing.T) {
			ids := GeneratePeerIDs(2)
			addrs := GenerateAddrs(10)
			err := ab.AddAddrs(tid, ids[0], addrs[:5], pstore.PermanentAddrTTL)
			check(t, err)
			err = ab.AddAddrs(tid, ids[1], addrs[5:], pstore.PermanentAddrTTL)
			check(t, err)
			if logs, err := ab.LogsWithAddrs(tid); err != nil || len(logs) != 2 {
				t.Fatal("expected to find 2 logs without errors")
			}
		})
	}
}
// testThreadsFromddrs verifies ThreadsFromAddrs on an empty book and after
// populating three threads. (The function name is kept as-is because
// addressBookSuite references it; it looks like a typo for
// testThreadsFromAddrs.)
func testThreadsFromddrs(ab tstore.AddrBook) func(t *testing.T) {
	return func(t *testing.T) {
		// cannot run in parallel as the store is modified.
		// go runs sequentially in the specified order
		// see https://blog.golang.org/subtests
		t.Run("empty addrbook", func(t *testing.T) {
			if logs, err := ab.ThreadsFromAddrs(); err != nil || len(logs) != 0 {
				// FIX: corrected "witout" typo in the failure message.
				t.Fatal("expected to find no threads without errors")
			}
		})
		t.Run("non-empty addrbook", func(t *testing.T) {
			tids := make([]thread.ID, 3)
			for i := range tids {
				tids[i] = thread.NewIDV1(thread.Raw, 24)
				ids := GeneratePeerIDs(2)
				addrs := GenerateAddrs(4)
				err := ab.AddAddrs(tids[i], ids[0], addrs[:2], pstore.PermanentAddrTTL)
				check(t, err)
				err = ab.AddAddrs(tids[i], ids[1], addrs[2:], pstore.PermanentAddrTTL)
				check(t, err)
			}
			if threads, err := ab.ThreadsFromAddrs(); err != nil || len(threads) != len(tids) {
				t.Fatalf("expected to find %d threads without errors, got %d with err: %v", len(tids), len(threads), err)
			}
		})
	}
}
func checkedAddrs(t *testing.T, ab tstore.AddrBook, tid thread.ID, id peer.ID) []ma.Multiaddr {
addrs, err := ab.Addrs(tid, id)
if err != nil {
t.Fatal("error when getting addresses")
}
return addrs
} | test/addr_book_suite.go | 0.513425 | 0.535463 | addr_book_suite.go | starcoder |
package plan
import "github.com/wolffcm/flux/values"
// EmptyBounds is a time range containing only a single point.
// It is the canonical "empty" result returned by bounds operations.
var EmptyBounds = &Bounds{
	Start: values.Time(0),
	Stop: values.Time(0),
}

// Bounds is a range of time.
// The range is half-open: Start is inclusive and Stop is exclusive
// (see Contains).
type Bounds struct {
	Start values.Time
	Stop values.Time
}

// BoundsAwareProcedureSpec is any procedure
// that modifies the time bounds of its data.
type BoundsAwareProcedureSpec interface {
	// TimeBounds returns the bounds this procedure produces, given the
	// (possibly nil) combined bounds of its predecessors.
	TimeBounds(predecessorBounds *Bounds) *Bounds
}
// ComputeBounds computes the time bounds for a
// plan node from the bounds of its predecessors: the union of every
// non-nil predecessor bounds, optionally transformed by the node's own
// procedure spec when it is bounds-aware.
func ComputeBounds(node Node) error {
	var bounds *Bounds
	for _, pred := range node.Predecessors() {
		pb := pred.Bounds()
		if pb == nil {
			continue
		}
		if bounds == nil {
			bounds = pb
		}
		// Note: the first non-nil predecessor is unioned with itself,
		// matching the original behavior (this also normalizes empty
		// bounds to EmptyBounds).
		bounds = bounds.Union(pb)
	}
	if s, ok := node.ProcedureSpec().(BoundsAwareProcedureSpec); ok {
		bounds = s.TimeBounds(bounds)
	}
	node.SetBounds(bounds)
	return nil
}
// IsEmpty reports whether the given bounds contain at most a single point,
// i.e. the half-open interval [Start, Stop) holds no time values.
func (b *Bounds) IsEmpty() bool {
	return b.Start >= b.Stop
}

// Contains reports whether a given time is contained within the time range.
// Start is inclusive; Stop is exclusive.
func (b *Bounds) Contains(t values.Time) bool {
	return t >= b.Start && t < b.Stop
}

// Overlaps reports whether two given bounds have overlapping time ranges.
func (b *Bounds) Overlaps(o *Bounds) bool {
	// Overlap requires one interval to contain an endpoint of the other.
	// The extra o.Stop > b.Start guard excludes the case where o ends
	// exactly where b starts (adjacent, not overlapping).
	return b.Contains(o.Start) ||
		(b.Contains(o.Stop) && o.Stop > b.Start) ||
		o.Contains(b.Start)
}
// Union returns the smallest bounds which contain both input bounds.
// It returns empty bounds if one of the input bounds are empty.
func (b *Bounds) Union(o *Bounds) *Bounds {
if b.IsEmpty() || o.IsEmpty() {
return EmptyBounds
}
u := new(Bounds)
u.Start = b.Start
if o.Start < b.Start {
u.Start = o.Start
}
u.Stop = b.Stop
if o.Stop > b.Stop {
u.Stop = o.Stop
}
return u
}
// Intersect returns the intersection of two bounds.
// It returns empty bounds if one of the input bounds are empty.
func (b *Bounds) Intersect(o *Bounds) *Bounds {
if b.IsEmpty() || o.IsEmpty() || !b.Overlaps(o) {
return EmptyBounds
}
i := new(Bounds)
i.Start = b.Start
if o.Start > b.Start {
i.Start = o.Start
}
i.Stop = b.Stop
if o.Stop < b.Stop {
i.Stop = o.Stop
}
return i
}
// Shift moves the start and stop values of a time range by a specified duration
func (b *Bounds) Shift(d values.Duration) *Bounds {
return &Bounds{
Start: b.Start.Add(d),
Stop: b.Stop.Add(d),
}
} | plan/bounds.go | 0.845465 | 0.461866 | bounds.go | starcoder |
package dependencygraph2
import (
"context"
"fmt"
"math"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/capture"
)
// Node represents a node in the dependency graph, and holds data about the
// associated command or memory observation.
type Node interface {
	// dependencyNode is an unexported marker method that restricts
	// implementations of Node to this package.
	dependencyNode()
}

// CmdNode is a dependency node corresponding to an API call
type CmdNode struct {
	Index api.SubCmdIdx
	CmdFlags api.CmdFlags
}

func (CmdNode) dependencyNode() {}

// ObsNode is a dependency node corresponding to a memory observation
type ObsNode struct {
	CmdObservation api.CmdObservation
	CmdID api.CmdID
	IsWrite bool
	Index int
}

func (ObsNode) dependencyNode() {}

// NodeAccesses groups the access records (fragment, memory and forward)
// for a single node, together with its parent node ID and the IDs of any
// associated initial-command nodes.
type NodeAccesses struct {
	FragmentAccesses []FragmentAccess
	MemoryAccesses []MemoryAccess
	ForwardAccesses []ForwardAccess
	ParentNode NodeID
	InitCmdNodes []NodeID
}
// DependencyGraphConfig describes what sort of data to store in a
// dependency graph and how its nodes are created.
type DependencyGraphConfig struct {
	// MergeSubCmdNodes indicates whether the graph should have one node per
	// command (true), or a separate node for each subcommand (false)
	MergeSubCmdNodes bool
	// IncludeInitialCommands indicates whether nodes should be created for
	// the initial (state rebuild) commands
	IncludeInitialCommands bool
	// ReverseDependencies indicates whether reverse edges should be created
	ReverseDependencies bool
	// SaveNodeAccesses indicates whether per-node access data
	// (NodeAccesses) should be retained in the graph
	SaveNodeAccesses bool
}

// NodeID identifies a node in a dependency graph
type NodeID uint32

// NodeNoID is the sentinel NodeID meaning "no such node".
const NodeNoID = NodeID(math.MaxUint32)
// NodeIDSorter is a structure to use for sorting NodeIDs in the sort package
type NodeIDSorter struct {
	Nodes []NodeID
}

// Len returns the length of the node list
func (s *NodeIDSorter) Len() int {
	return len(s.Nodes)
}

// Less returns true if the element at index i is less than the element at index j
func (s *NodeIDSorter) Less(i, j int) bool {
	return s.Nodes[i] < s.Nodes[j]
}

// Swap swaps the locations of 2 nodes in the list
func (s *NodeIDSorter) Swap(i, j int) {
	s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
}
// DependencyGraph stores the dependencies among api calls and memory observations,
type DependencyGraph interface {
	// NumNodes returns the number of nodes in the graph
	NumNodes() int
	// NumDependencies returns the number of dependencies (edges) in the graph
	NumDependencies() uint64
	// GetNode returns the node data associated with the given NodeID
	GetNode(NodeID) Node
	// GetCmdNodeID returns the NodeID associated with the given (sub)command
	GetCmdNodeID(api.CmdID, api.SubCmdIdx) NodeID
	// GetCmdAncestorNodeIDs returns the NodeIDs associated with the ancestors of the
	// given subcommand.
	GetCmdAncestorNodeIDs(api.CmdID, api.SubCmdIdx) []NodeID
	// ForeachCmd iterates over all API calls in the graph.
	// If IncludeInitialCommands is true, this includes the initial commands
	// which reconstruct the initial state.
	// CmdIDs for initial commands are:
	//   CmdID(0).Derived(), CmdID(1).Derived(), ...
	// Whether or not IncludeInitialCommands is true, the CmdIDs for captured
	// commands are: 0, 1, 2, ...
	ForeachCmd(ctx context.Context,
		cb func(context.Context, api.CmdID, api.Cmd) error) error
	// ForeachNode iterates over all nodes in the graph in chronological order.
	// I.e., the following order:
	//   * For each initial command
	//     * Read observation nodes for this command
	//     * command node
	//     * Write observation nodes for this command
	//   * For each (non-initial) command
	//     * Read observation nodes for this command
	//     * command node
	//     * Write observation nodes for this command
	ForeachNode(cb func(NodeID, Node) error) error
	// ForeachDependency iterates over all pairs (src, tgt), where src depends on tgt
	ForeachDependency(cb func(NodeID, NodeID) error) error
	// ForeachDependencyFrom iterates over all the nodes tgt, where src depends on tgt
	ForeachDependencyFrom(src NodeID, cb func(NodeID) error) error
	// ForeachDependencyTo iterates over all the nodes src, where src depends on tgt.
	// If Config().ReverseDependencies is false, this will return an error.
	ForeachDependencyTo(tgt NodeID, cb func(NodeID) error) error
	// Capture returns the capture whose dependencies are stored in this graph
	Capture() *capture.GraphicsCapture
	// GetUnopenedForwardDependencies returns the commands that have dependencies that
	// are not part of the capture.
	GetUnopenedForwardDependencies() []api.CmdID
	// GetCommand returns the command identified by the given CmdID
	GetCommand(api.CmdID) api.Cmd
	// NumInitialCommands returns the number of initial commands
	// (the commands needed to reconstruct the initial state before the
	// first command in the capture)
	NumInitialCommands() int
	// GetNodeAccesses returns the recorded accesses of the given node.
	GetNodeAccesses(NodeID) NodeAccesses
	// Config returns the config used to create this graph
	Config() DependencyGraphConfig
}
// obsNodeIDs records where a command's read and write observation nodes
// start in the node list.
type obsNodeIDs struct {
	readNodeIDStart NodeID
	writeNodeIDStart NodeID
}

// dependencyGraph is the concrete implementation of DependencyGraph.
type dependencyGraph struct {
	capture *capture.GraphicsCapture
	// cmdNodeIDs maps (cmdID, subCmdIdx...) paths to NodeIDs.
	cmdNodeIDs *api.SubCmdIdxTrie
	// initialCommands reconstruct the initial state (see NumInitialCommands).
	initialCommands []api.Cmd
	nodes []Node
	numDependencies uint64
	// dependenciesFrom[src] lists, in strictly increasing order, the nodes
	// that src depends on (see addDependency).
	dependenciesFrom [][]NodeID
	// dependenciesTo[tgt] lists the nodes that depend on tgt; built by
	// buildDependenciesTo (ForeachDependencyTo requires
	// config.ReverseDependencies).
	dependenciesTo [][]NodeID
	// nodeAccesses is populated only when config.SaveNodeAccesses is set.
	nodeAccesses []NodeAccesses
	unopenedForwardDependencies []api.CmdID
	stateRefs map[api.RefID]RefFrag
	config DependencyGraphConfig
}
// newDependencyGraph constructs a new dependency graph over the given
// capture, initial commands and pre-built node list, registering each
// command node's index in the trie so GetCmdNodeID can resolve it.
// Note: ctx is currently unused.
func newDependencyGraph(ctx context.Context, config DependencyGraphConfig,
	c *capture.GraphicsCapture, initialCmds []api.Cmd, nodes []Node) *dependencyGraph {
	g := &dependencyGraph{
		capture: c,
		cmdNodeIDs: new(api.SubCmdIdxTrie),
		initialCommands: initialCmds,
		config: config,
		nodes: nodes,
		dependenciesFrom: make([][]NodeID, len(nodes)),
	}
	// Index the command nodes for fast (cmdID, subCmdIdx) -> NodeID lookup.
	for i, n := range nodes {
		if c, ok := n.(CmdNode); ok {
			g.cmdNodeIDs.SetValue(c.Index, (NodeID)(i))
		}
	}
	return g
}
// NumNodes returns the number of nodes in the graph
func (g *dependencyGraph) NumNodes() int {
	return len(g.nodes)
}

// NumDependencies returns the number of dependencies (edges) in the graph
func (g *dependencyGraph) NumDependencies() uint64 {
	return g.numDependencies
}

// GetNode returns the node data associated with the given NodeID,
// or nil when the ID is out of range.
func (g *dependencyGraph) GetNode(nodeID NodeID) Node {
	if nodeID >= NodeID(len(g.nodes)) {
		return nil
	}
	return g.nodes[nodeID]
}

// GetCmdNodeID returns the NodeID associated with the given (sub)command,
// or NodeNoID when the (sub)command has no node in the trie.
func (g *dependencyGraph) GetCmdNodeID(cmdID api.CmdID, idx api.SubCmdIdx) NodeID {
	fullIdx := append(api.SubCmdIdx{(uint64)(cmdID)}, idx...)
	x := g.cmdNodeIDs.Value(fullIdx)
	if x != nil {
		return x.(NodeID)
	}
	return NodeNoID
}

// GetCmdAncestorNodeIDs returns the NodeIDs associated with the ancestors of the
// given subcommand. Ancestors with no corresponding node map to NodeNoID.
func (g *dependencyGraph) GetCmdAncestorNodeIDs(cmdID api.CmdID, idx api.SubCmdIdx) []NodeID {
	fullIdx := append(api.SubCmdIdx{(uint64)(cmdID)}, idx...)
	values := g.cmdNodeIDs.Values(fullIdx)
	nodeIDs := make([]NodeID, len(values))
	for i, v := range values {
		if v != nil {
			nodeIDs[i] = v.(NodeID)
		} else {
			nodeIDs[i] = NodeNoID
		}
	}
	return nodeIDs
}
// ForeachCmd iterates over all API calls in the graph.
// If IncludeInitialCommands is true, this includes the initial commands
// which reconstruct the initial state.
// CmdIDs for initial commands are:
//   CmdID(0).Derived(), CmdID(1).Derived(), ...
// Whether or not IncludeInitialCommands is true, the CmdIDs for captured
// commands are: 0, 1, 2, ...
func (g *dependencyGraph) ForeachCmd(ctx context.Context, cb func(context.Context, api.CmdID, api.Cmd) error) error {
	if g.config.IncludeInitialCommands {
		// Wrap the callback so initial commands are reported with derived IDs.
		cbDerived := func(ctx context.Context, cmdID api.CmdID, cmd api.Cmd) error {
			return cb(ctx, cmdID.Derived(), cmd)
		}
		if err := api.ForeachCmd(ctx, g.initialCommands, cbDerived); err != nil {
			return err
		}
	}
	return api.ForeachCmd(ctx, g.capture.Commands, cb)
}

// ForeachNode iterates over all nodes in the graph, stopping at the
// first callback error.
func (g *dependencyGraph) ForeachNode(cb func(NodeID, Node) error) error {
	for i, node := range g.nodes {
		err := cb(NodeID(i), node)
		if err != nil {
			return err
		}
	}
	return nil
}

// ForeachDependency iterates over all pairs (src, tgt), where src depends
// on tgt, stopping at the first callback error.
func (g *dependencyGraph) ForeachDependency(cb func(NodeID, NodeID) error) error {
	for i, depsFrom := range g.dependenciesFrom {
		src := NodeID(i)
		for _, tgt := range depsFrom {
			err := cb(src, tgt)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// ForeachDependencyFrom iterates over all the nodes tgt, where src
// depends on tgt, stopping at the first callback error.
func (g *dependencyGraph) ForeachDependencyFrom(src NodeID, cb func(NodeID) error) error {
	for _, tgt := range g.dependenciesFrom[src] {
		if err := cb(tgt); err != nil {
			return err
		}
	}
	return nil
}
// ForeachDependencyTo iterates over all the nodes src, where src depends on tgt.
// If Config().ReverseDependencies is false, this will return an error,
// because reverse edges are only recorded when that option is enabled.
func (g *dependencyGraph) ForeachDependencyTo(tgt NodeID, cb func(NodeID) error) error {
	if !g.Config().ReverseDependencies {
		// Error string made idiomatic: lowercase, no trailing period.
		return fmt.Errorf("reverse dependencies disabled; cannot iterate dependencies to node %v", tgt)
	}
	for _, src := range g.dependenciesTo[tgt] {
		if err := cb(src); err != nil {
			return err
		}
	}
	return nil
}
// Capture returns the capture whose dependencies are stored in this graph
func (g *dependencyGraph) Capture() *capture.GraphicsCapture {
	return g.capture
}

// GetUnopenedForwardDependencies returns the commands recorded via
// addUnopenedForwardDependency (forward dependencies that were never opened).
func (g *dependencyGraph) GetUnopenedForwardDependencies() []api.CmdID {
	return g.unopenedForwardDependencies
}

// GetCommand returns the command identified by the given CmdID, or nil
// when the ID is out of range. Real IDs index the captured commands;
// derived IDs index the initial (state rebuild) commands.
func (g *dependencyGraph) GetCommand(cmdID api.CmdID) api.Cmd {
	if cmdID.IsReal() {
		if cmdID >= api.CmdID(len(g.capture.Commands)) {
			return nil
		}
		return g.capture.Commands[cmdID]
	} else {
		// Derived ID: translate back to the index into initialCommands.
		cmdID = cmdID.Real()
		if cmdID >= api.CmdID(len(g.initialCommands)) {
			return nil
		}
		return g.initialCommands[cmdID]
	}
}
// NumInitialCommands returns the number of initial commands, which
// reconstruct the initial state before the first command in the capture.
func (g *dependencyGraph) NumInitialCommands() int {
	return len(g.initialCommands)
}

// GetNodeAccesses returns the recorded accesses for nodeID, or an empty
// NodeAccesses (with ParentNode set to NodeNoID) when none were saved.
func (g *dependencyGraph) GetNodeAccesses(nodeID NodeID) NodeAccesses {
	if nodeID < NodeID(len(g.nodeAccesses)) {
		return g.nodeAccesses[nodeID]
	} else {
		return NodeAccesses{
			ParentNode: NodeNoID,
		}
	}
}

// Config returns the config used to create this graph
func (g *dependencyGraph) Config() DependencyGraphConfig {
	return g.config
}
// addNode appends node to the graph, registers it in the command trie
// when it is a CmdNode, and returns its new NodeID.
func (g *dependencyGraph) addNode(node Node) NodeID {
	nodeID := (NodeID)(len(g.nodes))
	g.nodes = append(g.nodes, node)
	g.dependenciesFrom = append(g.dependenciesFrom, []NodeID{})
	if g.config.SaveNodeAccesses {
		g.nodeAccesses = append(g.nodeAccesses, NodeAccesses{})
	}
	if cmdNode, ok := node.(CmdNode); ok {
		g.cmdNodeIDs.SetValue(cmdNode.Index, nodeID)
	}
	return nodeID
}

// setNodeAccesses stores the access data for nodeID, silently ignoring
// out-of-range IDs (nodeAccesses is only allocated when SaveNodeAccesses is set).
func (g *dependencyGraph) setNodeAccesses(nodeID NodeID, acc NodeAccesses) {
	if nodeID < NodeID(len(g.nodeAccesses)) {
		g.nodeAccesses[nodeID] = acc
	}
}

// setDependencies replaces the dependency list of src, keeping the
// graph-wide dependency count consistent.
func (g *dependencyGraph) setDependencies(src NodeID, targets []NodeID) {
	g.numDependencies -= (uint64)(len(g.dependenciesFrom[src]))
	g.numDependencies += (uint64)(len(targets))
	g.dependenciesFrom[src] = targets
}

// addUnopenedForwardDependency records a command whose forward dependency
// was never opened (see GetUnopenedForwardDependencies).
func (g *dependencyGraph) addUnopenedForwardDependency(id api.CmdID) {
	g.unopenedForwardDependencies = append(g.unopenedForwardDependencies, id)
}

// setStateRefs stores the state reference map; it is only retained when
// node accesses are being saved.
func (g *dependencyGraph) setStateRefs(stateRefs map[api.RefID]RefFrag) {
	if g.config.SaveNodeAccesses {
		g.stateRefs = stateRefs
	}
}
// addDependency records that src depends on tgt. Targets for a given src
// must be added in strictly increasing order; re-adding the most recent
// target is a no-op. Out-of-order additions and capacity overruns panic,
// since both indicate construction bugs.
func (g *dependencyGraph) addDependency(src, tgt NodeID) {
	deps := g.dependenciesFrom[src]
	if deg := len(deps); deg > 0 {
		last := deps[deg-1]
		if last == tgt {
			// Duplicate of the most recently added target; nothing to do.
			return
		}
		if last > tgt {
			// FIX: the original formatted g.dependenciesFrom[deg-1] (the
			// wrong slice — a whole []NodeID) instead of the prior target.
			panic(fmt.Errorf("dependency (%v,%v) added after (%v,%v)", src, tgt, src, last))
		}
	}
	if len(deps) == cap(deps) {
		// This should not happen.
		// addDependency is only called for forward dependencies, and
		// sufficient capacity should have been allocated for all open
		// forward dependencies.
		// Re-allocating the slice could significantly impact performance.
		// Panic so that we can fix this.
		panic(fmt.Errorf("addDependency: no remaining capacity (size %v)", len(deps)))
	}
	g.dependenciesFrom[src] = append(deps, tgt)
	g.numDependencies++
}
func (g *dependencyGraph) buildDependenciesTo() {
degTo := make([]uint32, len(g.nodes))
for _, depsFrom := range g.dependenciesFrom {
for _, tgt := range depsFrom {
degTo[tgt]++
}
}
g.dependenciesTo = make([][]NodeID, len(g.nodes))
for tgt := range g.dependenciesTo {
g.dependenciesTo[tgt] = make([]NodeID, 0, degTo[tgt])
}
for src, depsFrom := range g.dependenciesFrom {
for _, tgt := range depsFrom {
g.dependenciesTo[tgt] = append(g.dependenciesTo[tgt], NodeID(src))
}
}
} | gapis/resolve/dependencygraph2/dependency_graph.go | 0.68437 | 0.426202 | dependency_graph.go | starcoder |
package materials
import (
"math"
"math/rand"
"github.com/go-gl/mathgl/mgl64"
"github.com/markzuber/zgotrace/raytrace"
"github.com/markzuber/zgotrace/raytrace/vectorextensions"
)
// DialectricMaterial is a dielectric (glass-like) material that reflects
// or refracts incoming rays based on its index of refraction.
// NOTE(review): "Dialectric" looks like a misspelling of "Dielectric";
// renaming would break the public API, so it is only flagged here.
type DialectricMaterial struct {
	refractionIndex float64 // index of refraction (e.g. ~1.5 for glass)
}

// NewDialectricMaterial returns a dielectric Material with the given
// index of refraction.
func NewDialectricMaterial(refractionIndex float64) raytrace.Material {
	m := &DialectricMaterial{refractionIndex}
	return m
}
// calculateSchlickApproximation returns Schlick's polynomial
// approximation of the Fresnel reflectance for the given incidence
// cosine and index of refraction:
//   R(θ) = R0 + (1 - R0)·(1 - cosθ)^5, with R0 = ((1-n)/(1+n))².
func calculateSchlickApproximation(cosine float64, refractionIndex float64) float64 {
	base := (1.0 - refractionIndex) / (1.0 + refractionIndex)
	r0 := base * base
	return r0 + (1.0-r0)*math.Pow(1.0-cosine, 5.0)
}
// Scatter computes how an incoming ray interacts with the dielectric
// surface: it either reflects or refracts, choosing between the two
// probabilistically using Schlick's approximation of the Fresnel
// reflectance. The attenuation is always white (no absorption).
func (m *DialectricMaterial) Scatter(rayIn *raytrace.Ray, hitRecord *raytrace.HitRecord) *raytrace.ScatterResult {
	var reflected = vectorextensions.Reflect(rayIn.Direction(), hitRecord.Normal())
	var attenuation = raytrace.NewColorVector(1.0, 1.0, 1.0)
	var niOverNt float64
	var outwardNormal mgl64.Vec3
	var cosine float64
	// The sign of direction·normal tells whether the ray hits the surface
	// from inside (exiting) or outside (entering) the material.
	if rayIn.Direction().Dot(hitRecord.Normal()) > 0.0 {
		// Exiting: flip the normal; refraction ratio is n (material/air).
		outwardNormal = vectorextensions.Invert(hitRecord.Normal())
		niOverNt = m.refractionIndex
		cosine = m.refractionIndex * rayIn.Direction().Dot(hitRecord.Normal()) / rayIn.Direction().Len()
	} else {
		// Entering: keep the normal; refraction ratio is 1/n.
		outwardNormal = hitRecord.Normal()
		niOverNt = 1.0 / m.refractionIndex
		cosine = -rayIn.Direction().Dot(hitRecord.Normal()) / rayIn.Direction().Len()
	}
	var reflectProbability float64
	var scattered *raytrace.Ray
	var refracted = vectorextensions.Refract(rayIn.Direction(), outwardNormal, niOverNt)
	if vectorextensions.IsVectorZero(refracted) {
		// Zero refracted vector: no valid refraction (presumably total
		// internal reflection) — always reflect. The assignment below is
		// technically redundant because reflectProbability == 1.0 forces
		// the reflect branch in the draw further down.
		scattered = raytrace.NewRay(hitRecord.P(), reflected)
		reflectProbability = 1.0
	} else {
		reflectProbability = calculateSchlickApproximation(cosine, m.refractionIndex)
	}
	// Randomly choose reflection vs refraction weighted by the Fresnel
	// reflectance (rand.Float64 is in [0,1), so probability 1.0 always reflects).
	if rand.Float64() < reflectProbability {
		scattered = raytrace.NewRay(hitRecord.P(), reflected)
	} else {
		scattered = raytrace.NewRay(hitRecord.P(), refracted)
	}
	return raytrace.NewScatterResult(true, attenuation, scattered, nil)
}
// ScatteringPdf always returns 0 for this material (no probability
// density over scatter directions is used).
func (m *DialectricMaterial) ScatteringPdf(rayIn *raytrace.Ray, hitRecord *raytrace.HitRecord, scattered *raytrace.Ray) float64 {
	return 0.0
}

// Emitted returns black: this material emits no light.
func (m *DialectricMaterial) Emitted(rayIn *raytrace.Ray, hitRecord *raytrace.HitRecord, uvCoords mgl64.Vec2, p mgl64.Vec3) raytrace.ColorVector {
	return raytrace.NewColorVector(0, 0, 0)
}
package ogórek
import (
"encoding/binary"
"fmt"
"io"
"math"
"reflect"
)
// An Encoder encodes Go data structures into a pickle byte stream.
type Encoder struct {
	w io.Writer // destination for the emitted pickle opcodes
}

// NewEncoder returns a new Encoder writing to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{w: w}
}
// Encode writes the pickle encoding of v to w, the encoder's writer,
// terminated by the STOP opcode. It returns any error from encoding the
// value or from writing STOP (the original discarded both and always
// returned nil).
func (e *Encoder) Encode(v interface{}) error {
	rv := reflectValueOf(v)
	if err := e.encode(rv); err != nil {
		return err
	}
	_, err := e.w.Write([]byte{opStop})
	return err
}
// encode writes the pickle encoding of rv to the underlying writer,
// dispatching on the value's reflect.Kind. Unsupported kinds panic.
func (e *Encoder) encode(rv reflect.Value) error {
	switch rk := rv.Kind(); rk {
	case reflect.Bool:
		e.encodeBool(rv.Bool())
	case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16:
		e.encodeInt(reflect.Int, rv.Int())
	case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16:
		e.encodeInt(reflect.Uint, int64(rv.Uint()))
	case reflect.String:
		e.encodeString(rv.String())
	case reflect.Array, reflect.Slice:
		if rv.Type().Elem().Kind() == reflect.Uint8 {
			if rk == reflect.Array {
				// FIX: reflect.Value.Bytes panics on unaddressable byte
				// arrays, so copy the array contents into a fresh slice.
				b := make([]byte, rv.Len())
				reflect.Copy(reflect.ValueOf(b), rv)
				e.encodeBytes(b)
			} else {
				e.encodeBytes(rv.Bytes())
			}
		} else {
			e.encodeArray(rv)
		}
	case reflect.Map:
		e.encodeMap(rv)
	case reflect.Struct:
		e.encodeStruct(rv)
	case reflect.Float32, reflect.Float64:
		e.encodeFloat(float64(rv.Float()))
	case reflect.Interface:
		// Recurse into the concrete value held by the interface.
		return e.encode(rv.Elem())
	case reflect.Ptr:
		// None pointers encode as the NONE opcode rather than a dict.
		if rv.Elem().Kind() == reflect.Struct {
			switch rv.Elem().Interface().(type) {
			case None:
				e.encodeStruct(rv.Elem())
				return nil
			}
		}
		// FIX: propagate the recursion error (the original dropped it).
		return e.encode(rv.Elem())
	case reflect.Invalid:
		// An invalid Value (e.g. from a nil interface) pickles as None.
		e.w.Write([]byte{opNone})
	default:
		panic(fmt.Sprintf("no support for type '%s'", rk.String()))
	}
	return nil
}
// encodeArray emits an empty list, then MARK, the encoded elements, and
// APPENDS — pickle's standard list construction sequence.
func (e *Encoder) encodeArray(arr reflect.Value) {
	e.w.Write([]byte{opEmptyList, opMark})
	for i, n := 0, arr.Len(); i < n; i++ {
		e.encode(arr.Index(i))
	}
	e.w.Write([]byte{opAppends})
}
// encodeBool writes the pickle opcode sequence for a Go bool.
func (e *Encoder) encodeBool(b bool) {
	op := opFalse
	if b {
		op = opTrue
	}
	e.w.Write([]byte(op))
}
// encodeBytes writes byt as a pickle binstring: SHORT_BINSTRING with a
// one-byte length when it fits in a byte, otherwise BINSTRING with a
// 4-byte little-endian length, followed by the raw bytes.
func (e *Encoder) encodeBytes(byt []byte) {
	if n := len(byt); n < 256 {
		e.w.Write([]byte{opShortBinstring, byte(n)})
	} else {
		e.w.Write([]byte{opBinstring})
		var lenBuf [4]byte
		binary.LittleEndian.PutUint32(lenBuf[:], uint32(n))
		e.w.Write(lenBuf[:])
	}
	e.w.Write(byt)
}
// encodeFloat writes f as the BINFLOAT opcode followed by the IEEE-754
// bit pattern of the float64 in big-endian order.
// (Removed the original's intermediate variable and its redundant
// uint64(u) conversion of a value that was already a uint64.)
func (e *Encoder) encodeFloat(f float64) {
	e.w.Write([]byte{opBinfloat})
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], math.Float64bits(f))
	e.w.Write(b[:])
}
// encodeInt writes i using the smallest pickle integer opcode that fits:
// BININT1 (one byte), BININT2 (two bytes little-endian), BININT (four
// bytes little-endian), falling back to the text INT opcode for values
// outside the 32-bit range. The k parameter is currently unused.
// FIXME: need support for 64-bit ints
func (e *Encoder) encodeInt(k reflect.Kind, i int64) {
	switch {
	case i > 0 && i < math.MaxUint8:
		e.w.Write([]byte{opBinint1, byte(i)})
	case i > 0 && i < math.MaxUint16:
		e.w.Write([]byte{opBinint2, byte(i), byte(i >> 8)})
	case i >= math.MinInt32 && i <= math.MaxInt32:
		// Note: 0 and negative 32-bit values also take this 4-byte path.
		e.w.Write([]byte{opBinint})
		var b [4]byte
		binary.LittleEndian.PutUint32(b[:], uint32(i))
		e.w.Write(b[:])
	default: // int64, but as a string :/
		e.w.Write([]byte{opInt})
		fmt.Fprintf(e.w, "%d\n", i)
	}
}
// encodeMap writes m as a pickle dict. An empty map is just EMPTY_DICT;
// otherwise the key/value pairs are emitted between MARK and SETITEMS.
// Iteration order follows Go's randomized map order.
func (e *Encoder) encodeMap(m reflect.Value) {
	keys := m.MapKeys()
	e.w.Write([]byte{opEmptyDict})
	if len(keys) == 0 {
		return
	}
	e.w.Write([]byte{opMark})
	for _, key := range keys {
		e.encode(key)
		e.encode(m.MapIndex(key))
	}
	e.w.Write([]byte{opSetitems})
}
// encodeString writes s using the same binary-string encoding as a byte
// slice.
func (e *Encoder) encodeString(s string) {
	b := []byte(s)
	e.encodeBytes(b)
}
// encodeStruct writes st as a pickle dict. When any field carries a
// `pickle` struct tag, only tagged fields are emitted, keyed by tag;
// otherwise every exported field is emitted, keyed by its Go field name.
// The internal None struct encodes as pickle's None. Field emission
// order for the tagged case follows Go's randomized map iteration.
func (e *Encoder) encodeStruct(st reflect.Value) {
	typ := st.Type()
	// first test if it's one of our internal python structs
	if _, ok := st.Interface().(None); ok {
		e.w.Write([]byte{opNone})
		return
	}
	structTags := getStructTags(st)
	e.w.Write([]byte{opEmptyDict, opMark})
	if structTags != nil {
		// Tagged struct: keys are the tag values.
		for f, i := range structTags {
			e.encodeString(f)
			e.encode(st.Field(i))
		}
	} else {
		// Untagged struct: keys are the exported field names.
		l := typ.NumField()
		for i := 0; i < l; i++ {
			fty := typ.Field(i)
			if fty.PkgPath != "" {
				continue // skip unexported names
			}
			e.encodeString(fty.Name)
			e.encode(st.Field(i))
		}
	}
	e.w.Write([]byte{opSetitems})
}
func reflectValueOf(v interface{}) reflect.Value {
rv, ok := v.(reflect.Value)
if !ok {
rv = reflect.ValueOf(v)
}
return rv
}
func getStructTags(ptr reflect.Value) map[string]int {
if ptr.Kind() != reflect.Struct {
return nil
}
m := make(map[string]int)
t := ptr.Type()
l := t.NumField()
numTags := 0
for i := 0; i < l; i++ {
field := t.Field(i).Tag.Get("pickle")
if field != "" {
m[field] = i
numTags++
}
}
if numTags == 0 {
return nil
}
return m
} | encode.go | 0.664214 | 0.446253 | encode.go | starcoder |
package flux
import "github.com/influxdata/flux/ast"
// File creates a new *ast.File with the given name, import declarations,
// and body statements.
func File(name string, imports []*ast.ImportDeclaration, body []ast.Statement) *ast.File {
	return &ast.File{
		Name: name,
		Imports: imports,
		Body: body,
	}
}
// GreaterThan returns a greater than (lhs > rhs) *ast.BinaryExpression.
func GreaterThan(lhs, rhs ast.Expression) *ast.BinaryExpression {
	return &ast.BinaryExpression{
		Operator: ast.GreaterThanOperator,
		Left: lhs,
		Right: rhs,
	}
}
// LessThan returns a less than (lhs < rhs) *ast.BinaryExpression.
func LessThan(lhs, rhs ast.Expression) *ast.BinaryExpression {
	return &ast.BinaryExpression{
		Operator: ast.LessThanOperator,
		Left: lhs,
		Right: rhs,
	}
}
// Equal returns an equal to (lhs == rhs) *ast.BinaryExpression.
func Equal(lhs, rhs ast.Expression) *ast.BinaryExpression {
	return &ast.BinaryExpression{
		Operator: ast.EqualOperator,
		Left: lhs,
		Right: rhs,
	}
}
// Subtract returns a subtraction (lhs - rhs) *ast.BinaryExpression.
func Subtract(lhs, rhs ast.Expression) *ast.BinaryExpression {
	return &ast.BinaryExpression{
		Operator: ast.SubtractionOperator,
		Left: lhs,
		Right: rhs,
	}
}
// Add returns an addition (lhs + rhs) *ast.BinaryExpression.
func Add(lhs, rhs ast.Expression) *ast.BinaryExpression {
	return &ast.BinaryExpression{
		Operator: ast.AdditionOperator,
		Left: lhs,
		Right: rhs,
	}
}
// Member returns an *ast.MemberExpression p.c, where p is the object and
// c is the property being accessed.
func Member(p, c string) *ast.MemberExpression {
	return &ast.MemberExpression{
		Object: &ast.Identifier{Name: p},
		Property: &ast.Identifier{Name: c},
	}
}
// And returns an and (lhs and rhs) *ast.LogicalExpression.
func And(lhs, rhs ast.Expression) *ast.LogicalExpression {
	return &ast.LogicalExpression{
		Operator: ast.AndOperator,
		Left: lhs,
		Right: rhs,
	}
}
// Or returns an or (lhs or rhs) *ast.LogicalExpression.
func Or(lhs, rhs ast.Expression) *ast.LogicalExpression {
	return &ast.LogicalExpression{
		Operator: ast.OrOperator,
		Left: lhs,
		Right: rhs,
	}
}
// If returns an *ast.ConditionalExpression: consequent when test holds,
// alternate otherwise.
func If(test, consequent, alternate ast.Expression) *ast.ConditionalExpression {
	return &ast.ConditionalExpression{
		Test: test,
		Consequent: consequent,
		Alternate: alternate,
	}
}
// Pipe returns a *ast.PipeExpression that is a piped sequence of call
// expressions starting at base (base |> calls[0] |> calls[1] |> ...).
// It requires at least one call expression and will panic otherwise.
func Pipe(base ast.Expression, calls ...*ast.CallExpression) *ast.PipeExpression {
	if len(calls) < 1 {
		panic("must pipe forward to at least one *ast.CallExpression")
	}
	pe := appendPipe(base, calls[0])
	for _, call := range calls[1:] {
		pe = appendPipe(pe, call)
	}
	return pe
}
// appendPipe pipes base forward into the single call expression next.
func appendPipe(base ast.Expression, next *ast.CallExpression) *ast.PipeExpression {
	return &ast.PipeExpression{
		Argument: base,
		Call: next,
	}
}
// Call returns a *ast.CallExpression that is a function call of fn with
// args as its single object argument.
func Call(fn ast.Expression, args *ast.ObjectExpression) *ast.CallExpression {
	return &ast.CallExpression{
		Callee: fn,
		Arguments: []ast.Expression{
			args,
		},
	}
}
// ExpressionStatement returns an *ast.ExpressionStatement wrapping e.
func ExpressionStatement(e ast.Expression) *ast.ExpressionStatement {
	return &ast.ExpressionStatement{Expression: e}
}
// Function returns an *ast.FunctionExpression with params and a
// single-expression body b.
func Function(params []*ast.Property, b ast.Expression) *ast.FunctionExpression {
	return &ast.FunctionExpression{
		Params: params,
		Body: b,
	}
}
// FuncBlock takes a series of statements and produces a function whose
// body is a block of those statements.
func FuncBlock(params []*ast.Property, stms ...ast.Statement) *ast.FunctionExpression {
	b := &ast.Block{
		Body: stms,
	}
	return &ast.FunctionExpression{
		Params: params,
		Body: b,
	}
}
// String returns an *ast.StringLiteral of s.
func String(s string) *ast.StringLiteral {
	return &ast.StringLiteral{
		Value: s,
	}
}
// Bool returns an *ast.BooleanLiteral of b.
func Bool(b bool) *ast.BooleanLiteral {
	return &ast.BooleanLiteral{
		Value: b,
	}
}
// Duration returns an *ast.DurationLiteral for a single duration of
// magnitude m and unit u (e.g. 5, "m").
func Duration(m int64, u string) *ast.DurationLiteral {
	return &ast.DurationLiteral{
		Values: []ast.Duration{
			{
				Magnitude: m,
				Unit: u,
			},
		},
	}
}
// Identifier returns an *ast.Identifier named i.
func Identifier(i string) *ast.Identifier {
	return &ast.Identifier{Name: i}
}
// Float returns an *ast.FloatLiteral of f.
func Float(f float64) *ast.FloatLiteral {
	return &ast.FloatLiteral{
		Value: f,
	}
}
// Integer returns an *ast.IntegerLiteral of i.
func Integer(i int64) *ast.IntegerLiteral {
	return &ast.IntegerLiteral{
		Value: i,
	}
}
// Negative returns *ast.UnaryExpression for -(e), using the subtraction
// operator in unary position.
func Negative(e ast.Expression) *ast.UnaryExpression {
	return &ast.UnaryExpression{
		Operator: ast.SubtractionOperator,
		Argument: e,
	}
}
// DefineVariable returns an *ast.VariableAssignment of id to the e. (e.g. id = <expression>)
func DefineVariable(id string, e ast.Expression) *ast.VariableAssignment {
	return &ast.VariableAssignment{
		ID: &ast.Identifier{
			Name: id,
		},
		Init: e,
	}
}
// DefineTaskOption returns an *ast.OptionStatement with the object provided. (e.g. option task = {...})
func DefineTaskOption(o *ast.ObjectExpression) *ast.OptionStatement {
	return &ast.OptionStatement{
		Assignment: DefineVariable("task", o),
	}
}
// Property returns an *ast.Property of identifier key to e.
// (e.g. key: <expression>)
func Property(key string, e ast.Expression) *ast.Property {
	return &ast.Property{
		Key: &ast.Identifier{
			Name: key,
		},
		Value: e,
	}
}
// Dictionary returns an *ast.Property whose key is a string literal
// rather than an identifier. (e.g. "key": <expression>)
func Dictionary(key string, v ast.Expression) *ast.Property {
	return &ast.Property{
		Key: String(key),
		Value: v,
	}
}
// Object returns an *ast.ObjectExpression with properties ps.
func Object(ps ...*ast.Property) *ast.ObjectExpression {
	return &ast.ObjectExpression{
		Properties: ps,
	}
}
// ObjectWith returns an object expression that extends the named
// identifier with the given properties. (e.g. {name with ps...})
func ObjectWith(name string, ps ...*ast.Property) *ast.ObjectExpression {
	obj := Object(ps...)
	obj.With = &ast.Identifier{
		Name: name,
	}
	return obj
}
// Array returns *ast.ArrayExpression with elements es.
func Array(es ...ast.Expression) *ast.ArrayExpression {
	return &ast.ArrayExpression{
		Elements: es,
	}
}
// FunctionParams returns a slice of *ast.Property for the parameters of
// a function, one per name in args; nil when args is empty.
func FunctionParams(args ...string) []*ast.Property {
	var params []*ast.Property
	for _, arg := range args {
		params = append(params, &ast.Property{Key: &ast.Identifier{Name: arg}})
	}
	return params
}
// Imports returns a []*ast.ImportDeclaration with one entry per package
// path in pkgs; nil when pkgs is empty.
func Imports(pkgs ...string) []*ast.ImportDeclaration {
	var is []*ast.ImportDeclaration
	for _, pkg := range pkgs {
		is = append(is, ImportDeclaration(pkg))
	}
	return is
}
// ImportDeclaration returns an *ast.ImportDeclaration for pkg.
func ImportDeclaration(pkg string) *ast.ImportDeclaration {
return &ast.ImportDeclaration{
Path: &ast.StringLiteral{
Value: pkg,
},
}
} | notification/flux/ast.go | 0.865181 | 0.523847 | ast.go | starcoder |
package protocol
import (
"image/color"
)
// Values for MapTrackedObject.Type.
const (
	// MapObjectTypeEntity marks a tracked object backed by an entity.
	MapObjectTypeEntity = iota
	// MapObjectTypeBlock marks a tracked object backed by a block.
	MapObjectTypeBlock
)
// MapTrackedObject is an object on a map that is 'tracked' by the client, such as an entity or a block. This
// object may move, which is handled client-side.
type MapTrackedObject struct {
	// Type is the type of the tracked object. It is either MapObjectTypeEntity or MapObjectTypeBlock.
	// Exactly one of the two fields below is meaningful, depending on Type.
	Type int32
	// EntityUniqueID is the unique ID of the entity, if the tracked object was an entity. It needs not to be
	// filled out if Type is not MapObjectTypeEntity.
	EntityUniqueID int64
	// BlockPosition is the position of the block, if the tracked object was a block. It needs not to be
	// filled out if Type is not MapObjectTypeBlock.
	BlockPosition BlockPos
}
// MapDecoration is a fixed decoration on a map: Its position or other properties do not change automatically
// client-side.
type MapDecoration struct {
	// Type is the type of the map decoration. The type specifies the shape (and sometimes the colour) that
	// the map decoration gets.
	Type byte
	// Rotation is the rotation of the map decoration. It is byte due to the 16 fixed directions that the
	// map decoration may face.
	Rotation byte
	// X is the offset on the X axis in pixels of the decoration.
	X byte
	// Y is the offset on the Y axis in pixels of the decoration.
	Y byte
	// Label is the name of the map decoration. This name may be of any value.
	Label string
	// Colour is the colour of the map decoration. Some map decoration types have a specific colour set
	// automatically, whereas others may be changed.
	Colour color.RGBA
}
// MapTrackedObj reads/writes a MapTrackedObject x using IO r. Which of
// the two payload fields is serialized depends on x.Type; unknown type
// values are reported through r.UnknownEnumOption.
func MapTrackedObj(r IO, x *MapTrackedObject) {
	r.Int32(&x.Type)
	switch x.Type {
	case MapObjectTypeEntity:
		r.Varint64(&x.EntityUniqueID)
	case MapObjectTypeBlock:
		r.UBlockPos(&x.BlockPosition)
	default:
		r.UnknownEnumOption(x.Type, "map tracked object type")
	}
}
// MapDeco reads/writes a MapDecoration x using IO r.
func MapDeco(r IO, x *MapDecoration) {
r.Uint8(&x.Type)
r.Uint8(&x.Rotation)
r.Uint8(&x.X)
r.Uint8(&x.Y)
r.String(&x.Label)
r.VarRGBA(&x.Colour)
} | minecraft/protocol/map.go | 0.668339 | 0.411998 | map.go | starcoder |
package utils
import (
"math"
"time"
)
var (
	// Decimal scaling thresholds. Written as exact float literals instead
	// of computing math.Pow at startup.
	K = 1e3  // thousand
	M = 1e6  // million
	G = 1e9  // billion
	T = 1e12 // trillion
)

// roundOffNearestTen divides num by divisor and rounds the result to one
// decimal place.
func roundOffNearestTen(num float64, divisor float64) float64 {
	x := num / divisor
	return math.Round(x*10) / 10
}

// RoundValues scales num1 and num2 by a common power of ten chosen from
// the larger of the two, returning the scaled values and a unit label.
// When inBytes is true the label is a byte unit instead.
//
// Fixes over the original:
//   - dividing by G (10^9) was mislabeled " per trillion "; it is now
//     " per billion " (the inBytes mapping to " gB " is unchanged);
//   - values >= T previously returned an empty slice and "", which made
//     callers indexing the result panic; they are now scaled by T and
//     labeled " per trillion " (" tB " for bytes).
func RoundValues(num1, num2 float64, inBytes bool) ([]float64, string) {
	nums := []float64{}
	var units string
	// Pick the larger of the two values to choose the scale.
	n := num2
	if num1 > num2 {
		n = num1
	}
	switch {
	case n < K:
		nums = append(nums, num1, num2)
		units = " "
	case n < M:
		nums = append(nums, roundOffNearestTen(num1, K), roundOffNearestTen(num2, K))
		units = " per thousand "
	case n < G:
		nums = append(nums, roundOffNearestTen(num1, M), roundOffNearestTen(num2, M))
		units = " per million "
	case n < T:
		nums = append(nums, roundOffNearestTen(num1, G), roundOffNearestTen(num2, G))
		units = " per billion "
	default:
		nums = append(nums, roundOffNearestTen(num1, T), roundOffNearestTen(num2, T))
		units = " per trillion "
	}
	if inBytes {
		switch units {
		case " ":
			units = " B "
		case " per thousand ":
			units = " kB "
		case " per million ":
			units = " mB "
		case " per billion ":
			units = " gB "
		case " per trillion ":
			units = " tB "
		}
	}
	return nums, units
}
// roundDownFloat rounds num to the nearest integer, with halves rounded
// away from zero. (The name is historical; it does not truncate.)
func roundDownFloat(num float64) int {
	if num >= 0 {
		return int(num + 0.5)
	}
	return int(num - 0.5)
}

// Trim trims a float to the specified number of precision decimal digits.
func Trim(num float64, precision int) float64 {
	scale := math.Pow(10, float64(precision))
	scaled := float64(roundDownFloat(num * scale))
	return scaled / scale
}

// GetInMB converts bytes to MB
func GetInMB(bytes uint64, precision int) float64 {
	mb := float64(bytes) / 1000000
	return Trim(mb, precision)
}
// GetDateFromUnix gets a date and time in RFC822 format from a unix epoch
func GetDateFromUnix(createTime int64) string {
t := time.Unix(createTime/1000, 0)
date := t.Format(time.RFC822)
return date
}
// RoundFloat scales num down by the unit named in base — "K", "M" or
// "G", as powers of 1024; any other base leaves num unscaled — and
// rounds the result to precision decimal digits.
func RoundFloat(num float64, base string, precision int) float64 {
	scale := math.Pow10(precision)
	switch base {
	case "K":
		num /= 1024
	case "M":
		num /= 1024 * 1024
	case "G":
		num /= 1024 * 1024 * 1024
	}
	return math.Round(num*scale) / scale
}
func RoundUint(num uint64, base string, precision int) float64 {
x := float64(num)
return RoundFloat(x, base, precision)
} | src/utils/dataFormat.go | 0.772874 | 0.440529 | dataFormat.go | starcoder |
package trie
// builder builds Succinct Trie.
type builder struct {
	valueWidth uint32 // width in bytes of every stored value
	totalCount int    // number of keys in the current build
	// LOUDS-Sparse bitvecs, pooling; each outer slice is indexed by level.
	lsLabels [][]byte
	lsHasChild [][]uint64
	lsLoudsBits [][]uint64
	// value storage per level
	values [][]byte
	valueCounts []uint32
	// compressed-path prefixes per level
	hasPrefix [][]uint64
	prefixes [][][]byte
	// key suffixes per level
	hasSuffix [][]uint64
	suffixes [][][]byte
	nodeCounts []uint32
	isLastItemTerminator []bool
	// pooling data-structures reused across Reset/Build cycles
	cachedLabel [][]byte
	cachedUint64s [][]uint64
}
// NewBuilder returns a new Trie builder.
func NewBuilder() Builder {
	return &builder{}
}
// Build constructs a SuccinctTrie from keys and their values; each value
// is valueWidth bytes wide. NOTE(review): the grouping in buildNodes
// relies on keys sharing prefixes being adjacent, so keys are presumably
// required in sorted order — confirm at call sites.
func (b *builder) Build(keys, vals [][]byte, valueWidth uint32) SuccinctTrie {
	b.valueWidth = valueWidth
	b.totalCount = len(keys)
	b.buildNodes(keys, vals, 0, 0, 0)
	tree := new(trie)
	tree.Init(b)
	return tree
}
// buildNodes is a recursive algorithm for bulk-building Trie nodes.
// * We divide keys into groups by the `key[depth]`, so keys in each group share the same prefix
// * If depth is not smaller than the length of the first key in a group, that key is a prefix of the others
//   So we should append `labelTerminator` to labels and update `b.isLastItemTerminator`, then remove it from the group.
// * Scan over keys in the current group; when a different label is met, call buildNodes recursively with the new sub group and level+1
// * If all keys in the current group have the same label, this node can be compressed: call buildNodes recursively with the same group and level.
// * If the current group contains only one key, construct the suffix of this key and return.
func (b *builder) buildNodes(keys, vals [][]byte, prefixDepth, depth, level int) {
	b.ensureLevel(level)
	nodeStartPos := b.numItems(level)
	groupStart := 0
	// First key may be exhausted at this depth: it is a prefix of the rest.
	if depth >= len(keys[groupStart]) {
		b.lsLabels[level] = append(b.lsLabels[level], labelTerminator)
		b.isLastItemTerminator[level] = true
		b.ensureLevel(level)
		b.insertValue(vals[groupStart], level)
		b.moveToNextItemSlot(level)
		groupStart++
	}
	for groupEnd := groupStart; groupEnd <= len(keys); groupEnd++ {
		if groupEnd < len(keys) && keys[groupStart][depth] == keys[groupEnd][depth] {
			continue
		}
		if groupEnd == len(keys) && groupStart == 0 && groupEnd-groupStart != 1 {
			// node at this level is one-way node, compress it to next node
			b.buildNodes(keys, vals, prefixDepth, depth+1, level)
			return
		}
		b.lsLabels[level] = append(b.lsLabels[level], keys[groupStart][depth])
		b.moveToNextItemSlot(level)
		if groupEnd-groupStart == 1 {
			// Single key left in the group: store its remaining bytes as a suffix.
			if depth+1 < len(keys[groupStart]) {
				b.ensureLevel(level)
				setBit(b.hasSuffix[level], b.numItems(level)-1)
				b.suffixes[level] = append(b.suffixes[level], keys[groupStart][depth+1:])
			}
			b.insertValue(vals[groupStart], level)
		} else {
			// Multiple keys share this label: descend one level.
			setBit(b.lsHasChild[level], b.numItems(level)-1)
			b.buildNodes(keys[groupStart:groupEnd], vals[groupStart:groupEnd], depth+1, depth+1, level+1)
		}
		groupStart = groupEnd
	}
	// check if current node contains compressed path.
	if depth-prefixDepth > 0 {
		prefix := keys[0][prefixDepth:depth]
		b.insertPrefix(prefix, level)
	}
	setBit(b.lsLoudsBits[level], nodeStartPos)
	b.nodeCounts[level]++
	if b.nodeCounts[level]%wordSize == 0 {
		b.hasPrefix[level] = append(b.hasPrefix[level], 0)
		b.hasSuffix[level] = append(b.hasSuffix[level], 0)
	}
}
// ensureLevel grows the per-level slices until level is a valid index.
func (b *builder) ensureLevel(level int) {
	if level >= b.treeHeight() {
		b.addLevel()
	}
}
// treeHeight returns the number of levels currently allocated.
func (b *builder) treeHeight() int {
	return len(b.nodeCounts)
}
// numItems returns the number of labels stored at the given level.
func (b *builder) numItems(level int) uint32 {
	return uint32(len(b.lsLabels[level]))
}
// addLevel appends one empty level to every per-level structure, reusing
// pooled backing slices where available, and seeds each bitvec with its
// first word.
func (b *builder) addLevel() {
	// cached
	b.lsLabels = append(b.lsLabels, b.pickLabels())
	b.lsHasChild = append(b.lsHasChild, b.pickUint64Slice())
	b.lsLoudsBits = append(b.lsLoudsBits, b.pickUint64Slice())
	b.hasPrefix = append(b.hasPrefix, b.pickUint64Slice())
	b.hasSuffix = append(b.hasSuffix, b.pickUint64Slice())
	b.values = append(b.values, []byte{})
	b.valueCounts = append(b.valueCounts, 0)
	b.prefixes = append(b.prefixes, [][]byte{})
	b.suffixes = append(b.suffixes, [][]byte{})
	b.nodeCounts = append(b.nodeCounts, 0)
	b.isLastItemTerminator = append(b.isLastItemTerminator, false)
	level := b.treeHeight() - 1
	b.lsHasChild[level] = append(b.lsHasChild[level], 0)
	b.lsLoudsBits[level] = append(b.lsLoudsBits[level], 0)
	b.hasPrefix[level] = append(b.hasPrefix[level], 0)
	b.hasSuffix[level] = append(b.hasSuffix[level], 0)
}
// moveToNextItemSlot appends a fresh bitmap word to the per-item bitvecs
// whenever the item count reaches a wordSize boundary.
func (b *builder) moveToNextItemSlot(level int) {
	if b.numItems(level)%wordSize == 0 {
		b.hasSuffix[level] = append(b.hasSuffix[level], 0)
		b.lsHasChild[level] = append(b.lsHasChild[level], 0)
		b.lsLoudsBits[level] = append(b.lsLoudsBits[level], 0)
	}
}
// insertValue appends the first valueWidth bytes of value to the level's
// flat value storage and bumps its count.
func (b *builder) insertValue(value []byte, level int) {
	b.values[level] = append(b.values[level], value[:b.valueWidth]...)
	b.valueCounts[level]++
}
// insertPrefix records a compressed-path prefix for the level's current
// node, marking it in the hasPrefix bitvec.
func (b *builder) insertPrefix(prefix []byte, level int) {
	setBit(b.hasPrefix[level], b.nodeCounts[level])
	b.prefixes[level] = append(b.prefixes[level], prefix)
}
// Reset clears the builder for reuse, returning per-level backing slices
// to the internal pools so a later Build can reuse their capacity.
//
// Bug fix: the original "cache has prefix" and "cache has suffix" loops
// appended the slices back onto b.hasPrefix / b.hasSuffix themselves
// (while ranging over them) instead of onto b.cachedUint64s, so those
// slices never reached the pool; the duplicated truncations that
// followed are also removed.
func (b *builder) Reset() {
	b.valueWidth = 0
	b.totalCount = 0
	// pool lsLabels
	for idx := range b.lsLabels {
		b.cachedLabel = append(b.cachedLabel, b.lsLabels[idx][:0])
	}
	b.lsLabels = b.lsLabels[:0]
	// pool lsHasChild
	for idx := range b.lsHasChild {
		b.cachedUint64s = append(b.cachedUint64s, b.lsHasChild[idx][:0])
	}
	b.lsHasChild = b.lsHasChild[:0]
	// pool lsLoudsBits
	for idx := range b.lsLoudsBits {
		b.cachedUint64s = append(b.cachedUint64s, b.lsLoudsBits[idx][:0])
	}
	b.lsLoudsBits = b.lsLoudsBits[:0]
	// reset values
	b.values = b.values[:0]
	b.valueCounts = b.valueCounts[:0]
	// pool hasPrefix and reset prefixes
	for idx := range b.hasPrefix {
		b.cachedUint64s = append(b.cachedUint64s, b.hasPrefix[idx][:0])
	}
	b.hasPrefix = b.hasPrefix[:0]
	b.prefixes = b.prefixes[:0]
	// pool hasSuffix and reset suffixes
	for idx := range b.hasSuffix {
		b.cachedUint64s = append(b.cachedUint64s, b.hasSuffix[idx][:0])
	}
	b.hasSuffix = b.hasSuffix[:0]
	b.suffixes = b.suffixes[:0]
	// reset node bookkeeping
	b.nodeCounts = b.nodeCounts[:0]
	b.isLastItemTerminator = b.isLastItemTerminator[:0]
}
// pickLabels pops a pooled label slice, or returns a fresh empty one
// when the pool is empty.
func (b *builder) pickLabels() []byte {
	n := len(b.cachedLabel)
	if n == 0 {
		return []byte{}
	}
	last := b.cachedLabel[n-1]
	b.cachedLabel = b.cachedLabel[:n-1]
	return last
}
func (b *builder) pickUint64Slice() []uint64 {
if len(b.cachedUint64s) == 0 {
return []uint64{}
}
tailIndex := len(b.cachedUint64s) - 1
ptr := b.cachedUint64s[tailIndex]
b.cachedUint64s = b.cachedUint64s[:tailIndex]
return ptr
} | pkg/trie/builder.go | 0.719975 | 0.494751 | builder.go | starcoder |
package bitutils
import (
"encoding/binary"
"fmt"
)
// ParseByte4 parses 4 bits of data from the data array, starting at the given index
func ParseByte4(data []byte, bitStartIndex uint) (byte, error) {
startByte := bitStartIndex / 8
bitStartOffset := bitStartIndex % 8
if bitStartOffset < 5 {
if uint(len(data)) < (startByte + 1) {
return 0, fmt.Errorf("ParseByte4 expected 4 bits to start at bit %d, but the consent string was only %d bytes long", bitStartIndex, len(data))
}
return (data[startByte] & (0xf0 >> bitStartOffset)) >> (4 - bitStartOffset), nil
}
if uint(len(data)) < (startByte+2) && bitStartOffset > 4 {
return 0, fmt.Errorf("ParseByte4 expected 4 bits to start at bit %d, but the consent string was only %d bytes long (needs second byte)", bitStartIndex, len(data))
}
leftBits := (data[startByte] & (0xf0 >> bitStartOffset)) << (bitStartOffset - 4)
bitsConsumed := 8 - bitStartOffset
overflow := 4 - bitsConsumed
rightBits := (data[startByte+1] & (0xf0 << (4 - overflow))) >> (8 - overflow)
return leftBits | rightBits, nil
}
// ParseByte8 parses 8 bits of data from the data array, starting at the given index
// (bit 0 is the most-significant bit of data[0]).
func ParseByte8(data []byte, bitStartIndex uint) (byte, error) {
	startByte := bitStartIndex / 8
	bitStartOffset := bitStartIndex % 8
	if bitStartOffset == 0 {
		// Byte-aligned: return the byte directly.
		if uint(len(data)) < (startByte + 1) {
			return 0, fmt.Errorf("ParseByte8 expected 8 bits to start at bit %d, but the consent string was only %d bytes long", bitStartIndex, len(data))
		}
		return data[startByte], nil
	}
	// Unaligned: stitch together the tail of one byte and the head of the
	// next. (Typo fix: the error message previously read "8 bitst".)
	if uint(len(data)) < (startByte + 2) {
		return 0, fmt.Errorf("ParseByte8 expected 8 bits to start at bit %d, but the consent string was only %d bytes long", bitStartIndex, len(data))
	}
	leftBits := (data[startByte] & (0xff >> bitStartOffset)) << bitStartOffset
	shiftComplement := 8 - bitStartOffset
	rightBits := (data[startByte+1] & (0xff << shiftComplement)) >> shiftComplement
	return leftBits | rightBits, nil
}
// ParseUInt12 parses 12 bits of data from the data array, starting at the
// given index, as a big-endian unsigned integer: the first 4 bits form
// the high nibble and the following 8 bits the low byte.
func ParseUInt12(data []byte, bitStartIndex uint) (uint16, error) {
	// Check up front that all 12 bits are present.
	end := bitStartIndex + 12
	endByte := end / 8
	endOffset := end % 8
	if endOffset > 0 {
		endByte++
	}
	if uint(len(data)) < endByte {
		return 0, fmt.Errorf("ParseUInt12 expected a 12-bit int to start at bit %d, but the consent string was only %d bytes long",
			bitStartIndex, len(data))
	}
	leftByte, err := ParseByte4(data, bitStartIndex)
	if err != nil {
		return 0, fmt.Errorf("ParseUInt12 error on left byte: %s", err)
	}
	rightByte, err := ParseByte8(data, bitStartIndex+4)
	if err != nil {
		return 0, fmt.Errorf("ParseUInt12 error on right byte: %s", err)
	}
	return binary.BigEndian.Uint16([]byte{leftByte, rightByte}), nil
}
// ParseUInt16 parses a 16-bit integer from the data array, starting at the given index
func ParseUInt16(data []byte, bitStartIndex uint) (uint16, error) {
startByte := bitStartIndex / 8
bitStartOffset := bitStartIndex % 8
if bitStartOffset == 0 {
if uint(len(data)) < (startByte + 2) {
return 0, fmt.Errorf("ParseUInt16 expected a 16-bit int to start at bit %d, but the consent string was only %d bytes long", bitStartIndex, len(data))
}
return binary.BigEndian.Uint16(data[startByte : startByte+2]), nil
}
if uint(len(data)) < (startByte + 3) {
return 0, fmt.Errorf("ParseUInt16 expected a 16-bit int to start at bit %d, but the consent string was only %d bytes long", bitStartIndex, len(data))
}
leftByte, err := ParseByte8(data, bitStartIndex)
if err != nil {
return 0, fmt.Errorf("ParseUInt16 error on left byte: %s", err)
}
rightByte, err := ParseByte8(data, bitStartIndex+8)
if err != nil {
return 0, fmt.Errorf("ParseUInt16 error on right byte: %s", err)
}
return binary.BigEndian.Uint16([]byte{leftByte, rightByte}), nil
} | bitutils/bitutils.go | 0.674801 | 0.53443 | bitutils.go | starcoder |
package main
import (
"fmt"
"sort"
)
type (
	// Point is an (x, y, z) coordinate.
	Point [3]float64
	// Face lists the indices of its corner points.
	Face []int
	// Edge records an edge's endpoints, the faces it borders, and its
	// midpoint.
	Edge struct {
		pn1 int // point number 1
		pn2 int // point number 2
		fn1 int // face number 1
		fn2 int // face number 2 (-1 when the edge borders only one face)
		cp Point // center point
	}
	// PointEx accumulates a running point sum plus a count, for averaging.
	PointEx struct {
		p Point
		n int
	}
)

// sumPoint returns the component-wise sum of p1 and p2.
func sumPoint(p1, p2 Point) Point {
	var out Point
	for i := range out {
		out[i] = p1[i] + p2[i]
	}
	return out
}

// mulPoint returns p scaled component-wise by m.
func mulPoint(p Point, m float64) Point {
	var out Point
	for i := range out {
		out[i] = p[i] * m
	}
	return out
}

// divPoint returns p scaled component-wise by 1/d.
func divPoint(p Point, d float64) Point {
	return mulPoint(p, 1.0/d)
}

// centerPoint returns the midpoint of p1 and p2.
func centerPoint(p1, p2 Point) Point {
	return divPoint(sumPoint(p1, p2), 2)
}
// getFacePoints returns, for every face, the average of its corner
// points (the face's centroid).
func getFacePoints(inputPoints []Point, inputFaces []Face) []Point {
	facePoints := make([]Point, len(inputFaces))
	for i, face := range inputFaces {
		var sum Point
		for _, pi := range face {
			sum = sumPoint(sum, inputPoints[pi])
		}
		facePoints[i] = divPoint(sum, float64(len(face)))
	}
	return facePoints
}
// getEdgesFaces enumerates every edge of every face, merges the two
// half-edges shared by adjacent faces into one Edge (fn2 is -1 for a
// boundary edge used by a single face), and attaches each edge's
// midpoint.
func getEdgesFaces(inputPoints []Point, inputFaces []Face) []Edge {
	// Collect one (minPoint, maxPoint, faceNum) triple per face edge,
	// with the point pair normalized so shared edges compare equal.
	var edges [][3]int
	for faceNum, face := range inputFaces {
		numPoints := len(face)
		for pointIndex := 0; pointIndex < numPoints; pointIndex++ {
			pointNum1 := face[pointIndex]
			var pointNum2 int
			if pointIndex < numPoints-1 {
				pointNum2 = face[pointIndex+1]
			} else {
				pointNum2 = face[0]
			}
			if pointNum1 > pointNum2 {
				pointNum1, pointNum2 = pointNum2, pointNum1
			}
			edges = append(edges, [3]int{pointNum1, pointNum2, faceNum})
		}
	}
	// Sort so duplicate edges from adjacent faces become neighbors.
	sort.Slice(edges, func(i, j int) bool {
		if edges[i][0] == edges[j][0] {
			if edges[i][1] == edges[j][1] {
				return edges[i][2] < edges[j][2]
			}
			return edges[i][1] < edges[j][1]
		}
		return edges[i][0] < edges[j][0]
	})
	// Merge consecutive duplicates into (p1, p2, face1, face2); -1 marks
	// an edge that belongs to only one face.
	numEdges := len(edges)
	eIndex := 0
	var mergedEdges [][4]int
	for eIndex < numEdges {
		e1 := edges[eIndex]
		if eIndex < numEdges-1 {
			e2 := edges[eIndex+1]
			if e1[0] == e2[0] && e1[1] == e2[1] {
				mergedEdges = append(mergedEdges, [4]int{e1[0], e1[1], e1[2], e2[2]})
				eIndex += 2
			} else {
				mergedEdges = append(mergedEdges, [4]int{e1[0], e1[1], e1[2], -1})
				eIndex++
			}
		} else {
			mergedEdges = append(mergedEdges, [4]int{e1[0], e1[1], e1[2], -1})
			eIndex++
		}
	}
	// Attach each edge's midpoint.
	var edgesCenters []Edge
	for _, me := range mergedEdges {
		p1 := inputPoints[me[0]]
		p2 := inputPoints[me[1]]
		cp := centerPoint(p1, p2)
		edgesCenters = append(edgesCenters, Edge{me[0], me[1], me[2], me[3], cp})
	}
	return edgesCenters
}
// getEdgePoints computes the Catmull-Clark edge point for every edge:
// the average of the edge midpoint and the midpoint of the two adjacent
// face points. For a boundary edge (fn2 == -1) the single face point is
// used twice.
func getEdgePoints(inputPoints []Point, edgesFaces []Edge, facePoints []Point) []Point {
	edgePoints := make([]Point, len(edgesFaces))
	for i, edge := range edgesFaces {
		cp := edge.cp
		fp1 := facePoints[edge.fn1]
		var fp2 Point
		if edge.fn2 == -1 {
			fp2 = fp1
		} else {
			fp2 = facePoints[edge.fn2]
		}
		cfp := centerPoint(fp1, fp2)
		edgePoints[i] = centerPoint(cp, cfp)
	}
	return edgePoints
}
// getAvgFacePoints returns, for each input point, the average of the
// face points of all faces that use that point.
func getAvgFacePoints(inputPoints []Point, inputFaces []Face, facePoints []Point) []Point {
	numPoints := len(inputPoints)
	tempPoints := make([]PointEx, numPoints)
	// Accumulate each face point into every point the face touches.
	for faceNum := range inputFaces {
		fp := facePoints[faceNum]
		for _, pointNum := range inputFaces[faceNum] {
			tp := tempPoints[pointNum].p
			tempPoints[pointNum].p = sumPoint(tp, fp)
			tempPoints[pointNum].n++
		}
	}
	avgFacePoints := make([]Point, numPoints)
	for i, tp := range tempPoints {
		avgFacePoints[i] = divPoint(tp.p, float64(tp.n))
	}
	return avgFacePoints
}
// getAvgMidEdges returns, for each input point, the average of the
// midpoints of all edges incident to that point.
func getAvgMidEdges(inputPoints []Point, edgesFaces []Edge) []Point {
	numPoints := len(inputPoints)
	tempPoints := make([]PointEx, numPoints)
	// Each edge contributes its midpoint to both of its endpoints.
	for _, edge := range edgesFaces {
		cp := edge.cp
		for _, pointNum := range []int{edge.pn1, edge.pn2} {
			tp := tempPoints[pointNum].p
			tempPoints[pointNum].p = sumPoint(tp, cp)
			tempPoints[pointNum].n++
		}
	}
	avgMidEdges := make([]Point, len(tempPoints))
	for i, tp := range tempPoints {
		avgMidEdges[i] = divPoint(tp.p, float64(tp.n))
	}
	return avgMidEdges
}
// getPointsFaces counts, for every point index, how many faces use it.
func getPointsFaces(inputPoints []Point, inputFaces []Face) []int {
	counts := make([]int, len(inputPoints))
	for _, face := range inputFaces {
		for _, pointNum := range face {
			counts[pointNum]++
		}
	}
	return counts
}
// getNewPoints applies the Catmull-Clark vertex rule to every original
// point P: newP = (P*(n-3) + F + 2*R) / n, where n is the number of
// faces touching P, F the average of adjacent face points, and R the
// average of adjacent edge midpoints.
func getNewPoints(inputPoints []Point, pointsFaces []int, avgFacePoints, avgMidEdges []Point) []Point {
	newPoints := make([]Point, len(inputPoints))
	for pointNum := range inputPoints {
		n := float64(pointsFaces[pointNum])
		// Barycentric weights (n-3)/n, 1/n and 2/n for P, F and R.
		m1, m2, m3 := (n-3)/n, 1.0/n, 2.0/n
		oldCoords := inputPoints[pointNum]
		p1 := mulPoint(oldCoords, m1)
		afp := avgFacePoints[pointNum]
		p2 := mulPoint(afp, m2)
		ame := avgMidEdges[pointNum]
		p3 := mulPoint(ame, m3)
		p4 := sumPoint(p1, p2)
		newPoints[pointNum] = sumPoint(p4, p3)
	}
	return newPoints
}
// switchNums returns the pair with its two values in ascending order.
func switchNums(pointNums [2]int) [2]int {
	if pointNums[0] > pointNums[1] {
		pointNums[0], pointNums[1] = pointNums[1], pointNums[0]
	}
	return pointNums
}
// cmcSubdiv performs one iteration of Catmull-Clark subdivision,
// returning the new point set and faces. NOTE: the face-rebuilding loop
// only handles quadrilateral faces (len(oldFace) == 4); faces of any
// other arity are silently dropped from the output.
func cmcSubdiv(inputPoints []Point, inputFaces []Face) ([]Point, []Face) {
	facePoints := getFacePoints(inputPoints, inputFaces)
	edgesFaces := getEdgesFaces(inputPoints, inputFaces)
	edgePoints := getEdgePoints(inputPoints, edgesFaces, facePoints)
	avgFacePoints := getAvgFacePoints(inputPoints, inputFaces, facePoints)
	avgMidEdges := getAvgMidEdges(inputPoints, edgesFaces)
	pointsFaces := getPointsFaces(inputPoints, inputFaces)
	newPoints := getNewPoints(inputPoints, pointsFaces, avgFacePoints, avgMidEdges)
	// Append one new point per face and remember its index.
	var facePointNums []int
	nextPointNum := len(newPoints)
	for _, facePoint := range facePoints {
		newPoints = append(newPoints, facePoint)
		facePointNums = append(facePointNums, nextPointNum)
		nextPointNum++
	}
	// Append one new point per edge, keyed by its (sorted) endpoint pair.
	edgePointNums := make(map[[2]int]int)
	for edgeNum := range edgesFaces {
		pointNum1 := edgesFaces[edgeNum].pn1
		pointNum2 := edgesFaces[edgeNum].pn2
		edgePoint := edgePoints[edgeNum]
		newPoints = append(newPoints, edgePoint)
		edgePointNums[[2]int{pointNum1, pointNum2}] = nextPointNum
		nextPointNum++
	}
	// Split every quad into four quads around its face point.
	var newFaces []Face
	for oldFaceNum, oldFace := range inputFaces {
		if len(oldFace) == 4 {
			a, b, c, d := oldFace[0], oldFace[1], oldFace[2], oldFace[3]
			facePointAbcd := facePointNums[oldFaceNum]
			edgePointAb := edgePointNums[switchNums([2]int{a, b})]
			edgePointDa := edgePointNums[switchNums([2]int{d, a})]
			edgePointBc := edgePointNums[switchNums([2]int{b, c})]
			edgePointCd := edgePointNums[switchNums([2]int{c, d})]
			newFaces = append(newFaces, Face{a, edgePointAb, facePointAbcd, edgePointDa})
			newFaces = append(newFaces, Face{b, edgePointBc, facePointAbcd, edgePointAb})
			newFaces = append(newFaces, Face{c, edgePointCd, facePointAbcd, edgePointBc})
			newFaces = append(newFaces, Face{d, edgePointDa, facePointAbcd, edgePointCd})
		}
	}
	return newPoints, newFaces
}
func main() {
inputPoints := []Point{
{-1.0, 1.0, 1.0},
{-1.0, -1.0, 1.0},
{1.0, -1.0, 1.0},
{1.0, 1.0, 1.0},
{1.0, -1.0, -1.0},
{1.0, 1.0, -1.0},
{-1.0, -1.0, -1.0},
{-1.0, 1.0, -1.0},
}
inputFaces := []Face{
{0, 1, 2, 3},
{3, 2, 4, 5},
{5, 4, 6, 7},
{7, 0, 3, 5},
{7, 6, 1, 0},
{6, 1, 2, 4},
}
outputPoints := make([]Point, len(inputPoints))
outputFaces := make([]Face, len(inputFaces))
copy(outputPoints, inputPoints)
copy(outputFaces, inputFaces)
iterations := 1
for i := 0; i < iterations; i++ {
outputPoints, outputFaces = cmcSubdiv(outputPoints, outputFaces)
}
for _, p := range outputPoints {
fmt.Printf("% .4f\n", p)
}
fmt.Println()
for _, f := range outputFaces {
fmt.Printf("%2d\n", f)
}
} | lang/Go/catmull-clark-subdivision-surface.go | 0.599954 | 0.700479 | catmull-clark-subdivision-surface.go | starcoder |
package df
import (
"math/big"
"fmt"
)
// SquareProver proves that the commitment hides the square. Given c,
// prove that c = g^(x^2) * h^r (mod n). The proof reduces to an
// equality-of-committed-values proof between two commitments to x.
type SquareProver struct {
	*EqualityProver
	// We have two commitments with the same value: SmallCommitment = g^x * h^r1 and
	// c = SmallCommitment^x * h^r2. Also c = g^(x^2) * h^r.
	// SmallCommitment must be sent to the verifier alongside the proof.
	SmallCommitment *big.Int
}
// NewSquareProver builds a prover for the statement c = g^(x^2) * h^r
// (mod n), where c is the commitment already held by committer and x is
// the committed root. challengeSpaceSize is passed through to the
// underlying EqualityProver.
func NewSquareProver(committer *Committer,
	x *big.Int, challengeSpaceSize int) (*SquareProver, error) {
	// Input committer contains c = g^(x^2) * h^r (mod n).
	// We now create two committers - committer1 will contain SmallCommitment = g^x * h^r1 (mod n),
	// committer2 will contain the same c as committer, but using a different
	// base c = SmallCommitment^x * h^r2.
	// Note that c = SmallCommitment^x * h^r2 = g^(x^2) * h^(r1*x) * h^r2, so we choose r2 = r - r1*x.
	// SquareProver proves that committer1 and committer2 hide the same value (x) -
	// using EqualityProver.
	committer1 := NewCommitter(committer.QRSpecialRSA.N,
		committer.G, committer.H, committer.T, committer.K)
	smallCommitment, err := committer1.GetCommitMsg(x)
	if err != nil {
		return nil, fmt.Errorf("error when creating commit msg")
	}
	committer2 := NewCommitter(committer.QRSpecialRSA.N,
		smallCommitment, committer.H, committer.T, committer.K)
	_, r := committer.GetDecommitMsg()
	_, r1 := committer1.GetDecommitMsg()
	// r2 = r - r1*x keeps committer2's opening consistent with c.
	r1x := new(big.Int).Mul(r1, x)
	r2 := new(big.Int).Sub(r, r1x)
	// we already know the commitment (it is c), so we ignore the first variable -
	// just need to set the committer2 committedValue and r:
	_, err = committer2.GetCommitMsgWithGivenR(x, r2)
	if err != nil {
		return nil, fmt.Errorf("error when creating commit msg with given r")
	}
	prover := NewEqualityProver(committer1, committer2, challengeSpaceSize)
	return &SquareProver{
		EqualityProver: prover,
		SmallCommitment: smallCommitment,
	}, nil
}
// SquareVerifier verifies a SquareProver's proof that a commitment hides a
// square. It is an EqualityVerifier over the two related commitments.
type SquareVerifier struct {
	*EqualityVerifier
}
func NewSquareVerifier(receiver *Receiver,
c1 *big.Int, challengeSpaceSize int) (*SquareVerifier, error) {
receiver1, err := NewReceiverFromParams(receiver.QRSpecialRSA.GetPrimes(),
receiver.G, receiver.H, receiver.K)
if err != nil {
return nil, fmt.Errorf("error when calling NewReceiverFromParams")
}
receiver1.SetCommitment(c1)
receiver2, err := NewReceiverFromParams(receiver.QRSpecialRSA.GetPrimes(),
c1, receiver.H, receiver.K)
if err != nil {
return nil, fmt.Errorf("error when calling NewReceiverFromParams")
}
receiver2.SetCommitment(receiver.Commitment)
verifier := NewEqualityVerifier(receiver1, receiver2, challengeSpaceSize)
return &SquareVerifier{
verifier,
}, nil
} | df/square_commitment.go | 0.678007 | 0.433921 | square_commitment.go | starcoder |
package util
import (
"math"
)
// Precomputed angle constants (in radians) and degree/radian conversion
// factors.
const (
	_pi   = math.Pi
	_2pi  = 2 * math.Pi
	_3pi4 = (3 * math.Pi) / 4.0
	_4pi3 = (4 * math.Pi) / 3.0
	_3pi2 = (3 * math.Pi) / 2.0
	_5pi4 = (5 * math.Pi) / 4.0
	_7pi4 = (7 * math.Pi) / 4.0
	_pi2  = math.Pi / 2.0
	_pi4  = math.Pi / 4.0
	_d2r  = (math.Pi / 180.0) // degrees -> radians
	_r2d  = (180.0 / math.Pi) // radians -> degrees
)
var (
	// Math contains helper methods for common math operations.
	Math = &mathUtil{}
)

type mathUtil struct{}

// Max returns the largest of the given values, or 0 when called with no
// arguments.
func (m mathUtil) Max(values ...float64) float64 {
	if len(values) == 0 {
		return 0
	}
	result := values[0]
	for _, value := range values[1:] {
		if value > result {
			result = value
		}
	}
	return result
}
// MinAndMax returns both the smallest and largest of values in a single
// pass. Both results are 0 when no values are given.
func (m mathUtil) MinAndMax(values ...float64) (min float64, max float64) {
	if len(values) == 0 {
		return
	}
	min, max = values[0], values[0]
	for _, value := range values[1:] {
		switch {
		case value > max:
			max = value
		case value < min:
			min = value
		}
	}
	return
}
// GetRoundToForDelta returns a `roundTo` value for a given delta: one tenth
// of the largest power of ten (scanning downward from 10^10) that delta
// exceeds. Returns 0 when delta exceeds none of them.
func (m mathUtil) GetRoundToForDelta(delta float64) float64 {
	startingDeltaBound := math.Pow(10.0, 10.0)
	for cursor := startingDeltaBound; cursor > 0; cursor /= 10.0 {
		if delta > cursor {
			return cursor / 10.0
		}
	}
	return 0.0
}
// RoundUp rounds value up to the nearest multiple of roundTo.
// A roundTo below 1e-15 leaves value unchanged.
func (m mathUtil) RoundUp(value, roundTo float64) float64 {
	if roundTo < 0.000000000000001 {
		return value
	}
	return math.Ceil(value/roundTo) * roundTo
}

// RoundDown rounds value down to the nearest multiple of roundTo.
// A roundTo below 1e-15 leaves value unchanged.
func (m mathUtil) RoundDown(value, roundTo float64) float64 {
	if roundTo < 0.000000000000001 {
		return value
	}
	return math.Floor(value/roundTo) * roundTo
}
// Normalize returns a set of numbers on the interval [0,1] for a given set
// of inputs, dividing each value by the total and rounding the share down to
// 4 decimal places.
// An example: 4,3,2,1 => 0.4, 0.3, 0.2, 0.1
// Caveat: because shares are rounded down, the results may sum to < 1.0.
// When the inputs sum to zero a slice of zeros is returned; previously the
// division produced NaN shares.
func (m mathUtil) Normalize(values ...float64) []float64 {
	var total float64
	for _, v := range values {
		total += v
	}
	output := make([]float64, len(values))
	if total == 0 {
		return output
	}
	for x, v := range values {
		output[x] = m.RoundDown(v/total, 0.0001)
	}
	return output
}
// MinInt returns the smallest of a set of integers, or math.MaxInt32 when
// called with no arguments.
func (m mathUtil) MinInt(values ...int) int {
	result := math.MaxInt32
	for _, value := range values {
		if value < result {
			result = value
		}
	}
	return result
}

// MaxInt returns the largest of a set of integers, or math.MinInt32 when
// called with no arguments.
func (m mathUtil) MaxInt(values ...int) int {
	result := math.MinInt32
	for _, value := range values {
		if value > result {
			result = value
		}
	}
	return result
}
// AbsInt returns the absolute value of an integer.
func (m mathUtil) AbsInt(value int) int {
	if value < 0 {
		return -value
	}
	return value
}

// AbsInt64 returns the absolute value of an int64.
func (m mathUtil) AbsInt64(value int64) int64 {
	if value < 0 {
		return -value
	}
	return value
}
// Mean returns the arithmetic mean of a set of values, or 0 when no values
// are given (previously the 0/0 division yielded NaN).
func (m mathUtil) Mean(values ...float64) float64 {
	if len(values) == 0 {
		return 0
	}
	return m.Sum(values...) / float64(len(values))
}

// MeanInt returns the arithmetic mean (integer division) of a set of integer
// values, or 0 when no values are given (previously this panicked with an
// integer divide by zero).
func (m mathUtil) MeanInt(values ...int) int {
	if len(values) == 0 {
		return 0
	}
	return m.SumInt(values...) / len(values)
}
// Sum returns the total of a set of values.
func (m mathUtil) Sum(values ...float64) float64 {
	total := 0.0
	for _, value := range values {
		total += value
	}
	return total
}

// SumInt returns the total of a set of integer values.
func (m mathUtil) SumInt(values ...int) int {
	total := 0
	for _, value := range values {
		total += value
	}
	return total
}
// PercentDifference computes the relative difference between two values as
// (v2-v1)/v1, e.g. 0.1 for a 10% increase.
// Returns 0 when v1 is 0, where the ratio would otherwise be undefined.
func (m mathUtil) PercentDifference(v1, v2 float64) float64 {
	if v1 == 0 {
		return 0
	}
	return (v2 - v1) / v1
}
// DegreesToRadians converts a degree value to radians.
func (m mathUtil) DegreesToRadians(degrees float64) float64 {
	return degrees * _d2r
}

// RadiansToDegrees converts a radian value to degrees, first reducing it
// modulo a full circle (the result keeps the sign of the input).
func (m mathUtil) RadiansToDegrees(value float64) float64 {
	return math.Mod(value, _2pi) * _r2d
}

// PercentToRadians converts a normalized value (0,1) to radians;
// 1.0 corresponds to a full circle.
func (m mathUtil) PercentToRadians(pct float64) float64 {
	return m.DegreesToRadians(360.0 * pct)
}
// RadianAdd adds a delta to a base angle in radians, wrapping sums above 2π
// back into range and wrapping small negative sums around from 2π.
// NOTE(review): sums below -2π still come out negative (math.Mod keeps the
// sign of _2pi+value) — confirm callers stay within one negative revolution.
func (m mathUtil) RadianAdd(base, delta float64) float64 {
	value := base + delta
	if value > _2pi {
		return math.Mod(value, _2pi)
	} else if value < 0 {
		return math.Mod(_2pi+value, _2pi)
	}
	return value
}
// DegreesAdd adds a delta to a base angle in degrees, wrapping sums of a
// full circle or more into [0, 360) and wrapping small negative sums around
// from 360.
// NOTE(review): sums below -360 still come out negative — confirm callers
// stay within one negative revolution.
func (m mathUtil) DegreesAdd(baseDegrees, deltaDegrees float64) float64 {
	value := baseDegrees + deltaDegrees
	if value >= 360.0 {
		// Fix: the overflow bound previously compared the degree value
		// against _2pi (~6.28, a radian constant). Because Mod(value, 360)
		// equals value for anything below 360, observable results are
		// unchanged; the comparison now states the intended full-circle
		// bound in degrees.
		return math.Mod(value, 360.0)
	} else if value < 0 {
		return math.Mod(360.0+value, 360.0)
	}
	return value
}
// DegreesToCompass returns the degree value shifted into compass / clock
// orientation (rotated by -90 degrees and wrapped via DegreesAdd).
func (m mathUtil) DegreesToCompass(deg float64) float64 {
	return m.DegreesAdd(deg, -90.0)
}

// CirclePoint returns the absolute position of a point on a circle centered
// at (cx, cy) with the given radius and angle. thetaRadians=0 maps to
// (cx, cy-radius) and π/2 to (cx+radius, cy), i.e. clockwise from the top in
// screen coordinates where y grows downward. Results are truncated to ints.
func (m mathUtil) CirclePoint(cx, cy int, radius, thetaRadians float64) (x, y int) {
	x = cx + int(radius*math.Sin(thetaRadians))
	y = cy - int(radius*math.Cos(thetaRadians))
	return
}
// RotateCoordinate rotates point (x, y) about center (cx, cy) by
// thetaRadians using the standard 2D rotation matrix, truncating the
// rotated offsets to integers before re-adding the center.
func (m mathUtil) RotateCoordinate(cx, cy, x, y int, thetaRadians float64) (rx, ry int) {
	tempX, tempY := float64(x-cx), float64(y-cy)
	rotatedX := tempX*math.Cos(thetaRadians) - tempY*math.Sin(thetaRadians)
	rotatedY := tempX*math.Sin(thetaRadians) + tempY*math.Cos(thetaRadians)
	rx = int(rotatedX) + cx
	ry = int(rotatedY) + cy
	return
}
package expr
import (
"encoding/binary"
"errors"
"fmt"
"strings"
"github.com/genjidb/genji/document"
)
// Functions represents a map of builtin SQL functions, keyed by lower-case
// function name (GetFunc lower-cases its lookups).
type Functions struct {
	m map[string]func(args ...Expr) (Expr, error)
}
// BuiltinFunctions returns the default map of builtin functions. Each entry
// maps a lower-case function name to a constructor that validates the
// argument count and returns the corresponding function expression.
func BuiltinFunctions() map[string]func(args ...Expr) (Expr, error) {
	return map[string]func(args ...Expr) (Expr, error){
		"pk": func(args ...Expr) (Expr, error) {
			if len(args) != 0 {
				return nil, fmt.Errorf("pk() takes no arguments")
			}
			return new(PKFunc), nil
		},
		"count": func(args ...Expr) (Expr, error) {
			if len(args) != 1 {
				return nil, fmt.Errorf("COUNT() takes 1 argument")
			}
			return &CountFunc{Expr: args[0]}, nil
		},
		"min": func(args ...Expr) (Expr, error) {
			if len(args) != 1 {
				return nil, fmt.Errorf("MIN() takes 1 argument")
			}
			return &MinFunc{Expr: args[0]}, nil
		},
		"max": func(args ...Expr) (Expr, error) {
			if len(args) != 1 {
				return nil, fmt.Errorf("MAX() takes 1 argument")
			}
			return &MaxFunc{Expr: args[0]}, nil
		},
		"sum": func(args ...Expr) (Expr, error) {
			if len(args) != 1 {
				return nil, fmt.Errorf("SUM() takes 1 argument")
			}
			return &SumFunc{Expr: args[0]}, nil
		},
		"avg": func(args ...Expr) (Expr, error) {
			if len(args) != 1 {
				return nil, fmt.Errorf("AVG() takes 1 argument")
			}
			return &AvgFunc{Expr: args[0]}, nil
		},
	}
}
// NewFunctions returns a Functions registry pre-populated with the builtin
// functions.
func NewFunctions() Functions {
	return Functions{m: BuiltinFunctions()}
}
// AddFunc registers a function constructor under the given name. The name is
// stored lower-cased so that GetFunc — which lower-cases its lookups — can
// find it regardless of the caller's casing; previously a mixed-case
// registration was unreachable.
func (f Functions) AddFunc(name string, fn func(args ...Expr) (Expr, error)) {
	f.m[strings.ToLower(name)] = fn
}
// GetFunc returns a function expression by name (case-insensitive), built
// with the given arguments. It fails if the name is unknown or the
// constructor rejects the arguments.
func (f Functions) GetFunc(name string, args ...Expr) (Expr, error) {
	fn, ok := f.m[strings.ToLower(name)]
	if !ok {
		return nil, fmt.Errorf("no such function: %q", name)
	}
	return fn(args...)
}
// PKFunc represents the pk() function.
// It returns the primary key of the current document.
type PKFunc struct{}

// Eval returns the primary key of the current document. When the table has
// no declared primary key, the document key is decoded as an unsigned varint
// and returned as an integer.
func (k PKFunc) Eval(ctx EvalStack) (document.Value, error) {
	if ctx.Info == nil {
		return document.Value{}, errors.New("no table specified")
	}
	pk := ctx.Info.GetPrimaryKey()
	if pk != nil {
		return pk.Path.GetValue(ctx.Document)
	}
	// NOTE(review): the Uvarint byte count / error is discarded and the
	// Keyer assertion panics on a keyless document — confirm callers always
	// supply a keyed document here.
	i, _ := binary.Uvarint(ctx.Document.(document.Keyer).Key())
	return document.NewIntegerValue(int64(i)), nil
}
// IsEqual compares this expression with the other expression and returns
// true if they are equal. Any PKFunc equals any other, since pk() carries
// no state.
func (k PKFunc) IsEqual(other Expr) bool {
	_, ok := other.(PKFunc)
	return ok
}

// String returns the SQL representation of the pk() call.
func (k PKFunc) String() string {
	return "pk()"
}
// CastFunc represents the CAST expression: CAST(expr AS type).
type CastFunc struct {
	Expr   Expr
	CastAs document.ValueType
}

// Eval evaluates the inner expression and converts the result to the target
// type.
func (c CastFunc) Eval(ctx EvalStack) (document.Value, error) {
	v, err := c.Expr.Eval(ctx)
	if err != nil {
		return v, err
	}
	return v.CastAs(c.CastAs)
}

// IsEqual compares this expression with the other expression and returns
// true if they are equal: both are CastFuncs with the same target type and
// equal (or both nil) inner expressions.
func (c CastFunc) IsEqual(other Expr) bool {
	if other == nil {
		return false
	}
	o, ok := other.(CastFunc)
	if !ok {
		return false
	}
	if c.CastAs != o.CastAs {
		return false
	}
	if c.Expr != nil {
		return Equal(c.Expr, o.Expr)
	}
	// Bug fix: with a nil inner expression the two casts are equal only when
	// the other's inner expression is nil too; the comparison was previously
	// inverted (`o.Expr != nil`).
	return o.Expr == nil
}

// String returns the SQL representation of the cast.
func (c CastFunc) String() string {
	return fmt.Sprintf("CAST(%v AS %v)", c.Expr, c.CastAs)
}
// CountFunc is the COUNT aggregator function. It counts documents
// (COUNT(*)) or non-null evaluations of its expression.
type CountFunc struct {
	Expr     Expr
	Alias    string
	Wildcard bool
}

// Eval reads the previously aggregated count back out of the current
// document under this function's field name (alias or "COUNT(...)").
func (c *CountFunc) Eval(ctx EvalStack) (document.Value, error) {
	if ctx.Document == nil {
		return document.Value{}, errors.New("misuse of aggregation function COUNT()")
	}
	return ctx.Document.GetByField(c.String())
}

// SetAlias sets the alias used as the output field name.
func (c *CountFunc) SetAlias(alias string) {
	c.Alias = alias
}

// Aggregator returns a fresh counting aggregator for one group.
func (c *CountFunc) Aggregator(group document.Value) document.Aggregator {
	return &CountAggregator{
		Fn: c,
	}
}

// IsEqual compares this expression with the other expression and returns
// true if they are equal.
func (c *CountFunc) IsEqual(other Expr) bool {
	if other == nil {
		return false
	}
	o, ok := other.(*CountFunc)
	if !ok {
		return false
	}
	if c.Wildcard && o.Wildcard {
		return c.Expr == nil && o.Expr == nil
	}
	return Equal(c.Expr, o.Expr)
}

// String returns the alias if non-zero, otherwise the SQL representation
// (COUNT(*) or COUNT(expr)).
func (c *CountFunc) String() string {
	if c.Alias != "" {
		return c.Alias
	}
	if c.Wildcard {
		return "COUNT(*)"
	}
	return fmt.Sprintf("COUNT(%v)", c.Expr)
}
// CountAggregator is an aggregator that counts non-null expressions.
type CountAggregator struct {
	Fn    *CountFunc
	Count int64
}

// Add increments the counter unconditionally in wildcard mode, otherwise
// only when the count expression evaluates to a non-null value. A missing
// field is treated as null rather than an error.
func (c *CountAggregator) Add(d document.Document) error {
	if c.Fn.Wildcard {
		c.Count++
		return nil
	}
	v, err := c.Fn.Expr.Eval(EvalStack{
		Document: d,
	})
	if err != nil && err != document.ErrFieldNotFound {
		return err
	}
	if v != nullLitteral {
		c.Count++
	}
	return nil
}

// Aggregate adds a field to the given buffer with the value of the counter.
func (c *CountAggregator) Aggregate(fb *document.FieldBuffer) error {
	fb.Add(c.Fn.String(), document.NewIntegerValue(c.Count))
	return nil
}
// MinFunc is the MIN aggregator function.
type MinFunc struct {
	Expr  Expr
	Alias string
}

// Eval reads the previously aggregated minimum back out of the current
// document under this function's field name (alias or "MIN(...)").
func (m *MinFunc) Eval(ctx EvalStack) (document.Value, error) {
	if ctx.Document == nil {
		return document.Value{}, errors.New("misuse of aggregation function MIN()")
	}
	return ctx.Document.GetByField(m.String())
}

// SetAlias implements the planner.AggregatorBuilder interface.
func (m *MinFunc) SetAlias(alias string) {
	m.Alias = alias
}

// Aggregator implements the planner.AggregatorBuilder interface. It returns
// a fresh minimum-tracking aggregator for one group.
func (m *MinFunc) Aggregator(group document.Value) document.Aggregator {
	return &MinAggregator{
		Fn: m,
	}
}

// IsEqual compares this expression with the other expression and returns
// true if they are equal.
func (m *MinFunc) IsEqual(other Expr) bool {
	if other == nil {
		return false
	}
	o, ok := other.(*MinFunc)
	if !ok {
		return false
	}
	return Equal(m.Expr, o.Expr)
}

// String returns the alias if non-zero, otherwise it returns a string
// representation of the min expression.
func (m *MinFunc) String() string {
	if m.Alias != "" {
		return m.Alias
	}
	return fmt.Sprintf("MIN(%v)", m.Expr)
}
// MinAggregator is an aggregator that returns the minimum non-null value.
type MinAggregator struct {
	Fn  *MinFunc
	Min document.Value
}

// Add stores the minimum value. Values are compared based on their types,
// then if the type is equal their value is compared. Numbers are considered
// of the same type.
func (m *MinAggregator) Add(d document.Document) error {
	v, err := m.Fn.Expr.Eval(EvalStack{
		Document: d,
	})
	if err != nil && err != document.ErrFieldNotFound {
		return err
	}
	if v == nullLitteral {
		return nil
	}
	if m.Min.Type == 0 {
		// First non-null value seen for this group.
		m.Min = v
		return nil
	}
	// Bug fix: the second operand previously re-tested m.Min.Type.IsNumber()
	// instead of v.Type.IsNumber(), so a numeric minimum was value-compared
	// against non-numeric candidates instead of ordering them by type.
	if m.Min.Type == v.Type || m.Min.Type.IsNumber() && v.Type.IsNumber() {
		ok, err := m.Min.IsGreaterThan(v)
		if err != nil {
			return err
		}
		if ok {
			m.Min = v
		}
		return nil
	}
	// Different, non-comparable types: order by type precedence.
	if m.Min.Type > v.Type {
		m.Min = v
	}
	return nil
}

// Aggregate adds a field to the given buffer with the minimum value, or
// NULL when no non-null value was seen.
func (m *MinAggregator) Aggregate(fb *document.FieldBuffer) error {
	if m.Min.Type == 0 {
		fb.Add(m.Fn.String(), document.NewNullValue())
	} else {
		fb.Add(m.Fn.String(), m.Min)
	}
	return nil
}
// MaxFunc is the MAX aggregator function.
type MaxFunc struct {
	Expr  Expr
	Alias string
}

// Eval reads the previously aggregated maximum back out of the current
// document under this function's field name (alias or "MAX(...)").
func (m *MaxFunc) Eval(ctx EvalStack) (document.Value, error) {
	if ctx.Document == nil {
		return document.Value{}, errors.New("misuse of aggregation function MAX()")
	}
	return ctx.Document.GetByField(m.String())
}

// SetAlias implements the planner.AggregatorBuilder interface.
func (m *MaxFunc) SetAlias(alias string) {
	m.Alias = alias
}

// Aggregator implements the planner.AggregatorBuilder interface. It returns
// a fresh maximum-tracking aggregator for one group.
func (m *MaxFunc) Aggregator(group document.Value) document.Aggregator {
	return &MaxAggregator{
		Fn: m,
	}
}

// IsEqual compares this expression with the other expression and returns
// true if they are equal.
func (m *MaxFunc) IsEqual(other Expr) bool {
	if other == nil {
		return false
	}
	o, ok := other.(*MaxFunc)
	if !ok {
		return false
	}
	return Equal(m.Expr, o.Expr)
}

// String returns the alias if non-zero, otherwise it returns a string
// representation of the max expression.
func (m *MaxFunc) String() string {
	if m.Alias != "" {
		return m.Alias
	}
	return fmt.Sprintf("MAX(%v)", m.Expr)
}
// MaxAggregator is an aggregator that returns the maximum non-null value.
type MaxAggregator struct {
	Fn  *MaxFunc
	Max document.Value
}

// Add stores the maximum value. Values are compared based on their types,
// then if the type is equal their value is compared. Numbers are considered
// of the same type.
func (m *MaxAggregator) Add(d document.Document) error {
	v, err := m.Fn.Expr.Eval(EvalStack{
		Document: d,
	})
	if err != nil && err != document.ErrFieldNotFound {
		return err
	}
	if v == nullLitteral {
		return nil
	}
	if m.Max.Type == 0 {
		// First non-null value seen for this group.
		m.Max = v
		return nil
	}
	// Bug fix: the second operand previously re-tested m.Max.Type.IsNumber()
	// instead of v.Type.IsNumber(), so a numeric maximum was value-compared
	// against non-numeric candidates instead of ordering them by type.
	if m.Max.Type == v.Type || m.Max.Type.IsNumber() && v.Type.IsNumber() {
		ok, err := m.Max.IsLesserThan(v)
		if err != nil {
			return err
		}
		if ok {
			m.Max = v
		}
		return nil
	}
	// Different, non-comparable types: order by type precedence.
	if m.Max.Type < v.Type {
		m.Max = v
	}
	return nil
}

// Aggregate adds a field to the given buffer with the maximum value, or
// NULL when no non-null value was seen.
func (m *MaxAggregator) Aggregate(fb *document.FieldBuffer) error {
	if m.Max.Type == 0 {
		fb.Add(m.Fn.String(), document.NewNullValue())
	} else {
		fb.Add(m.Fn.String(), m.Max)
	}
	return nil
}
// SumFunc is the SUM aggregator function.
type SumFunc struct {
	Expr  Expr
	Alias string
}

// Eval reads the previously aggregated sum back out of the current document
// under this function's field name (alias or "SUM(...)").
func (s *SumFunc) Eval(ctx EvalStack) (document.Value, error) {
	if ctx.Document == nil {
		return document.Value{}, errors.New("misuse of aggregation function SUM()")
	}
	return ctx.Document.GetByField(s.String())
}

// SetAlias implements the planner.AggregatorBuilder interface.
func (s *SumFunc) SetAlias(alias string) {
	s.Alias = alias
}

// Aggregator implements the planner.AggregatorBuilder interface. It returns
// a fresh summing aggregator for one group.
func (s *SumFunc) Aggregator(group document.Value) document.Aggregator {
	return &SumAggregator{
		Fn: s,
	}
}

// IsEqual compares this expression with the other expression and returns
// true if they are equal.
func (s *SumFunc) IsEqual(other Expr) bool {
	if other == nil {
		return false
	}
	o, ok := other.(*SumFunc)
	if !ok {
		return false
	}
	return Equal(s.Expr, o.Expr)
}

// String returns the alias if non-zero, otherwise it returns a string
// representation of the sum expression.
func (s *SumFunc) String() string {
	if s.Alias != "" {
		return s.Alias
	}
	return fmt.Sprintf("SUM(%v)", s.Expr)
}
// SumAggregator is an aggregator that sums the non-null numeric values in
// the group. The running sum is kept as *int64 while all inputs are
// integers, and promoted to *float64 once a double is seen; both pointers
// nil means "no numeric value seen yet".
type SumAggregator struct {
	Fn   *SumFunc
	SumI *int64
	SumF *float64
}

// Add stores the sum of all non-NULL numeric values in the group.
// The result is an integer value if all summed values are integers.
// If any of the value is a double, the returned result will be a double.
func (s *SumAggregator) Add(d document.Document) error {
	v, err := s.Fn.Expr.Eval(EvalStack{
		Document: d,
	})
	if err != nil && err != document.ErrFieldNotFound {
		return err
	}
	// Non-numeric values (including null / missing fields) are ignored.
	if v.Type != document.IntegerValue && v.Type != document.DoubleValue {
		return nil
	}
	// Already promoted to float: keep accumulating as float.
	if s.SumF != nil {
		if v.Type == document.IntegerValue {
			*s.SumF += float64(v.V.(int64))
		} else {
			*s.SumF += float64(v.V.(float64))
		}
		return nil
	}
	// First double seen: promote the integer sum (if any) to float.
	if v.Type == document.DoubleValue {
		var sumF float64
		if s.SumI != nil {
			sumF = float64(*s.SumI)
		}
		s.SumF = &sumF
		*s.SumF += float64(v.V.(float64))
		return nil
	}
	// Integer input while still in integer mode.
	if s.SumI == nil {
		var sumI int64
		s.SumI = &sumI
	}
	*s.SumI += v.V.(int64)
	return nil
}

// Aggregate adds a field to the given buffer with the sum: a double when any
// input was a double, an integer when all inputs were integers, or NULL when
// no numeric value was seen.
func (s *SumAggregator) Aggregate(fb *document.FieldBuffer) error {
	if s.SumF != nil {
		fb.Add(s.Fn.String(), document.NewDoubleValue(*s.SumF))
	} else if s.SumI != nil {
		fb.Add(s.Fn.String(), document.NewIntegerValue(*s.SumI))
	} else {
		fb.Add(s.Fn.String(), document.NewNullValue())
	}
	return nil
}
// AvgFunc is the AVG aggregator function.
type AvgFunc struct {
	Expr  Expr
	Alias string
}

// Eval reads the previously aggregated average back out of the current
// document under this function's field name (alias or "AVG(...)").
func (s *AvgFunc) Eval(ctx EvalStack) (document.Value, error) {
	if ctx.Document == nil {
		return document.Value{}, errors.New("misuse of aggregation function AVG()")
	}
	return ctx.Document.GetByField(s.String())
}

// SetAlias implements the planner.AggregatorBuilder interface.
func (s *AvgFunc) SetAlias(alias string) {
	s.Alias = alias
}

// Aggregator implements the planner.AggregatorBuilder interface. It returns
// a fresh averaging aggregator for one group.
func (s *AvgFunc) Aggregator(group document.Value) document.Aggregator {
	return &AvgAggregator{
		Fn: s,
	}
}

// IsEqual compares this expression with the other expression and returns
// true if they are equal.
func (s *AvgFunc) IsEqual(other Expr) bool {
	if other == nil {
		return false
	}
	o, ok := other.(*AvgFunc)
	if !ok {
		return false
	}
	return Equal(s.Expr, o.Expr)
}

// String returns the alias if non-zero, otherwise it returns a string
// representation of the average expression.
func (s *AvgFunc) String() string {
	if s.Alias != "" {
		return s.Alias
	}
	return fmt.Sprintf("AVG(%v)", s.Expr)
}
// AvgAggregator is an aggregator that returns the average of the non-null
// numeric values in the group.
type AvgAggregator struct {
	Fn      *AvgFunc
	Avg     float64 // running sum of numeric inputs, divided by Counter at the end
	Counter int64   // number of numeric inputs seen
}

// Add accumulates the expression result into the running sum when it is
// numeric; other types (including null / missing field) are ignored.
func (s *AvgAggregator) Add(d document.Document) error {
	v, err := s.Fn.Expr.Eval(EvalStack{
		Document: d,
	})
	if err != nil && err != document.ErrFieldNotFound {
		return err
	}
	switch v.Type {
	case document.IntegerValue:
		s.Avg += float64(v.V.(int64))
	case document.DoubleValue:
		s.Avg += v.V.(float64)
	default:
		return nil
	}
	s.Counter++
	return nil
}

// Aggregate adds a field to the given buffer with the average as a double
// (0 when no numeric value was seen).
func (s *AvgAggregator) Aggregate(fb *document.FieldBuffer) error {
	if s.Counter == 0 {
		fb.Add(s.Fn.String(), document.NewDoubleValue(0))
	} else {
		fb.Add(s.Fn.String(), document.NewDoubleValue(s.Avg/float64(s.Counter)))
	}
	return nil
}
package otto
import (
"math"
"math/rand"
)
// Math
// builtinMath_abs implements Math.abs.
func builtinMath_abs(call FunctionCall) Value {
	return toValue_float64(math.Abs(call.Argument(0).float64()))
}

// builtinMath_acos implements Math.acos.
func builtinMath_acos(call FunctionCall) Value {
	return toValue_float64(math.Acos(call.Argument(0).float64()))
}

// builtinMath_asin implements Math.asin.
func builtinMath_asin(call FunctionCall) Value {
	return toValue_float64(math.Asin(call.Argument(0).float64()))
}

// builtinMath_atan implements Math.atan.
func builtinMath_atan(call FunctionCall) Value {
	return toValue_float64(math.Atan(call.Argument(0).float64()))
}

// builtinMath_atan2 implements Math.atan2, returning NaN when either
// argument is NaN.
func builtinMath_atan2(call FunctionCall) Value {
	y := call.Argument(0).float64()
	if math.IsNaN(y) {
		return NaNValue()
	}
	x := call.Argument(1).float64()
	if math.IsNaN(x) {
		return NaNValue()
	}
	return toValue_float64(math.Atan2(y, x))
}
// builtinMath_cos implements Math.cos.
func builtinMath_cos(call FunctionCall) Value {
	return toValue_float64(math.Cos(call.Argument(0).float64()))
}

// builtinMath_ceil implements Math.ceil.
func builtinMath_ceil(call FunctionCall) Value {
	return toValue_float64(math.Ceil(call.Argument(0).float64()))
}

// builtinMath_exp implements Math.exp.
func builtinMath_exp(call FunctionCall) Value {
	return toValue_float64(math.Exp(call.Argument(0).float64()))
}

// builtinMath_floor implements Math.floor.
func builtinMath_floor(call FunctionCall) Value {
	return toValue_float64(math.Floor(call.Argument(0).float64()))
}

// builtinMath_log implements Math.log (natural logarithm).
func builtinMath_log(call FunctionCall) Value {
	return toValue_float64(math.Log(call.Argument(0).float64()))
}
// builtinMath_max implements Math.max: -Infinity with no arguments, NaN if
// any argument is NaN, otherwise the largest argument.
func builtinMath_max(call FunctionCall) Value {
	args := call.ArgumentList
	switch len(args) {
	case 0:
		return negativeInfinityValue()
	case 1:
		return toValue_float64(args[0].float64())
	}
	result := args[0].float64()
	if math.IsNaN(result) {
		return NaNValue()
	}
	for _, arg := range args[1:] {
		number := arg.float64()
		if math.IsNaN(number) {
			return NaNValue()
		}
		result = math.Max(result, number)
	}
	return toValue_float64(result)
}

// builtinMath_min implements Math.min: +Infinity with no arguments, NaN if
// any argument is NaN, otherwise the smallest argument.
func builtinMath_min(call FunctionCall) Value {
	args := call.ArgumentList
	switch len(args) {
	case 0:
		return positiveInfinityValue()
	case 1:
		return toValue_float64(args[0].float64())
	}
	result := args[0].float64()
	if math.IsNaN(result) {
		return NaNValue()
	}
	for _, arg := range args[1:] {
		number := arg.float64()
		if math.IsNaN(number) {
			return NaNValue()
		}
		result = math.Min(result, number)
	}
	return toValue_float64(result)
}
// builtinMath_pow implements Math.pow.
// TODO Make sure this works according to the specification (ES5.1
// §15.8.2.13): the spec requires NaN when |x| == 1 and y is ±Infinity,
// which is handled explicitly below because Go's math.Pow returns 1 for
// Pow(±1, ±Inf).
func builtinMath_pow(call FunctionCall) Value {
	x := call.Argument(0).float64()
	y := call.Argument(1).float64()
	if math.Abs(x) == 1 && math.IsInf(y, 0) {
		return NaNValue()
	}
	return toValue_float64(math.Pow(x, y))
}
// builtinMath_random implements Math.random, preferring the runtime's
// injected random source when one is configured and falling back to
// math/rand otherwise.
func builtinMath_random(call FunctionCall) Value {
	var v float64
	if call.runtime.random != nil {
		v = call.runtime.random()
	} else {
		v = rand.Float64()
	}
	return toValue_float64(v)
}

// builtinMath_round implements Math.round as floor(n + 0.5). A zero result
// copies the sign of the input so that rounding e.g. -0.2 yields -0.
func builtinMath_round(call FunctionCall) Value {
	number := call.Argument(0).float64()
	value := math.Floor(number + 0.5)
	if value == 0 {
		value = math.Copysign(0, number)
	}
	return toValue_float64(value)
}
func builtinMath_sin(call FunctionCall) Value {
number := call.Argument(0).float64()
return toValue_float64(math.Sin(number))
}
func builtinMath_sqrt(call FunctionCall) Value {
number := call.Argument(0).float64()
return toValue_float64(math.Sqrt(number))
}
func builtinMath_tan(call FunctionCall) Value {
number := call.Argument(0).float64()
return toValue_float64(math.Tan(number))
} | vendor/github.com/robertkrimen/otto/builtin_math.go | 0.601359 | 0.579103 | builtin_math.go | starcoder |
package iso20022
// InterestAmount2 provides the elements related to the interest amount
// calculation. Optional elements are pointers tagged omitempty so that
// unset fields are left out of the marshalled XML.
type InterestAmount2 struct {
	// Amount of money representing an interest payment.
	AccruedInterestAmount *ActiveCurrencyAndAmount `xml:"AcrdIntrstAmt"`
	// Agreed date for the interest payment.
	ValueDate *DateAndDateTimeChoice `xml:"ValDt"`
	// Indicates whether the interest will be settled in cash or rolled in the existing collateral balance.
	InterestMethod *InterestMethod1Code `xml:"IntrstMtd"`
	// Period for which the calculation has been performed.
	InterestPeriod *DatePeriodDetails `xml:"IntrstPrd"`
	// Percentage charged for the use of an amount of money, usually expressed at an annual rate. The interest rate is the ratio of the amount of interest paid during a certain period of time compared to the principal amount of the interest bearing financial instrument.
	InterestRate *InterestRate1Choice `xml:"IntrstRate,omitempty"`
	// Specifies the computation method of (accrued) interest of the security.
	DayCountBasis *InterestComputationMethod2Code `xml:"DayCntBsis,omitempty"`
	// Amount or percentage of a cash distribution that will be withheld by a tax authority.
	AppliedWithholdingTax *YesNoIndicator `xml:"ApldWhldgTax,omitempty"`
	// Specifies whether the interest is simple or compounded.
	CalculationMethod *CalculationMethod1Code `xml:"ClctnMtd,omitempty"`
	// Specifies the periodicity of the calculation of the interest.
	CalculationFrequency *Frequency1Code `xml:"ClctnFrqcy,omitempty"`
	// Specifies whether the collateral has been posted against the variation margin, the segregated independent amount or to cover any other risk defined with a proprietary code.
	CollateralPurpose *CollateralPurpose1Choice `xml:"CollPurp"`
	// Provides details about the opening collateral balance.
	OpeningCollateralBalance *CollateralBalance1 `xml:"OpngCollBal,omitempty"`
	// Provides details about the closing collateral balance.
	ClosingCollateralBalance *CollateralBalance1 `xml:"ClsgCollBal"`
	// Identifies the standard settlement instructions.
	StandardSettlementInstructions *Max140Text `xml:"StdSttlmInstrs,omitempty"`
	// Additional information related to the interest request.
	AdditionalInformation *Max210Text `xml:"AddtlInf,omitempty"`
}

// SetAccruedInterestAmount sets the accrued interest amount with its currency.
func (i *InterestAmount2) SetAccruedInterestAmount(value, currency string) {
	i.AccruedInterestAmount = NewActiveCurrencyAndAmount(value, currency)
}

// AddValueDate allocates and returns the value date element.
func (i *InterestAmount2) AddValueDate() *DateAndDateTimeChoice {
	i.ValueDate = new(DateAndDateTimeChoice)
	return i.ValueDate
}

// SetInterestMethod sets the interest settlement method code.
func (i *InterestAmount2) SetInterestMethod(value string) {
	i.InterestMethod = (*InterestMethod1Code)(&value)
}

// AddInterestPeriod allocates and returns the interest period element.
func (i *InterestAmount2) AddInterestPeriod() *DatePeriodDetails {
	i.InterestPeriod = new(DatePeriodDetails)
	return i.InterestPeriod
}

// AddInterestRate allocates and returns the interest rate element.
func (i *InterestAmount2) AddInterestRate() *InterestRate1Choice {
	i.InterestRate = new(InterestRate1Choice)
	return i.InterestRate
}

// SetDayCountBasis sets the interest computation method code.
func (i *InterestAmount2) SetDayCountBasis(value string) {
	i.DayCountBasis = (*InterestComputationMethod2Code)(&value)
}

// SetAppliedWithholdingTax sets the withholding tax indicator.
func (i *InterestAmount2) SetAppliedWithholdingTax(value string) {
	i.AppliedWithholdingTax = (*YesNoIndicator)(&value)
}

// SetCalculationMethod sets the simple/compound calculation method code.
func (i *InterestAmount2) SetCalculationMethod(value string) {
	i.CalculationMethod = (*CalculationMethod1Code)(&value)
}

// SetCalculationFrequency sets the calculation frequency code.
func (i *InterestAmount2) SetCalculationFrequency(value string) {
	i.CalculationFrequency = (*Frequency1Code)(&value)
}

// AddCollateralPurpose allocates and returns the collateral purpose element.
func (i *InterestAmount2) AddCollateralPurpose() *CollateralPurpose1Choice {
	i.CollateralPurpose = new(CollateralPurpose1Choice)
	return i.CollateralPurpose
}

// AddOpeningCollateralBalance allocates and returns the opening collateral
// balance element.
func (i *InterestAmount2) AddOpeningCollateralBalance() *CollateralBalance1 {
	i.OpeningCollateralBalance = new(CollateralBalance1)
	return i.OpeningCollateralBalance
}

// AddClosingCollateralBalance allocates and returns the closing collateral
// balance element.
func (i *InterestAmount2) AddClosingCollateralBalance() *CollateralBalance1 {
	i.ClosingCollateralBalance = new(CollateralBalance1)
	return i.ClosingCollateralBalance
}

// SetStandardSettlementInstructions sets the standard settlement
// instructions text.
func (i *InterestAmount2) SetStandardSettlementInstructions(value string) {
	i.StandardSettlementInstructions = (*Max140Text)(&value)
}

// SetAdditionalInformation sets the additional information text.
func (i *InterestAmount2) SetAdditionalInformation(value string) {
	i.AdditionalInformation = (*Max210Text)(&value)
}
package values
import (
"regexp"
"sort"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/semantic"
)
// Array represents a sequence of elements.
// All elements must be the same type.
type Array interface {
	Value
	// Get returns the element at index i; implementations may panic when i
	// is out of bounds.
	Get(i int) Value
	// Set replaces the element at index i; implementations may panic when i
	// is out of bounds.
	Set(i int, v Value)
	// Append adds v to the end of the array.
	Append(v Value)
	// Len reports the number of elements.
	Len() int
	// Range calls f for each element in order with its index.
	Range(func(i int, v Value))
	// Sort sorts the elements in place using the given less function.
	Sort(func(i, j Value) bool)
}
// array is the in-memory Array implementation, backed by a Value slice.
type array struct {
	t        semantic.MonoType
	elements []Value
}

// NewArray returns an empty Array of the given array type.
func NewArray(arrType semantic.MonoType) Array {
	return NewArrayWithBacking(arrType, nil)
}

// NewArrayWithBacking returns an Array of the given array type that uses
// elements directly as its backing storage (no copy is made). It panics
// when arrType's nature is not Array.
func NewArrayWithBacking(arrType semantic.MonoType, elements []Value) Array {
	if arrType.Nature() != semantic.Array {
		panic(UnexpectedKind(arrType.Nature(), semantic.Array))
	}
	return &array{
		t:        arrType,
		elements: elements,
	}
}
// IsNull reports whether the value is null; arrays never are.
func (a *array) IsNull() bool {
	return false
}

// Type returns the monotype of the array.
func (a *array) Type() semantic.MonoType {
	return a.t
}

// Get returns the element at index i, panicking when i is past the end.
// NOTE(review): negative i is not checked here and would trigger a plain
// runtime index panic instead of codes.Internal — confirm callers never
// pass one.
func (a *array) Get(i int) Value {
	if i >= len(a.elements) {
		panic(errors.Newf(codes.Internal, "index out of bounds: i:%d len:%d", i, len(a.elements)))
	}
	return a.elements[i]
}

// Set replaces the element at index i, panicking when i is past the end.
func (a *array) Set(i int, v Value) {
	if i >= len(a.elements) {
		panic(errors.Newf(codes.Internal, "index out of bounds: i:%d len:%d", i, len(a.elements)))
	}
	a.elements[i] = v
}

// Append adds v to the end of the array.
func (a *array) Append(v Value) {
	a.elements = append(a.elements, v)
}

// Range calls f for each element in order with its index.
func (a *array) Range(f func(i int, v Value)) {
	for i, v := range a.elements {
		f(i, v)
	}
}

// Len reports the number of elements.
func (a *array) Len() int {
	return len(a.elements)
}

// Sort sorts the elements in place using f as the less function.
func (a *array) Sort(f func(i, j Value) bool) {
	sort.Slice(a.elements, func(i, j int) bool {
		return f(a.elements[i], a.elements[j])
	})
}
// The accessors below correspond to natures other than Array; calling any
// of them on an array is a programmer error and panics with UnexpectedKind.
// (The Bytes receiver is renamed from o to a for consistency with every
// other method on *array.)

// Str panics: an array is not a string.
func (a *array) Str() string {
	panic(UnexpectedKind(semantic.Array, semantic.String))
}

// Bytes panics: an array is not bytes.
func (a *array) Bytes() []byte {
	panic(UnexpectedKind(semantic.Array, semantic.Bytes))
}

// Int panics: an array is not an integer.
func (a *array) Int() int64 {
	panic(UnexpectedKind(semantic.Array, semantic.Int))
}

// UInt panics: an array is not an unsigned integer.
func (a *array) UInt() uint64 {
	panic(UnexpectedKind(semantic.Array, semantic.UInt))
}

// Float panics: an array is not a float.
func (a *array) Float() float64 {
	panic(UnexpectedKind(semantic.Array, semantic.Float))
}

// Bool panics: an array is not a boolean.
func (a *array) Bool() bool {
	panic(UnexpectedKind(semantic.Array, semantic.Bool))
}

// Time panics: an array is not a time.
func (a *array) Time() Time {
	panic(UnexpectedKind(semantic.Array, semantic.Time))
}

// Duration panics: an array is not a duration.
func (a *array) Duration() Duration {
	panic(UnexpectedKind(semantic.Array, semantic.Duration))
}

// Regexp panics: an array is not a regular expression.
func (a *array) Regexp() *regexp.Regexp {
	panic(UnexpectedKind(semantic.Array, semantic.Regexp))
}

// Array returns the value as an Array (itself).
func (a *array) Array() Array {
	return a
}

// Object panics: an array is not an object.
func (a *array) Object() Object {
	panic(UnexpectedKind(semantic.Array, semantic.Object))
}

// Function panics: an array is not a function.
func (a *array) Function() Function {
	panic(UnexpectedKind(semantic.Array, semantic.Function))
}

// Dict panics: an array is not a dictionary.
func (a *array) Dict() Dictionary {
	panic(UnexpectedKind(semantic.Array, semantic.Dictionary))
}
// Equal reports whether rhs is an array of the same type with pairwise
// equal elements.
func (a *array) Equal(rhs Value) bool {
	if !a.Type().Equal(rhs.Type()) {
		return false
	}
	r := rhs.Array()
	// XXX: remove when array/stream are different types <https://github.com/influxdata/flux/issues/4343>
	if _, ok := r.(TableObject); ok {
		// When RHS is a table stream instead of array, mark it false.
		// This short-circuits the invalid `Len()` call below that would
		// otherwise panic.
		return false
	}
	if a.Len() != r.Len() {
		return false
	}
	length := a.Len()
	for i := 0; i < length; i++ {
		aVal := a.Get(i)
		rVal := r.Get(i)
		if !aVal.Equal(rVal) {
			return false
		}
	}
	return true
}
package operators
import (
"github.com/galaxia-team/void/void/src/exception"
"github.com/galaxia-team/void/void/src/types"
"math"
"fmt"
)
// Operator dispatch tables, one per operand type. ApplyOperator picks the
// table from the operands' types and looks the operator symbol up in it.
var (
	// StringOps maps operator symbols to string implementations.
	StringOps = map[string]func(string, string) string{
		"+": AddS,
	}
	// IntOps maps operator symbols to integer implementations.
	IntOps = map[string]func(int, int) int{
		"+":  AddI,
		"-":  SubI,
		"*":  MulI,
		"**": PowI,
		"/":  DivI,
		"//": FloorDivI,
		"%":  ModI,
	}
	// FloatOps maps operator symbols to float implementations.
	FloatOps = map[string]func(float64, float64) float64{
		"+":  AddF,
		"-":  SubF,
		"*":  MulF,
		"**": PowF,
		"/":  DivF,
		"//": FloorDivF,
	}
	// BoolOps maps operator symbols to boolean implementations.
	BoolOps = map[string]func(bool, bool) bool{
		"&&": And,
		"||": Or,
	}
)
// ApplyOperator applies the binary operator op to operands x and y and returns
// the result. Operand types are inspected at runtime: arithmetic/concatenation
// operators dispatch through StringOps/IntOps/FloatOps (mixed int/float
// operands promote the int to float64), while boolean operators first coerce
// each operand to a bool. n is the source line number used for error
// reporting; on a type or operator error exception.Except is invoked and the
// empty string is returned.
func ApplyOperator(op string, x interface{}, y interface{}, n int) interface{} {
	xt := types.GetType(x)
	yt := types.GetType(y)
	var xok bool
	var yok bool
	var sx string
	var ix int
	var fx float64
	var bx bool
	var sy string
	var iy int
	var fy float64
	var by bool
	switch xt {
	case "string":
		sx, xok = x.(string)
	case "int":
		ix, xok = x.(int)
	case "float":
		fx, xok = x.(float64)
	case "bool":
		bx, xok = x.(bool)
	}
	switch yt {
	case "string":
		sy, yok = y.(string)
	case "int":
		iy, yok = y.(int)
	case "float":
		fy, yok = y.(float64)
	case "bool":
		by, yok = y.(bool)
	}
	if !xok || !yok {
		exception.Except("invalid_type", n)
	}
	if _, ok := BoolOps[op]; !ok {
		switch {
		case xt == "string" && yt == "string":
			if f, ok := StringOps[op]; ok {
				// BUG FIX: the result used to be discarded, so every string
				// operation evaluated to "".
				return f(sx, sy)
			}
			exception.Except("invalid_op", n)
		case xt == "int" && yt == "int":
			if f, ok := IntOps[op]; ok {
				// BUG FIX: the result used to be discarded here as well.
				return f(ix, iy)
			}
			exception.Except("invalid_op", n)
		case xt == "int" && yt == "float":
			fx = float64(ix) // promote the int operand
			if f, ok := FloatOps[op]; ok {
				return f(fx, fy)
			}
			exception.Except("invalid_op", n)
		case xt == "float" && yt == "int":
			fy = float64(iy) // promote the int operand
			if f, ok := FloatOps[op]; ok {
				return f(fx, fy)
			}
			exception.Except("invalid_op", n)
		case xt == "float" && yt == "float":
			if f, ok := FloatOps[op]; ok {
				return f(fx, fy)
			}
			exception.Except("invalid_op", n)
		default:
			exception.Except("invalid_type", n)
		}
		return ""
	}
	// Boolean operator: coerce each operand. Zero values ("" / 0 / 0.0) are
	// treated as false without consulting types.GetBool; a bool operand is
	// used as-is.
	switch {
	case xt == "bool":
		// bx already holds the operand from the type switch above.
	case sx != "":
		bx = types.GetBool(sx, n)
	case ix != 0:
		bx = types.GetBool(fmt.Sprintf("%d", ix), n)
	case fx != 0:
		bx = types.GetBool(fmt.Sprintf("%f", fx), n)
	default:
		// BUG FIX: this branch previously assigned to by, leaving bx at its
		// zero value and clobbering y's coerced value.
		bx = false
	}
	switch {
	case yt == "bool":
		// by already holds the operand.
	case sy != "":
		by = types.GetBool(sy, n)
	case iy != 0:
		by = types.GetBool(fmt.Sprintf("%d", iy), n)
	case fy != 0:
		by = types.GetBool(fmt.Sprintf("%f", fy), n)
	default:
		by = false
	}
	return BoolOps[op](bx, by)
}
// AddI returns the integer sum x + y.
func AddI(x, y int) int { return x + y }

// AddF returns the float sum x + y.
func AddF(x, y float64) float64 { return x + y }

// AddS returns the concatenation of x and y.
func AddS(x, y string) string { return x + y }

// SubI returns the integer difference x - y.
func SubI(x, y int) int { return x - y }

// SubF returns the float difference x - y.
func SubF(x, y float64) float64 { return x - y }

// MulI returns the integer product x * y.
func MulI(x, y int) int { return x * y }

// MulF returns the float product x * y.
func MulF(x, y float64) float64 { return x * y }

// PowI returns x raised to the power y, truncated to an int.
func PowI(x, y int) int { return int(math.Pow(float64(x), float64(y))) }

// PowF returns x raised to the power y.
func PowF(x, y float64) float64 { return math.Pow(x, y) }

// DivI returns the integer quotient x / y (truncated toward zero).
func DivI(x, y int) int { return x / y }

// DivF returns the float quotient x / y.
func DivF(x, y float64) float64 { return x / y }

// FloorDivI returns x / y rounded toward negative infinity.
func FloorDivI(x, y int) int { return int(math.Floor(float64(x) / float64(y))) }

// FloorDivF returns x / y rounded toward negative infinity.
func FloorDivF(x, y float64) float64 { return math.Floor(x / y) }

// ModI returns the remainder of x / y (Go semantics: sign follows x).
func ModI(x, y int) int { return x % y }

// And returns the logical conjunction of x and y.
func And(x, y bool) bool { return x && y }

// Or returns the logical disjunction of x and y.
func Or(x, y bool) bool { return x || y }
// Package costs gets billing information from an ElasticSearch.
package es
import (
"time"
"github.com/olivere/elastic"
)
const maxAggregationSize = 0x7FFFFFFF
// getDateForDailyReport returns the end and the begining of the date of the report based on a date
func getDateForDailyReport(date time.Time) (begin, end time.Time) {
now := time.Now().UTC()
if date.Year() == now.Year() && date.Month() == now.Month() {
end = now
begin = time.Date(end.Year(), end.Month(), 1, 0, 0, 0, 0, end.Location()).UTC()
return
} else {
begin = date
end = time.Date(date.Year(), date.Month()+1, 0, 23, 59, 59, 999999999, date.Location()).UTC()
return
}
}
// createQueryAccountFilterEs builds a terms filter matching the "account"
// field against every entry of accountList.
func createQueryAccountFilterEs(accountList []string) *elastic.TermsQuery {
	accounts := make([]interface{}, 0, len(accountList))
	for _, account := range accountList {
		accounts = append(accounts, account)
	}
	return elastic.NewTermsQuery("account", accounts...)
}
// getElasticSearchEsDailyParams is used to construct an ElasticSearch *elastic.SearchService used to perform a request on ES
// It takes as parameters :
// - params EsQueryParams : contains the list of accounts and the date
// - client *elastic.Client : an instance of *elastic.Client that represents an ElasticSearch client.
// It needs to be fully configured and ready to execute a client.Search()
// - index string : The ElasticSearch index on which to execute the query. In this context the default value
// should be "es-reports"
// This function expects arguments passed to it to be sanitized. If they are not, the following cases will make
// it crash :
// - If the client is nil or misconfigured, it will crash
// - If the index is not an index present in the ES, it will crash
func getElasticSearchEsDailyParams(params EsQueryParams, client *elastic.Client, index string) *elastic.SearchService {
	query := elastic.NewBoolQuery()
	// Restrict to the requested accounts only when a filter was supplied.
	if len(params.AccountList) > 0 {
		query = query.Filter(createQueryAccountFilterEs(params.AccountList))
	}
	query = query.Filter(elastic.NewTermQuery("reportType", "daily"))
	dateStart, dateEnd := getDateForDailyReport(params.Date)
	query = query.Filter(elastic.NewRangeQuery("reportDate").
		From(dateStart).To(dateEnd))
	search := client.Search().Index(index).Size(0).Query(query)
	// Bucket per account, then per report date, keeping newest documents first.
	search.Aggregation("accounts", elastic.NewTermsAggregation().Field("account").
		SubAggregation("dates", elastic.NewTermsAggregation().Field("reportDate").
			SubAggregation("domains", elastic.NewTopHitsAggregation().Sort("reportDate", false).Size(maxAggregationSize))))
	return search
}
// getElasticSearchEsMonthlyParams is used to construct an ElasticSearch *elastic.SearchService used to perform a request on ES
// It takes as parameters :
// - params EsQueryParams : contains the list of accounts and the date
// - client *elastic.Client : an instance of *elastic.Client that represents an ElasticSearch client.
// It needs to be fully configured and ready to execute a client.Search()
// - index string : The ElasticSearch index on which to execute the query. In this context the default value
// should be "es-reports"
// This function expects arguments passed to it to be sanitized. If they are not, the following cases will make
// it crash :
// - If the client is nil or misconfigured, it will crash
// - If the index is not an index present in the ES, it will crash
func getElasticSearchEsMonthlyParams(params EsQueryParams, client *elastic.Client, index string) *elastic.SearchService {
	query := elastic.NewBoolQuery()
	// Restrict to the requested accounts only when a filter was supplied.
	if len(params.AccountList) > 0 {
		query = query.Filter(createQueryAccountFilterEs(params.AccountList))
	}
	query = query.Filter(elastic.NewTermQuery("reportType", "monthly"))
	// Monthly reports are keyed by an exact report date rather than a range.
	query = query.Filter(elastic.NewTermQuery("reportDate", params.Date))
	search := client.Search().Index(index).Size(0).Query(query)
	search.Aggregation("accounts", elastic.NewTermsAggregation().Field("account").
		SubAggregation("domains", elastic.NewTopHitsAggregation().Sort("reportDate", false).Size(maxAggregationSize)))
	return search
}
// createQueryAccountFilterBill builds a terms filter matching the
// "usageAccountId" field against every entry of accountList.
func createQueryAccountFilterBill(accountList []string) *elastic.TermsQuery {
	accounts := make([]interface{}, 0, len(accountList))
	for _, account := range accountList {
		accounts = append(accounts, account)
	}
	return elastic.NewTermsQuery("usageAccountId", accounts...)
}
// getElasticSearchCostParams is used to construct an ElasticSearch *elastic.SearchService used to perform a request on ES
// It takes as paramters :
// - params rdsQueryParams : contains the list of accounts and the date
// - client *elastic.Client : an instance of *elastic.Client that represent an Elastic Search client.
// It needs to be fully configured and ready to execute a client.Search()
// - index string : The Elastic Search index on wich to execute the query. In this context the default value
// should be "es-reports"
// This function excepts arguments passed to it to be sanitize. If they are not, the following cases will make
// it crash :
// - If the client is nil or malconfigured, it will crash
// - If the index is not an index present in the ES, it will crash
func getElasticSearchCostParams(params EsQueryParams, client *elastic.Client, index string) *elastic.SearchService {
query := elastic.NewBoolQuery()
if len(params.AccountList) > 0 {
query = query.Filter(createQueryAccountFilterBill(params.AccountList))
}
query = query.Filter(elastic.NewTermQuery("productCode", "AmazonES"))
dateStart, dateEnd := getDateForDailyReport(params.Date)
query = query.Filter(elastic.NewRangeQuery("usageStartDate").
From(dateStart).To(dateEnd))
search := client.Search().Index(index).Size(0).Query(query)
search.Aggregation("accounts", elastic.NewTermsAggregation().Field("usageAccountId").Size(maxAggregationSize).
SubAggregation("domains", elastic.NewTermsAggregation().Field("resourceId").Size(maxAggregationSize).
SubAggregation("cost", elastic.NewSumAggregation().Field("unblendedCost"))))
return search
} | usageReports/es/es_request_constructor.go | 0.618665 | 0.447762 | es_request_constructor.go | starcoder |
package engine
import (
"math"
"math/rand"
)
// init registers ext_makegrains as a scriptable engine function.
func init() {
	DeclFunc("ext_makegrains", Voronoi, "Voronoi tesselation (grain size, num regions)")
}
// Voronoi renders a random Voronoi tessellation onto the regions, with grains
// of typical size grainsize (m) and region indices drawn from [0, numRegions).
// The tessellation is deterministic for a given seed. The region function is
// also appended to regions.hist so it can be replayed later.
func Voronoi(grainsize float64, numRegions, seed int) {
	Refer("Lel2014")
	SetBusy(true)
	defer SetBusy(false)
	t := newTesselation(grainsize, numRegions, int64(seed))
	regions.hist = append(regions.hist, t.RegionOf)
	regions.render(t.RegionOf)
}
// tesselation lazily generates a random Voronoi tessellation. Centers are
// generated per tile on demand and memoized, so lookups are cheap and
// reproducible regardless of query order.
type tesselation struct {
	grainsize float64           // typical grain size (m)
	tilesize  float64           // edge length of one cache tile (m)
	maxRegion int               // region indices are drawn from [0, maxRegion)
	cache     map[int2][]center // memoized Voronoi centers, keyed by tile coordinate
	seed      int64             // user seed, mixed into each tile-specific RNG seed
	rnd       *rand.Rand        // reusable RNG, reseeded per tile
}

// integer tile coordinate
type int2 struct{ x, y int }

// Voronoi center info
type center struct {
	x, y   float64 // center position (m)
	region byte    // region for all cells near center
}

// newTesselation builds an empty tessellation; centers are generated lazily.
// nRegion exclusive
func newTesselation(grainsize float64, nRegion int, seed int64) *tesselation {
	return &tesselation{grainsize,
		float64(float32(grainsize * TILE)), // expect 4 grains/block, 36 per 3x3 blocks = safe, relatively round number
		nRegion,
		make(map[int2][]center),
		seed,
		rand.New(rand.NewSource(0))}
}

const (
	TILE   = 2           // tile size in grains
	LAMBDA = TILE * TILE // expected grains per tile
)
// RegionOf returns the region of the grain where the cell at x,y,z belongs to.
// The tessellation is two-dimensional: z is ignored. The nearest Voronoi
// center is searched in the cell's tile and its 8 neighbor tiles, which is
// sufficient because the tile size is several grain diameters.
func (t *tesselation) RegionOf(x, y, z float64) int {
	tile := t.tileOf(x, y) // tile containing x,y
	// look for nearest center in tile + neighbors
	nearest := center{x, y, 0} // dummy initial value, but safe should the infinite impossibility strike.
	mindist := math.Inf(1)
	for tx := tile.x - 1; tx <= tile.x+1; tx++ {
		for ty := tile.y - 1; ty <= tile.y+1; ty++ {
			centers := t.centersInTile(tx, ty)
			for _, c := range centers {
				// squared distance is enough for comparison; no sqrt needed
				dist := sqr(x-c.x) + sqr(y-c.y)
				if dist < mindist {
					nearest = c
					mindist = dist
				}
			}
		}
	}
	//fmt.Println("nearest", x, y, ":", nearest)
	return int(nearest.region)
}
// centersInTile returns the list of Voronoi centers in tile (tx, ty), using
// only tx, ty (xor'ed with the user seed) to seed the random generator, so the
// result is identical no matter in which order tiles are queried. The number
// of centers per tile is Poisson-distributed with mean LAMBDA. Results are
// memoized in t.cache.
func (t *tesselation) centersInTile(tx, ty int) []center {
	pos := int2{tx, ty}
	if c, ok := t.cache[pos]; ok {
		return c
	} else {
		// tile-specific seed that works for positive and negative tx, ty
		seed := (int64(ty)+(1<<24))*(1<<24) + (int64(tx) + (1 << 24))
		t.rnd.Seed(seed ^ t.seed)
		N := t.poisson(LAMBDA)
		c := make([]center, N)
		// absolute position of tile (m)
		x0, y0 := float64(tx)*t.tilesize, float64(ty)*t.tilesize
		for i := range c {
			// random position inside tile
			c[i].x = x0 + t.rnd.Float64()*t.tilesize
			c[i].y = y0 + t.rnd.Float64()*t.tilesize
			c[i].region = byte(t.rnd.Intn(t.maxRegion))
		}
		t.cache[pos] = c
		return c
	}
}
func sqr(x float64) float64 { return x * x }
// tileOf maps an absolute position (m) to the integer coordinate of the cache
// tile containing it (floor division, so negative positions work too).
func (t *tesselation) tileOf(x, y float64) int2 {
	return int2{
		x: int(math.Floor(x / t.tilesize)),
		y: int(math.Floor(y / t.tilesize)),
	}
}
// Generate poisson distributed numbers (according to Knuth)
func (t *tesselation) poisson(lambda float64) int {
L := math.Exp(-lambda)
k := 1
p := t.rnd.Float64()
for p > L {
k++
p *= t.rnd.Float64()
}
return k - 1
} | engine/ext_makegrains.go | 0.736211 | 0.576631 | ext_makegrains.go | starcoder |
package strmatcher
import (
"errors"
"regexp"
"strings"
)
// FullMatcher is an implementation of Matcher that requires an exact match.
type FullMatcher string

func (FullMatcher) Type() Type {
	return Full
}
func (m FullMatcher) Pattern() string {
	return string(m)
}
func (m FullMatcher) String() string {
	return "full:" + m.Pattern()
}

// Match reports whether s is exactly equal to the pattern.
func (m FullMatcher) Match(s string) bool {
	return string(m) == s
}

// DomainMatcher is an implementation of Matcher that matches a domain and all
// of its subdomains.
type DomainMatcher string

func (DomainMatcher) Type() Type {
	return Domain
}
func (m DomainMatcher) Pattern() string {
	return string(m)
}
func (m DomainMatcher) String() string {
	return "domain:" + m.Pattern()
}

// Match reports whether s equals the pattern or is a subdomain of it.
func (m DomainMatcher) Match(s string) bool {
	pattern := m.Pattern()
	if !strings.HasSuffix(s, pattern) {
		return false
	}
	// Either an exact match, or the byte preceding the suffix must be a dot,
	// so that "badexample.com" does not match the pattern "example.com".
	return len(s) == len(pattern) || s[len(s)-len(pattern)-1] == '.'
}

// SubstrMatcher is an implementation of Matcher that matches any string
// containing the pattern.
type SubstrMatcher string

func (SubstrMatcher) Type() Type {
	return Substr
}
func (m SubstrMatcher) Pattern() string {
	return string(m)
}
func (m SubstrMatcher) String() string {
	return "keyword:" + m.Pattern()
}

// Match reports whether s contains the pattern anywhere.
func (m SubstrMatcher) Match(s string) bool {
	return strings.Contains(s, m.Pattern())
}
// RegexMatcher is an implementation of Matcher backed by a compiled regexp.
type RegexMatcher struct {
	pattern *regexp.Regexp
}

func (*RegexMatcher) Type() Type {
	return Regex
}
func (m *RegexMatcher) Pattern() string {
	return m.pattern.String()
}
func (m *RegexMatcher) String() string {
	return "regexp:" + m.Pattern()
}

// Match reports whether s contains any match of the compiled pattern.
func (m *RegexMatcher) Match(s string) bool {
	return m.pattern.MatchString(s)
}

// New creates a new Matcher based on the given pattern.
// The error is non-nil only for an invalid Regex pattern; note that an
// unrecognized Type value panics rather than returning an error.
func (t Type) New(pattern string) (Matcher, error) {
	switch t {
	case Full:
		return FullMatcher(pattern), nil
	case Substr:
		return SubstrMatcher(pattern), nil
	case Domain:
		return DomainMatcher(pattern), nil
	case Regex: // 1. regex matching is case-sensitive
		regex, err := regexp.Compile(pattern)
		if err != nil {
			return nil, err
		}
		return &RegexMatcher{pattern: regex}, nil
	default:
		panic("Unknown type")
	}
}

// MatcherGroupForAll is an interface indicating a MatcherGroup could accept all types of matchers.
type MatcherGroupForAll interface {
	AddMatcher(matcher Matcher, value uint32)
}

// MatcherGroupForFull is an interface indicating a MatcherGroup could accept FullMatchers.
type MatcherGroupForFull interface {
	AddFullMatcher(matcher FullMatcher, value uint32)
}

// MatcherGroupForDomain is an interface indicating a MatcherGroup could accept DomainMatchers.
type MatcherGroupForDomain interface {
	AddDomainMatcher(matcher DomainMatcher, value uint32)
}

// MatcherGroupForSubstr is an interface indicating a MatcherGroup could accept SubstrMatchers.
type MatcherGroupForSubstr interface {
	AddSubstrMatcher(matcher SubstrMatcher, value uint32)
}

// MatcherGroupForRegex is an interface indicating a MatcherGroup could accept RegexMatchers.
type MatcherGroupForRegex interface {
	AddRegexMatcher(matcher *RegexMatcher, value uint32)
}
// AddMatcherToGroup is a helper function to try to add a Matcher to any kind of MatcherGroup.
// It returns error if the MatcherGroup does not accept the provided Matcher's type.
// This function is provided to help writing code to test a MatcherGroup.
func AddMatcherToGroup(g MatcherGroup, matcher Matcher, value uint32) error {
if g, ok := g.(MatcherGroupForAll); ok {
g.AddMatcher(matcher, value)
return nil
}
switch matcher := matcher.(type) {
case FullMatcher:
if g, ok := g.(MatcherGroupForFull); ok {
g.AddFullMatcher(matcher, value)
return nil
}
case DomainMatcher:
if g, ok := g.(MatcherGroupForDomain); ok {
g.AddDomainMatcher(matcher, value)
return nil
}
case SubstrMatcher:
if g, ok := g.(MatcherGroupForSubstr); ok {
g.AddSubstrMatcher(matcher, value)
return nil
}
case *RegexMatcher:
if g, ok := g.(MatcherGroupForRegex); ok {
g.AddRegexMatcher(matcher, value)
return nil
}
}
return errors.New("cannot add matcher to matcher group")
} | common/strmatcher/matchers.go | 0.822118 | 0.432723 | matchers.go | starcoder |
package topologyAlgorithm
import (
"github.com/astaxie/beego/orm"
"github.com/netsec-ethz/scion-coord/models"
)
// performance score thresholds: bandwidth (BW*) and round-trip time (RTT*)
// boundaries used by Neighbor.getPF to bucket a connection into classes 1-4.
const (
	BW1 = 0.05
	BW2 = 0.1
	BW3 = 0.5
	RTT1 = 10
	RTT2 = 50
	RTT3 = 100
)

// number of neighbors chosen for each AS
const (
	CHOSEN_NEIGHBORS uint16 = 3
)

// maximal number of neighbors chosen for each AS
const (
	MAX_NEIGHBORS = 6
)

// Neighbor describes a candidate peer AS together with the measured
// bandwidth (BW) and round-trip time (RTT) of the connection to it.
// BW or RTT of -1 marks a failed measurement.
type Neighbor struct {
	ISD int
	AS int
	IP string
	BW float64
	RTT float64
}
// ChooseNeighbors picks up to CHOSEN_NEIGHBORS of the best candidates from
// potentialneighbors, never exceeding the number of free ports.
func ChooseNeighbors(potentialneighbors []Neighbor, freePorts uint16) []Neighbor {
	// The number of neighbors to select is bounded by the free ports.
	limit := CHOSEN_NEIGHBORS
	if freePorts < limit {
		limit = freePorts
	}
	var chosen []Neighbor
	for picked := uint16(0); picked < limit && len(potentialneighbors) > 0; picked++ {
		best, idx := chooseBestNeighbor(potentialneighbors)
		chosen = append(chosen, best)
		// Drop the selected candidate so it cannot be picked twice.
		potentialneighbors = removeNeighbor(potentialneighbors, idx)
	}
	return chosen
}
// Choose the best Neighbor from a list of neighbors.
// Best neighbor is the neighbor with lowest PF (performance class);
// if PF scores are equal, the neighbor with the lower degree is chosen.
// A candidate backed by a SCIONBox row is only eligible while it has a
// free port (OpenPorts - degree > 0); candidates without a row are always
// eligible. Returns the chosen neighbor and its index in the slice.
// NOTE(review): assumes potentialneighbors is non-empty — index 0 is
// dereferenced unconditionally.
func chooseBestNeighbor(potentialneighbors []Neighbor) (Neighbor, int) {
	var bestNb = potentialneighbors[0]
	var index = 0
	for i, nb := range potentialneighbors {
		// If the current best is already saturated, replace it unconditionally.
		if bestNb.getDegree() >= MAX_NEIGHBORS {
			bestNb = nb
			index = i
		}
		// Saturated candidates are never selected.
		if nb.getDegree() >= MAX_NEIGHBORS {
			continue
		}
		if bestNb.getPF() > nb.getPF() {
			sbNb, err := models.FindSCIONBoxByIAint(nb.ISD, nb.AS)
			if err != nil {
				if err == orm.ErrNoRows {
					// No SCIONBox row for this AS: no port bookkeeping needed.
					bestNb = nb
					index = i
				}
			} else {
				// SCIONBox AS: only usable while it still has a free port.
				var nbFreePorts = sbNb.OpenPorts - nb.getDegree()
				if nbFreePorts > 0 {
					bestNb = nb
					index = i
				}
			}
		}
		if bestNb.getPF() == nb.getPF() {
			// same PF score, AS with lower degree is chosen
			sbNb, err := models.FindSCIONBoxByIAint(nb.ISD, nb.AS)
			if err != nil {
				if err == orm.ErrNoRows {
					if bestNb.getDegree() > nb.getDegree() {
						bestNb = nb
						index = i
					}
				}
			} else {
				if bestNb.getDegree() > nb.getDegree() {
					var nbFreePorts = sbNb.OpenPorts - nb.getDegree()
					if nbFreePorts > 0 {
						bestNb = nb
						index = i
					}
				}
			}
		}
	}
	return bestNb, index
}
// getPF computes the Performance Class of a neighbor's connection.
// There are four classes: 1 (best) through 4 (worst); 5 means the connection
// could not be classified (e.g. a failed measurement, marked by BW/RTT == -1,
// or values that fall outside every bucket).
func (nb Neighbor) getPF() int {
	if nb.BW == -1 || nb.RTT == -1 {
		return 5
	}
	bw, rtt := nb.BW, nb.RTT
	switch {
	case bw > BW3:
		return 4
	case BW3 >= bw && bw > BW2:
		if rtt <= RTT3 {
			return 3
		}
		return 4
	case BW2 >= bw && bw > BW1:
		switch {
		case rtt > RTT3:
			return 4
		case RTT3 >= rtt && rtt > RTT2:
			return 3
		case RTT2 >= rtt:
			return 2
		}
	case BW1 >= bw:
		switch {
		case rtt > RTT3:
			return 4
		case RTT3 >= rtt && rtt > RTT2:
			return 3
		case RTT2 >= rtt && rtt > RTT1:
			return 2
		case RTT1 >= rtt:
			return 1
		}
	}
	return 5
}
// Get the number of neighbors (connections) of this AS from the database.
// If an error occurs return 9999, a sentinel that callers treat as "worse
// than any real degree" (it exceeds MAX_NEIGHBORS).
func (nb Neighbor) getDegree() uint16 {
	dbEntry, err := models.FindSCIONLabASByIAInt(nb.ISD, nb.AS)
	if err != nil {
		return 9999
	}
	cns, err := dbEntry.GetConnectionInfo()
	if err != nil {
		return 9999
	}
	return uint16(len(cns))
}
// Remove element at index i from array
func removeNeighbor(neighbors []Neighbor, i int) []Neighbor {
neighbors[len(neighbors)-1], neighbors[i] = neighbors[i], neighbors[len(neighbors)-1]
return neighbors[:len(neighbors)-1]
} | utility/topologyAlgorithm/topology.go | 0.652906 | 0.447943 | topology.go | starcoder |
package day17
// dir encodes the four cardinal facings of the robot.
type dir int

const (
	up    dir = 0
	right dir = 1
	down  dir = 2
	left  dir = 3
)

// coordinate is an (x, y) grid position.
type coordinate struct {
	x, y int
}

// robot holds the robot's grid position, facing direction, and whether the
// map glyph was 'X' (dead).
type robot struct {
	x, y int
	dir  dir
	dead bool
}

// scaffold is the parsed map: one byte per cell, plus the robot found on it.
type scaffold struct {
	cells [][]byte // row, col
	robot *robot
}
// extent returns the grid dimensions: the row count and the width of the
// widest row (rows may have ragged lengths before square() is called).
func (s *scaffold) extent() (cols, rows int) {
	rows = len(s.cells)
	widest := 0
	for _, row := range s.cells {
		if w := len(row); w > widest {
			widest = w
		}
	}
	cols = widest
	return
}
// newScaffold returns a scaffold holding r, with a single empty row ready to
// receive the first line of camera output.
func newScaffold(r *robot) *scaffold {
	grid := make([][]byte, 1)
	grid[0] = []byte{}
	return &scaffold{cells: grid, robot: r}
}
// square pads every short row with '.' so all rows reach the widest width.
func (s *scaffold) square() {
	cols, _ := s.extent()
	for r := range s.cells {
		for len(s.cells[r]) < cols {
			s.cells[r] = append(s.cells[r], '.')
		}
	}
}
// intersections returns every interior grid cell where scaffold paths cross.
// Border cells are skipped: they cannot have neighbors on all four sides.
func (s *scaffold) intersections() []coordinate {
	cols, rows := s.extent()
	found := make([]coordinate, 0)
	for row := 1; row < rows-1; row++ {
		for col := 1; col < cols-1; col++ {
			if s.isIntersection(row, col) {
				found = append(found, coordinate{col, row})
			}
		}
	}
	return found
}
// isIntersection reports whether the cell at (row, col) and all four of its
// orthogonal neighbors are scaffold ('#'). Callers must keep row/col away
// from the grid border.
func (s *scaffold) isIntersection(row, col int) bool {
	return s.cells[row][col] == '#' &&
		s.cells[row-1][col] == '#' &&
		s.cells[row+1][col] == '#' &&
		s.cells[row][col-1] == '#' &&
		s.cells[row][col+1] == '#'
}
// builder incrementally assembles a scaffold from a stream of ASCII output
// codes, tracking the cursor position as codes arrive.
type builder struct {
	row, col int
	scaffold *scaffold
}

// Output consumes one ASCII code from the stream.
func (b *builder) Output(o int) {
	b.set(byte(o))
}

// Close is a no-op: the builder has nothing to flush or release.
func (b *builder) Close() {
}
func (b *builder) set(c byte) {
switch c {
case '.', '#':
b.col++
b.scaffold.cells[b.row] = append(b.scaffold.cells[b.row], c)
case '\n':
b.row++
b.scaffold.cells = append(b.scaffold.cells, make([]byte, 0))
case '^':
b.col++
b.scaffold.cells[b.row] = append(b.scaffold.cells[b.row], '^')
b.scaffold.robot = &robot{x:b.col, y:b.row, dir:up}
case '>':
b.col++
b.scaffold.cells[b.row] = append(b.scaffold.cells[b.row], '>')
b.scaffold.robot = &robot{x:b.col, y:b.row, dir:right}
case 'v':
b.col++
b.scaffold.cells[b.row] = append(b.scaffold.cells[b.row], 'v')
b.scaffold.robot = &robot{x:b.col, y:b.row, dir:down}
case '<':
b.col++
b.scaffold.cells[b.row] = append(b.scaffold.cells[b.row], '<')
b.scaffold.robot = &robot{x:b.col, y:b.row, dir:left}
case 'X':
b.col++
b.scaffold.cells[b.row] = append(b.scaffold.cells[b.row], 'X')
b.scaffold.robot = &robot{x:b.col, y:b.row, dir:up, dead:true}
}
} | v19/internal/day17/map.go | 0.569972 | 0.40751 | map.go | starcoder |
Package twitscrape is a library for scraping tweets from the twitter archive.
The archive is publicly available and can be searched through at:
https://twitter.com/search-advanced?lang=en
No authentication is required and the package can be run without any
prior configurations.
You can start scraping by creating a new instance of the Scrape struct and
calling its Tweets method, by providing a search term, a start date and an end date.
scr := twitscrape.Scrape{}
start, _ := time.Parse("01/02/2006", "11/10/2009")
until, _ := time.Parse("01/02/2006", "11/11/2009")
// fetch tweets between start and until dates, which contain hashtag #golang
tweets, err := scr.Tweets("#golang", start, until)
Tweets returns a slice of Tweet, which is a struct with the following fields:
type Tweet struct {
// Link to tweet in the form https://www.twitter.com/user/status
Permalink string
// Screen name (twitter handle) of tweet author
Name string
// Timestamp of tweet in UTC
Timestamp time.Time
// Contents of tweet
Contents string
// Tweet ID
ID string
}
By default, the Tweets function will not log anything (such as a missing attribute when scraping).
To enable logging, pass a io.Writer into the Scrape struct initialization and logging
will be written to that writer:
scr := ts.Scrape{Info: os.Stdout}
In order to better refine your search, you may use any Query Operator (as defined by Twitter)
in your search term. The query operators can be found here:
https://dev.twitter.com/rest/public/search#query-operators
// Tweets by <NAME>
tweets, err := scr.Tweets("#golang from:davecheney", startDate, untilDate)
Since a Twitter search is paginated by Twitter (to 20 Tweets), this library abuses the fact that
more tweets are loaded via AJAX. More information can be found in a great blog post by <NAME>:
http://tomkdickinson.co.uk/2015/01/scraping-tweets-directly-from-twitters-search-page-part-1/
*/
package twitscrape | doc.go | 0.678433 | 0.4206 | doc.go | starcoder |
package event
// Point is an integer (X, Y) coordinate on the grid.
type Point struct {
	X, Y int
}

// Magnitude returns the Manhattan (L1) distance from the origin.
func (p Point) Magnitude() int {
	return abs(p.X) + abs(p.Y)
}

// Distance returns the Manhattan distance between p and o.
func (p Point) Distance(o Point) int {
	return abs(p.X - o.X) + abs(p.Y - o.Y)
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// Offset returns a copy of p translated by (x, y).
func (p Point) Offset(x, y int) Point {
	return Point{p.X + x, p.Y + y}
}
// EqualPoints reports whether lhs and rhs have the same length and identical
// points at every index.
func EqualPoints(lhs, rhs PointSlice) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	for i := range lhs {
		if lhs[i] != rhs[i] {
			return false
		}
	}
	return true
}
// PointSlice implements sort.Interface, ordering points by their Manhattan
// distance from the origin.
type PointSlice []Point

func (p PointSlice) Len() int { return len(p) }

func (p PointSlice) Less(i, j int) bool {
	return p[i].Magnitude() < p[j].Magnitude()
}

func (p PointSlice) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}
// Segment is a line segment between two lattice points; the intersection
// helpers below assume it is axis-aligned (horizontal or vertical).
type Segment struct {
	Head, Tail Point
}

// Length returns the Manhattan length of the segment.
func (s Segment) Length() int {
	return s.Head.Distance(s.Tail)
}
// Intersections returns the lattice points shared by the axis-aligned
// segments a and b: at most one point for a perpendicular pair, possibly a
// whole run for collinear parallel segments, nil otherwise.
func Intersections(a, b Segment) []Point {
	switch {
	case a.H() && b.V():
		if hit, p := orthoIntersect(a.norm(), b.norm()); hit {
			return []Point{p}
		}
		return nil
	case a.V() && b.H():
		// Reuse the horizontal-first case with the arguments flipped.
		return Intersections(b, a)
	case a.H() && b.H():
		return hIntersects(a.norm(), b.norm())
	case a.V() && b.V():
		return vIntersects(a.norm(), b.norm())
	}
	return nil
}
// H reports whether the segment is horizontal (both endpoints share a Y).
func (s Segment) H() bool {
	return s.Head.Y == s.Tail.Y
}

// V reports whether the segment is vertical (both endpoints share an X).
func (s Segment) V() bool {
	return s.Head.X == s.Tail.X
}
// hIntersects returns every lattice point shared by two horizontal,
// normalized segments (Head.X <= Tail.X), or nil when they lie on different
// rows. The shared run goes from the rightmost head to the leftmost tail.
func hIntersects(a, b Segment) []Point {
	if a.Head.X > b.Head.X {
		return hIntersects(b, a)
	}
	if a.Head.Y != b.Head.Y {
		return nil
	}
	// BUG FIX: the end of the overlap is the smaller of the two tails; the
	// original used a.Tail.X alone, enumerating points past b's end whenever
	// b was fully contained in a.
	end := a.Tail.X
	if b.Tail.X < end {
		end = b.Tail.X
	}
	points := make([]Point, 0)
	for x := b.Head.X; x <= end; x++ {
		points = append(points, Point{x, a.Head.Y})
	}
	return points
}
// vIntersects returns every lattice point shared by two vertical, normalized
// segments (Head.Y <= Tail.Y), or nil when they lie on different columns.
func vIntersects(a, b Segment) []Point {
	if a.Head.Y > b.Head.Y {
		return vIntersects(b, a)
	}
	if a.Head.X != b.Head.X {
		return nil
	}
	// BUG FIX: the end of the overlap is the smaller of the two tails; the
	// original used a.Tail.Y alone, enumerating points past b's end whenever
	// b was fully contained in a.
	end := a.Tail.Y
	if b.Tail.Y < end {
		end = b.Tail.Y
	}
	points := make([]Point, 0)
	for y := b.Head.Y; y <= end; y++ {
		points = append(points, Point{a.Head.X, y})
	}
	return points
}
// orthoIntersect reports whether a normalized horizontal segment h crosses a
// normalized vertical segment v, and if so returns the crossing point.
func orthoIntersect(h, v Segment) (bool, Point) {
	withinX := h.Head.X <= v.Head.X && v.Head.X <= h.Tail.X
	withinY := v.Head.Y <= h.Head.Y && h.Head.Y <= v.Tail.Y
	if withinX && withinY {
		return true, Point{v.Head.X, h.Head.Y}
	}
	return false, Point{}
}
// norm returns s with its endpoints ordered: left-to-right for horizontal
// segments, bottom-to-top (increasing Y) for vertical ones.
func (s Segment) norm() Segment {
	if s.H() && s.Head.X > s.Tail.X {
		return s.swap()
	}
	if s.Head.Y > s.Tail.Y {
		return s.swap()
	}
	return s
}
func (s Segment) swap() Segment {
return Segment{s.Tail, s.Head}
} | v19/internal/event/segment.go | 0.79158 | 0.476762 | segment.go | starcoder |
package pcpeasy
import (
"github.com/ryandoyle/pcpeasygo/pmapi"
"fmt"
"reflect"
)
// metricInfo is a human-readable summary of a PCP metric descriptor.
type metricInfo struct {
	semantics string       // "counter", "discrete" or "instant"
	units     metricUnits  // readable unit names
	_type     reflect.Kind // closest Go kind for the metric's value type
}

// metricUnits names the unit of the metric's domain and, for ratios, the unit
// it is divided by (the range) — e.g. domain "bytes", range "seconds".
type metricUnits struct {
	domain string
	_range string
}

// pmDescAdapter converts a raw pmapi.PmDesc into a metricInfo.
type pmDescAdapter interface {
	toMetricInfo(pm_desc pmapi.PmDesc) metricInfo
}

type pmDescAdapterImpl struct {}

// toMetricInfo maps each descriptor field through its dedicated translator.
func (a pmDescAdapterImpl) toMetricInfo(pm_desc pmapi.PmDesc) metricInfo {
	return metricInfo{
		semantics: semanticsString(pm_desc.Sem),
		units:     createMetricUnits(pm_desc.Units),
		_type:     createType(pm_desc.Type),
	}
}
// createType maps a pmapi value-type constant onto the closest reflect.Kind;
// unrecognized type constants yield reflect.Invalid.
func createType(_type int) reflect.Kind {
	switch _type {
	case pmapi.PmType32:
		return reflect.Int32
	case pmapi.PmTypeU32:
		return reflect.Uint32
	case pmapi.PmType64:
		return reflect.Int64
	case pmapi.PmTypeU64:
		return reflect.Uint64
	case pmapi.PmTypeFloat:
		return reflect.Float32
	case pmapi.PmTypeDouble:
		return reflect.Float64
	case pmapi.PmTypeString:
		return reflect.String
	}
	return reflect.Invalid
}
// createMetricUnits derives readable domain/range unit names from the PCP
// dimension exponents (space, time, count). A dimension with exponent 1 is
// the domain; one with exponent -1 is the range (unit per unit). Any other
// combination yields empty units.
func createMetricUnits(units pmapi.PmUnits) metricUnits {
	ds, dt, dc := units.DimSpace, units.DimTime, units.DimCount
	switch {
	case ds == 1 && dt == 0 && dc == 0:
		return metricUnits{domain: spaceUnits(units)}
	case ds == 1 && dt == -1 && dc == 0:
		return metricUnits{domain: spaceUnits(units), _range: timeUnits(units)}
	case ds == 1 && dt == 0 && dc == -1:
		return metricUnits{domain: spaceUnits(units), _range: countUnits(units)}
	case ds == 0 && dt == 1 && dc == 0:
		return metricUnits{domain: timeUnits(units)}
	case ds == -1 && dt == 1 && dc == 0:
		return metricUnits{domain: timeUnits(units), _range: spaceUnits(units)}
	case ds == 0 && dt == 1 && dc == -1:
		return metricUnits{domain: timeUnits(units), _range: countUnits(units)}
	case ds == 0 && dt == 0 && dc == 1:
		return metricUnits{domain: countUnits(units)}
	case ds == -1 && dt == 0 && dc == 1:
		return metricUnits{domain: countUnits(units), _range: spaceUnits(units)}
	case ds == 0 && dt == -1 && dc == 1:
		return metricUnits{domain: countUnits(units), _range: timeUnits(units)}
	}
	return metricUnits{}
}
// countUnits renders the count dimension with its PCP count scale appended,
// e.g. "count0" for ScaleCount == 0.
func countUnits(units pmapi.PmUnits) string {
	return fmt.Sprintf("count%v", units.ScaleCount)
}
// timeUnits maps the PCP time-scale constant to its English unit name.
func timeUnits(units pmapi.PmUnits) string {
	switch units.ScaleTime {
	case pmapi.PmTimeNSec:
		return "nanoseconds"
	case pmapi.PmTimeUSec:
		return "microseconds"
	case pmapi.PmTimeMSec:
		return "milliseconds"
	case pmapi.PmTimeSec:
		return "seconds"
	case pmapi.PmTimeMin:
		return "minutes"
	case pmapi.PmTimeHour:
		return "hours"
	}
	return "unknown time unit"
}
// spaceUnits maps the PCP space-scale constant to its English unit name.
func spaceUnits(units pmapi.PmUnits) string {
	switch units.ScaleSpace {
	case pmapi.PmSpaceByte:
		return "bytes"
	case pmapi.PmSpaceKByte:
		return "kilobytes"
	case pmapi.PmSpaceMByte:
		return "megabytes"
	case pmapi.PmSpaceGByte:
		return "gigabytes"
	case pmapi.PmSpaceTByte:
		return "terabytes"
	case pmapi.PmSpacePByte:
		return "petabytes"
	case pmapi.PmSpaceEByte:
		return "exabytes"
	}
	return "unknown bytes"
}
func semanticsString(i int) string {
switch i {
case pmapi.PmSemCounter:
return "counter"
case pmapi.PmSemDiscrete:
return "discrete"
case pmapi.PmSemInstant:
return "instant"
}
return "unknown"
} | pcpeasy/metric_units.go | 0.639061 | 0.537102 | metric_units.go | starcoder |
package msboard
import (
"errors"
"fmt"
"io"
"math/rand"
"os"
)
// Location : zero-based cell location, {0,0} is upper left
type Location struct {
	row, col int
}

// NewLocation -- public interface to create a Location struct
func NewLocation(row, col int) Location {
	retval := Location{row, col}
	return retval
}

// cell : manage state for a single cell on the board
type cell struct {
	location Location // cell position in grid, zero based, {0,0} is upper left
	hasMine bool // cell holds mine
	score int // cache static score for this cell
	flagged bool // user flag
	revealed bool // all cells start hidden
}

// BoardSaveState : Persistable board state object, read/written as JSON
// NOTE(review): all fields are unexported, so encoding/json would skip them;
// confirm how this struct is actually persisted.
type boardSaveState struct {
	initialized bool // board starts uninitialized, and then gets populated after player's first 'guaranteed safe' move
	difficulty string
	rows int
	cols int
	mines []Location
	explosionOccured bool
}

// Board struct manages state of the Minesweeper board
type Board struct {
	boardSaveState // persistable state
	cells [][]*cell // cells of initialized board
	safeRemaining int // cache number of non-mine cells remaining to be revealed
	mineCount int // number of mines defined for this board
}
/************************************\
** cell Methods
\************************************/
// HasMine reports whether the cell holds a mine; a nil cell never does.
func (c *cell) HasMine() bool {
	return c != nil && c.hasMine
}
// scoreRunes maps a cached adjacency score (0-8) to its display rune.
var scoreRunes = [...]rune{'_', '1', '2', '3', '4', '5', '6', '7', '8'}

// Render returns a rune representing the current state of the cell:
// '~' for a nil cell, '+' for a player flag, '.' for a hidden cell,
// '*' for a revealed mine, otherwise the cell's adjacency score.
func (c *cell) Render() rune {
	if nil == c {
		return '~'
	}
	// BUG FIX: flags are placed on hidden cells, but the old ordering returned
	// '.' for every unrevealed cell first, so a flag could never be displayed.
	if c.flagged {
		return '+'
	}
	if !c.revealed {
		return '.'
	}
	if c.hasMine {
		return '*'
	}
	return scoreRunes[c.score]
}
/************************************\
** Board Methods
\************************************/
// boardparams bundles the dimensions and mine count for one difficulty level.
type boardparams struct {
	difficulty string
	rows, cols, mineCount int
}

// static map function of board difficulty parameters
// (note: the map literal is rebuilt on every call)
var boardDefinitionsDict = func() map[string]boardparams {
	return map[string]boardparams{
		// name : difficulty, rows, cols, mines
		"easy": {"easy", 9, 9, 10},
		"medium": {"medium", 16, 16, 30},
		"hard": {"hard", 30, 16, 72},
	}
}
// NewBoard allocates a new, uninitialized board. Supported difficulties are
// "easy" (9x9), "medium" (16x16) and "hard" (30x16); any other value yields nil.
func NewBoard(difficulty string) *Board {
	params, ok := boardDefinitionsDict()[difficulty]
	if !ok {
		// unrecognized board types rejected
		return nil
	}
	b := new(Board)
	b.difficulty = difficulty
	b.rows = params.rows
	b.cols = params.cols
	b.mineCount = params.mineCount
	return b
}
// Initialize : construct the cell grid with consideration for the user's
// selected 'safe' Location: that cell is guaranteed mine-free. Mines are
// placed by repeated random sweeps, proximity scores are computed, and the
// board is marked initialized. Always returns nil.
func (b *Board) Initialize(safespot Location) error {
	// Create default cells, then loop over grid and place bombs randomly at
	// 2% probability per visit until the bomb supply is exhausted.
	// (NOTE(review): an earlier comment said 10%, but rand.Intn(100) < 2 is 2%.)
	b.cells = make([][]*cell, b.rows)
	for row := range b.cells {
		b.cells[row] = make([]*cell, b.cols)
		for col := range b.cells[row] {
			b.cells[row][col] = new(cell)
			b.cells[row][col].location = NewLocation(row, col)
		}
	}
	// start from the full cell count; each mine placed below subtracts one,
	// leaving the number of safe (non-mine) cells
	b.safeRemaining = b.rows * b.cols
	minesToPlace := b.mineCount
	for minesToPlace > 0 {
		for row := range b.cells {
			for col := range b.cells[row] {
				// all mines placed: skip the rest of this sweep
				if minesToPlace == 0 {
					continue
				}
				currloc := Location{row, col}
				if currloc == safespot {
					continue // can't place mine at user's safe starting cell
				}
				// 2% chance to drop a mine on this visit
				mineshot := rand.Intn(100)
				if mineshot < 2 {
					currcell := b.getCell(currloc)
					if currcell.hasMine {
						continue // we already placed a mine here
					}
					// place and record mine at current Location
					b.cells[row][col].hasMine = true
					b.mines = append(b.mines, currloc)
					minesToPlace--
					b.safeRemaining--
				}
			}
		}
	}
	// once mines are placed, go ahead and calculate cell scores
	initializeScores(b)
	b.initialized = true
	return nil
}
// initializeScores computes and caches every cell's mine-proximity score
// (the count of mined neighbors) after mine placement.
func initializeScores(b *Board) {
	for row := range b.cells {
		for col := range b.cells[row] {
			loc := Location{row, col}
			neighbors := b.getNeighborCells(loc)
			if neighbors == nil {
				fmt.Fprintln(os.Stderr, "Board init failure for cell (this should not happen :() : ", loc)
			}
			score := 0
			for _, nb := range neighbors {
				if nb.hasMine {
					score++
				}
			}
			b.getCell(loc).score = score
		}
	}
}
// getNeighborCells returns pointers to every in-grid neighbor of loc
// (up to 8 of them), or nil when loc itself is not a valid cell.
func (b *Board) getNeighborCells(loc Location) []*cell {
	// sanity check: the center itself must be on the board
	if b.getCell(loc) == nil {
		return nil
	}
	neighbors := make([]*cell, 0, 8)
	for dr := -1; dr <= 1; dr++ {
		for dc := -1; dc <= 1; dc++ {
			// skip the center point itself
			if dr == 0 && dc == 0 {
				continue
			}
			// getCell returns nil for positions outside the grid
			if n := b.getCell(Location{loc.row + dr, loc.col + dc}); n != nil {
				neighbors = append(neighbors, n)
			}
		}
	}
	return neighbors
}
// Initialized reports whether the board has been populated; a nil board
// is never initialized.
func (b *Board) Initialized() bool {
	return b != nil && b.initialized
}
// getCell : return a reference to the cell at the given Location, or nil
// when the location is out of range or the board has no cell storage.
func (b *Board) getCell(selected Location) *cell {
	// ROBUSTNESS FIX: cells is only allocated by Initialize(); indexing the
	// nil slice of an uninitialized board would panic. Return nil instead,
	// consistent with the out-of-range behavior below.
	if nil == b || b.cells == nil {
		return nil
	}
	// reject coordinates outside the grid
	if selected.row < 0 || selected.row >= b.rows || selected.col < 0 || selected.col >= b.cols {
		return nil
	}
	return b.cells[selected.row][selected.col]
}
// SafeRemaining reports the number of unrevealed non-mine cells left; the
// win condition is reached when this hits 0. A nil or uninitialized board
// reports 0.
func (b *Board) SafeRemaining() int {
	if !b.Initialized() {
		return 0
	}
	return b.safeRemaining
}
// RevealAll marks every cell revealed (for debugging or surrender); this
// cannot be undone. Errors on a nil or uninitialized board.
func (b *Board) RevealAll() error {
	if !b.Initialized() {
		return errors.New("called RevealAll() on an uninitialized board")
	}
	for _, rowCells := range b.cells {
		for _, c := range rowCells {
			c.revealed = true
		}
	}
	return nil
}
// ConsoleRender -- render a console image of the board state to cout:
// a column-letter header line, then one line per row prefixed with its
// 1-based row number. Errors on a nil or uninitialized board.
func (b *Board) ConsoleRender(cout io.Writer) error {
	if nil == b || !b.initialized {
		return errors.New("called Render() on an uninitialized board")
	}
	// top line is header; its width depends only on the difficulty preset
	headingLine := ""
	switch b.difficulty {
	case "easy":
		headingLine = " A B C D E F G H I"
	case "medium", "hard":
		headingLine = " A B C D E F G H I J K L M N O P"
	}
	fmt.Fprintln(cout, headingLine)
	for row := range b.cells {
		// index column along left side
		nextLine := fmt.Sprintf("%2d ", row+1)
		for col := range b.cells[row] {
			if col != 0 {
				nextLine += " " // separator between cell glyphs
			}
			nextLine += string(b.cells[row][col].Render())
		}
		fmt.Fprintln(cout, nextLine)
	}
	return nil
}
// Click -- Calculate and apply board state changes for a cell click event.
// Out-of-range, flagged and already-revealed cells are ignored; revealing a
// mine sets the explosion flag; revealing a zero-score cell flood-reveals
// its neighborhood.
func (b *Board) Click(l Location) {
	c := b.getCell(l)
	if nil == c {
		return
	}
	// flagged cells are protected from inadvertent clicks
	if c.flagged {
		return
	}
	// already revealed cells do not respond to clicks
	if c.revealed {
		return
	}
	// reveal cell
	c.revealed = true
	// Mine? Explode.
	if c.hasMine {
		b.explosionOccured = true
		return
	}
	// BUG FIX: count progress toward the win condition. safeRemaining was
	// only decremented during mine placement, never on reveal, so
	// SafeRemaining() could never reach 0 and the game could never be won.
	b.safeRemaining--
	// zero-score cells flood-reveal their neighborhood; scored cells do not
	if c.score == 0 {
		b.PropagateReveals(c)
	}
}
// PropagateReveals -- clicking on a zero score cell reveals all connected
// zero score cells (and their scored border). Neighbors of a zero-score
// cell never hold mines by definition, so everything revealed here is safe.
func (b *Board) PropagateReveals(c *cell) {
	if nil == c {
		return
	}
	neighbors := b.getNeighborCells(c.location)
	if nil == neighbors {
		fmt.Fprintln(os.Stderr, "PropogateReveals failure for cell (this should not happen :() : ", c.location)
	}
	// reveal unrevealed neighbors and recurse for any zero-scored ones
	for _, n := range neighbors {
		// BUG FIX: respect player flags — flood reveal previously overrode
		// them, silently revealing cells the player had marked.
		if n.revealed || n.flagged {
			continue
		}
		n.revealed = true
		// BUG FIX: keep the win-condition cache in sync; every cell revealed
		// here is a non-mine cell (see note above).
		b.safeRemaining--
		if n.score == 0 {
			b.PropagateReveals(n)
		}
	}
}
// MineHit -- convenience accessor for the game loop: reports whether a
// mine has been revealed (the lose condition) on this board.
func (b *Board) MineHit() bool {
	return b.explosionOccured
}
// ToggleFlag -- toggle flag status for a cell; ignored for revealed or
// out-of-range cells.
func (b *Board) ToggleFlag(l Location) {
	c := b.getCell(l)
	// idiom: test !c.revealed rather than comparing against a boolean literal
	if nil != c && !c.revealed {
		c.flagged = !c.flagged
	}
}
// ValidLocation -- return true if selected location is valid for the board
func (b *Board) ValidLocation(l Location) bool {
if l.row >= 0 && l.row < b.rows && l.col >= 0 && l.col < b.cols {
return true
}
return false
} | msboard/Board.go | 0.676192 | 0.489381 | Board.go | starcoder |
package sgd
import (
"fmt"
"math"
"gonum.org/v1/gonum/mat"
"github.com/jamOne-/kiwi-zero/utils"
)
// OptimizeFn evaluates the objective on a minibatch (Xs, ys) at the given
// weights and returns the error value together with its gradient vector.
type OptimizeFn func(Xs []*mat.VecDense, ys []float64, weights *mat.VecDense) (float64, *mat.VecDense)
// SGDReturn carries the outcome of an SGD run: the best weights found (by
// validation error), their test-set error, and the training history.
type SGDReturn struct {
	BestWeights *mat.VecDense // weights with the lowest validation error seen
	TestSetErrorRate float64 // error of BestWeights on the held-out test partition
	BestValidErrorRate float64 // lowest validation error observed
	TotalEpochs int // number of epochs the run was (finally) budgeted for
	BestWeightsEpoch int // epoch at which BestWeights was recorded
	TrainErrorsHistory []float64 // per-batch training error
	ValidationErrorsHistory []float64 // per-epoch validation error
}
// DEFAULT_PARAMETERS supplies defaults for every SGD hyperparameter; caller
// values are merged over these (see utils.MergeMaps in SGD).
var DEFAULT_PARAMETERS = map[string]float64{
	"alpha0": 1e-4, // initial learning rate
	"alpha_const": 1e-5, // learning-rate decay constant: alpha = alpha0/(1+alpha_const*step)
	"batch_size": 32,
	"momentum": 0.9,
	"epochs": 50.0, // initial epoch budget (may grow via patience expansion)
	"max_epochs": 20000.0, // hard cap on the epoch budget
	"patience_expansion": 1.5, // budget multiplier applied on validation improvement
	"validation_set_ratio": 0.2, // fraction of the non-test data used for validation
	"test_set_ratio": 0.2, // fraction of all data held out for the final test
	"weights_decay": 5e-4, // L2 regularization coefficient
	"debug": 1} // 0 silent, 1 summary, 2 per-epoch, 3 per-100-batches
func SGD(f OptimizeFn, weights *mat.VecDense, Xs []*mat.VecDense, ys []float64, parameters map[string]float64) *SGDReturn {
parameters = utils.MergeMaps(DEFAULT_PARAMETERS, parameters)
alpha0, alphaConst := parameters["alpha0"], parameters["alpha_const"]
batchSize := int(parameters["batch_size"])
momentum := parameters["momentum"]
numberOfEpochs := int(parameters["epochs"])
maxEpochs := int(parameters["max_epochs"])
patienceExpansion := parameters["patience_expansion"]
weightsDecay := parameters["weightsDecay"]
i := 0
epoch := 0
velocities := mat.NewVecDense(weights.Len(), nil)
weightsAux := mat.NewVecDense(weights.Len(), nil)
weightsAux.CloneVec(weights)
weights = weightsAux
bestValidErrorRate := math.MaxFloat64
bestWeights := mat.NewVecDense(weights.Len(), nil)
bestWeights.CloneVec(weights)
bestWeightsEpoch := 0
trainErrors := make([]float64, 0)
// trainLoss := make([]float64, 0)
validationErrors := make([]float64, 0)
// rand.Shuffle(len(Xs), func(i int, j int) {
// Xs[i], Xs[j] = Xs[j], Xs[i]
// ys[i], ys[j] = ys[j], ys[i]
// })
testSetSize := int(math.Floor(parameters["test_set_ratio"] * float64(len(Xs))))
testX, testy := Xs[:testSetSize], ys[:testSetSize]
restX, resty := Xs[testSetSize:], ys[testSetSize:]
validationSetSize := int(math.Floor(parameters["validation_set_ratio"] * float64(len(restX))))
validationX, validationy := restX[:validationSetSize], resty[:validationSetSize]
trainX, trainy := restX[validationSetSize:], resty[validationSetSize:]
debugMode := parameters["debug"]
numberOfBatches := int(math.Ceil(float64(len(trainX)) / float64(batchSize)))
for epoch < numberOfEpochs {
epoch += 1
for batchIndex := 0; batchIndex < numberOfBatches; batchIndex++ {
i += 1
batchStart := batchIndex * batchSize
batchEnd := int(math.Min(float64(batchStart+batchSize), float64(len(trainX))))
batchX, batchy := trainX[batchStart:batchEnd], trainy[batchStart:batchEnd]
errorRate, gradient := f(batchX, batchy, weights)
trainErrors = append(trainErrors, errorRate)
if debugMode >= 3 && i%100 == 0 {
fmt.Printf("After batch %d: errorRate: %f\n", i, errorRate)
}
gradient.AddScaledVec(gradient, weightsDecay, weights)
alpha := alpha0 / (1.0 + alphaConst*float64(i))
velocities.ScaleVec(momentum, velocities)
velocities.AddScaledVec(velocities, alpha, gradient)
weights.SubVec(weights, velocities)
}
validationError, _ := f(validationX, validationy, weights)
validationErrors = append(validationErrors, validationError)
if validationError < bestValidErrorRate {
numberOfEpochs = int(math.Max(float64(numberOfEpochs), float64(epoch)*patienceExpansion+1.0))
numberOfEpochs = int(math.Min(float64(maxEpochs), float64(numberOfEpochs)))
bestValidErrorRate = validationError
bestWeights.CloneVec(weights)
bestWeightsEpoch = epoch
}
if debugMode >= 2 {
fmt.Printf("After epoch %d: validationError: %f currently going to do %d epochs\n", epoch, validationError, numberOfEpochs)
}
}
testErrorRate, _ := f(testX, testy, bestWeights)
if debugMode >= 1 {
fmt.Printf("SGD ended after %d epochs having %f error on test set\n", numberOfEpochs, testErrorRate)
}
return &SGDReturn{
bestWeights,
testErrorRate,
bestValidErrorRate,
numberOfEpochs,
bestWeightsEpoch,
trainErrors,
validationErrors}
} | sgd/sgd.go | 0.598077 | 0.416381 | sgd.go | starcoder |
package padx
import (
"time"
"github.com/rakyll/launchpad"
)
// Custom represents a custom widget: a paint offset, a bounding size, and
// the set of hits (x, y) to be lit on the launchpad grid.
type Custom struct {
	OffsetX int // horizontal displacement applied when painting
	OffsetY int // vertical displacement applied when painting
	Width int // widget width in cells (length of the first art line)
	Height int // widget height in cells (number of art lines)
	Hits []launchpad.Hit // lit positions, relative to the widget origin
}
// NewCustom builds a widget from ASCII-art lines: every '0' character
// becomes a lit hit at its (x, y) position; all other characters are blank.
func NewCustom(lines []string) Custom {
	hits := []launchpad.Hit{}
	for y := range lines {
		for x, ch := range lines[y] {
			if ch != '0' {
				continue
			}
			hits = append(hits, launchpad.Hit{X: x, Y: y})
		}
	}
	return Custom{
		Width:  len(lines[0]),
		Height: len(lines),
		Hits:   hits,
	}
}
// Paint lights every hit of the widget (with offset applied) that falls
// inside the 8x8 grid, pausing d between LEDs, and returns the grid
// positions that were actually lit.
func (c Custom) Paint(pad *launchpad.Launchpad, color Color, d time.Duration) []launchpad.Hit {
	lit := []launchpad.Hit{}
	for _, hit := range c.Hits {
		px := hit.X + c.OffsetX
		py := hit.Y + c.OffsetY
		// silently drop positions shifted off the 8x8 grid
		if px < 0 || px >= 8 || py < 0 || py >= 8 {
			continue
		}
		pad.Light(px, py, color.Green, color.Red)
		lit = append(lit, launchpad.Hit{X: px, Y: py})
		time.Sleep(d)
	}
	return lit
}
// Clear paints the custom widget with ColorOff, erasing it from the pad.
func (c Custom) Clear(pad *launchpad.Launchpad, d time.Duration) {
	c.Paint(pad, ColorOff, d)
}
// Blink alternates the widget between colorA and colorB.
// duration: pause between transitions; repeats: number of paints performed
// (even iterations use colorA, odd iterations colorB).
func (c Custom) Blink(pad *launchpad.Launchpad, colorA, colorB Color, duration time.Duration, repeats int) {
	for i := 0; i < repeats; i++ {
		next := colorA
		if i%2 != 0 {
			next = colorB
		}
		c.Paint(pad, next, 0)
		time.Sleep(duration)
	}
}
// Move animates the widget one grid step at a time toward (toX, toY),
// painting it at each intermediate offset and switching off the LEDs it
// vacated, pausing d between steps. The loop bound c.Width*3 is a safety
// cap on the number of animation steps.
func (c *Custom) Move(pad *launchpad.Launchpad, toX, toY int, color Color, d time.Duration) {
	for m := 1; m <= c.Width*3; m++ {
		// step the offset by at most 1 in each axis toward the target
		newPos := launchpad.Hit{X: c.OffsetX, Y: c.OffsetY}
		if toX > c.OffsetX {
			newPos.X = c.OffsetX + 1
		} else if toX < c.OffsetX {
			newPos.X = c.OffsetX - 1
		}
		if toY > c.OffsetY {
			newPos.Y = c.OffsetY + 1
		} else if toY < c.OffsetY {
			newPos.Y = c.OffsetY - 1
		}
		originalX := c.OffsetX
		originalY := c.OffsetY
		// paint at the new offset first, remembering which LEDs are now lit
		c.OffsetX = newPos.X
		c.OffsetY = newPos.Y
		hits := c.Paint(pad, color, 0)
		// switch off every LED of the previous bounding box that is not
		// still lit by the freshly painted frame
		for x := originalX; x < originalX+c.Width; x++ {
			for y := originalY; y < originalY+c.Height; y++ {
				var isOn bool
				for _, hit := range hits {
					if hit.X == x && hit.Y == y {
						// do not off this led
						isOn = true
					}
				}
				if !isOn && x >= 0 && x < 8 && y >= 0 && y < 8 {
					pad.Light(x, y, 0, 0)
				}
			}
		}
		time.Sleep(d)
		// NOTE(review): redundant — the offset was already set to newPos
		// before Paint above; kept for byte-identical behavior.
		c.OffsetX = newPos.X
		c.OffsetY = newPos.Y
		if c.OffsetX == toX && c.OffsetY == toY {
			// destination reached
			break
		}
	}
	// snap to the exact destination in case the step cap was exhausted
	c.OffsetX = toX
	c.OffsetY = toY
}
package linear
import (
"math"
)
const (
BLOCK_SIZE int = 52
)
/**
* Cache-friendly implementation of RealMatrix using a flat arrays to store
* square blocks of the matrix.
*
* This implementation is specially designed to be cache-friendly. Square blocks are
* stored as small arrays and allow efficient traversal of data both in row major direction
* and columns major direction, one block at a time. This greatly increases performances
* for algorithms that use crossed directions loops like multiplication or transposition.
*
* The size of square blocks is a static parameter. It may be tuned according to the cache
* size of the target computer processor. As a rule of thumbs, it should be the largest
* value that allows three blocks to be simultaneously cached (this is necessary for example
* for matrix multiplication). The default value is to use 52x52 blocks which is well suited
* for processors with 64k L1 cache (one block holds 2704 values or 21632 bytes). This value
* could be lowered to 36x36 for processors with 32k L1 cache.
*
* The regular blocks represent BLOCK_SIZE x BLOCK_SIZE squares. Blocks
* at right hand side and bottom side which may be smaller to fit matrix dimensions. The square
* blocks are flattened in row major order in single dimension arrays which are therefore
* BLOCK_SIZE<sup>2</sup> elements long for regular blocks. The blocks are themselves
* organized in row major order.
*
* As an example, for a block size of 52x52, a 100x60 matrix would be stored in 4 blocks.
* Block 0 would be a double[2704] array holding the upper left 52x52 square, block 1 would be
* a double[416] array holding the upper right 52x8 rectangle, block 2 would be a double[2496]
* array holding the lower left 48x52 rectangle and block 3 would be a double[384] array
* holding the lower right 48x8 rectangle.
*
* The layout complexity overhead versus simple mapping of matrices to java
* arrays is negligible for small matrices (about 1%). The gain from cache efficiency leads
* to up to 3-fold improvements for matrices of moderate to large size.
*/
// BlockRealMatrix stores a rows x columns matrix as a row-major sequence of
// flat square blocks (see the layout description in the comment above).
type BlockRealMatrix struct {
	blocks [][]float64 // blockRows*blockColumns flat arrays, each block row-major
	rows, columns, blockRows, blockColumns int // matrix dimensions and block-grid dimensions
}
// NewBlockRealMatrix creates a zero-filled rowDimension x columnDimension
// matrix in block layout. Both dimensions must be strictly positive.
func NewBlockRealMatrix(rowDimension, columnDimension int) (*BlockRealMatrix, error) {
	switch {
	case rowDimension < 1:
		return nil, notStrictlyPositiveErrorf(float64(rowDimension))
	case columnDimension < 1:
		return nil, notStrictlyPositiveErrorf(float64(columnDimension))
	}
	m := &BlockRealMatrix{
		rows:         rowDimension,
		columns:      columnDimension,
		blockRows:    (rowDimension + BLOCK_SIZE - 1) / BLOCK_SIZE,
		blockColumns: (columnDimension + BLOCK_SIZE - 1) / BLOCK_SIZE,
	}
	m.blocks = createBlocksLayout(rowDimension, columnDimension)
	return m, nil
}
// NewBlockRealMatrixFromSlices builds a block matrix from a row-major 2-D
// slice; the raw data is converted to the internal block layout first.
func NewBlockRealMatrixFromSlices(rawData [][]float64) (*BlockRealMatrix, error) {
	return NewBlockRealMatrixFromBlockData(len(rawData), len(rawData[0]), toBlocksLayout(rawData))
}
// NewBlockRealMatrixFromBlockData wraps pre-built block-layout data
// (as produced by toBlocksLayout or createBlocksLayout) in a matrix,
// validating that every block has the expected height*width length.
// The data is referenced, not copied.
func NewBlockRealMatrixFromBlockData(rows, columns int, blockData [][]float64) (*BlockRealMatrix, error) {
	if rows < 1 {
		return nil, notStrictlyPositiveErrorf(float64(rows))
	}
	if columns < 1 {
		return nil, notStrictlyPositiveErrorf(float64(columns))
	}
	ans := new(BlockRealMatrix)
	ans.rows = rows
	ans.columns = columns
	// number of blocks in each direction (ceiling division)
	ans.blockRows = (rows + BLOCK_SIZE - 1) / BLOCK_SIZE
	ans.blockColumns = (columns + BLOCK_SIZE - 1) / BLOCK_SIZE
	ans.blocks = blockData
	// verify every block's length against its expected height x width
	var index int
	for iBlock := 0; iBlock < ans.blockRows; iBlock++ {
		iHeight := ans.blockHeight(iBlock)
		for jBlock := 0; jBlock < ans.blockColumns; jBlock++ {
			if len(ans.blocks[index]) != iHeight*ans.blockWidth(jBlock) {
				return nil, dimensionsMismatchSimpleErrorf(len(ans.blocks[index]), iHeight*ans.blockWidth(jBlock))
			}
			index++
		}
	}
	return ans, nil
}
// toBlocksLayout converts a row-major 2-D slice into the internal block
// layout: a slice of flat arrays, one per square block, in row-major block
// order. Panics if the rows of rawData have inconsistent lengths.
func toBlocksLayout(rawData [][]float64) [][]float64 {
	rows := len(rawData)
	columns := len(rawData[0])
	// ceiling division: number of blocks in each direction
	blockRows := (rows + BLOCK_SIZE - 1) / BLOCK_SIZE
	blockColumns := (columns + BLOCK_SIZE - 1) / BLOCK_SIZE
	// safety checks: all rows must be the same width
	for i := 0; i < len(rawData); i++ {
		length := len(rawData[i])
		if length != columns {
			panic(dimensionsMismatchSimpleErrorf(columns, length))
		}
	}
	// convert array, one block at a time
	blocks := make([][]float64, blockRows*blockColumns)
	var blockIndex int
	for iBlock := 0; iBlock < blockRows; iBlock++ {
		// row span [pStart, pEnd) covered by this block row
		pStart := iBlock * BLOCK_SIZE
		pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(rows)))
		iHeight := pEnd - pStart
		for jBlock := 0; jBlock < blockColumns; jBlock++ {
			// column span [qStart, qEnd) covered by this block column
			qStart := jBlock * BLOCK_SIZE
			qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(columns)))
			jWidth := qEnd - qStart
			// allocate new block
			blocks[blockIndex] = make([]float64, iHeight*jWidth)
			// copy data row by row into the flat block
			var index int
			for p := pStart; p < pEnd; p++ {
				copy(blocks[blockIndex][index:index+jWidth], rawData[p][qStart:qStart+jWidth])
				index += jWidth
			}
			blockIndex++
		}
	}
	return blocks
}
// createBlocksLayout allocates the zero-filled flat block arrays for a
// rows x columns matrix: regular blocks hold BLOCK_SIZE*BLOCK_SIZE values,
// edge blocks are trimmed to the remaining dimensions.
func createBlocksLayout(rows, columns int) [][]float64 {
	blockRows := (rows + BLOCK_SIZE - 1) / BLOCK_SIZE
	blockColumns := (columns + BLOCK_SIZE - 1) / BLOCK_SIZE
	blocks := make([][]float64, 0, blockRows*blockColumns)
	for iBlock := 0; iBlock < blockRows; iBlock++ {
		height := rows - iBlock*BLOCK_SIZE
		if height > BLOCK_SIZE {
			height = BLOCK_SIZE
		}
		for jBlock := 0; jBlock < blockColumns; jBlock++ {
			width := columns - jBlock*BLOCK_SIZE
			if width > BLOCK_SIZE {
				width = BLOCK_SIZE
			}
			blocks = append(blocks, make([]float64, height*width))
		}
	}
	return blocks
}
// Copy returns a deep copy of this matrix: the block layout is re-created
// and every block's data is duplicated.
func (brm *BlockRealMatrix) Copy() RealMatrix {
	out := &BlockRealMatrix{
		rows:         brm.rows,
		columns:      brm.columns,
		blockRows:    (brm.rows + BLOCK_SIZE - 1) / BLOCK_SIZE,
		blockColumns: (brm.columns + BLOCK_SIZE - 1) / BLOCK_SIZE,
		blocks:       createBlocksLayout(brm.rows, brm.columns),
	}
	for i, src := range brm.blocks {
		copy(out.blocks[i], src)
	}
	return out
}
// Add returns a new matrix equal to brm + mat; panics when the dimensions
// differ. When mat is another BlockRealMatrix the sum runs directly over
// matching flat blocks; otherwise entries are fetched through mat.At.
func (brm *BlockRealMatrix) Add(mat RealMatrix) RealMatrix {
	if err := checkAdditionCompatible(brm, mat); err != nil {
		panic(err)
	}
	out, err := NewBlockRealMatrix(brm.rows, brm.columns)
	if err != nil {
		panic(err)
	}
	if m, ok := mat.(*BlockRealMatrix); ok {
		// fast path: both operands share the same block layout
		// perform addition block-wise, to ensure good cache behavior
		for blockIndex := 0; blockIndex < len(out.blocks); blockIndex++ {
			tBlock := brm.blocks[blockIndex]
			mBlock := m.blocks[blockIndex]
			for k := 0; k < len(out.blocks[blockIndex]); k++ {
				out.blocks[blockIndex][k] = tBlock[k] + mBlock[k]
			}
		}
	} else {
		// generic path: walk blocks of the output, fetching mat entries by (p, q)
		blockIndex := 0
		for iBlock := 0; iBlock < out.blockRows; iBlock++ {
			for jBlock := 0; jBlock < out.blockColumns; jBlock++ {
				// perform addition on the current block; [pStart,pEnd) x
				// [qStart,qEnd) is the matrix region this block covers
				tBlock := brm.blocks[blockIndex]
				pStart := iBlock * BLOCK_SIZE
				pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
				qStart := jBlock * BLOCK_SIZE
				qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.columns)))
				k := 0
				for p := pStart; p < pEnd; p++ {
					for q := qStart; q < qEnd; q++ {
						out.blocks[blockIndex][k] = tBlock[k] + mat.At(p, q)
						k++
					}
				}
				// go to next block
				blockIndex++
			}
		}
	}
	return out
}
// Subtract returns a new matrix equal to brm - mat; panics when the
// dimensions differ. Mirrors Add: a fast flat-block path when mat is a
// BlockRealMatrix, a generic At-based path otherwise.
func (brm *BlockRealMatrix) Subtract(mat RealMatrix) RealMatrix {
	if err := checkAdditionCompatible(brm, mat); err != nil {
		panic(err)
	}
	out, err := NewBlockRealMatrix(brm.rows, brm.columns)
	if err != nil {
		panic(err)
	}
	if m, ok := mat.(*BlockRealMatrix); ok {
		// perform subtraction block-wise, to ensure good cache behavior
		for blockIndex := 0; blockIndex < len(out.blocks); blockIndex++ {
			tBlock := brm.blocks[blockIndex]
			mBlock := m.blocks[blockIndex]
			for k := 0; k < len(out.blocks[blockIndex]); k++ {
				out.blocks[blockIndex][k] = tBlock[k] - mBlock[k]
			}
		}
	} else {
		// generic path: walk blocks of the output, fetching mat entries by (p, q)
		blockIndex := 0
		for iBlock := 0; iBlock < out.blockRows; iBlock++ {
			for jBlock := 0; jBlock < out.blockColumns; jBlock++ {
				// perform subtraction on the current block
				tBlock := brm.blocks[blockIndex]
				pStart := iBlock * BLOCK_SIZE
				pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
				qStart := jBlock * BLOCK_SIZE
				qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.columns)))
				k := 0
				for p := pStart; p < pEnd; p++ {
					for q := qStart; q < qEnd; q++ {
						out.blocks[blockIndex][k] = tBlock[k] - mat.At(p, q)
						k++
					}
				}
				// go to next block
				blockIndex++
			}
		}
	}
	return out
}
// ScalarAdd returns a new matrix whose entries are this matrix's entries
// plus d, computed block-wise over the flat arrays for cache friendliness.
func (brm *BlockRealMatrix) ScalarAdd(d float64) RealMatrix {
	out, err := NewBlockRealMatrix(brm.rows, brm.columns)
	if err != nil {
		panic(err)
	}
	for i, src := range brm.blocks {
		dst := out.blocks[i]
		for k, v := range src {
			dst[k] = v + d
		}
	}
	return out
}
// ScalarMultiply returns a new matrix whose entries are this matrix's
// entries times d, computed block-wise over the flat arrays.
func (brm *BlockRealMatrix) ScalarMultiply(d float64) RealMatrix {
	out, err := NewBlockRealMatrix(brm.rows, brm.columns)
	if err != nil {
		panic(err)
	}
	for i, src := range brm.blocks {
		dst := out.blocks[i]
		for k, v := range src {
			dst[k] = v * d
		}
	}
	return out
}
// Multiply returns the matrix product brm * mat; panics when the inner
// dimensions differ. When mat is another BlockRealMatrix, a blocked
// multiply runs over flat arrays with a 4-way unrolled inner loop;
// otherwise entries of mat are fetched through mat.At.
func (brm *BlockRealMatrix) Multiply(mat RealMatrix) RealMatrix {
	if err := checkMultiplicationCompatible(brm, mat); err != nil {
		panic(err)
	}
	if m, ok := mat.(*BlockRealMatrix); ok {
		out, err := NewBlockRealMatrix(brm.rows, m.columns)
		if err != nil {
			panic(err)
		}
		// perform multiplication block-wise, to ensure good cache behavior
		var blockIndex int
		for iBlock := 0; iBlock < out.blockRows; iBlock++ {
			pStart := iBlock * BLOCK_SIZE
			pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
			for jBlock := 0; jBlock < out.blockColumns; jBlock++ {
				// precomputed row strides of the right-hand block, used by
				// the unrolled inner loop below
				jWidth := out.blockWidth(jBlock)
				jWidth2 := jWidth + jWidth
				jWidth3 := jWidth2 + jWidth
				jWidth4 := jWidth3 + jWidth
				// accumulate over the shared block dimension kBlock
				for kBlock := 0; kBlock < brm.blockColumns; kBlock++ {
					kWidth := brm.blockWidth(kBlock)
					tBlock := brm.blocks[iBlock*brm.blockColumns+kBlock]
					mBlock := m.blocks[kBlock*m.blockColumns+jBlock]
					var k int
					for p := pStart; p < pEnd; p++ {
						// [lStart, lEnd) is row p of tBlock in flat storage
						lStart := (p - pStart) * kWidth
						lEnd := lStart + kWidth
						for nStart := 0; nStart < jWidth; nStart++ {
							var sum float64
							l := lStart
							n := nStart
							// 4-way unrolled dot product: tBlock row x mBlock column
							for l < lEnd-3 {
								sum += tBlock[l]*mBlock[n] +
									tBlock[l+1]*mBlock[n+jWidth] +
									tBlock[l+2]*mBlock[n+jWidth2] +
									tBlock[l+3]*mBlock[n+jWidth3]
								l += 4
								n += jWidth4
							}
							// remainder of the dot product (kWidth not divisible by 4)
							for l < lEnd {
								sum += tBlock[l] * mBlock[n]
								l++
								n += jWidth
							}
							out.blocks[blockIndex][k] += sum
							k++
						}
					}
				}
				blockIndex++
			}
		}
		return out
	} else {
		// generic path: same blocked traversal, right-hand entries via mat.At
		out, err := NewBlockRealMatrix(brm.rows, mat.ColumnDimension())
		if err != nil {
			panic(err)
		}
		var blockIndex int
		for iBlock := 0; iBlock < out.blockRows; iBlock++ {
			pStart := iBlock * BLOCK_SIZE
			pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
			for jBlock := 0; jBlock < out.blockColumns; jBlock++ {
				qStart := jBlock * BLOCK_SIZE
				qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(mat.ColumnDimension())))
				// perform multiplication on current block
				for kBlock := 0; kBlock < brm.blockColumns; kBlock++ {
					kWidth := brm.blockWidth(kBlock)
					tBlock := brm.blocks[iBlock*brm.blockColumns+kBlock]
					rStart := kBlock * BLOCK_SIZE
					var k int
					for p := pStart; p < pEnd; p++ {
						lStart := (p - pStart) * kWidth
						lEnd := lStart + kWidth
						for q := qStart; q < qEnd; q++ {
							var sum float64
							r := rStart
							for l := lStart; l < lEnd; l++ {
								sum += tBlock[l] * mat.At(r, q)
								r++
							}
							out.blocks[blockIndex][k] += sum
							k++
						}
					}
				}
				// go to next block
				blockIndex++
			}
		}
		return out
	}
}
// Data returns the matrix contents as a freshly allocated row-major 2-D
// slice, reassembling each row from the flat blocks that cover it.
func (brm *BlockRealMatrix) Data() [][]float64 {
	data := make([][]float64, brm.RowDimension())
	// width of the (possibly trimmed) rightmost block column
	lastColumns := brm.columns - (brm.blockColumns-1)*BLOCK_SIZE
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		pStart := iBlock * BLOCK_SIZE
		pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
		// regularPos/lastPos track the flat offset of the current row within
		// full-width blocks and within the trimmed last block, respectively
		var regularPos, lastPos int
		for p := pStart; p < pEnd; p++ {
			data[p] = make([]float64, brm.ColumnDimension())
			blockIndex := iBlock * brm.blockColumns
			dataPos := 0
			// copy the full-width blocks of this block row
			for jBlock := 0; jBlock < brm.blockColumns-1; jBlock++ {
				copy(data[p][dataPos:dataPos+BLOCK_SIZE], brm.blocks[blockIndex][regularPos:regularPos+BLOCK_SIZE])
				blockIndex++
				dataPos += BLOCK_SIZE
			}
			// copy the trimmed rightmost block
			copy(data[p][dataPos:dataPos+lastColumns], brm.blocks[blockIndex][lastPos:lastPos+lastColumns])
			regularPos += BLOCK_SIZE
			lastPos += lastColumns
		}
	}
	return data
}
// Trace returns the sum of the diagonal entries; panics when the matrix
// is not square.
func (brm *BlockRealMatrix) Trace() float64 {
	nRows, nCols := brm.RowDimension(), brm.ColumnDimension()
	if nRows != nCols {
		panic(nonSquareMatrixSimpleErrorf(nRows, nCols))
	}
	var trace float64
	for i := 0; i < nRows; i++ {
		trace += brm.At(i, i)
	}
	return trace
}
// SubMatrix returns a new matrix holding the inclusive region
// [startRow, endRow] x [startColumn, endColumn]; panics on invalid indices.
// Because the region is generally not aligned to block boundaries, each
// output block may draw from one, two or four source blocks.
func (brm *BlockRealMatrix) SubMatrix(startRow, endRow, startColumn, endColumn int) RealMatrix {
	if err := checkSubMatrixIndex(brm, startRow, endRow, startColumn, endColumn); err != nil {
		panic(err)
	}
	// create the output matrix
	out, err := NewBlockRealMatrix(endRow-startRow+1, endColumn-startColumn+1)
	if err != nil {
		panic(err)
	}
	// compute blocks shifts: which source block the region starts in, and
	// the offset of the region within that block
	blockStartRow := startRow / BLOCK_SIZE
	rowsShift := startRow % BLOCK_SIZE
	blockStartColumn := startColumn / BLOCK_SIZE
	columnsShift := startColumn % BLOCK_SIZE
	// perform extraction block-wise, to ensure good cache behavior
	pBlock := blockStartRow
	for iBlock := 0; iBlock < out.blockRows; iBlock++ {
		iHeight := out.blockHeight(iBlock)
		qBlock := blockStartColumn
		for jBlock := 0; jBlock < out.blockColumns; jBlock++ {
			jWidth := out.blockWidth(jBlock)
			// handle one block of the output matrix
			outIndex := iBlock*out.blockColumns + jBlock
			index := pBlock*brm.blockColumns + qBlock
			width := brm.blockWidth(qBlock)
			// positive excess means the shifted region spills into the next
			// source block row/column
			heightExcess := iHeight + rowsShift - BLOCK_SIZE
			widthExcess := jWidth + columnsShift - BLOCK_SIZE
			if heightExcess > 0 {
				// the submatrix block spans on two blocks rows from the original matrix
				if widthExcess > 0 {
					// the submatrix block spans on two blocks columns from the original matrix
					width2 := brm.blockWidth(qBlock + 1)
					brm.copyBlockPart(brm.blocks[index], width, rowsShift, BLOCK_SIZE, columnsShift, BLOCK_SIZE, out.blocks[outIndex], jWidth, 0, 0)
					brm.copyBlockPart(brm.blocks[index+1], width2, rowsShift, BLOCK_SIZE, 0, widthExcess, out.blocks[outIndex], jWidth, 0, jWidth-widthExcess)
					brm.copyBlockPart(brm.blocks[index+brm.blockColumns], width, 0, heightExcess, columnsShift, BLOCK_SIZE, out.blocks[outIndex], jWidth, iHeight-heightExcess, 0)
					brm.copyBlockPart(brm.blocks[index+brm.blockColumns+1], width2, 0, heightExcess, 0, widthExcess, out.blocks[outIndex], jWidth, iHeight-heightExcess, jWidth-widthExcess)
				} else {
					// the submatrix block spans on one block column from the original matrix
					brm.copyBlockPart(brm.blocks[index], width, rowsShift, BLOCK_SIZE, columnsShift, jWidth+columnsShift, out.blocks[outIndex], jWidth, 0, 0)
					brm.copyBlockPart(brm.blocks[index+brm.blockColumns], width, 0, heightExcess, columnsShift, jWidth+columnsShift, out.blocks[outIndex], jWidth, iHeight-heightExcess, 0)
				}
			} else {
				// the submatrix block spans on one block row from the original matrix
				if widthExcess > 0 {
					// the submatrix block spans on two blocks columns from the original matrix
					width2 := brm.blockWidth(qBlock + 1)
					brm.copyBlockPart(brm.blocks[index], width, rowsShift, iHeight+rowsShift, columnsShift, BLOCK_SIZE, out.blocks[outIndex], jWidth, 0, 0)
					brm.copyBlockPart(brm.blocks[index+1], width2, rowsShift, iHeight+rowsShift, 0, widthExcess, out.blocks[outIndex], jWidth, 0, jWidth-widthExcess)
				} else {
					// the submatrix block spans on one block column from the original matrix
					brm.copyBlockPart(brm.blocks[index], width, rowsShift, iHeight+rowsShift, columnsShift, jWidth+columnsShift, out.blocks[outIndex], jWidth, 0, 0)
				}
			}
			qBlock++
		}
		pBlock++
	}
	return out
}
// copyBlockPart copies the rectangle [srcStartRow, srcEndRow) x
// [srcStartColumn, srcEndColumn) of srcBlock (row-major, srcWidth columns
// per row) into dstBlock (row-major, dstWidth columns per row) starting at
// (dstStartRow, dstStartColumn). The receiver is not used.
func (brm *BlockRealMatrix) copyBlockPart(srcBlock []float64, srcWidth, srcStartRow, srcEndRow, srcStartColumn, srcEndColumn int, dstBlock []float64, dstWidth, dstStartRow, dstStartColumn int) {
	span := srcEndColumn - srcStartColumn
	src := srcStartRow*srcWidth + srcStartColumn
	dst := dstStartRow*dstWidth + dstStartColumn
	for row := srcStartRow; row < srcEndRow; row++ {
		copy(dstBlock[dst:dst+span], srcBlock[src:src+span])
		src += srcWidth
		dst += dstWidth
	}
}
// SetSubMatrix copies subMatrix into this matrix with its upper-left corner
// at (row, column). Panics on a nil/empty subMatrix, ragged rows, or a
// region that does not fit inside this matrix.
func (brm *BlockRealMatrix) SetSubMatrix(subMatrix [][]float64, row, column int) {
	if subMatrix == nil {
		panic(invalidArgumentSimpleErrorf())
	}
	refLength := len(subMatrix[0])
	if refLength == 0 {
		panic(noDataErrorf(at_least_one_column))
	}
	// inclusive bounds of the destination region
	endRow := row + len(subMatrix) - 1
	endColumn := column + refLength - 1
	if err := checkSubMatrixIndex(brm, row, endRow, column, endColumn); err != nil {
		panic(err)
	}
	// all rows of the source must have the same width
	for _, subRow := range subMatrix {
		if len(subRow) != refLength {
			panic(dimensionsMismatchSimpleErrorf(refLength, len(subRow)))
		}
	}
	// compute blocks bounds (blockEnd* are exclusive)
	blockStartRow := row / BLOCK_SIZE
	blockEndRow := (endRow + BLOCK_SIZE) / BLOCK_SIZE
	blockStartColumn := column / BLOCK_SIZE
	blockEndColumn := (endColumn + BLOCK_SIZE) / BLOCK_SIZE
	// perform copy block-wise, to ensure good cache behavior
	for iBlock := blockStartRow; iBlock < blockEndRow; iBlock++ {
		iHeight := brm.blockHeight(iBlock)
		firstRow := iBlock * BLOCK_SIZE
		// row overlap between this block and the destination region
		iStart := int(math.Max(float64(row), float64(firstRow)))
		iEnd := int(math.Min(float64(endRow+1), float64(firstRow+iHeight)))
		for jBlock := blockStartColumn; jBlock < blockEndColumn; jBlock++ {
			jWidth := brm.blockWidth(jBlock)
			firstColumn := jBlock * BLOCK_SIZE
			// column overlap between this block and the destination region
			jStart := int(math.Max(float64(column), float64(firstColumn)))
			jEnd := int(math.Min(float64(endColumn+1), float64(firstColumn+jWidth)))
			jLength := jEnd - jStart
			// handle one block, row by row
			for i := iStart; i < iEnd; i++ {
				pos := (i-firstRow)*jWidth + (jStart - firstColumn)
				copy(brm.blocks[iBlock*brm.blockColumns+jBlock][pos:pos+jLength], subMatrix[i-row][jStart-column:(jStart-column)+jLength])
			}
		}
	}
}
// RowMatrixAt returns row `row` as a new 1 x columns matrix; panics on an
// invalid index. The row generally straddles several source blocks, and
// the 1-row output has its own block layout, so block boundaries of source
// and destination are tracked independently.
func (brm *BlockRealMatrix) RowMatrixAt(row int) RealMatrix {
	if err := checkRowIndex(brm, row); err != nil {
		panic(err)
	}
	out, err := NewBlockRealMatrix(1, brm.columns)
	if err != nil {
		panic(err)
	}
	// perform copy block-wise, to ensure good cache behavior
	iBlock := row / BLOCK_SIZE // source block row containing `row`
	iRow := row - iBlock*BLOCK_SIZE // offset of `row` within that block row
	outBlockIndex := 0
	outIndex := 0
	for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
		jWidth := brm.blockWidth(jBlock)
		available := len(out.blocks[outBlockIndex]) - outIndex
		if jWidth > available {
			// source block straddles a destination block boundary: split the copy
			copy(out.blocks[outBlockIndex][outIndex:outIndex+available], brm.blocks[iBlock*brm.blockColumns+jBlock][iRow*jWidth:(iRow*jWidth)+available])
			outBlockIndex++
			copy(out.blocks[outBlockIndex][0:jWidth-available], brm.blocks[iBlock*brm.blockColumns+jBlock][iRow*jWidth:(iRow*jWidth)+(jWidth-available)])
			outIndex = jWidth - available
		} else {
			copy(out.blocks[outBlockIndex][outIndex:outIndex+jWidth], brm.blocks[iBlock*brm.blockColumns+jBlock][iRow*jWidth:(iRow*jWidth)+jWidth])
			outIndex += jWidth
		}
	}
	return out
}
// SetRowMatrix replaces row `row` with the contents of mat, which must be a
// 1 x columns matrix; panics on a bad index or mismatched dimensions. A
// fast block-wise copy is used when mat is a BlockRealMatrix, an
// entry-by-entry copy otherwise.
func (brm *BlockRealMatrix) SetRowMatrix(row int, mat RealMatrix) {
	if err := checkRowIndex(brm, row); err != nil {
		panic(err)
	}
	nCols := brm.ColumnDimension()
	if (mat.RowDimension() != 1) || (mat.ColumnDimension() != nCols) {
		panic(matrixDimensionMismatchErrorf(mat.RowDimension(), mat.ColumnDimension(), 1, nCols))
	}
	if m, ok := mat.(*BlockRealMatrix); ok {
		// perform copy block-wise, to ensure good cache behavior
		iBlock := row / BLOCK_SIZE // destination block row containing `row`
		iRow := row - iBlock*BLOCK_SIZE // offset of `row` within that block row
		mBlockIndex := 0
		mIndex := 0
		for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
			jWidth := brm.blockWidth(jBlock)
			available := len(m.blocks[mBlockIndex]) - mIndex
			if jWidth > available {
				// source row straddles a block boundary of m: split the copy
				copy(brm.blocks[iBlock*brm.blockColumns+jBlock][iRow*jWidth:(iRow*jWidth)+available], m.blocks[mBlockIndex][mIndex:mIndex+available])
				mBlockIndex++
				copy(brm.blocks[iBlock*brm.blockColumns+jBlock][iRow*jWidth:(iRow*jWidth)+(jWidth-available)], m.blocks[mBlockIndex][0:jWidth-available])
				mIndex = jWidth - available
			} else {
				copy(brm.blocks[iBlock*brm.blockColumns+jBlock][iRow*jWidth:(iRow*jWidth)+jWidth], m.blocks[mBlockIndex][mIndex:mIndex+jWidth])
				mIndex += jWidth
			}
		}
	} else {
		// generic path: copy entry by entry through the RealMatrix interface
		for i := 0; i < nCols; i++ {
			brm.SetEntry(row, i, mat.At(0, i))
		}
	}
}
// ColumnMatrixAt returns the given column as a new rows-by-1 BlockRealMatrix.
// Panics if column is out of range. The data is copied, not aliased.
func (brm *BlockRealMatrix) ColumnMatrixAt(column int) RealMatrix {
	if err := checkColumnIndex(brm, column); err != nil {
		panic(err)
	}
	out, err := NewBlockRealMatrix(brm.rows, 1)
	if err != nil {
		panic(err)
	}
	// perform copy block-wise, to ensure good cache behavior
	jBlock := column / BLOCK_SIZE
	jColumn := column - jBlock*BLOCK_SIZE
	jWidth := brm.blockWidth(jBlock)
	outBlockIndex := 0
	outIndex := 0
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		iHeight := brm.blockHeight(iBlock)
		for i := 0; i < iHeight; i++ {
			// Advance to the next output block when the current one is full.
			if outIndex >= len(out.blocks[outBlockIndex]) {
				outBlockIndex++
				outIndex = 0
			}
			out.blocks[outBlockIndex][outIndex] = brm.blocks[iBlock*brm.blockColumns+jBlock][i*jWidth+jColumn]
			outIndex++
		}
	}
	return out
}
// SetColumnMatrix replaces the given column of brm with the entries of mat,
// which must be a rows-by-1 matrix. Panics on an invalid column index or a
// dimension mismatch. A fast block-wise copy is used when mat is itself a
// *BlockRealMatrix; otherwise entries are set one at a time.
func (brm *BlockRealMatrix) SetColumnMatrix(column int, mat RealMatrix) {
	if err := checkColumnIndex(brm, column); err != nil {
		panic(err)
	}
	nRows := brm.RowDimension()
	if (mat.RowDimension() != nRows) || (mat.ColumnDimension() != 1) {
		panic(matrixDimensionMismatchErrorf(mat.RowDimension(), mat.ColumnDimension(), nRows, 1))
	}
	if m, ok := mat.(*BlockRealMatrix); ok {
		// perform copy block-wise, to ensure good cache behavior
		jBlock := column / BLOCK_SIZE
		jColumn := column - jBlock*BLOCK_SIZE
		jWidth := brm.blockWidth(jBlock)
		mBlockIndex := 0
		mIndex := 0
		for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
			iHeight := brm.blockHeight(iBlock)
			for i := 0; i < iHeight; i++ {
				// Advance to the next source block of m once exhausted.
				if mIndex >= len(m.blocks[mBlockIndex]) {
					mBlockIndex++
					mIndex = 0
				}
				brm.blocks[iBlock*brm.blockColumns+jBlock][i*jWidth+jColumn] = m.blocks[mBlockIndex][mIndex]
				mIndex++
			}
		}
	} else {
		// Generic fallback for any other RealMatrix implementation.
		for i := 0; i < nRows; i++ {
			brm.SetEntry(i, column, mat.At(i, 0))
		}
	}
}
// RowVectorAt returns the given row as a new ArrayRealVector.
// Panics if row is out of range. The data is copied, not aliased.
func (brm *BlockRealMatrix) RowVectorAt(row int) RealVector {
	if err := checkRowIndex(brm, row); err != nil {
		panic(err)
	}
	outData := make([]float64, brm.columns)
	// perform copy block-wise, to ensure good cache behavior
	iBlock := row / BLOCK_SIZE
	iRow := row - iBlock*BLOCK_SIZE
	outIndex := 0
	for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
		jWidth := brm.blockWidth(jBlock)
		copy(outData[outIndex:outIndex+jWidth], brm.blocks[iBlock*brm.blockColumns+jBlock][iRow*jWidth:(iRow*jWidth)+jWidth])
		outIndex += jWidth
	}
	v := new(ArrayRealVector)
	v.data = outData
	return v
}
// SetRowVector replaces the given row of brm with the entries of vec.
// Panics on an invalid row index or a dimension mismatch. When vec is an
// *ArrayRealVector the work is delegated to the faster SetRow.
func (brm *BlockRealMatrix) SetRowVector(row int, vec RealVector) {
	if av, ok := vec.(*ArrayRealVector); ok {
		// Fast path: copy straight from the vector's backing slice.
		brm.SetRow(row, av.DataRef())
		return
	}
	if err := checkRowIndex(brm, row); err != nil {
		panic(err)
	}
	nCols := brm.ColumnDimension()
	if vec.Dimension() != nCols {
		panic(matrixDimensionMismatchErrorf(1, vec.Dimension(), 1, nCols))
	}
	// Generic path: set the entries one by one.
	for col := 0; col < nCols; col++ {
		brm.SetEntry(row, col, vec.At(col))
	}
}
// ColumnVectorAt returns the given column as a new ArrayRealVector.
// Panics if column is out of range. The data is copied, not aliased.
func (brm *BlockRealMatrix) ColumnVectorAt(column int) RealVector {
	if err := checkColumnIndex(brm, column); err != nil {
		panic(err)
	}
	outData := make([]float64, brm.rows)
	// perform copy block-wise, to ensure good cache behavior
	jBlock := column / BLOCK_SIZE
	jColumn := column - jBlock*BLOCK_SIZE
	jWidth := brm.blockWidth(jBlock)
	outIndex := 0
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		iHeight := brm.blockHeight(iBlock)
		for i := 0; i < iHeight; i++ {
			outData[outIndex] = brm.blocks[iBlock*brm.blockColumns+jBlock][i*jWidth+jColumn]
			outIndex++
		}
	}
	v := new(ArrayRealVector)
	v.data = outData
	return v
}
// SetColumnVector replaces the given column of brm with the entries of vec.
// Panics on an invalid column index or a dimension mismatch. When vec is an
// *ArrayRealVector the work is delegated to the faster SetColumn.
func (brm *BlockRealMatrix) SetColumnVector(column int, vec RealVector) {
	if av, ok := vec.(*ArrayRealVector); ok {
		// Fast path: copy straight from the vector's backing slice.
		brm.SetColumn(column, av.DataRef())
		return
	}
	if err := checkColumnIndex(brm, column); err != nil {
		panic(err)
	}
	nRows := brm.RowDimension()
	if vec.Dimension() != nRows {
		panic(matrixDimensionMismatchErrorf(vec.Dimension(), 1, nRows, 1))
	}
	// Generic path: set the entries one by one.
	for r := 0; r < nRows; r++ {
		brm.SetEntry(r, column, vec.At(r))
	}
}
// RowAt returns a fresh slice containing the entries of the given row.
// Panics if row is out of range.
func (brm *BlockRealMatrix) RowAt(row int) []float64 {
	if err := checkRowIndex(brm, row); err != nil {
		panic(err)
	}
	result := make([]float64, brm.columns)
	// Walk the blocks that intersect this row and copy each segment.
	iBlock := row / BLOCK_SIZE
	iRow := row - iBlock*BLOCK_SIZE
	dst := 0
	for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
		jWidth := brm.blockWidth(jBlock)
		block := brm.blocks[iBlock*brm.blockColumns+jBlock]
		dst += copy(result[dst:dst+jWidth], block[iRow*jWidth:iRow*jWidth+jWidth])
	}
	return result
}
// SetRow replaces the entries of the given row with the values in array.
// Panics on an invalid row index or if len(array) differs from the number
// of columns.
func (brm *BlockRealMatrix) SetRow(row int, array []float64) {
	if err := checkRowIndex(brm, row); err != nil {
		panic(err)
	}
	nCols := brm.ColumnDimension()
	if len(array) != nCols {
		panic(matrixDimensionMismatchErrorf(1, len(array), 1, nCols))
	}
	// Walk the blocks that intersect this row and copy each segment in.
	iBlock := row / BLOCK_SIZE
	iRow := row - iBlock*BLOCK_SIZE
	src := 0
	for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
		jWidth := brm.blockWidth(jBlock)
		block := brm.blocks[iBlock*brm.blockColumns+jBlock]
		src += copy(block[iRow*jWidth:iRow*jWidth+jWidth], array[src:src+jWidth])
	}
}
// ColumnAt returns a fresh slice containing the entries of the given column.
// Panics if column is out of range.
func (brm *BlockRealMatrix) ColumnAt(column int) []float64 {
	if err := checkColumnIndex(brm, column); err != nil {
		panic(err)
	}
	result := make([]float64, brm.rows)
	// Walk the blocks that intersect this column, top to bottom.
	jBlock := column / BLOCK_SIZE
	jColumn := column - jBlock*BLOCK_SIZE
	jWidth := brm.blockWidth(jBlock)
	dst := 0
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		block := brm.blocks[iBlock*brm.blockColumns+jBlock]
		for i, n := 0, brm.blockHeight(iBlock); i < n; i++ {
			result[dst] = block[i*jWidth+jColumn]
			dst++
		}
	}
	return result
}
// SetColumn replaces the entries of the given column with the values in
// array. Panics on an invalid column index or if len(array) differs from
// the number of rows.
func (brm *BlockRealMatrix) SetColumn(column int, array []float64) {
	if err := checkColumnIndex(brm, column); err != nil {
		panic(err)
	}
	nRows := brm.RowDimension()
	if len(array) != nRows {
		panic(matrixDimensionMismatchErrorf(len(array), 1, nRows, 1))
	}
	// Walk the blocks that intersect this column, top to bottom.
	jBlock := column / BLOCK_SIZE
	jColumn := column - jBlock*BLOCK_SIZE
	jWidth := brm.blockWidth(jBlock)
	src := 0
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		block := brm.blocks[iBlock*brm.blockColumns+jBlock]
		for i, n := 0, brm.blockHeight(iBlock); i < n; i++ {
			block[i*jWidth+jColumn] = array[src]
			src++
		}
	}
}
// At returns the entry at (row, column). Panics if either index is out of
// range.
func (brm *BlockRealMatrix) At(row, column int) float64 {
	if err := checkMatrixIndex(brm, row, column); err != nil {
		panic(err)
	}
	// Locate the containing block, then the flat offset k within it.
	iBlock := row / BLOCK_SIZE
	jBlock := column / BLOCK_SIZE
	k := (row-iBlock*BLOCK_SIZE)*brm.blockWidth(jBlock) + (column - jBlock*BLOCK_SIZE)
	return brm.blocks[iBlock*brm.blockColumns+jBlock][k]
}
// SetEntry overwrites the entry at (row, column) with value. Panics if
// either index is out of range.
func (brm *BlockRealMatrix) SetEntry(row, column int, value float64) {
	if err := checkMatrixIndex(brm, row, column); err != nil {
		panic(err)
	}
	// Locate the containing block, then the flat offset k within it.
	iBlock := row / BLOCK_SIZE
	jBlock := column / BLOCK_SIZE
	k := (row-iBlock*BLOCK_SIZE)*brm.blockWidth(jBlock) + (column - jBlock*BLOCK_SIZE)
	brm.blocks[iBlock*brm.blockColumns+jBlock][k] = value
}
// AddToEntry adds increment to the entry at (row, column). Panics if either
// index is out of range.
func (brm *BlockRealMatrix) AddToEntry(row, column int, increment float64) {
	if err := checkMatrixIndex(brm, row, column); err != nil {
		panic(err)
	}
	// Locate the containing block, then the flat offset k within it.
	iBlock := row / BLOCK_SIZE
	jBlock := column / BLOCK_SIZE
	k := (row-iBlock*BLOCK_SIZE)*brm.blockWidth(jBlock) + (column - jBlock*BLOCK_SIZE)
	brm.blocks[iBlock*brm.blockColumns+jBlock][k] += increment
}
// MultiplyEntry multiplies the entry at (row, column) by factor. Panics if
// either index is out of range.
func (brm *BlockRealMatrix) MultiplyEntry(row, column int, factor float64) {
	if err := checkMatrixIndex(brm, row, column); err != nil {
		panic(err)
	}
	// Locate the containing block, then the flat offset k within it.
	iBlock := row / BLOCK_SIZE
	jBlock := column / BLOCK_SIZE
	k := (row-iBlock*BLOCK_SIZE)*brm.blockWidth(jBlock) + (column - jBlock*BLOCK_SIZE)
	brm.blocks[iBlock*brm.blockColumns+jBlock][k] *= factor
}
// Transpose returns a new matrix that is the transpose of brm.
//
// The result variable was previously named "copy", which shadowed the
// builtin copy function inside this method; it is renamed to out. The
// algorithm itself is unchanged: each block of the result is filled by
// reading the mirror block of the source with a stride of lInc.
func (brm *BlockRealMatrix) Transpose() RealMatrix {
	nRows := brm.RowDimension()
	nCols := brm.ColumnDimension()
	out := new(BlockRealMatrix)
	out.rows = nCols
	out.columns = nRows
	out.blockRows = (nCols + BLOCK_SIZE - 1) / BLOCK_SIZE
	out.blockColumns = (nRows + BLOCK_SIZE - 1) / BLOCK_SIZE
	out.blocks = createBlocksLayout(nCols, nRows)
	// perform transpose block-wise, to ensure good cache behavior
	blockIndex := 0
	for iBlock := 0; iBlock < brm.blockColumns; iBlock++ {
		for jBlock := 0; jBlock < brm.blockRows; jBlock++ {
			// transpose current block
			pStart := iBlock * BLOCK_SIZE
			pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.columns)))
			qStart := jBlock * BLOCK_SIZE
			qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.rows)))
			k := 0
			for p := pStart; p < pEnd; p++ {
				// lInc is the row width of the source block; stepping l by it
				// walks down a column of the source, i.e. a row of the result.
				lInc := pEnd - pStart
				l := p - pStart
				for q := qStart; q < qEnd; q++ {
					out.blocks[blockIndex][k] = brm.blocks[jBlock*brm.blockColumns+iBlock][l]
					k++
					l += lInc
				}
			}
			// go to next block
			blockIndex++
		}
	}
	return out
}
// RowDimension returns the number of rows of the matrix.
func (brm *BlockRealMatrix) RowDimension() int {
	return brm.rows
}
// ColumnDimension returns the number of columns of the matrix.
func (brm *BlockRealMatrix) ColumnDimension() int {
	return brm.columns
}
// OperateVector returns the matrix-vector product brm · vec as a new
// ArrayRealVector. Panics if vec's dimension differs from the number of
// columns.
//
// Fix: the generic (non-ArrayRealVector) path previously called
// v.Dimension() and dimensionsMismatchSimpleErrorf(v.Dimension(), ...) on
// v, the nil *ArrayRealVector left over from the failed type assertion,
// which dereferences a nil pointer. It now uses the vec parameter directly,
// matching the vec.At(i) calls already present in that branch.
func (brm *BlockRealMatrix) OperateVector(vec RealVector) RealVector {
	var out []float64
	if v, ok := vec.(*ArrayRealVector); ok {
		// Fast path: multiply directly against the vector's backing slice.
		out = brm.Operate(v.DataRef())
	} else {
		nRows := brm.RowDimension()
		nCols := brm.ColumnDimension()
		if vec.Dimension() != nCols {
			panic(dimensionsMismatchSimpleErrorf(vec.Dimension(), nCols))
		}
		out = make([]float64, nRows)
		for row := 0; row < nRows; row++ {
			var sum float64
			for i := 0; i < nCols; i++ {
				sum += brm.At(row, i) * vec.At(i)
			}
			out[row] = sum
		}
	}
	v := new(ArrayRealVector)
	v.data = out
	return v
}
// Operate returns the matrix-vector product brm · v as a new slice of
// length rows. Panics if len(v) differs from the number of columns.
func (brm *BlockRealMatrix) Operate(v []float64) []float64 {
	if len(v) != brm.columns {
		panic(dimensionsMismatchSimpleErrorf(len(v), brm.columns))
	}
	out := make([]float64, brm.rows)
	// perform multiplication block-wise, to ensure good cache behavior
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		pStart := iBlock * BLOCK_SIZE
		pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
		for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
			qStart := jBlock * BLOCK_SIZE
			qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.columns)))
			// k walks the block's storage linearly while q walks the
			// corresponding segment of v.
			k := 0
			for p := pStart; p < pEnd; p++ {
				sum := 0.
				q := qStart
				// Main loop manually unrolled by 4 for speed.
				for q < qEnd-3 {
					sum += brm.blocks[iBlock*brm.blockColumns+jBlock][k]*v[q] +
						brm.blocks[iBlock*brm.blockColumns+jBlock][k+1]*v[q+1] +
						brm.blocks[iBlock*brm.blockColumns+jBlock][k+2]*v[q+2] +
						brm.blocks[iBlock*brm.blockColumns+jBlock][k+3]*v[q+3]
					k += 4
					q += 4
				}
				// Remainder (0-3 trailing elements).
				for q < qEnd {
					sum += brm.blocks[iBlock*brm.blockColumns+jBlock][k] * v[q]
					k++
					q++
				}
				out[p] += sum
			}
		}
	}
	return out
}
// PreMultiplyMatrix returns the product m · brm.
func (brm *BlockRealMatrix) PreMultiplyMatrix(m RealMatrix) RealMatrix {
	return m.Multiply(brm)
}
// PreMultiplyVector returns the row-vector product vecᵀ · brm as a new
// ArrayRealVector. Panics if vec's dimension differs from the number of
// rows.
//
// Fixes two bugs in the generic (non-ArrayRealVector) path:
//   - `out := make(...)` declared a new variable shadowing the outer out,
//     so the result built in the loop was discarded and a nil-backed vector
//     was returned; it is now a plain assignment.
//   - v.Dimension() / v.At(i) were called on v, the nil *ArrayRealVector
//     left over from the failed type assertion (nil pointer dereference);
//     the vec parameter is used instead.
func (brm *BlockRealMatrix) PreMultiplyVector(vec RealVector) RealVector {
	var out []float64
	if v, ok := vec.(*ArrayRealVector); ok {
		// Fast path: multiply directly against the vector's backing slice.
		out = brm.PreMultiply(v.DataRef())
	} else {
		nRows := brm.RowDimension()
		nCols := brm.ColumnDimension()
		if vec.Dimension() != nRows {
			panic(dimensionsMismatchSimpleErrorf(vec.Dimension(), nRows))
		}
		out = make([]float64, nCols)
		for col := 0; col < nCols; col++ {
			var sum float64
			for i := 0; i < nRows; i++ {
				sum += brm.At(i, col) * vec.At(i)
			}
			out[col] = sum
		}
	}
	v := new(ArrayRealVector)
	v.data = out
	return v
}
// PreMultiply returns the row-vector product vᵀ · brm as a new slice of
// length columns. Panics if len(v) differs from the number of rows.
func (brm *BlockRealMatrix) PreMultiply(v []float64) []float64 {
	if len(v) != brm.rows {
		panic(dimensionsMismatchSimpleErrorf(len(v), brm.rows))
	}
	out := make([]float64, brm.columns)
	// perform multiplication block-wise, to ensure good cache behavior
	for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
		jWidth := brm.blockWidth(jBlock)
		// Precomputed multiples of the block row width for the unrolled loop.
		jWidth2 := jWidth + jWidth
		jWidth3 := jWidth2 + jWidth
		jWidth4 := jWidth3 + jWidth
		qStart := jBlock * BLOCK_SIZE
		qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.columns)))
		for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
			pStart := iBlock * BLOCK_SIZE
			pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
			for q := qStart; q < qEnd; q++ {
				// k steps down a column of the block, jWidth elements apart.
				k := q - qStart
				sum := 0.
				p := pStart
				// Main loop manually unrolled by 4 for speed.
				for p < pEnd-3 {
					sum += brm.blocks[iBlock*brm.blockColumns+jBlock][k]*v[p] +
						brm.blocks[iBlock*brm.blockColumns+jBlock][k+jWidth]*v[p+1] +
						brm.blocks[iBlock*brm.blockColumns+jBlock][k+jWidth2]*v[p+2] +
						brm.blocks[iBlock*brm.blockColumns+jBlock][k+jWidth3]*v[p+3]
					k += jWidth4
					p += 4
				}
				// Remainder (0-3 trailing elements).
				for p < pEnd {
					sum += brm.blocks[iBlock*brm.blockColumns+jBlock][k] * v[p]
					p++
					k += jWidth
				}
				out[q] += sum
			}
		}
	}
	return out
}
// WalkInUpdateRowOrder visits every entry in row-major order, replacing each
// entry with the value returned by visitor.Visit, and returns visitor.End().
func (brm *BlockRealMatrix) WalkInUpdateRowOrder(visitor RealMatrixChangingVisitor) float64 {
	visitor.Start(brm.rows, brm.columns, 0, brm.rows-1, 0, brm.columns-1)
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		pStart := iBlock * BLOCK_SIZE
		pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
		// Iterate rows within the block band, then blocks left to right, so
		// entries are still visited in global row-major order.
		for p := pStart; p < pEnd; p++ {
			for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
				jWidth := brm.blockWidth(jBlock)
				qStart := jBlock * BLOCK_SIZE
				qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.columns)))
				k := (p - pStart) * jWidth
				for q := qStart; q < qEnd; q++ {
					brm.blocks[iBlock*brm.blockColumns+jBlock][k] = visitor.Visit(p, q, brm.blocks[iBlock*brm.blockColumns+jBlock][k])
					k++
				}
			}
		}
	}
	return visitor.End()
}
// WalkInRowOrder visits every entry in row-major order without modifying the
// matrix and returns visitor.End().
func (brm *BlockRealMatrix) WalkInRowOrder(visitor RealMatrixPreservingVisitor) float64 {
	visitor.Start(brm.rows, brm.columns, 0, brm.rows-1, 0, brm.columns-1)
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		pStart := iBlock * BLOCK_SIZE
		pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
		// Iterate rows within the block band, then blocks left to right, so
		// entries are still visited in global row-major order.
		for p := pStart; p < pEnd; p++ {
			for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
				jWidth := brm.blockWidth(jBlock)
				qStart := jBlock * BLOCK_SIZE
				qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.columns)))
				k := (p - pStart) * jWidth
				for q := qStart; q < qEnd; q++ {
					visitor.Visit(p, q, brm.blocks[iBlock*brm.blockColumns+jBlock][k])
					k++
				}
			}
		}
	}
	return visitor.End()
}
// WalkInUpdateRowOrderBounded visits, in row-major order, every entry of the
// submatrix bounded by the inclusive indexes [startRow, endRow] x
// [startColumn, endColumn], replacing each entry with the value returned by
// visitor.Visit, and returns visitor.End(). Panics on invalid bounds.
func (brm *BlockRealMatrix) WalkInUpdateRowOrderBounded(visitor RealMatrixChangingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	if err := checkSubMatrixIndex(brm, startRow, endRow, startColumn, endColumn); err != nil {
		panic(err)
	}
	visitor.Start(brm.rows, brm.columns, startRow, endRow, startColumn, endColumn)
	for iBlock := startRow / BLOCK_SIZE; iBlock < 1+endRow/BLOCK_SIZE; iBlock++ {
		p0 := iBlock * BLOCK_SIZE
		// Clip the block band to the requested row range.
		pStart := int(math.Max(float64(startRow), float64(p0)))
		pEnd := int(math.Min(float64((iBlock+1)*BLOCK_SIZE), float64(1+endRow)))
		for p := pStart; p < pEnd; p++ {
			for jBlock := startColumn / BLOCK_SIZE; jBlock < 1+endColumn/BLOCK_SIZE; jBlock++ {
				jWidth := brm.blockWidth(jBlock)
				q0 := jBlock * BLOCK_SIZE
				// Clip the block to the requested column range.
				qStart := int(math.Max(float64(startColumn), float64(q0)))
				qEnd := int(math.Min(float64((jBlock+1)*BLOCK_SIZE), float64(1+endColumn)))
				k := (p-p0)*jWidth + qStart - q0
				for q := qStart; q < qEnd; q++ {
					brm.blocks[iBlock*brm.blockColumns+jBlock][k] = visitor.Visit(p, q, brm.blocks[iBlock*brm.blockColumns+jBlock][k])
					k++
				}
			}
		}
	}
	return visitor.End()
}
// WalkInRowOrderBounded visits, in row-major order, every entry of the
// submatrix bounded by the inclusive indexes [startRow, endRow] x
// [startColumn, endColumn] without modifying the matrix, and returns
// visitor.End(). Panics on invalid bounds.
func (brm *BlockRealMatrix) WalkInRowOrderBounded(visitor RealMatrixPreservingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	if err := checkSubMatrixIndex(brm, startRow, endRow, startColumn, endColumn); err != nil {
		panic(err)
	}
	visitor.Start(brm.rows, brm.columns, startRow, endRow, startColumn, endColumn)
	for iBlock := startRow / BLOCK_SIZE; iBlock < 1+endRow/BLOCK_SIZE; iBlock++ {
		p0 := iBlock * BLOCK_SIZE
		// Clip the block band to the requested row range.
		pStart := int(math.Max(float64(startRow), float64(p0)))
		pEnd := int(math.Min(float64((iBlock+1)*BLOCK_SIZE), float64(1+endRow)))
		for p := pStart; p < pEnd; p++ {
			for jBlock := startColumn / BLOCK_SIZE; jBlock < 1+endColumn/BLOCK_SIZE; jBlock++ {
				jWidth := brm.blockWidth(jBlock)
				q0 := jBlock * BLOCK_SIZE
				// Clip the block to the requested column range.
				qStart := int(math.Max(float64(startColumn), float64(q0)))
				qEnd := int(math.Min(float64((jBlock+1)*BLOCK_SIZE), float64(1+endColumn)))
				k := (p-p0)*jWidth + qStart - q0
				for q := qStart; q < qEnd; q++ {
					visitor.Visit(p, q, brm.blocks[iBlock*brm.blockColumns+jBlock][k])
					k++
				}
			}
		}
	}
	return visitor.End()
}
// WalkInUpdateColumnOrder visits every entry in column-major order,
// replacing each entry with the value returned by visitor.Visit, and
// returns visitor.End().
func (brm *BlockRealMatrix) WalkInUpdateColumnOrder(visitor RealMatrixChangingVisitor) float64 {
	nRows := brm.RowDimension()
	nCols := brm.ColumnDimension()
	visitor.Start(nRows, nCols, 0, nRows-1, 0, nCols-1)
	// Column-major traversal fights the block layout, so this goes through
	// the generic At/SetEntry accessors.
	for c := 0; c < nCols; c++ {
		for r := 0; r < nRows; r++ {
			brm.SetEntry(r, c, visitor.Visit(r, c, brm.At(r, c)))
		}
	}
	return visitor.End()
}
// WalkInColumnOrder visits every entry in column-major order without
// modifying the matrix and returns visitor.End().
func (brm *BlockRealMatrix) WalkInColumnOrder(visitor RealMatrixPreservingVisitor) float64 {
	nRows := brm.RowDimension()
	nCols := brm.ColumnDimension()
	visitor.Start(nRows, nCols, 0, nRows-1, 0, nCols-1)
	// Column-major traversal fights the block layout, so this goes through
	// the generic At accessor.
	for c := 0; c < nCols; c++ {
		for r := 0; r < nRows; r++ {
			visitor.Visit(r, c, brm.At(r, c))
		}
	}
	return visitor.End()
}
// WalkInUpdateColumnOrderBounded visits, in column-major order, every entry
// of the submatrix bounded by the inclusive indexes [startRow, endRow] x
// [startColumn, endColumn], replacing each entry with the value returned by
// visitor.Visit, and returns visitor.End(). Panics on invalid bounds.
func (brm *BlockRealMatrix) WalkInUpdateColumnOrderBounded(visitor RealMatrixChangingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	if err := checkSubMatrixIndex(brm, startRow, endRow, startColumn, endColumn); err != nil {
		panic(err)
	}
	visitor.Start(brm.RowDimension(), brm.ColumnDimension(), startRow, endRow, startColumn, endColumn)
	for c := startColumn; c <= endColumn; c++ {
		for r := startRow; r <= endRow; r++ {
			brm.SetEntry(r, c, visitor.Visit(r, c, brm.At(r, c)))
		}
	}
	return visitor.End()
}
// WalkInColumnOrderBounded visits, in column-major order, every entry of the
// submatrix bounded by the inclusive indexes [startRow, endRow] x
// [startColumn, endColumn] without modifying the matrix, and returns
// visitor.End(). Panics on invalid bounds.
func (brm *BlockRealMatrix) WalkInColumnOrderBounded(visitor RealMatrixPreservingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	if err := checkSubMatrixIndex(brm, startRow, endRow, startColumn, endColumn); err != nil {
		panic(err)
	}
	visitor.Start(brm.RowDimension(), brm.ColumnDimension(), startRow, endRow, startColumn, endColumn)
	for c := startColumn; c <= endColumn; c++ {
		for r := startRow; r <= endRow; r++ {
			visitor.Visit(r, c, brm.At(r, c))
		}
	}
	return visitor.End()
}
// WalkInUpdateOptimizedOrder visits every entry block by block (the cache
// friendly order for this storage layout, not row- or column-major across
// the whole matrix), replacing each entry with the value returned by
// visitor.Visit, and returns visitor.End().
func (brm *BlockRealMatrix) WalkInUpdateOptimizedOrder(visitor RealMatrixChangingVisitor) float64 {
	visitor.Start(brm.rows, brm.columns, 0, brm.rows-1, 0, brm.columns-1)
	blockIndex := 0
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		pStart := iBlock * BLOCK_SIZE
		pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
		for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
			qStart := jBlock * BLOCK_SIZE
			qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.columns)))
			// k scans the block's storage linearly from the start.
			k := 0
			for p := pStart; p < pEnd; p++ {
				for q := qStart; q < qEnd; q++ {
					brm.blocks[blockIndex][k] = visitor.Visit(p, q, brm.blocks[blockIndex][k])
					k++
				}
			}
			blockIndex++
		}
	}
	return visitor.End()
}
// WalkInOptimizedOrder visits every entry block by block (the cache friendly
// order for this storage layout) without modifying the matrix, and returns
// visitor.End().
func (brm *BlockRealMatrix) WalkInOptimizedOrder(visitor RealMatrixPreservingVisitor) float64 {
	visitor.Start(brm.rows, brm.columns, 0, brm.rows-1, 0, brm.columns-1)
	blockIndex := 0
	for iBlock := 0; iBlock < brm.blockRows; iBlock++ {
		pStart := iBlock * BLOCK_SIZE
		pEnd := int(math.Min(float64(pStart+BLOCK_SIZE), float64(brm.rows)))
		for jBlock := 0; jBlock < brm.blockColumns; jBlock++ {
			qStart := jBlock * BLOCK_SIZE
			qEnd := int(math.Min(float64(qStart+BLOCK_SIZE), float64(brm.columns)))
			// k scans the block's storage linearly from the start.
			k := 0
			for p := pStart; p < pEnd; p++ {
				for q := qStart; q < qEnd; q++ {
					visitor.Visit(p, q, brm.blocks[blockIndex][k])
					k++
				}
			}
			blockIndex++
		}
	}
	return visitor.End()
}
// WalkInUpdateOptimizedOrderBounded visits, block by block, every entry of
// the submatrix bounded by the inclusive indexes [startRow, endRow] x
// [startColumn, endColumn], replacing each entry with the value returned by
// visitor.Visit, and returns visitor.End(). Panics on invalid bounds.
func (brm *BlockRealMatrix) WalkInUpdateOptimizedOrderBounded(visitor RealMatrixChangingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	if err := checkSubMatrixIndex(brm, startRow, endRow, startColumn, endColumn); err != nil {
		panic(err)
	}
	visitor.Start(brm.rows, brm.columns, startRow, endRow, startColumn, endColumn)
	for iBlock := startRow / BLOCK_SIZE; iBlock < 1+endRow/BLOCK_SIZE; iBlock++ {
		p0 := iBlock * BLOCK_SIZE
		// Clip the block band to the requested row range.
		pStart := int(math.Max(float64(startRow), float64(p0)))
		pEnd := int(math.Min(float64((iBlock+1)*BLOCK_SIZE), float64(1+endRow)))
		for jBlock := startColumn / BLOCK_SIZE; jBlock < 1+endColumn/BLOCK_SIZE; jBlock++ {
			jWidth := brm.blockWidth(jBlock)
			q0 := jBlock * BLOCK_SIZE
			// Clip the block to the requested column range.
			qStart := int(math.Max(float64(startColumn), float64(q0)))
			qEnd := int(math.Min(float64((jBlock+1)*BLOCK_SIZE), float64(1+endColumn)))
			for p := pStart; p < pEnd; p++ {
				k := (p-p0)*jWidth + qStart - q0
				for q := qStart; q < qEnd; q++ {
					brm.blocks[iBlock*brm.blockColumns+jBlock][k] = visitor.Visit(p, q, brm.blocks[iBlock*brm.blockColumns+jBlock][k])
					k++
				}
			}
		}
	}
	return visitor.End()
}
// WalkInOptimizedOrderBounded visits, block by block, every entry of the
// submatrix bounded by the inclusive indexes [startRow, endRow] x
// [startColumn, endColumn] without modifying the matrix, and returns
// visitor.End(). Panics on invalid bounds.
func (brm *BlockRealMatrix) WalkInOptimizedOrderBounded(visitor RealMatrixPreservingVisitor, startRow, endRow, startColumn, endColumn int) float64 {
	if err := checkSubMatrixIndex(brm, startRow, endRow, startColumn, endColumn); err != nil {
		panic(err)
	}
	visitor.Start(brm.rows, brm.columns, startRow, endRow, startColumn, endColumn)
	for iBlock := startRow / BLOCK_SIZE; iBlock < 1+endRow/BLOCK_SIZE; iBlock++ {
		p0 := iBlock * BLOCK_SIZE
		// Clip the block band to the requested row range.
		pStart := int(math.Max(float64(startRow), float64(p0)))
		pEnd := int(math.Min(float64((iBlock+1)*BLOCK_SIZE), float64(1+endRow)))
		for jBlock := startColumn / BLOCK_SIZE; jBlock < 1+endColumn/BLOCK_SIZE; jBlock++ {
			jWidth := brm.blockWidth(jBlock)
			q0 := jBlock * BLOCK_SIZE
			// Clip the block to the requested column range.
			qStart := int(math.Max(float64(startColumn), float64(q0)))
			qEnd := int(math.Min(float64((jBlock+1)*BLOCK_SIZE), float64(1+endColumn)))
			for p := pStart; p < pEnd; p++ {
				k := (p-p0)*jWidth + qStart - q0
				for q := qStart; q < qEnd; q++ {
					visitor.Visit(p, q, brm.blocks[iBlock*brm.blockColumns+jBlock][k])
					k++
				}
			}
		}
	}
	return visitor.End()
}
// blockHeight returns the number of rows stored in the given block row:
// BLOCK_SIZE for all but the last block row, which may be shorter.
func (brm *BlockRealMatrix) blockHeight(blockRow int) int {
	if blockRow == brm.blockRows-1 {
		return brm.rows - blockRow*BLOCK_SIZE
	}
	return BLOCK_SIZE
}
// blockWidth returns the number of columns stored in the given block
// column: BLOCK_SIZE for all but the last block column, which may be
// narrower.
func (brm *BlockRealMatrix) blockWidth(blockColumn int) int {
	if blockColumn == brm.blockColumns-1 {
		return brm.columns - blockColumn*BLOCK_SIZE
	}
	return BLOCK_SIZE
}
func (brm *BlockRealMatrix) Equals(object interface{}) bool {
if object == brm {
return true
}
if _, ok := object.(RealMatrix); !ok {
return false
}
m := object.(RealMatrix)
nRows := brm.RowDimension()
nCols := brm.ColumnDimension()
if m.ColumnDimension() != nCols || m.RowDimension() != nRows {
return false
}
for row := 0; row < nRows; row++ {
for col := 0; col < nCols; col++ {
if brm.At(row, col) != m.At(row, col) {
return false
}
}
}
return true
} | block_real_matrix.go | 0.81841 | 0.721167 | block_real_matrix.go | starcoder |
package octopi
// Int2 is a 2D integer coordinate (or offset).
type Int2 struct {
	x int
	y int
}

// make_int2 constructs an Int2 from its components.
// NOTE(review): the underscore name is non-idiomatic Go; kept unchanged
// because other files in the package may reference it.
func make_int2(x int, y int) Int2 {
	return Int2{x, y}
}

// Add returns the component-wise sum of a and b.
func (a Int2) Add(b Int2) Int2 {
	return Int2{a.x + b.x, a.y + b.y}
}

// Octopus is one cell of the field: its position, its current energy level,
// and whether it is a virtual (out-of-bounds) placeholder.
type Octopus struct {
	pos     Int2
	energy  int
	virtual bool
}

// Stack is a simple LIFO stack of coordinates.
type Stack struct {
	stack []Int2
}

// Push appends o to the top of the stack.
func (s *Stack) Push(o Int2) {
	s.stack = append(s.stack, o)
}

// Pop removes and returns the top element. Panics on an empty stack.
func (s *Stack) Pop() Int2 {
	v := s.stack[len(s.stack)-1]
	s.stack = s.stack[:len(s.stack)-1]
	return v
}

// Empty reports whether the stack has no elements.
func (s *Stack) Empty() bool {
	return len(s.stack) == 0
}

// Field is a size.x-by-size.y grid of octopi, indexed area[x][y].
type Field struct {
	area [][]Octopus
	size Int2
}

// MakeField builds a Field from a rectangular grid of energy levels, with
// data[x][y] becoming the energy of the octopus at (x, y).
func MakeField(data [][]int) Field {
	var f Field
	sizeX := len(data)
	sizeY := len(data[0])
	f.Init(Int2{sizeX, sizeY})
	for x := 0; x < f.size.x; x++ {
		for y := 0; y < f.size.y; y++ {
			f.SetEnergy(Int2{x, y}, data[x][y])
		}
	}
	return f
}

// Init allocates the size.x-by-size.y grid and records the size.
//
// Fix: the row-allocation loop previously iterated size.y times instead of
// size.x, which panicked with an index-out-of-range for grids with more
// rows than columns and left trailing rows nil for grids with fewer.
func (f *Field) Init(size Int2) {
	f.area = make([][]Octopus, size.x)
	for i := range f.area {
		f.area[i] = make([]Octopus, size.y)
	}
	f.size = size
}

// Add increases the energy of the octopus at pos by energy (no bounds
// check).
func (f *Field) Add(pos Int2, energy int) {
	f.area[pos.x][pos.y].energy += energy
}

// Set overwrites the energy of the octopus at pos (no bounds check).
func (f *Field) Set(pos Int2, energy int) {
	f.area[pos.x][pos.y].energy = energy
}

// Get returns a copy of the octopus at pos. Out-of-bounds positions yield a
// zero-energy octopus flagged virtual. As a side effect, in-bounds cells
// have their pos and virtual fields refreshed in the stored grid.
func (f *Field) Get(pos Int2) Octopus {
	out_of_bounds := false
	if pos.x < 0 || pos.x >= f.size.x {
		out_of_bounds = true
	}
	if pos.y < 0 || pos.y >= f.size.y {
		out_of_bounds = true
	}
	if out_of_bounds {
		return Octopus{pos: pos, energy: 0, virtual: true}
	}
	f.area[pos.x][pos.y].pos = pos
	f.area[pos.x][pos.y].virtual = false
	return f.area[pos.x][pos.y]
}

// At returns a pointer to the octopus at pos (no bounds check).
func (f *Field) At(pos Int2) *Octopus {
	return &f.area[pos.x][pos.y]
}

// GetEnergy returns the energy at pos; out-of-bounds positions report 0.
func (f *Field) GetEnergy(pos Int2) int {
	return f.Get(pos).energy
}

// SetEnergy overwrites the energy at pos (no bounds check).
func (f *Field) SetEnergy(pos Int2, energy int) {
	// Simplified from the equivalent (*(*f).At(pos)).energy = energy.
	f.At(pos).energy = energy
}
// GetNeighbors returns the eight octopi surrounding pos, in the fixed order
// up-left, up, up-right, left, right, down-left, down, down-right.
// Out-of-bounds neighbors come back as virtual octopi (see Field.Get).
func (f *Field) GetNeighbors(pos Int2) []Octopus {
	offsets := []Int2{
		{1, -1}, {1, 0}, {1, 1},
		{0, -1}, {0, 1},
		{-1, -1}, {-1, 0}, {-1, 1},
	}
	neighbors := make([]Octopus, 0, len(offsets))
	for _, off := range offsets {
		neighbors = append(neighbors, f.Get(pos.Add(off)))
	}
	return neighbors
}
// ToString renders the field as ASCII, one row per line: digits for
// energies 0-9, 'X' for energies above 9, and 'E' for negative energies.
func (f *Field) ToString() []byte {
	buf := make([]byte, 0, (f.size.x+1)*f.size.y)
	for x := 0; x < f.size.x; x++ {
		for y := 0; y < f.size.y; y++ {
			switch o := f.At(Int2{x, y}); {
			case o.energy > 9:
				buf = append(buf, 'X')
			case o.energy < 0:
				buf = append(buf, 'E')
			default:
				buf = append(buf, byte(o.energy)+'0')
			}
		}
		buf = append(buf, '\n')
	}
	return buf
}
// SimulateStep advances the field one step of the flashing-octopus
// simulation: every octopus gains 1 energy, each octopus whose energy
// exceeds 9 "blinks" and feeds 1 energy to its eight neighbors (which can
// cascade), and finally every blinked octopus is reset to 0. It returns the
// positions of all octopi that blinked this step.
// Note: f contains slices, so mutations persist even though f is passed by
// value.
func SimulateStep(f Field) []Int2 {
	blinks := make([]Int2, 0)
	var stack Stack
	// Increase everyones energy
	for x := 0; x < f.size.x; x++ {
		for y := 0; y < f.size.y; y++ {
			pos := Int2{x, y}
			energy := f.GetEnergy(pos)
			// An octopus at 9 crosses the blink threshold after the +1;
			// seed the propagation stack with it.
			if energy == 9 {
				stack.Push(pos)
			}
			f.Add(pos, 1)
		}
	}
	// propagate blinks
	for !stack.Empty() {
		pos := stack.Pop()
		neighbors := f.GetNeighbors(pos)
		for _, n := range neighbors {
			if n.virtual { // outside the bounds
				continue
			} else if n.energy > 9 { // already blinking
				continue
			} else if n.energy == 9 { // about to blink
				stack.Push(n.pos)
			}
			f.Add(n.pos, 1)
		}
	}
	// count blinks
	for x := 0; x < f.size.x; x++ {
		for y := 0; y < f.size.y; y++ {
			o := f.At(Int2{x, y})
			// Anything still above 9 blinked this step: record and reset.
			if o.energy > 9 {
				(*o).energy = 0
				blinks = append(blinks, Int2{x, y})
			}
		}
	}
	return blinks
}
package cmd
import (
"fmt"
"sort"
"github.com/jaredbancroft/aoc2020/pkg/boarding"
"github.com/jaredbancroft/aoc2020/pkg/helpers"
"github.com/spf13/cobra"
)
// day5Cmd represents the day5 command
// day5Cmd solves Advent of Code 2020 day 5: it decodes binary-space
// partitioned boarding passes, prints the highest seat ID (part 1) and the
// single missing seat ID (part 2).
var day5Cmd = &cobra.Command{
	Use:   "day5",
	Short: "Advent of Code 2020 - Day 5: Binary Boarding",
	Long: `
Advent of Code 2020
--- Day 5: Binary Boarding ---
You board your plane only to discover a new problem: you dropped your boarding pass! You aren't sure
which seat is yours, and all of the flight attendants are busy with the flood of people that suddenly
made it through passport control.
You write a quick program to use your phone's camera to scan all of the nearby boarding passes
(your puzzle input); perhaps you can find your seat through process of elimination.
Instead of zones or groups, this airline uses binary space partitioning to seat people. A seat might
be specified like FBFBBFFRLR, where F means "front", B means "back", L means "left", and R means "right".
The first 7 characters will either be F or B; these specify exactly one of the 128 rows on the plane
(numbered 0 through 127). Each letter tells you which half of a region the given seat is in. Start with
the whole list of rows; the first letter indicates whether the seat is in the front (0 through 63) or the
back (64 through 127). The next letter indicates which half of that region the seat is in, and so on until
you're left with exactly one row.
For example, consider just the first seven characters of FBFBBFFRLR:
Start by considering the whole range, rows 0 through 127.
F means to take the lower half, keeping rows 0 through 63.
B means to take the upper half, keeping rows 32 through 63.
F means to take the lower half, keeping rows 32 through 47.
B means to take the upper half, keeping rows 40 through 47.
B keeps rows 44 through 47.
F keeps rows 44 through 45.
The final F keeps the lower of the two, row 44.
The last three characters will be either L or R; these specify exactly one of the 8 columns of seats on
the plane (numbered 0 through 7). The same process as above proceeds again, this time with only three steps.
L means to keep the lower half, while R means to keep the upper half.
For example, consider just the last 3 characters of FBFBBFFRLR:
Start by considering the whole range, columns 0 through 7.
R means to take the upper half, keeping columns 4 through 7.
L means to take the lower half, keeping columns 4 through 5.
The final R keeps the upper of the two, column 5.
So, decoding FBFBBFFRLR reveals that it is the seat at row 44, column 5.
Every seat also has a unique seat ID: multiply the row by 8, then add the column. In this example, the seat
has ID 44 * 8 + 5 = 357.
Here are some other boarding passes:
BFFFBBFRRR: row 70, column 7, seat ID 567.
FFFBBBFRRR: row 14, column 7, seat ID 119.
BBFFBBFRLL: row 102, column 4, seat ID 820.
As a sanity check, look through your list of boarding passes. What is the highest
seat ID on a boarding pass?
--- Part Two ---
Ding! The "fasten seat belt" signs have turned on. Time to find your seat.
It's a completely full flight, so your seat should be the only missing boarding pass
in your list. However, there's a catch: some of the seats at the very front and back
of the plane don't exist on this aircraft, so they'll be missing from your list as well.
Your seat wasn't at the very front or back, though; the seats with IDs +1 and -1
from yours will be in your list.
What is the ID of your seat?`,
	RunE: func(cmd *cobra.Command, args []string) error {
		codes, err := helpers.ReadStringFile(input)
		if err != nil {
			return err
		}
		// Decode each line into a boarding pass, then sort by seat ID so the
		// minimum and maximum IDs are at the slice ends.
		passes := []boarding.Pass{}
		for _, code := range codes {
			pass := boarding.NewPass(code)
			passes = append(passes, pass)
		}
		sort.Sort(boarding.BySeatID(passes))
		// Part 1: the largest seat ID.
		fmt.Println("Max: ", passes[len(passes)-1].SeatID)
		// Part 2: with exactly one seat missing, the contiguous ID range
		// spans len(passes)+1 values starting at the smallest observed ID.
		// The missing ID is the sum of that full range minus the sum of the
		// observed IDs.
		expected := 0
		for i := passes[0].SeatID; i <= len(passes)+passes[0].SeatID; i++ {
			expected = expected + i
		}
		actual := 0
		for _, pass := range passes {
			actual = actual + pass.SeatID
		}
		fmt.Println("Seat: ", expected-actual)
		return nil
	},
}
// init registers the day5 subcommand with the root command.
func init() {
	rootCmd.AddCommand(day5Cmd)
}
package output
import (
"fmt"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/output/writer"
"github.com/Jeffail/benthos/v3/lib/types"
)
//------------------------------------------------------------------------------
// init registers the elasticsearch output type, its constructor, its user
// facing documentation, and a config sanitiser in the global Constructors
// registry.
func init() {
	Constructors[TypeElasticsearch] = TypeSpec{
		constructor: NewElasticsearch,
		Description: `
Publishes messages into an Elasticsearch index. If the index does not exist then
it is created with a dynamic mapping.
Both the ` + "`id` and `index`" + ` fields can be dynamically set using function
interpolations described [here](/docs/configuration/interpolation#functions). When
sending batched messages these interpolations are performed per message part.
### AWS Credentials
By default Benthos will use a shared credentials file when connecting to AWS
services. It's also possible to set them explicitly at the component level,
allowing you to transfer data across accounts. You can find out more
[in this document](/docs/guides/aws).
If the configured target is a managed AWS Elasticsearch cluster, you may need
to set ` + "`sniff` and `healthcheck`" + ` to false for connections to succeed.`,
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			return sanitiseWithBatch(conf.Elasticsearch, conf.Elasticsearch.Batching)
		},
		Async:   true,
		Batches: true,
	}
}
//------------------------------------------------------------------------------
// NewElasticsearch creates a new Elasticsearch output type.
// NewElasticsearch creates a new Elasticsearch output type. The writer is
// wrapped in a synchronous or asynchronous writer depending on MaxInFlight,
// and in a Batcher when a batching policy is configured.
//
// Restructured with early error returns: previously a failure from
// NewWriter/NewAsyncWriter flowed through to `return w, err`, handing the
// caller a non-nil output alongside a non-nil error, and the batching
// branch shadowed err with a second declaration. Now every error path
// returns (nil, err).
func NewElasticsearch(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	elasticWriter, err := writer.NewElasticsearch(conf.Elasticsearch, log, stats)
	if err != nil {
		return nil, err
	}
	var w Type
	if conf.Elasticsearch.MaxInFlight == 1 {
		w, err = NewWriter(
			TypeElasticsearch, elasticWriter, log, stats,
		)
	} else {
		w, err = NewAsyncWriter(
			TypeElasticsearch, conf.Elasticsearch.MaxInFlight, elasticWriter, log, stats,
		)
	}
	if err != nil {
		return nil, err
	}
	if bconf := conf.Elasticsearch.Batching; !bconf.IsNoop() {
		policy, err := batch.NewPolicy(bconf, mgr, log.NewModule(".batching"), metrics.Namespaced(stats, "batching"))
		if err != nil {
			return nil, fmt.Errorf("failed to construct batch policy: %v", err)
		}
		w = NewBatcher(policy, w, log, stats)
	}
	return w, nil
}
//------------------------------------------------------------------------------ | lib/output/elasticsearch.go | 0.752468 | 0.440349 | elasticsearch.go | starcoder |
package bsonkit
import (
"bytes"
"math"
"strings"
"github.com/shopspring/decimal"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// Compare will compare two bson values and return their order according to the
// BSON type comparison order specification:
// https://docs.mongodb.com/manual/reference/bson-type-comparison-order.
//
// It returns -1, 0 or 1 if lv orders before, equal to or after rv.
func Compare(lv, rv interface{}) int {
	// get type classes (the second return value of Inspect is ignored here)
	lc, _ := Inspect(lv)
	rc, _ := Inspect(rv)

	// check class equality: values of different classes are ordered by the
	// class constant alone
	if lc > rc {
		return 1
	} else if lc < rc {
		return -1
	}

	// check value equality within the shared class
	switch lc {
	case Null:
		return 0
	case Number:
		return compareNumbers(lv, rv)
	case String:
		return compareStrings(lv, rv)
	case Document:
		return compareDocuments(lv, rv)
	case Array:
		return compareArrays(lv, rv)
	case Binary:
		return compareBinaries(lv, rv)
	case ObjectID:
		return compareObjectIDs(lv, rv)
	case Boolean:
		return compareBooleans(lv, rv)
	case Date:
		return compareDates(lv, rv)
	case Timestamp:
		return compareTimestamps(lv, rv)
	case Regex:
		return compareRegexes(lv, rv)
	default:
		// Inspect returned a class with no comparison rule
		panic("bsonkit: unreachable")
	}
}

// compareNumbers orders two numeric values (float64, int32, int64 or
// primitive.Decimal128), promoting across types as needed: int/float pairs go
// through precision-aware helpers and any pairing involving Decimal128 is
// compared in decimal space. Panics on non-numeric input.
func compareNumbers(lv, rv interface{}) int {
	switch l := lv.(type) {
	case float64:
		switch r := rv.(type) {
		case float64:
			return compareFloat64s(l, r)
		case int32:
			// int32 always converts to float64 exactly
			return compareFloat64s(l, float64(r))
		case int64:
			// int64 may lose precision as float64; use the careful helper
			return compareFloat64ToInt64(l, r)
		case primitive.Decimal128:
			return decimal.NewFromFloat(l).Cmp(d128ToDec(r))
		}
	case int32:
		switch r := rv.(type) {
		case float64:
			return compareFloat64s(float64(l), r)
		case int32:
			return compareInt32s(l, r)
		case int64:
			return compareInt64s(int64(l), r)
		case primitive.Decimal128:
			return decimal.NewFromInt32(l).Cmp(d128ToDec(r))
		}
	case int64:
		switch r := rv.(type) {
		case float64:
			return compareInt64ToFloat64(l, r)
		case int32:
			return compareInt64s(l, int64(r))
		case int64:
			return compareInt64s(l, r)
		case primitive.Decimal128:
			return decimal.NewFromInt(l).Cmp(d128ToDec(r))
		}
	case primitive.Decimal128:
		switch r := rv.(type) {
		case float64:
			return d128ToDec(l).Cmp(decimal.NewFromFloat(r))
		case int32:
			return d128ToDec(l).Cmp(decimal.NewFromInt32(r))
		case int64:
			return d128ToDec(l).Cmp(decimal.NewFromInt(r))
		case primitive.Decimal128:
			return d128ToDec(l).Cmp(d128ToDec(r))
		}
	}
	panic("bsonkit: unreachable")
}
// compareStrings orders two string values lexicographically, returning -1, 0
// or 1.
func compareStrings(lv, rv interface{}) int {
	return strings.Compare(lv.(string), rv.(string))
}
// compareDocuments orders two bson.D documents element-wise: first by key,
// then by value, with a shorter document that is a prefix of the longer one
// ordering first.
func compareDocuments(lv, rv interface{}) int {
	// get documents
	l := lv.(bson.D)
	r := rv.(bson.D)

	// handle emptiness up front
	if len(l) == 0 {
		if len(r) == 0 {
			return 0
		}
		return -1
	} else if len(r) == 0 {
		return 1
	}

	// compare document elements pair-wise until a difference or exhaustion
	for i := 0; ; i++ {
		// handle exhaustion (shorter prefix orders first)
		if i == len(l) {
			if i == len(r) {
				return 0
			}
			return -1
		} else if i == len(r) {
			return 1
		}

		// compare keys
		res := strings.Compare(l[i].Key, r[i].Key)
		if res != 0 {
			return res
		}

		// compare values (recursively via Compare)
		res = Compare(l[i].Value, r[i].Value)
		if res != 0 {
			return res
		}
	}
}

// compareArrays orders two bson.A arrays element-wise, with a shorter array
// that is a prefix of the longer one ordering first.
func compareArrays(lv, rv interface{}) int {
	// get array
	l := lv.(bson.A)
	r := rv.(bson.A)

	// handle emptiness up front
	if len(l) == 0 {
		if len(r) == 0 {
			return 0
		}
		return -1
	} else if len(r) == 0 {
		return 1
	}

	// compare array elements pair-wise until a difference or exhaustion
	for i := 0; ; i++ {
		// handle exhaustion (shorter prefix orders first)
		if i == len(l) {
			if i == len(r) {
				return 0
			}
			return -1
		} else if i == len(r) {
			return 1
		}

		// compare elements (recursively via Compare)
		res := Compare(l[i], r[i])
		if res != 0 {
			return res
		}
	}
}

// compareBinaries orders two primitive.Binary values: by data length first,
// then by subtype, then byte-wise by content.
func compareBinaries(lv, rv interface{}) int {
	// get binaries
	l := lv.(primitive.Binary)
	r := rv.(primitive.Binary)

	// compare length
	if len(l.Data) > len(r.Data) {
		return 1
	} else if len(l.Data) < len(r.Data) {
		return -1
	}

	// compare sub type
	if l.Subtype > r.Subtype {
		return 1
	} else if l.Subtype < r.Subtype {
		return -1
	}

	// compare bytes
	res := bytes.Compare(l.Data, r.Data)
	return res
}
// compareObjectIDs orders two primitive.ObjectID values byte-wise.
func compareObjectIDs(lv, rv interface{}) int {
	l, r := lv.(primitive.ObjectID), rv.(primitive.ObjectID)
	return bytes.Compare(l[:], r[:])
}
// compareBooleans orders two bool values with false sorting before true.
func compareBooleans(lv, rv interface{}) int {
	l := lv.(bool)
	r := rv.(bool)
	switch {
	case l == r:
		return 0
	case l:
		return 1
	default:
		return -1
	}
}
// compareDates orders two primitive.DateTime values chronologically
// (DateTime is milliseconds since the Unix epoch, so plain integer
// comparison suffices).
func compareDates(lv, rv interface{}) int {
	// get times
	l := lv.(primitive.DateTime)
	r := rv.(primitive.DateTime)

	// compare times
	if l == r {
		return 0
	} else if l > r {
		return 1
	} else {
		return -1
	}
}

// compareTimestamps orders two primitive.Timestamp values by delegating to
// the driver's CompareTimestamp.
func compareTimestamps(lv, rv interface{}) int {
	// get timestamps
	l := lv.(primitive.Timestamp)
	r := rv.(primitive.Timestamp)

	// compare timestamps
	ret := primitive.CompareTimestamp(l, r)
	return ret
}
// compareRegexes orders two primitive.Regex values: first by pattern, then by
// options.
func compareRegexes(lv, rv interface{}) int {
	// get regexes
	l := lv.(primitive.Regex)
	r := rv.(primitive.Regex)

	// compare patterns; any difference in either direction decides the order.
	// The previous check returned early only for ret > 0, which let a
	// "pattern less than" result be silently overridden by the options
	// comparison below.
	if ret := strings.Compare(l.Pattern, r.Pattern); ret != 0 {
		return ret
	}

	// compare options
	return strings.Compare(l.Options, r.Options)
}
// compareInt32s returns -1, 0 or 1 depending on whether l is less than, equal
// to or greater than r.
func compareInt32s(l int32, r int32) int {
	switch {
	case l < r:
		return -1
	case l > r:
		return 1
	default:
		return 0
	}
}
func compareInt64s(l int64, r int64) int {
if l == r {
return 0
} else if l > r {
return 1
}
return -1
}
func compareFloat64s(l float64, r float64) int {
if l == r {
return 0
} else if l > r {
return 1
} else if l < r {
return -1
}
// NaN values are smaller
if math.IsNaN(l) {
if math.IsNaN(r) {
return 0
}
return -1
}
return 1
}
func compareInt64ToFloat64(l int64, r float64) int {
// see the official mongodb implementation for details:
// https://github.com/mongodb/mongo/blob/master/src/mongo/base/compare_numbers.h#L79
// define constants
const maxPreciseFloat64 = int64(1 << 53)
const boundOfLongRange = float64(2 << 63)
// non-numbers are always smaller
if math.IsNaN(r) {
return 1
}
// compare as floats64 if not too big
if l <= maxPreciseFloat64 && l >= -maxPreciseFloat64 {
return compareFloat64s(float64(l), r)
}
// large doubles (including +/- Inf) are strictly > or < all longs
if r >= boundOfLongRange {
return -1
} else if r < -boundOfLongRange {
return 1
}
return compareInt64s(l, int64(r))
}
func compareFloat64ToInt64(l float64, r int64) int {
return -compareInt64ToFloat64(r, l)
} | bsonkit/compare.go | 0.710226 | 0.441673 | compare.go | starcoder |
package nutriscore
// ScoreType is the type of the scored product
type ScoreType int
const (
// Food is used when calculating nutritional score for general food items
Food ScoreType = iota
// Beverage is used when calculating nutritional score for beverages
Beverage
// Water is used when calculating nutritional score for water
Water
// Cheese is used for calculating the nutritional score for cheeses
Cheese
)
// scoreToLetter maps a points bucket (0 = best) to its Nutri-Score letter.
var scoreToLetter = []string{"A", "B", "C", "D", "E"}

// Negative-point threshold tables per 100g, ordered from the highest
// threshold (10 points) down to the lowest (1 point); see getPointsFromRange
// for how a value is mapped onto them.
var energyLevels = []float64{3350, 3015, 2680, 2345, 2010, 1675, 1340, 1005, 670, 335}
var sugarsLevels = []float64{45, 40, 36, 31, 27, 22.5, 18, 13.5, 9, 4.5}
var saturatedFattyAcidsLevels = []float64{10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
var sodiumLevels = []float64{900, 810, 720, 630, 540, 450, 360, 270, 180, 90}

// Positive-point thresholds (5 points down to 1).
var fibreLevels = []float64{4.7, 3.7, 2.8, 1.9, 0.9}
var proteinLevels = []float64{8, 6.4, 4.8, 3.2, 1.6}

// Beverages use their own, stricter energy and sugar scales.
var energyLevelsBeverage = []float64{270, 240, 210, 180, 150, 120, 90, 60, 30, 0}
var sugarsLevelsBeverage = []float64{13.5, 12, 10.5, 9, 7.5, 6, 4.5, 3, 1.5, 0}
// NutritionalScore contains the numeric nutritional score value, its point
// components, and the type of the scored product.
type NutritionalScore struct {
	Value     int       // final score (negative minus the applicable positive points)
	Positive  int       // total positive points (fruits, fibre, protein)
	Negative  int       // total negative points (energy, sugars, saturated fat, sodium)
	ScoreType ScoreType // product category the score was computed for
}
// EnergyKJ represents the energy density in kJ/100g
type EnergyKJ float64

// SugarGram represents amount of sugars in grams/100g
type SugarGram float64

// SaturatedFattyAcidsGram represents amount of saturated fatty acids in grams/100g
type SaturatedFattyAcidsGram float64

// SodiumMilligram represents amount of sodium in mg/100g
type SodiumMilligram float64

// FruitsPercent represents fruits, vegetables, pulses, nuts, and rapeseed, walnut and olive oils
// as percentage of the total
type FruitsPercent float64

// FibreGram represents amount of fibre in grams/100g
type FibreGram float64

// ProteinGram represents amount of protein in grams/100g
type ProteinGram float64

// EnergyFromKcal converts an energy density given in kcal/100g to EnergyKJ.
func EnergyFromKcal(kcal float64) EnergyKJ {
	// 1 kcal == 4.184 kJ
	const kJPerKcal = 4.184
	return EnergyKJ(kcal * kJPerKcal)
}

// SodiumFromSalt converts a salt content in mg/100g to its sodium content.
func SodiumFromSalt(saltMg float64) SodiumMilligram {
	// salt is 40% sodium by mass, i.e. sodium = salt / 2.5
	const saltToSodiumRatio = 2.5
	return SodiumMilligram(saltMg / saltToSodiumRatio)
}
// GetPoints returns the energy points, using the stricter beverage scale when
// scoring a beverage.
func (e EnergyKJ) GetPoints(st ScoreType) int {
	if st == Beverage {
		return getPointsFromRange(float64(e), energyLevelsBeverage)
	}
	return getPointsFromRange(float64(e), energyLevels)
}

// GetPoints returns the sugar points, using the stricter beverage scale when
// scoring a beverage.
func (s SugarGram) GetPoints(st ScoreType) int {
	if st == Beverage {
		return getPointsFromRange(float64(s), sugarsLevelsBeverage)
	}
	return getPointsFromRange(float64(s), sugarsLevels)
}

// GetPoints returns the saturated-fatty-acid points (same scale for all
// product types).
func (sfa SaturatedFattyAcidsGram) GetPoints(st ScoreType) int {
	return getPointsFromRange(float64(sfa), saturatedFattyAcidsLevels)
}

// GetPoints returns the sodium points (same scale for all product types).
func (s SodiumMilligram) GetPoints(st ScoreType) int {
	return getPointsFromRange(float64(s), sodiumLevels)
}
// GetPoints returns the fruit/vegetable points. Beverages are rewarded more
// strongly for fruit content than general food at the same percentage bands.
func (f FruitsPercent) GetPoints(st ScoreType) int {
	beverage := st == Beverage
	switch {
	case f > 80:
		if beverage {
			return 10
		}
		return 5
	case f > 60:
		if beverage {
			return 4
		}
		return 2
	case f > 40:
		if beverage {
			return 2
		}
		return 1
	default:
		return 0
	}
}
// GetPoints returns the fibre points (same scale for all product types).
func (f FibreGram) GetPoints(st ScoreType) int {
	return getPointsFromRange(float64(f), fibreLevels)
}

// GetPoints returns the protein points (same scale for all product types).
func (p ProteinGram) GetPoints(st ScoreType) int {
	return getPointsFromRange(float64(p), proteinLevels)
}
// NutritionalData represents the source nutritional data used for the calculation
type NutritionalData struct {
	Energy              EnergyKJ
	Sugars              SugarGram
	SaturatedFattyAcids SaturatedFattyAcidsGram
	Sodium              SodiumMilligram
	Fruits              FruitsPercent
	Fibre               FibreGram
	Protein             ProteinGram
	IsWater             bool // NOTE(review): unused by GetNutritionalScore, which keys off the ScoreType argument — confirm
}

// GetNutritionalScore calculates the nutritional score for nutritional data n of type st
func GetNutritionalScore(n NutritionalData, st ScoreType) NutritionalScore {
	value := 0
	positive := 0
	negative := 0

	// Water is always graded A page 30
	if st != Water {
		fruitPoints := n.Fruits.GetPoints(st)
		fibrePoints := n.Fibre.GetPoints(st)
		negative = n.Energy.GetPoints(st) + n.Sugars.GetPoints(st) + n.SaturatedFattyAcids.GetPoints(st) + n.Sodium.GetPoints(st)
		positive = fruitPoints + fibrePoints + n.Protein.GetPoints(st)
		if st == Cheese {
			// Cheeses always use (negative - positive) page 29
			value = negative - positive
		} else {
			// page 27: once negative points reach 11 and fruit points stay
			// below 5, protein points may no longer offset the score
			if negative >= 11 && fruitPoints < 5 {
				value = negative - fibrePoints - fruitPoints
			} else {
				value = negative - positive
			}
		}
	}

	return NutritionalScore{
		Value:     value,
		Positive:  positive,
		Negative:  negative,
		ScoreType: st,
	}
}

// GetNutriScore returns the Nutri-Score rating letter for the score.
func (ns NutritionalScore) GetNutriScore() string {
	if ns.ScoreType == Food {
		// food thresholds: A <= -1, B 0..2, C 3..10, D 11..18, E >= 19
		return scoreToLetter[getPointsFromRange(float64(ns.Value), []float64{18, 10, 2, -1})]
	}
	if ns.ScoreType == Water {
		// water is always rated A
		return scoreToLetter[0]
	}
	// all other types (including Cheese) fall through to the beverage
	// thresholds — NOTE(review): confirm this is intended for Cheese
	return scoreToLetter[getPointsFromRange(float64(ns.Value), []float64{9, 5, 1, -2})]
}
func getPointsFromRange(v float64, steps []float64) int {
lenSteps := len(steps)
for i, l := range steps {
if v > l {
return lenSteps - i
}
}
return 0
} | nutriscore.go | 0.699152 | 0.463505 | nutriscore.go | starcoder |
// Package checks contains checks for differentially private functions.
package checks
import (
"fmt"
"math"
log "github.com/golang/glog"
)
// CheckEpsilonVeryStrict returns an error if ε is +∞ or less than 2⁻⁵⁰.
func CheckEpsilonVeryStrict(epsilon float64) error {
if epsilon < math.Exp2(-50.0) || math.IsInf(epsilon, 0) || math.IsNaN(epsilon) {
return fmt.Errorf("Epsilon is %f, must be at least 2^-50 and finite", epsilon)
}
return nil
}
// CheckEpsilonStrict returns an error if ε is nonpositive or +∞.
func CheckEpsilonStrict(epsilon float64) error {
if epsilon <= 0 || math.IsInf(epsilon, 0) || math.IsNaN(epsilon) {
return fmt.Errorf("Epsilon is %f, must be strictly positive and finite", epsilon)
}
return nil
}
// CheckEpsilon returns an error if ε is strictly negative or +∞.
func CheckEpsilon(epsilon float64) error {
if epsilon < 0 || math.IsInf(epsilon, 0) || math.IsNaN(epsilon) {
return fmt.Errorf("Epsilon is %f, must be nonnegative and finite", epsilon)
}
return nil
}
// CheckDelta returns an error if δ is negative or greater than or equal to 1.
func CheckDelta(delta float64) error {
if math.IsNaN(delta) {
return fmt.Errorf("Delta is %e, cannot be NaN", delta)
}
if delta < 0 {
return fmt.Errorf("Delta is %e, cannot be negative", delta)
}
if delta >= 1 {
return fmt.Errorf("Delta is %e, must be strictly less than 1", delta)
}
return nil
}
// CheckDeltaStrict returns an error if δ is nonpositive or greater than or equal to 1.
func CheckDeltaStrict(delta float64) error {
if math.IsNaN(delta) {
return fmt.Errorf("Delta is %e, cannot be NaN", delta)
}
if delta <= 0 {
return fmt.Errorf("Delta is %e, must be strictly positive", delta)
}
if delta >= 1 {
return fmt.Errorf("Delta is %e, must be strictly less than 1", delta)
}
return nil
}
// CheckNoDelta returns an error if δ is non-zero.
func CheckNoDelta(delta float64) error {
if delta != 0 {
return fmt.Errorf("Delta is %e, must be 0", delta)
}
return nil
}
// CheckThresholdDelta returns an error if δ_threshold is nonpositive or greater than or
// equal to 1 or δ_threshold+δ_noise is greater than or equal to 1.
func CheckThresholdDelta(thresholdDelta, noiseDelta float64) error {
if math.IsNaN(thresholdDelta) {
return fmt.Errorf("ThresholdDelta is %e, cannot be NaN", thresholdDelta)
}
if thresholdDelta <= 0 {
return fmt.Errorf("ThresholdDelta is %e, must be strictly positive", thresholdDelta)
}
if thresholdDelta >= 1 {
return fmt.Errorf("ThresholdDelta is %e, must be strictly less than 1", thresholdDelta)
}
if thresholdDelta+noiseDelta >= 1 {
return fmt.Errorf("ThresholdDelta+NoiseDelta is %e, must be strictly less than 1", thresholdDelta+noiseDelta)
}
return nil
}
// CheckL0Sensitivity returns an error if l0Sensitivity is nonpositive.
func CheckL0Sensitivity(l0Sensitivity int64) error {
if l0Sensitivity <= 0 {
return fmt.Errorf("L0Sensitivity is %d, must be strictly positive", l0Sensitivity)
}
return nil
}
// CheckLInfSensitivity returns an error if lInfSensitivity is nonpositive or +∞.
func CheckLInfSensitivity(lInfSensitivity float64) error {
if lInfSensitivity <= 0 || math.IsInf(lInfSensitivity, 0) || math.IsNaN(lInfSensitivity) {
return fmt.Errorf("LInfSensitivity is %f, must be strictly positive and finite", lInfSensitivity)
}
return nil
}
// CheckBoundsInt64 returns an error if lower is larger than upper, and ensures it won't lead to sensitivity overflow.
//
// MinInt64 is rejected for both bounds because downstream sensitivity
// computations would overflow on it (as the error message states).
func CheckBoundsInt64(lower, upper int64) error {
	if lower == math.MinInt64 || upper == math.MinInt64 {
		return fmt.Errorf("Lower bound (%d) and upper bound (%d) must be strictly larger than MinInt64=%d to avoid sensitivity overflow", lower, upper, math.MinInt64)
	}
	if lower > upper {
		return fmt.Errorf("Upper bound (%d) must be larger than lower bound (%d)", upper, lower)
	}
	if lower == upper {
		// equal bounds are legal but almost certainly a misconfiguration,
		// so warn instead of failing
		log.Warningf("Lower bound is equal to upper bound: all added elements will be clamped to %d", upper)
	}
	return nil
}

// CheckBoundsInt64IgnoreOverflows returns an error if lower is larger than upper but ignores sensitivity overflows.
// This is used when noise is unrecognised.
func CheckBoundsInt64IgnoreOverflows(lower, upper int64) error {
	if lower > upper {
		return fmt.Errorf("Upper bound (%d) must be larger than lower bound (%d)", upper, lower)
	}
	if lower == upper {
		log.Warningf("Lower bound is equal to upper bound: all added elements will be clamped to %d", upper)
	}
	return nil
}

// CheckBoundsFloat64 returns an error if lower is larger than upper, or if either bound is NaN or ±∞.
func CheckBoundsFloat64(lower, upper float64) error {
	if math.IsNaN(lower) {
		return fmt.Errorf("Lower bound cannot be NaN")
	}
	if math.IsNaN(upper) {
		return fmt.Errorf("Upper bound cannot be NaN")
	}
	if math.IsInf(lower, 0) {
		return fmt.Errorf("Lower bound cannot be infinity")
	}
	if math.IsInf(upper, 0) {
		return fmt.Errorf("Upper bound cannot be infinity")
	}
	if lower > upper {
		return fmt.Errorf("Upper bound (%f) must be larger than lower bound (%f)", upper, lower)
	}
	if lower == upper {
		log.Warningf("Lower bound is equal to upper bound: all added elements will be clamped to %f", upper)
	}
	return nil
}
// CheckBoundsFloat64IgnoreOverflows returns an error if lower is larger than
// upper or either bound is NaN, but accepts either bound being ±∞.
func CheckBoundsFloat64IgnoreOverflows(lower, upper float64) error {
	if math.IsNaN(lower) {
		return fmt.Errorf("Lower bound cannot be NaN")
	}
	if math.IsNaN(upper) {
		return fmt.Errorf("Upper bound cannot be NaN")
	}
	if lower > upper {
		// Message format kept consistent with CheckBoundsFloat64:
		// "lower bound (%f)" — the previous "bound(%f)" was missing a space.
		return fmt.Errorf("Upper bound (%f) must be larger than lower bound (%f)", upper, lower)
	}
	if lower == upper {
		log.Warningf("Lower bound is equal to upper bound: all added elements will be clamped to %f", upper)
	}
	return nil
}
// CheckBoundsFloat64AsInt64 returns an error if either bound is NaN or cannot
// be represented as an int64, or if the resulting int64 bounds are invalid.
func CheckBoundsFloat64AsInt64(lower, upper float64) error {
	if math.IsNaN(lower) {
		return fmt.Errorf("Lower bound cannot be NaN")
	}
	if math.IsNaN(upper) {
		return fmt.Errorf("Upper bound cannot be NaN")
	}
	// float64(math.MaxInt64) rounds UP to 2^63, one past MaxInt64, so a bound
	// equal to it must be rejected too (>= rather than >) or the int64
	// conversions below would overflow. float64(math.MinInt64) is exactly
	// -2^63 and therefore remains a valid inclusive lower limit.
	maxInt := float64(math.MaxInt64)
	minInt := float64(math.MinInt64)
	if lower < minInt || lower >= maxInt {
		return fmt.Errorf("Lower bound (%f) must be within [MinInt64=%f, MaxInt64=%f]", lower, minInt, maxInt)
	}
	if upper < minInt || upper >= maxInt {
		return fmt.Errorf("Upper bound (%f) must be within [MinInt64=%f, MaxInt64=%f]", upper, minInt, maxInt)
	}
	return CheckBoundsInt64(int64(lower), int64(upper))
}
// CheckMaxContributionsPerPartition returns an error if maxContributionsPerPartition is nonpositive.
func CheckMaxContributionsPerPartition(maxContributionsPerPartition int64) error {
if maxContributionsPerPartition <= 0 {
return fmt.Errorf("MaxContributionsPerPartition (%d) must be set to a positive value", maxContributionsPerPartition)
}
return nil
}
// CheckAlpha returns an error if the supplied alpha is not between 0 and 1.
func CheckAlpha(alpha float64) error {
if alpha <= 0 || alpha >= 1 || math.IsNaN(alpha) || math.IsInf(alpha, 0) {
return fmt.Errorf("Alpha is %f, must be within (0, 1) and finite", alpha)
}
return nil
}
// CheckBoundsNotEqual returns an error if lower and upper bounds are equal.
func CheckBoundsNotEqual(lower, upper float64) error {
if lower == upper {
return fmt.Errorf("Lower and upper bounds are both %f, they cannot be equal to each other", lower)
}
return nil
}
// CheckTreeHeight returns an error if treeHeight is less than 1.
func CheckTreeHeight(treeHeight int) error {
if treeHeight < 1 {
return fmt.Errorf("Tree Height is %d, must be at least 1", treeHeight)
}
return nil
}
// CheckBranchingFactor returns an error if branchingFactor is less than 2.
func CheckBranchingFactor(branchingFactor int) error {
if branchingFactor < 2 {
return fmt.Errorf("Branching Factor is %d, must be at least 2", branchingFactor)
}
return nil
} | go/checks/checks.go | 0.868771 | 0.607285 | checks.go | starcoder |
package execute
import (
"fmt"
"strings"
"github.com/influxdata/flux"
"github.com/influxdata/flux/values"
)
// groupKey is the in-memory implementation of flux.GroupKey: a list of key
// columns paired with one value per column.
type groupKey struct {
	cols   []flux.ColMeta
	values []values.Value
}

// NewGroupKey constructs a flux.GroupKey from parallel slices of column
// metadata and values (cols[i] describes values[i]).
func NewGroupKey(cols []flux.ColMeta, values []values.Value) flux.GroupKey {
	return &groupKey{
		cols:   cols,
		values: values,
	}
}

// Cols returns the key's column metadata.
func (k *groupKey) Cols() []flux.ColMeta {
	return k.cols
}

// Values returns the key's values, parallel to Cols.
func (k *groupKey) Values() []values.Value {
	return k.values
}

// HasCol reports whether the key contains a column with the given label.
func (k *groupKey) HasCol(label string) bool {
	return ColIdx(label, k.cols) >= 0
}

// LabelValue returns the value for the labeled column, or nil if the column
// is not part of the key.
func (k *groupKey) LabelValue(label string) values.Value {
	if !k.HasCol(label) {
		return nil
	}
	return k.Value(ColIdx(label, k.cols))
}

// IsNull reports whether the j-th key value is null.
func (k *groupKey) IsNull(j int) bool {
	return k.values[j].IsNull()
}

// Value returns the j-th key value.
func (k *groupKey) Value(j int) values.Value {
	return k.values[j]
}

// The typed accessors below return the j-th value coerced to the named type;
// behavior for a mismatched column type is delegated to values.Value.

func (k *groupKey) ValueBool(j int) bool {
	return k.values[j].Bool()
}

func (k *groupKey) ValueUInt(j int) uint64 {
	return k.values[j].UInt()
}

func (k *groupKey) ValueInt(j int) int64 {
	return k.values[j].Int()
}

func (k *groupKey) ValueFloat(j int) float64 {
	return k.values[j].Float()
}

func (k *groupKey) ValueString(j int) string {
	return k.values[j].Str()
}

func (k *groupKey) ValueDuration(j int) Duration {
	return k.values[j].Duration()
}

func (k *groupKey) ValueTime(j int) Time {
	return k.values[j].Time()
}

// Equal reports whether the key equals o (identical columns, equal values).
func (k *groupKey) Equal(o flux.GroupKey) bool {
	return groupKeyEqual(k, o)
}

// Less reports whether the key sorts before o.
func (k *groupKey) Less(o flux.GroupKey) bool {
	return groupKeyLess(k, o)
}

// String renders the key as {label=value,...} for debugging.
func (k *groupKey) String() string {
	var b strings.Builder
	b.WriteRune('{')
	for j, c := range k.cols {
		if j != 0 {
			b.WriteRune(',')
		}
		fmt.Fprintf(&b, "%s=%v", c.Label, k.values[j])
	}
	b.WriteRune('}')
	return b.String()
}
// groupKeyEqual reports whether two group keys have identical column schemas
// and equal values.
func groupKeyEqual(a, b flux.GroupKey) bool {
	aCols := a.Cols()
	bCols := b.Cols()
	if len(aCols) != len(bCols) {
		return false
	}
	for j, c := range aCols {
		// column label and type must match exactly
		if aCols[j] != bCols[j] {
			return false
		}
		if a.IsNull(j) && b.IsNull(j) {
			// Both key columns are null, consider them equal
			// So that rows are assigned to the same table.
			continue
		} else if a.IsNull(j) || b.IsNull(j) {
			return false
		}
		switch c.Type {
		case flux.TBool:
			if a.ValueBool(j) != b.ValueBool(j) {
				return false
			}
		case flux.TInt:
			if a.ValueInt(j) != b.ValueInt(j) {
				return false
			}
		case flux.TUInt:
			if a.ValueUInt(j) != b.ValueUInt(j) {
				return false
			}
		case flux.TFloat:
			if a.ValueFloat(j) != b.ValueFloat(j) {
				return false
			}
		case flux.TString:
			if a.ValueString(j) != b.ValueString(j) {
				return false
			}
		case flux.TTime:
			if a.ValueTime(j) != b.ValueTime(j) {
				return false
			}
		}
		// column types without a case above contribute nothing to the
		// comparison and are treated as equal
	}
	return true
}

// groupKeyLess determines if the former key is lexicographically less than the
// latter. Columns are compared by label, then type, then value; null values
// sort before any non-null value.
func groupKeyLess(a, b flux.GroupKey) bool {
	aCols := a.Cols()
	bCols := b.Cols()
	min := len(aCols)
	if min > len(bCols) {
		min = len(bCols)
	}
	for j := 0; j < min; j++ {
		if av, bv := aCols[j].Label, bCols[j].Label; av != bv {
			return av < bv
		}
		if av, bv := aCols[j].Type, bCols[j].Type; av != bv {
			return av < bv
		}
		if av, bv := a.Value(j), b.Value(j); av.IsNull() && bv.IsNull() {
			return false
		} else if av.IsNull() {
			// consider null values to be less than any value
			return true
		} else if bv.IsNull() {
			return false
		}
		switch aCols[j].Type {
		case flux.TBool:
			// NOTE(review): returning av orders true BEFORE false
			// (Less(true, false) == true). This is a consistent total order
			// but the opposite of the usual false < true convention —
			// confirm it is intentional before changing.
			if av, bv := a.ValueBool(j), b.ValueBool(j); av != bv {
				return av
			}
		case flux.TInt:
			if av, bv := a.ValueInt(j), b.ValueInt(j); av != bv {
				return av < bv
			}
		case flux.TUInt:
			if av, bv := a.ValueUInt(j), b.ValueUInt(j); av != bv {
				return av < bv
			}
		case flux.TFloat:
			if av, bv := a.ValueFloat(j), b.ValueFloat(j); av != bv {
				return av < bv
			}
		case flux.TString:
			if av, bv := a.ValueString(j), b.ValueString(j); av != bv {
				return av < bv
			}
		case flux.TTime:
			if av, bv := a.ValueTime(j), b.ValueTime(j); av != bv {
				return av < bv
			}
		}
	}
	// In this case, min columns have been compared and found to be equal.
	// Whichever key has the greater number of columns is lexicographically
	// greater than the other.
	return len(aCols) < len(bCols)
}
package decisiontrees
import (
"code.google.com/p/goprotobuf/proto"
"fmt"
pb "github.com/ajtulloch/decisiontrees/protobufs"
"github.com/golang/glog"
"math"
"sort"
)
// labelledPrediction pairs a model prediction with its observed binary label.
type labelledPrediction struct {
	Label      bool
	Prediction float64
}

// labelledPredictions implements sort.Interface, ordering elements by
// ascending prediction value.
type labelledPredictions []labelledPrediction

func (l labelledPredictions) Len() int { return len(l) }

func (l labelledPredictions) Swap(i int, j int) { l[i], l[j] = l[j], l[i] }

func (l labelledPredictions) Less(i int, j int) bool { return l[i].Prediction < l[j].Prediction }
// ROC sorts the predictions in ascending order (mutating the receiver) and
// returns the fraction of (positive, negative) example pairs in which the
// negative example received the higher (or, for ties, equal) prediction.
//
// NOTE(review): under the usual "area under the ROC curve" definition this is
// 1 - AUC — confirm the intended orientation. Also, when either class is
// absent the result is 0/0 == NaN.
func (l labelledPredictions) ROC() float64 {
	sort.Sort(l)
	numPositives, numNegatives, weightedSum := 0, 0, 0
	for _, e := range l {
		if e.Label {
			numPositives += 1
		} else {
			numNegatives += 1
			// every positive counted so far has a prediction <= this negative's
			weightedSum += numPositives
		}
	}
	return float64(weightedSum) / float64(numPositives*numNegatives)
}
// String renders the headline metrics for logging/debugging. Note that the
// ROC() call sorts the receiver as a side effect.
func (l labelledPredictions) String() string {
	return fmt.Sprintf(
		"Size: %v\nROC: %v\nCalibration: %v\nNormalized Entropy: %v\nPositives: %v",
		l.Len(),
		l.ROC(),
		l.Calibration(),
		l.NormalizedEntropy(),
		l.numPositives())
}

// numPositives counts the examples carrying a positive label.
func (l labelledPredictions) numPositives() int {
	s := 0
	for _, e := range l {
		if e.Label {
			s += 1
		}
	}
	return s
}
// LogScore returns the mean log2-likelihood of the observed labels under the
// predictions. Predictions are assumed to be probabilities in (0, 1) — a
// prediction of exactly 0 or 1 contributes a -Inf term, and an empty receiver
// yields 0/0 == NaN — TODO confirm inputs are clamped upstream.
func (l labelledPredictions) LogScore() float64 {
	cumulativeLogLoss := 0.0
	for _, e := range l {
		if e.Label {
			cumulativeLogLoss += math.Log2(e.Prediction)
		} else {
			cumulativeLogLoss += math.Log2(1 - e.Prediction)
		}
	}
	return cumulativeLogLoss / float64(l.Len())
}

// Calibration returns the ratio of the summed predictions to the number of
// observed positives (1.0 means perfectly calibrated on average; NaN when
// there are no positives).
func (l labelledPredictions) Calibration() float64 {
	numPositives, sumPredictions := 0, 0.0
	for _, e := range l {
		sumPredictions += e.Prediction
		if e.Label {
			numPositives += 1
		}
	}
	return float64(sumPredictions) / float64(numPositives)
}

// NormalizedEntropy returns LogScore normalized by the entropy of the
// positive base rate p. Degenerate when p is exactly 0 or 1 (the denominator
// involves 0 * -Inf == NaN) — assumed not to occur in practice.
func (l labelledPredictions) NormalizedEntropy() float64 {
	numPositives := 0
	for _, e := range l {
		if e.Label {
			numPositives += 1
		}
	}
	p := float64(numPositives) / float64(l.Len())
	return l.LogScore() / (p*math.Log2(p) + (1-p)*math.Log2(1-p))
}
// computeEpochResult scores evaluator e against examples and bundles the
// resulting metrics into a pb.EpochResult.
func computeEpochResult(e Evaluator, examples Examples) pb.EpochResult {
	scored := make(labelledPredictions, 0, len(examples))
	for _, ex := range examples {
		scored = append(scored, labelledPrediction{
			Label:      ex.GetLabel() > 0,
			Prediction: e.Evaluate(ex.GetFeatures()),
		})
	}
	return pb.EpochResult{
		Roc:               proto.Float64(scored.ROC()),
		LogScore:          proto.Float64(scored.LogScore()),
		NormalizedEntropy: proto.Float64(scored.NormalizedEntropy()),
		Calibration:       proto.Float64(scored.Calibration()),
	}
}
// LearningCurve computes the progressive learning curve after each epoch on the
// given examples
func LearningCurve(f *pb.Forest, e Examples) *pb.TrainingResults {
tr := &pb.TrainingResults{
EpochResults: make([]*pb.EpochResult, 0, len(f.GetTrees())),
}
for i := range f.GetTrees() {
evaluator, err := NewRescaledFastForestEvaluator(&pb.Forest{
Trees: f.GetTrees()[:i],
Rescaling: f.GetRescaling().Enum(),
})
if err != nil {
glog.Fatal(err)
}
er := computeEpochResult(evaluator, e)
tr.EpochResults = append(tr.EpochResults, &er)
}
return tr
} | evaluation_metrics.go | 0.727975 | 0.483344 | evaluation_metrics.go | starcoder |
package cbor
import (
"encoding/json"
"fmt"
"math"
)
// AppendNull inserts a 'Nil' object into the dst byte array.
func AppendNull(dst []byte) []byte {
	return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull))
}

// AppendBeginMarker inserts a map start into the dst byte array. The map is
// indefinite-length and must eventually be terminated by AppendEndMarker.
func AppendBeginMarker(dst []byte) []byte {
	return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount))
}

// AppendEndMarker inserts a map end (the break code) into the dst byte array.
func AppendEndMarker(dst []byte) []byte {
	return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
}

// AppendBool encodes and inserts a boolean value into the dst byte array.
func AppendBool(dst []byte, val bool) []byte {
	b := additionalTypeBoolFalse
	if val {
		b = additionalTypeBoolTrue
	}
	return append(dst, byte(majorTypeSimpleAndFloat|b))
}
// AppendBools encodes and inserts an array of boolean values into the dst byte array.
func AppendBools(dst []byte, vals []bool) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		// an empty slice is emitted as an indefinite-length array followed
		// immediately by a break marker
		return AppendArrayEnd(AppendArrayStart(dst))
	}
	if l <= additionalMax {
		// small lengths fit in the additional-info bits of the head byte
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		// longer arrays carry a multi-byte length prefix
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = AppendBool(dst, v)
	}
	return dst
}

// AppendInt encodes and inserts an integer value into the dst byte array.
func AppendInt(dst []byte, val int) []byte {
	major := majorTypeUnsignedInt
	contentVal := val
	if val < 0 {
		// CBOR encodes a negative integer n as the unsigned value -n-1 under
		// the negative-int major type
		major = majorTypeNegativeInt
		contentVal = -val - 1
	}
	if contentVal <= additionalMax {
		lb := byte(contentVal)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(contentVal))
	}
	return dst
}
// AppendInts encodes and inserts an array of integer values into the dst byte array.
func AppendInts(dst []byte, vals []int) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		// empty slices become an indefinite-length array plus break marker
		return AppendArrayEnd(AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = AppendInt(dst, v)
	}
	return dst
}

// AppendInt8 encodes and inserts an int8 value into the dst byte array,
// delegating to AppendInt after widening.
func AppendInt8(dst []byte, val int8) []byte {
	return AppendInt(dst, int(val))
}

// AppendInts8 encodes and inserts an array of int8 values into the dst byte array.
func AppendInts8(dst []byte, vals []int8) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return AppendArrayEnd(AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = AppendInt(dst, int(v))
	}
	return dst
}

// AppendInt16 encodes and inserts an int16 value into the dst byte array,
// delegating to AppendInt after widening.
func AppendInt16(dst []byte, val int16) []byte {
	return AppendInt(dst, int(val))
}

// AppendInts16 encodes and inserts an array of int16 values into the dst byte array.
func AppendInts16(dst []byte, vals []int16) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return AppendArrayEnd(AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = AppendInt(dst, int(v))
	}
	return dst
}
// AppendInt32 encodes and inserts an int32 value into the dst byte array,
// delegating to AppendInt after widening.
func AppendInt32(dst []byte, val int32) []byte {
	return AppendInt(dst, int(val))
}

// AppendInts32 encodes and inserts an array of int32 values into the dst byte array.
func AppendInts32(dst []byte, vals []int32) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return AppendArrayEnd(AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = AppendInt(dst, int(v))
	}
	return dst
}

// AppendInt64 encodes and inserts an int64 value into the dst byte array.
func AppendInt64(dst []byte, val int64) []byte {
	major := majorTypeUnsignedInt
	contentVal := val
	if val < 0 {
		// CBOR encodes a negative integer n as the unsigned value -n-1; for
		// val == math.MinInt64 the two's-complement wraparound of -val-1
		// still lands on the correct value (math.MaxInt64)
		major = majorTypeNegativeInt
		contentVal = -val - 1
	}
	if contentVal <= additionalMax {
		lb := byte(contentVal)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(contentVal))
	}
	return dst
}

// AppendInts64 encodes and inserts an array of int64 values into the dst byte array.
func AppendInts64(dst []byte, vals []int64) []byte {
	major := majorTypeArray
	l := len(vals)
	if l == 0 {
		return AppendArrayEnd(AppendArrayStart(dst))
	}
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, byte(major|lb))
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	for _, v := range vals {
		dst = AppendInt64(dst, v)
	}
	return dst
}
// AppendUint encodes and inserts an unsigned integer value into the dst byte array.
func AppendUint(dst []byte, val uint) []byte {
return AppendInt64(dst, int64(val))
}
// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array.
func AppendUints(dst []byte, vals []uint) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
return AppendArrayEnd(AppendArrayStart(dst))
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = AppendUint(dst, v)
}
return dst
}
// AppendUint8 encodes and inserts a unsigned int8 value into the dst byte array.
func AppendUint8(dst []byte, val uint8) []byte {
	// Widen and reuse the generic unsigned encoder.
	widened := uint(val)
	return AppendUint(dst, widened)
}
// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array.
func AppendUints8(dst []byte, vals []uint8) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
return AppendArrayEnd(AppendArrayStart(dst))
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = AppendUint8(dst, v)
}
return dst
}
// AppendUint16 encodes and inserts a uint16 value into the dst byte array.
func AppendUint16(dst []byte, val uint16) []byte {
	// Widen and reuse the generic unsigned encoder.
	widened := uint(val)
	return AppendUint(dst, widened)
}
// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array.
func AppendUints16(dst []byte, vals []uint16) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
return AppendArrayEnd(AppendArrayStart(dst))
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = AppendUint16(dst, v)
}
return dst
}
// AppendUint32 encodes and inserts a uint32 value into the dst byte array.
func AppendUint32(dst []byte, val uint32) []byte {
	// Widen and reuse the generic unsigned encoder.
	widened := uint(val)
	return AppendUint(dst, widened)
}
// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array.
func AppendUints32(dst []byte, vals []uint32) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
return AppendArrayEnd(AppendArrayStart(dst))
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = AppendUint32(dst, v)
}
return dst
}
// AppendUint64 encodes and inserts a uint64 value into the dst byte array.
func AppendUint64(dst []byte, val uint64) []byte {
major := majorTypeUnsignedInt
contentVal := val
if contentVal <= additionalMax {
lb := byte(contentVal)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
}
return dst
}
// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array.
func AppendUints64(dst []byte, vals []uint64) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
return AppendArrayEnd(AppendArrayStart(dst))
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = AppendUint64(dst, v)
}
return dst
}
// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
func AppendFloat32(dst []byte, val float32) []byte {
	// NaN and the infinities have fixed canonical encodings.
	switch {
	case math.IsNaN(float64(val)):
		return append(dst, "\xfa\x7f\xc0\x00\x00"...)
	case math.IsInf(float64(val), 1):
		return append(dst, "\xfa\x7f\x80\x00\x00"...)
	case math.IsInf(float64(val), -1):
		return append(dst, "\xfa\xff\x80\x00\x00"...)
	}
	// Header byte followed by the IEEE-754 bits in big-endian order.
	bits := math.Float32bits(val)
	dst = append(dst, byte(majorTypeSimpleAndFloat)|byte(additionalTypeFloat32))
	return append(dst, byte(bits>>24), byte(bits>>16), byte(bits>>8), byte(bits))
}
// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array.
func AppendFloats32(dst []byte, vals []float32) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
return AppendArrayEnd(AppendArrayStart(dst))
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = AppendFloat32(dst, v)
}
return dst
}
// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
func AppendFloat64(dst []byte, val float64) []byte {
	// NaN and the infinities have fixed canonical encodings.
	switch {
	case math.IsNaN(val):
		return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
	case math.IsInf(val, 1):
		return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...)
	case math.IsInf(val, -1):
		return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...)
	}
	// Header byte followed by the IEEE-754 bits in big-endian order.
	bits := math.Float64bits(val)
	dst = append(dst, byte(majorTypeSimpleAndFloat)|byte(additionalTypeFloat64))
	for shift := 56; shift >= 0; shift -= 8 {
		dst = append(dst, byte(bits>>uint(shift)))
	}
	return dst
}
// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
func AppendFloats64(dst []byte, vals []float64) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
return AppendArrayEnd(AppendArrayStart(dst))
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = AppendFloat64(dst, v)
}
return dst
}
// AppendInterface takes an arbitrary object and converts it to JSON and embeds it dst.
func AppendInterface(dst []byte, i interface{}) []byte {
marshaled, err := json.Marshal(i)
if err != nil {
return AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
}
return AppendEmbeddedJSON(dst, marshaled)
}
// AppendObjectData takes an object in form of a byte array and appends to dst.
func AppendObjectData(dst []byte, o []byte) []byte {
	// The object bytes are already encoded; splice them in verbatim.
	dst = append(dst, o...)
	return dst
}
// AppendArrayStart adds markers to indicate the start of an array.
func AppendArrayStart(dst []byte) []byte {
	// Indefinite-length array header; terminated later by a break code.
	marker := byte(majorTypeArray | additionalTypeInfiniteCount)
	return append(dst, marker)
}
// AppendArrayEnd adds markers to indicate the end of an array.
func AppendArrayEnd(dst []byte) []byte {
	// Break code that closes an indefinite-length item.
	marker := byte(majorTypeSimpleAndFloat | additionalTypeBreak)
	return append(dst, marker)
}
// AppendArrayDelim adds markers to indicate end of a particular array element.
func AppendArrayDelim(dst []byte) []byte {
	// CBOR needs no separator between array elements, so this is a no-op
	// kept for API symmetry with the other encoders.
	return dst
}
func AppendHex (dst []byte, val []byte) []byte {
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, byte(additionalTypeTagHexString>>8))
dst = append(dst, byte(additionalTypeTagHexString&0xff))
return AppendBytes(dst, val)
} | vendor/github.com/rs/zerolog/internal/cbor/types.go | 0.703448 | 0.407274 | types.go | starcoder |
package v1
import (
"context"
"reflect"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using.
//
// NOTE(review): this appears to be generated Pulumi SDK code (field set and
// `pulumi:` tags mirror the GKE v1 REST API) — do not edit fields by hand.
type Cluster struct {
	pulumi.CustomResourceState

	// Configurations for the various addons available to run in the cluster.
	AddonsConfig AddonsConfigResponseOutput `pulumi:"addonsConfig"`
	// Configuration controlling RBAC group membership information.
	AuthenticatorGroupsConfig AuthenticatorGroupsConfigResponseOutput `pulumi:"authenticatorGroupsConfig"`
	// Autopilot configuration for the cluster.
	Autopilot AutopilotResponseOutput `pulumi:"autopilot"`
	// Cluster-level autoscaling configuration.
	Autoscaling ClusterAutoscalingResponseOutput `pulumi:"autoscaling"`
	// Configuration for Binary Authorization.
	BinaryAuthorization BinaryAuthorizationResponseOutput `pulumi:"binaryAuthorization"`
	// The IP address range of the container pods in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen or specify a `/14` block in `10.0.0.0/8`.
	ClusterIpv4Cidr pulumi.StringOutput `pulumi:"clusterIpv4Cidr"`
	// Which conditions caused the current cluster state.
	Conditions StatusConditionResponseArrayOutput `pulumi:"conditions"`
	// Configuration of Confidential Nodes
	ConfidentialNodes ConfidentialNodesResponseOutput `pulumi:"confidentialNodes"`
	// [Output only] The time the cluster was created, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
	CreateTime pulumi.StringOutput `pulumi:"createTime"`
	// [Output only] The current software version of the master endpoint.
	CurrentMasterVersion pulumi.StringOutput `pulumi:"currentMasterVersion"`
	// [Output only] Deprecated, use [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools) instead. The current version of the node software components. If they are currently at multiple versions because they're in the process of being upgraded, this reflects the minimum version of all nodes.
	CurrentNodeVersion pulumi.StringOutput `pulumi:"currentNodeVersion"`
	// Configuration of etcd encryption.
	DatabaseEncryption DatabaseEncryptionResponseOutput `pulumi:"databaseEncryption"`
	// The default constraint on the maximum number of pods that can be run simultaneously on a node in the node pool of this cluster. Only honored if cluster created with IP Alias support.
	DefaultMaxPodsConstraint MaxPodsConstraintResponseOutput `pulumi:"defaultMaxPodsConstraint"`
	// An optional description of this cluster.
	Description pulumi.StringOutput `pulumi:"description"`
	// Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation.
	EnableKubernetesAlpha pulumi.BoolOutput `pulumi:"enableKubernetesAlpha"`
	// Enable the ability to use Cloud TPUs in this cluster.
	EnableTpu pulumi.BoolOutput `pulumi:"enableTpu"`
	// [Output only] The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information.
	Endpoint pulumi.StringOutput `pulumi:"endpoint"`
	// [Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
	ExpireTime pulumi.StringOutput `pulumi:"expireTime"`
	// The initial Kubernetes version for this cluster. Valid versions are those found in validMasterVersions returned by getServerConfig. The version can be upgraded over time; such upgrades are reflected in currentMasterVersion and currentNodeVersion. Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "","-": picks the default Kubernetes version
	InitialClusterVersion pulumi.StringOutput `pulumi:"initialClusterVersion"`
	// Configuration for cluster IP allocation.
	IpAllocationPolicy IPAllocationPolicyResponseOutput `pulumi:"ipAllocationPolicy"`
	// The fingerprint of the set of labels for this cluster.
	LabelFingerprint pulumi.StringOutput `pulumi:"labelFingerprint"`
	// Configuration for the legacy ABAC authorization mode.
	LegacyAbac LegacyAbacResponseOutput `pulumi:"legacyAbac"`
	// [Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which the cluster resides.
	Location pulumi.StringOutput `pulumi:"location"`
	// The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located. This field provides a default value if [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations) are not specified during node pool creation. Warning: changing cluster locations will update the [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations) of all node pools and will result in nodes being added and/or removed.
	Locations pulumi.StringArrayOutput `pulumi:"locations"`
	// Logging configuration for the cluster.
	LoggingConfig LoggingConfigResponseOutput `pulumi:"loggingConfig"`
	// The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.
	LoggingService pulumi.StringOutput `pulumi:"loggingService"`
	// Configure the maintenance policy for this cluster.
	MaintenancePolicy MaintenancePolicyResponseOutput `pulumi:"maintenancePolicy"`
	// The authentication information for accessing the master endpoint. If unspecified, the defaults are used: For clusters before v1.12, if master_auth is unspecified, `username` will be set to "admin", a random password will be generated, and a client certificate will be issued.
	MasterAuth MasterAuthResponseOutput `pulumi:"masterAuth"`
	// The configuration options for master authorized networks feature.
	MasterAuthorizedNetworksConfig MasterAuthorizedNetworksConfigResponseOutput `pulumi:"masterAuthorizedNetworksConfig"`
	// Configuration for issuance of mTLS keys and certificates to Kubernetes pods.
	MeshCertificates MeshCertificatesResponseOutput `pulumi:"meshCertificates"`
	// Monitoring configuration for the cluster.
	MonitoringConfig MonitoringConfigResponseOutput `pulumi:"monitoringConfig"`
	// The monitoring service the cluster should use to write metrics. Currently available options: * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.
	MonitoringService pulumi.StringOutput `pulumi:"monitoringService"`
	// The name of this cluster. The name must be unique within this project and location (e.g. zone or region), and can be up to 40 characters with the following restrictions: * Lowercase letters, numbers, and hyphens only. * Must start with a letter. * Must end with a number or a letter.
	Name pulumi.StringOutput `pulumi:"name"`
	// The name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. If left unspecified, the `default` network will be used.
	Network pulumi.StringOutput `pulumi:"network"`
	// Configuration for cluster networking.
	NetworkConfig NetworkConfigResponseOutput `pulumi:"networkConfig"`
	// Configuration options for the NetworkPolicy feature.
	NetworkPolicy NetworkPolicyResponseOutput `pulumi:"networkPolicy"`
	// [Output only] The size of the address space on each node for hosting containers. This is provisioned from within the `container_ipv4_cidr` range. This field will only be set when cluster is in route-based network mode.
	NodeIpv4CidrSize pulumi.IntOutput `pulumi:"nodeIpv4CidrSize"`
	// Default NodePool settings for the entire cluster. These settings are overridden if specified on the specific NodePool object.
	NodePoolDefaults NodePoolDefaultsResponseOutput `pulumi:"nodePoolDefaults"`
	// The node pools associated with this cluster. This field should not be set if "node_config" or "initial_node_count" are specified.
	NodePools NodePoolResponseArrayOutput `pulumi:"nodePools"`
	// Notification configuration of the cluster.
	NotificationConfig NotificationConfigResponseOutput `pulumi:"notificationConfig"`
	// Configuration for private cluster.
	PrivateClusterConfig PrivateClusterConfigResponseOutput `pulumi:"privateClusterConfig"`
	// Release channel configuration.
	ReleaseChannel ReleaseChannelResponseOutput `pulumi:"releaseChannel"`
	// The resource labels for the cluster to use to annotate any related Google Compute Engine resources.
	ResourceLabels pulumi.StringMapOutput `pulumi:"resourceLabels"`
	// Configuration for exporting resource usages. Resource usage export is disabled when this config is unspecified.
	ResourceUsageExportConfig ResourceUsageExportConfigResponseOutput `pulumi:"resourceUsageExportConfig"`
	// [Output only] Server-defined URL for the resource.
	SelfLink pulumi.StringOutput `pulumi:"selfLink"`
	// [Output only] The IP address range of the Kubernetes services in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last `/16` from the container CIDR.
	ServicesIpv4Cidr pulumi.StringOutput `pulumi:"servicesIpv4Cidr"`
	// Shielded Nodes configuration.
	ShieldedNodes ShieldedNodesResponseOutput `pulumi:"shieldedNodes"`
	// [Output only] The current status of this cluster.
	Status pulumi.StringOutput `pulumi:"status"`
	// The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected.
	Subnetwork pulumi.StringOutput `pulumi:"subnetwork"`
	// [Output only] The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`).
	TpuIpv4CidrBlock pulumi.StringOutput `pulumi:"tpuIpv4CidrBlock"`
	// Cluster-level Vertical Pod Autoscaling configuration.
	VerticalPodAutoscaling VerticalPodAutoscalingResponseOutput `pulumi:"verticalPodAutoscaling"`
	// Configuration for the use of Kubernetes Service Accounts in GCP IAM policies.
	WorkloadIdentityConfig WorkloadIdentityConfigResponseOutput `pulumi:"workloadIdentityConfig"`
}
// NewCluster registers a new resource with the given unique name, arguments, and options.
func NewCluster(ctx *pulumi.Context,
	name string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {
	// A nil args is treated the same as an empty argument bag.
	if args == nil {
		args = &ClusterArgs{}
	}
	resource := &Cluster{}
	if err := ctx.RegisterResource("google-native:container/v1:Cluster", name, args, resource, opts...); err != nil {
		return nil, err
	}
	return resource, nil
}
// GetCluster gets an existing Cluster resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetCluster(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error) {
	resource := &Cluster{}
	if err := ctx.ReadResource("google-native:container/v1:Cluster", name, id, state, resource, opts...); err != nil {
		return nil, err
	}
	return resource, nil
}
// Input properties used for looking up and filtering Cluster resources.
type clusterState struct {
	// Intentionally empty: this resource exposes no lookup/filter inputs.
}
// ClusterState is the (currently empty) typed state bag accepted by GetCluster.
type ClusterState struct {
}
// ElementType returns the reflection token the Pulumi SDK uses to map
// ClusterState inputs onto the internal clusterState shape.
func (ClusterState) ElementType() reflect.Type {
	return reflect.TypeOf((*clusterState)(nil)).Elem()
}
// clusterArgs is the raw (plain-Go) argument shape for constructing a Cluster;
// ClusterArgs is the pulumi.Input-typed counterpart mapped onto this via reflection.
// NOTE(review): generated Pulumi SDK code — keep fields in sync with ClusterArgs.
type clusterArgs struct {
	// Configurations for the various addons available to run in the cluster.
	AddonsConfig *AddonsConfig `pulumi:"addonsConfig"`
	// Configuration controlling RBAC group membership information.
	AuthenticatorGroupsConfig *AuthenticatorGroupsConfig `pulumi:"authenticatorGroupsConfig"`
	// Autopilot configuration for the cluster.
	Autopilot *Autopilot `pulumi:"autopilot"`
	// Cluster-level autoscaling configuration.
	Autoscaling *ClusterAutoscaling `pulumi:"autoscaling"`
	// Configuration for Binary Authorization.
	BinaryAuthorization *BinaryAuthorization `pulumi:"binaryAuthorization"`
	// The IP address range of the container pods in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen or specify a `/14` block in `10.0.0.0/8`.
	ClusterIpv4Cidr *string `pulumi:"clusterIpv4Cidr"`
	// Which conditions caused the current cluster state.
	Conditions []StatusCondition `pulumi:"conditions"`
	// Configuration of Confidential Nodes
	ConfidentialNodes *ConfidentialNodes `pulumi:"confidentialNodes"`
	// Configuration of etcd encryption.
	DatabaseEncryption *DatabaseEncryption `pulumi:"databaseEncryption"`
	// The default constraint on the maximum number of pods that can be run simultaneously on a node in the node pool of this cluster. Only honored if cluster created with IP Alias support.
	DefaultMaxPodsConstraint *MaxPodsConstraint `pulumi:"defaultMaxPodsConstraint"`
	// An optional description of this cluster.
	Description *string `pulumi:"description"`
	// Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation.
	EnableKubernetesAlpha *bool `pulumi:"enableKubernetesAlpha"`
	// Enable the ability to use Cloud TPUs in this cluster.
	EnableTpu *bool `pulumi:"enableTpu"`
	// The initial Kubernetes version for this cluster. Valid versions are those found in validMasterVersions returned by getServerConfig. The version can be upgraded over time; such upgrades are reflected in currentMasterVersion and currentNodeVersion. Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "","-": picks the default Kubernetes version
	InitialClusterVersion *string `pulumi:"initialClusterVersion"`
	// Configuration for cluster IP allocation.
	IpAllocationPolicy *IPAllocationPolicy `pulumi:"ipAllocationPolicy"`
	// Configuration for the legacy ABAC authorization mode.
	LegacyAbac *LegacyAbac `pulumi:"legacyAbac"`
	Location *string `pulumi:"location"`
	// The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located. This field provides a default value if [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations) are not specified during node pool creation. Warning: changing cluster locations will update the [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations) of all node pools and will result in nodes being added and/or removed.
	Locations []string `pulumi:"locations"`
	// Logging configuration for the cluster.
	LoggingConfig *LoggingConfig `pulumi:"loggingConfig"`
	// The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.
	LoggingService *string `pulumi:"loggingService"`
	// Configure the maintenance policy for this cluster.
	MaintenancePolicy *MaintenancePolicy `pulumi:"maintenancePolicy"`
	// The authentication information for accessing the master endpoint. If unspecified, the defaults are used: For clusters before v1.12, if master_auth is unspecified, `username` will be set to "admin", a random password will be generated, and a client certificate will be issued.
	MasterAuth *MasterAuth `pulumi:"masterAuth"`
	// The configuration options for master authorized networks feature.
	MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `pulumi:"masterAuthorizedNetworksConfig"`
	// Configuration for issuance of mTLS keys and certificates to Kubernetes pods.
	MeshCertificates *MeshCertificates `pulumi:"meshCertificates"`
	// Monitoring configuration for the cluster.
	MonitoringConfig *MonitoringConfig `pulumi:"monitoringConfig"`
	// The monitoring service the cluster should use to write metrics. Currently available options: * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.
	MonitoringService *string `pulumi:"monitoringService"`
	// The name of this cluster. The name must be unique within this project and location (e.g. zone or region), and can be up to 40 characters with the following restrictions: * Lowercase letters, numbers, and hyphens only. * Must start with a letter. * Must end with a number or a letter.
	Name *string `pulumi:"name"`
	// The name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. If left unspecified, the `default` network will be used.
	Network *string `pulumi:"network"`
	// Configuration for cluster networking.
	NetworkConfig *NetworkConfig `pulumi:"networkConfig"`
	// Configuration options for the NetworkPolicy feature.
	NetworkPolicy *NetworkPolicy `pulumi:"networkPolicy"`
	// Default NodePool settings for the entire cluster. These settings are overridden if specified on the specific NodePool object.
	NodePoolDefaults *NodePoolDefaults `pulumi:"nodePoolDefaults"`
	// The node pools associated with this cluster. This field should not be set if "node_config" or "initial_node_count" are specified.
	NodePools []NodePoolType `pulumi:"nodePools"`
	// Notification configuration of the cluster.
	NotificationConfig *NotificationConfig `pulumi:"notificationConfig"`
	// The parent (project and location) where the cluster will be created. Specified in the format `projects/*/locations/*`.
	Parent *string `pulumi:"parent"`
	// Configuration for private cluster.
	PrivateClusterConfig *PrivateClusterConfig `pulumi:"privateClusterConfig"`
	Project *string `pulumi:"project"`
	// Release channel configuration.
	ReleaseChannel *ReleaseChannel `pulumi:"releaseChannel"`
	// The resource labels for the cluster to use to annotate any related Google Compute Engine resources.
	ResourceLabels map[string]string `pulumi:"resourceLabels"`
	// Configuration for exporting resource usages. Resource usage export is disabled when this config is unspecified.
	ResourceUsageExportConfig *ResourceUsageExportConfig `pulumi:"resourceUsageExportConfig"`
	// Shielded Nodes configuration.
	ShieldedNodes *ShieldedNodes `pulumi:"shieldedNodes"`
	// The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected.
	Subnetwork *string `pulumi:"subnetwork"`
	// Cluster-level Vertical Pod Autoscaling configuration.
	VerticalPodAutoscaling *VerticalPodAutoscaling `pulumi:"verticalPodAutoscaling"`
	// Configuration for the use of Kubernetes Service Accounts in GCP IAM policies.
	WorkloadIdentityConfig *WorkloadIdentityConfig `pulumi:"workloadIdentityConfig"`
}
// The set of arguments for constructing a Cluster resource.
type ClusterArgs struct {
// Configurations for the various addons available to run in the cluster.
AddonsConfig AddonsConfigPtrInput
// Configuration controlling RBAC group membership information.
AuthenticatorGroupsConfig AuthenticatorGroupsConfigPtrInput
// Autopilot configuration for the cluster.
Autopilot AutopilotPtrInput
// Cluster-level autoscaling configuration.
Autoscaling ClusterAutoscalingPtrInput
// Configuration for Binary Authorization.
BinaryAuthorization BinaryAuthorizationPtrInput
// The IP address range of the container pods in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen or specify a `/14` block in `10.0.0.0/8`.
ClusterIpv4Cidr pulumi.StringPtrInput
// Which conditions caused the current cluster state.
Conditions StatusConditionArrayInput
// Configuration of Confidential Nodes
ConfidentialNodes ConfidentialNodesPtrInput
// Configuration of etcd encryption.
DatabaseEncryption DatabaseEncryptionPtrInput
// The default constraint on the maximum number of pods that can be run simultaneously on a node in the node pool of this cluster. Only honored if cluster created with IP Alias support.
DefaultMaxPodsConstraint MaxPodsConstraintPtrInput
// An optional description of this cluster.
Description pulumi.StringPtrInput
// Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation.
EnableKubernetesAlpha pulumi.BoolPtrInput
// Enable the ability to use Cloud TPUs in this cluster.
EnableTpu pulumi.BoolPtrInput
// The initial Kubernetes version for this cluster. Valid versions are those found in validMasterVersions returned by getServerConfig. The version can be upgraded over time; such upgrades are reflected in currentMasterVersion and currentNodeVersion. Users may specify either explicit versions offered by Kubernetes Engine or version aliases, which have the following behavior: - "latest": picks the highest valid Kubernetes version - "1.X": picks the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "","-": picks the default Kubernetes version
InitialClusterVersion pulumi.StringPtrInput
// Configuration for cluster IP allocation.
IpAllocationPolicy IPAllocationPolicyPtrInput
// Configuration for the legacy ABAC authorization mode.
LegacyAbac LegacyAbacPtrInput
Location pulumi.StringPtrInput
// The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes should be located. This field provides a default value if [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations) are not specified during node pool creation. Warning: changing cluster locations will update the [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations) of all node pools and will result in nodes being added and/or removed.
Locations pulumi.StringArrayInput
// Logging configuration for the cluster.
LoggingConfig LoggingConfigPtrInput
// The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com/kubernetes` - The Cloud Logging service with a Kubernetes-native resource model * `logging.googleapis.com` - The legacy Cloud Logging service (no longer available as of GKE 1.15). * `none` - no logs will be exported from the cluster. If left as an empty string,`logging.googleapis.com/kubernetes` will be used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.
LoggingService pulumi.StringPtrInput
// Configure the maintenance policy for this cluster.
MaintenancePolicy MaintenancePolicyPtrInput
// The authentication information for accessing the master endpoint. If unspecified, the defaults are used: For clusters before v1.12, if master_auth is unspecified, `username` will be set to "admin", a random password will be generated, and a client certificate will be issued.
MasterAuth MasterAuthPtrInput
// The configuration options for master authorized networks feature.
MasterAuthorizedNetworksConfig MasterAuthorizedNetworksConfigPtrInput
// Configuration for issuance of mTLS keys and certificates to Kubernetes pods.
MeshCertificates MeshCertificatesPtrInput
// Monitoring configuration for the cluster.
MonitoringConfig MonitoringConfigPtrInput
// The monitoring service the cluster should use to write metrics. Currently available options: * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.
MonitoringService pulumi.StringPtrInput
// The name of this cluster. The name must be unique within this project and location (e.g. zone or region), and can be up to 40 characters with the following restrictions: * Lowercase letters, numbers, and hyphens only. * Must start with a letter. * Must end with a number or a letter.
Name pulumi.StringPtrInput
// The name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. If left unspecified, the `default` network will be used.
Network pulumi.StringPtrInput
// Configuration for cluster networking.
NetworkConfig NetworkConfigPtrInput
// Configuration options for the NetworkPolicy feature.
NetworkPolicy NetworkPolicyPtrInput
// Default NodePool settings for the entire cluster. These settings are overridden if specified on the specific NodePool object.
NodePoolDefaults NodePoolDefaultsPtrInput
// The node pools associated with this cluster. This field should not be set if "node_config" or "initial_node_count" are specified.
NodePools NodePoolTypeArrayInput
// Notification configuration of the cluster.
NotificationConfig NotificationConfigPtrInput
// The parent (project and location) where the cluster will be created. Specified in the format `projects/*/locations/*`.
Parent pulumi.StringPtrInput
// Configuration for private cluster.
PrivateClusterConfig PrivateClusterConfigPtrInput
Project pulumi.StringPtrInput
// Release channel configuration.
ReleaseChannel ReleaseChannelPtrInput
// The resource labels for the cluster to use to annotate any related Google Compute Engine resources.
ResourceLabels pulumi.StringMapInput
// Configuration for exporting resource usages. Resource usage export is disabled when this config is unspecified.
ResourceUsageExportConfig ResourceUsageExportConfigPtrInput
// Shielded Nodes configuration.
ShieldedNodes ShieldedNodesPtrInput
// The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected.
Subnetwork pulumi.StringPtrInput
// Cluster-level Vertical Pod Autoscaling configuration.
VerticalPodAutoscaling VerticalPodAutoscalingPtrInput
// Configuration for the use of Kubernetes Service Accounts in GCP IAM policies.
WorkloadIdentityConfig WorkloadIdentityConfigPtrInput
}
// ElementType returns the reflected type of the internal clusterArgs mirror
// struct; the pulumi runtime uses it to marshal the args bag. (Generated code.)
func (ClusterArgs) ElementType() reflect.Type {
	return reflect.TypeOf((*clusterArgs)(nil)).Elem()
}

// ClusterInput is an input type that accepts Cluster and ClusterOutput values.
type ClusterInput interface {
	pulumi.Input

	ToClusterOutput() ClusterOutput
	ToClusterOutputWithContext(ctx context.Context) ClusterOutput
}

// ElementType returns the reflected pointer type *Cluster.
func (*Cluster) ElementType() reflect.Type {
	return reflect.TypeOf((**Cluster)(nil)).Elem()
}

// ToClusterOutput converts the Cluster resource to an output using the
// background context.
func (i *Cluster) ToClusterOutput() ClusterOutput {
	return i.ToClusterOutputWithContext(context.Background())
}

// ToClusterOutputWithContext converts the Cluster resource to an output,
// threading the supplied context through the conversion.
func (i *Cluster) ToClusterOutputWithContext(ctx context.Context) ClusterOutput {
	return pulumi.ToOutputWithContext(ctx, i).(ClusterOutput)
}

// ClusterOutput is the pulumi output wrapper for Cluster values.
type ClusterOutput struct{ *pulumi.OutputState }

// ElementType returns the reflected pointer type *Cluster.
func (ClusterOutput) ElementType() reflect.Type {
	return reflect.TypeOf((**Cluster)(nil)).Elem()
}

// ToClusterOutput returns the output unchanged (identity conversion).
func (o ClusterOutput) ToClusterOutput() ClusterOutput {
	return o
}

// ToClusterOutputWithContext returns the output unchanged; the context is not
// needed for an identity conversion.
func (o ClusterOutput) ToClusterOutputWithContext(ctx context.Context) ClusterOutput {
	return o
}

// init registers the Cluster input and output types with the pulumi runtime so
// resource graphs can resolve Cluster values at runtime.
func init() {
	pulumi.RegisterInputType(reflect.TypeOf((*ClusterInput)(nil)).Elem(), &Cluster{})
	pulumi.RegisterOutputType(ClusterOutput{})
} | sdk/go/google/container/v1/cluster.go | 0.731251 | 0.428951 | cluster.go | starcoder
package iso20022
// Payment obligation contracted between two financial institutions related to the financing of a commercial transaction.
type PaymentObligation1 struct {
// Bank that has to pay under the obligation.
ObligorBank *BICIdentification1 `xml:"OblgrBk"`
// Bank that will be paid under the obligation.
RecipientBank *BICIdentification1 `xml:"RcptBk"`
// Maximum amount that will be paid under the obligation.
Amount *CurrencyAndAmount `xml:"Amt"`
// Maximum amount that will be paid under the obligation, expressed as a percentage of the purchase order net amount.
Percentage *PercentageRate `xml:"Pctg"`
// Amount of the charges taken by the obligor bank.
ChargesAmount *CurrencyAndAmount `xml:"ChrgsAmt,omitempty"`
// Amount of the charges expressed as a percentage of the amount paid by the obligor bank.
ChargesPercentage *PercentageRate `xml:"ChrgsPctg,omitempty"`
// Date at which the obligation will expire.
ExpiryDate *ISODate `xml:"XpryDt"`
// Country of which the law governs the bank payment obligation.
ApplicableLaw *CountryCode `xml:"AplblLaw,omitempty"`
// Payment processes required to transfer cash from the debtor to the creditor.
PaymentTerms []*PaymentTerms2 `xml:"PmtTerms,omitempty"`
// Instruction between two clearing agents stipulating the cash transfer characteristics between the two parties.
SettlementTerms *SettlementTerms2 `xml:"SttlmTerms,omitempty"`
}
// AddObligorBank initialises ObligorBank and returns it so the caller can
// populate its fields.
func (p *PaymentObligation1) AddObligorBank() *BICIdentification1 {
	p.ObligorBank = new(BICIdentification1)
	return p.ObligorBank
}

// AddRecipientBank initialises RecipientBank and returns it for population.
func (p *PaymentObligation1) AddRecipientBank() *BICIdentification1 {
	p.RecipientBank = new(BICIdentification1)
	return p.RecipientBank
}

// SetAmount sets the maximum obligation amount together with its currency.
func (p *PaymentObligation1) SetAmount(value, currency string) {
	p.Amount = NewCurrencyAndAmount(value, currency)
}

// SetPercentage sets the obligation expressed as a percentage of the purchase
// order net amount.
func (p *PaymentObligation1) SetPercentage(value string) {
	p.Percentage = (*PercentageRate)(&value)
}

// SetChargesAmount sets the obligor bank's charges with their currency.
func (p *PaymentObligation1) SetChargesAmount(value, currency string) {
	p.ChargesAmount = NewCurrencyAndAmount(value, currency)
}

// SetChargesPercentage sets the charges as a percentage of the amount paid by
// the obligor bank.
func (p *PaymentObligation1) SetChargesPercentage(value string) {
	p.ChargesPercentage = (*PercentageRate)(&value)
}

// SetExpiryDate sets the obligation expiry date (ISO date string).
func (p *PaymentObligation1) SetExpiryDate(value string) {
	p.ExpiryDate = (*ISODate)(&value)
}

// SetApplicableLaw sets the country whose law governs the obligation.
func (p *PaymentObligation1) SetApplicableLaw(value string) {
	p.ApplicableLaw = (*CountryCode)(&value)
}

// AddPaymentTerms appends a new, empty PaymentTerms2 entry and returns it for
// population.
func (p *PaymentObligation1) AddPaymentTerms() *PaymentTerms2 {
	newValue := new(PaymentTerms2)
	p.PaymentTerms = append(p.PaymentTerms, newValue)
	return newValue
}

// AddSettlementTerms initialises SettlementTerms and returns it for population.
func (p *PaymentObligation1) AddSettlementTerms() *SettlementTerms2 {
	p.SettlementTerms = new(SettlementTerms2)
	return p.SettlementTerms
} | PaymentObligation1.go | 0.774498 | 0.63665 | PaymentObligation1.go | starcoder
package graphics
import (
"github.com/markov/gojira2d/pkg/utils"
"log"
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
const FLOAT32_SIZE = 4
type ModelMatrix struct {
mgl32.Mat4
size mgl32.Mat4
translation mgl32.Mat4
rotation mgl32.Mat4
scale mgl32.Mat4
anchor mgl32.Mat4
dirty bool
}
type Primitive2D struct {
Primitive
position mgl32.Vec3
scale mgl32.Vec2
size mgl32.Vec2
anchor mgl32.Vec2
angle float32
flipX bool
flipY bool
color Color
modelMatrix ModelMatrix
}
func (p *Primitive2D) SetPosition(position mgl32.Vec3) {
p.position = position
p.modelMatrix.translation = mgl32.Translate3D(p.position.X(), p.position.Y(), p.position.Z())
p.modelMatrix.dirty = true
}
func (p *Primitive2D) SetAnchor(anchor mgl32.Vec2) {
p.anchor = anchor
p.modelMatrix.anchor = mgl32.Translate3D(-p.anchor.X(), -p.anchor.Y(), 0)
p.modelMatrix.dirty = true
}
func (p *Primitive2D) SetAngle(radians float32) {
p.angle = radians
p.modelMatrix.rotation = mgl32.HomogRotate3DZ(p.angle)
p.modelMatrix.dirty = true
}
func (p *Primitive2D) SetSize(size mgl32.Vec2) {
p.size = size
p.modelMatrix.size = mgl32.Scale3D(p.size.X(), p.size.Y(), 1)
p.modelMatrix.dirty = true
}
func (p *Primitive2D) GetSize() mgl32.Vec2 {
return p.size
}
func (p *Primitive2D) SetScale(scale mgl32.Vec2) {
p.scale = scale
p.rebuildScaleMatrix()
}
func (p *Primitive2D) SetFlipX(flipX bool) {
p.flipX = flipX
p.rebuildScaleMatrix()
}
func (p *Primitive2D) SetFlipY(flipY bool) {
p.flipY = flipY
p.rebuildScaleMatrix()
}
func (p *Primitive2D) SetColor(color Color) {
p.color = color
}
func (p *Primitive2D) SetUniforms() {
p.shaderProgram.SetUniform("color", &p.color)
p.shaderProgram.SetUniform("mModel", p.ModelMatrix())
}
func (p *Primitive2D) SetSizeFromTexture() {
p.SetSize(mgl32.Vec2{float32(p.texture.width), float32(p.texture.height)})
}
func (p *Primitive2D) SetAnchorToCenter() {
p.SetAnchor(mgl32.Vec2{p.size[0] / 2.0, p.size[1] / 2.0})
}
func (p *Primitive2D) SetAnchorToBottomCenter() {
p.SetAnchor(mgl32.Vec2{p.size[0] / 2.0, p.size[1]})
}
func (p *Primitive2D) EnqueueForDrawing(context *Context) {
context.EnqueueForDrawing(p)
}
func (p *Primitive2D) Draw(context *Context) {
shaderId := p.shaderProgram.Id()
gl.BindTexture(gl.TEXTURE_2D, p.texture.Id())
gl.UseProgram(shaderId)
p.shaderProgram.SetUniform("mProjection", &context.projectionMatrix)
p.SetUniforms()
gl.BindVertexArray(p.vaoId)
gl.DrawArrays(p.arrayMode, 0, p.arraySize)
}
// Texture and shaders are already bound when this is called
func (p *Primitive2D) DrawInBatch(context *Context) {
p.SetUniforms()
gl.BindVertexArray(p.vaoId)
gl.DrawArrays(p.arrayMode, 0, p.arraySize)
}
func (p *Primitive2D) rebuildMatrices() {
p.modelMatrix.translation = mgl32.Translate3D(p.position.X(), p.position.Y(), p.position.Z())
p.modelMatrix.anchor = mgl32.Translate3D(-p.anchor.X(), -p.anchor.Y(), 0)
p.modelMatrix.rotation = mgl32.HomogRotate3DZ(p.angle)
p.modelMatrix.size = mgl32.Scale3D(p.size.X(), p.size.Y(), 1)
p.rebuildScaleMatrix()
p.modelMatrix.dirty = true
}
func (p *Primitive2D) rebuildScaleMatrix() {
scaleX := p.scale.X()
if p.flipX {
scaleX *= -1
}
scaleY := p.scale.Y()
if p.flipY {
scaleY *= -1
}
p.modelMatrix.scale = mgl32.Scale3D(scaleX, scaleY, 1)
p.modelMatrix.dirty = true
}
// ModelMatrix lazily recomputes the combined model matrix
// (translation * rotation * scale * anchor * size) and returns a pointer to
// the cached result.
//
// Fix: the original never cleared the dirty flag, so the cache was
// ineffective and all five matrix multiplications ran on every call. The flag
// is now reset after a rebuild; setters re-mark it dirty.
func (p *Primitive2D) ModelMatrix() *mgl32.Mat4 {
	if p.modelMatrix.dirty {
		p.modelMatrix.Mat4 = p.modelMatrix.translation.
			Mul4(p.modelMatrix.rotation).
			Mul4(p.modelMatrix.scale).
			Mul4(p.modelMatrix.anchor).
			Mul4(p.modelMatrix.size)
		p.modelMatrix.dirty = false
	}
	return &p.modelMatrix.Mat4
}
func NewQuadPrimitive(position mgl32.Vec3, size mgl32.Vec2) *Primitive2D {
q := &Primitive2D{}
q.position = position
q.size = size
q.scale = mgl32.Vec2{1, 1}
q.shaderProgram = NewShaderProgram(VertexShaderPrimitive2D, "", FragmentShaderTexture)
q.rebuildMatrices()
q.arrayMode = gl.TRIANGLE_FAN
q.arraySize = 4
// Build the VAO
q.SetVertices([]float32{0, 0, 0, 1, 1, 1, 1, 0})
q.SetUVCoords([]float32{0, 0, 0, 1, 1, 1, 1, 0})
return q
}
func NewRegularPolygonPrimitive(position mgl32.Vec3, radius float32, numSegments int, filled bool) *Primitive2D {
circlePoints, err := utils.CircleToPolygon(mgl32.Vec2{0.5, 0.5}, 0.5, numSegments, 0)
if err != nil {
log.Panic(err)
return nil
}
q := &Primitive2D{}
q.position = position
q.size = mgl32.Vec2{radius * 2, radius * 2}
q.scale = mgl32.Vec2{1, 1}
q.shaderProgram = NewShaderProgram(VertexShaderPrimitive2D, "", FragmentShaderSolidColor)
q.rebuildMatrices()
// Vertices
vertices := make([]float32, 0, numSegments*2)
for _, v := range circlePoints {
vertices = append(vertices, v[0], v[1])
}
// Add one vertex for the last line
vertices = append(vertices, circlePoints[0][0], circlePoints[0][1])
if filled {
q.arrayMode = gl.TRIANGLE_FAN
} else {
q.arrayMode = gl.LINE_STRIP
}
q.SetVertices(vertices)
return q
}
func NewTriangles(
vertices []float32,
uvCoords []float32,
texture *Texture,
position mgl32.Vec3,
size mgl32.Vec2,
shaderProgram *ShaderProgram,
) *Primitive2D {
p := &Primitive2D{}
p.arrayMode = gl.TRIANGLES
p.arraySize = int32(len(vertices) / 2)
p.texture = texture
p.shaderProgram = shaderProgram
p.position = position
p.scale = mgl32.Vec2{1, 1}
p.size = size
p.rebuildMatrices()
gl.GenVertexArrays(1, &p.vaoId)
gl.BindVertexArray(p.vaoId)
p.SetVertices(vertices)
p.SetUVCoords(uvCoords)
gl.BindVertexArray(0)
return p
}
func NewPolylinePrimitive(position mgl32.Vec3, points []mgl32.Vec2, closed bool) *Primitive2D {
topLeft, bottomRight := utils.GetBoundingBox(points)
primitive := &Primitive2D{}
primitive.position = position
primitive.size = bottomRight.Sub(topLeft)
primitive.scale = mgl32.Vec2{1, 1}
primitive.shaderProgram = NewShaderProgram(VertexShaderPrimitive2D, "", FragmentShaderSolidColor)
primitive.rebuildMatrices()
// Vertices
vertices := make([]float32, 0, len(points)*2)
for _, p := range points {
// The vertices coordinates are relative to the top left and are scaled by size
vertices = append(vertices, (p[0]-topLeft[0])/primitive.size.X(), (p[1]-topLeft[1])/primitive.size.Y())
}
if closed {
// Add the first point again to close the loop
vertices = append(vertices, vertices[0], vertices[1])
}
primitive.arrayMode = gl.LINE_STRIP
primitive.arraySize = int32(len(vertices) / 2)
primitive.SetVertices(vertices)
return primitive
}
// SetVertices uploads new set of vertices into opengl buffer
func (p *Primitive2D) SetVertices(vertices []float32) {
if p.vaoId == 0 {
gl.GenVertexArrays(1, &p.vaoId)
}
gl.BindVertexArray(p.vaoId)
if p.vboVertices == 0 {
gl.GenBuffers(1, &p.vboVertices)
}
gl.BindBuffer(gl.ARRAY_BUFFER, p.vboVertices)
gl.BufferData(gl.ARRAY_BUFFER, len(vertices)*FLOAT32_SIZE, gl.Ptr(vertices), gl.STATIC_DRAW)
gl.EnableVertexAttribArray(0)
gl.VertexAttribPointer(0, 2, gl.FLOAT, false, 0, gl.PtrOffset(0))
p.arraySize = int32(len(vertices) / 2)
gl.BindVertexArray(0)
}
// SetUVCoords uploads new UV coordinates
func (p *Primitive2D) SetUVCoords(uvCoords []float32) {
if p.vaoId == 0 {
gl.GenVertexArrays(1, &p.vaoId)
}
gl.BindVertexArray(p.vaoId)
if p.vboUVCoords == 0 {
gl.GenBuffers(1, &p.vboUVCoords)
}
gl.BindBuffer(gl.ARRAY_BUFFER, p.vboUVCoords)
gl.BufferData(gl.ARRAY_BUFFER, len(uvCoords)*FLOAT32_SIZE, gl.Ptr(uvCoords), gl.STATIC_DRAW)
gl.EnableVertexAttribArray(1)
gl.VertexAttribPointer(1, 2, gl.FLOAT, false, 0, gl.PtrOffset(0))
gl.BindVertexArray(0)
}
const (
VertexShaderPrimitive2D = `
#version 410 core
uniform mat4 mModel;
uniform mat4 mProjection;
layout(location=0) in vec2 vertex;
layout(location=1) in vec2 uv;
out vec2 uv_out;
void main() {
vec4 vertex_world = mModel * vec4(vertex, 0, 1);
gl_Position = mProjection * vertex_world;
uv_out = uv;
}
` + "\x00"
) | pkg/graphics/primitive_2d.go | 0.689828 | 0.477311 | primitive_2d.go | starcoder |
package bitset
import (
"fmt"
)
// Bitset represents a fixed-length set of bits backed by 32-bit chunks.
type Bitset struct {
	bitvec    []int32 // backing storage, one int32 per 32 bits
	length    int     // number of int32 chunks in bitvec
	bitlength int     // logical number of bits in the set
}

// New creates a new bitset instance with room for l bits.
//
// Fix: the original allocated l int32s (one per *bit*, a 32x over-allocation)
// while recording (l/32)+1 as the chunk count; New(0) would even panic on the
// first Set. The backing slice is now sized to the chunk count.
func New(l int) Bitset {
	chunks := (l / 32) + 1
	return Bitset{
		bitvec:    make([]int32, chunks),
		length:    chunks,
		bitlength: l,
	}
}

// Set sets the bit at pos. It returns an error when pos is outside the
// logical bit range [0, bitlength); the original accepted positions up to the
// last allocated chunk, silently setting bits that Count/SetBits then ignored.
func (b *Bitset) Set(pos int) error {
	if pos < 0 || pos >= b.bitlength {
		return fmt.Errorf("invalid position %d for bitset of length %d", pos, b.bitlength)
	}
	rpos := pos / 32 // chunk index within bitvec
	bpos := pos % 32 // bit index within that chunk
	b.bitvec[rpos] |= int32(1) << bpos
	return nil
}

// Clear clears the bit at pos. It returns an error when pos is outside the
// logical bit range [0, bitlength).
func (b *Bitset) Clear(pos int) error {
	if pos < 0 || pos >= b.bitlength {
		return fmt.Errorf("invalid position %d for bitset of length %d", pos, b.bitlength)
	}
	rpos := pos / 32
	bpos := pos % 32
	b.bitvec[rpos] &^= int32(1) << bpos // and-not clears the single bit
	return nil
}

// IsSet reports whether the bit at pos is set. Out-of-range positions report
// false.
func (b *Bitset) IsSet(pos int) bool {
	if pos < 0 || pos >= b.bitlength {
		return false
	}
	rpos := pos / 32
	bpos := pos % 32
	return b.bitvec[rpos]&(int32(1)<<bpos) != 0
}

// Count returns the number of set bits within the logical length.
func (b *Bitset) Count() int {
	count := 0
	for i := 0; i < b.bitlength; i++ {
		if b.IsSet(i) {
			count++
		}
	}
	return count
}

// SetBits returns the indices of all set bits in ascending order.
func (b *Bitset) SetBits() []int {
	indices := make([]int, 0, b.bitlength)
	for i := 0; i < b.bitlength; i++ {
		if b.IsSet(i) {
			indices = append(indices, i)
		}
	}
	return indices
}
// BitLength returns the logical size of the bitset in bits (the l passed to
// New), not the capacity of the backing chunk slice.
func (b *Bitset) BitLength() int {
	return b.bitlength
} | bitset/bitset.go | 0.76074 | 0.467149 | bitset.go | starcoder
package chatbot
import (
"fmt"
"net/http"
"os"
"strings"
"sync"
"time"
"github.com/go-chat-bot/bot"
)
const (
invalidDeploySyntax = "Deploy command requires 4 parameters: " +
"```!deploy %s your_app your_container your/docker:image``` \nGot: ```!deploy %s```"
invalidImageFormat = "```Invalid image format, should be your_dockerhub_repo:tag``` \nGot: ```%s```"
invalidImage = "```Invalid image, tag %s does not exist in dockerhub repo %s```"
invalidResetSyntax = "Reset command requires 2 parameters: " +
"```!reset %s your_app``` \nGot: ```!deploy %s```"
invalidP2PSyntax = "P2P command requires 2 parameters: " +
"```!p2p %s your_app``` \nGot: ```!p2p %s```"
appNotFound = "Sorry, app %s could not be found"
cmdResponse = "This is the response to your request:\n ```\n%s\n``` "
clusterNameNotice = "You must specify cluster name in order to use the command:\n ```!%s %s %s\n```"
)
// wrongClusterName decides whether cmd targets this bot's cluster. When the
// first argument looks like a cluster name (has the "net" suffix), it returns
// an empty message and true only if that name differs from clusterName, i.e.
// false ("proceed") when the names match. Otherwise no cluster was specified:
// it returns a usage notice and true so the handler aborts with that message.
func wrongClusterName(cmd *bot.Cmd, clusterName string) (string, bool) {
	if len(cmd.Args) > 0 && strings.HasSuffix(cmd.Args[0], "net") {
		return "", cmd.Args[0] != clusterName
	}
	return fmt.Sprintf(clusterNameNotice, cmd.Command, clusterName, strings.Join(cmd.Args, " ")), true
}
type deployCommand struct {
client *http.Client
clusterName string
}
func (c *deployCommand) Func() func(*bot.Cmd) (string, error) {
panic("stub")
}
func NewDeployCommand(clusterName string) Command {
return &deployCommand{
client: &http.Client{Timeout: 2 * time.Second},
clusterName: clusterName,
}
}
func (c *deployCommand) Register() {
bot.RegisterCommandV3(
"deploy",
"Kubectl deployment abstraction",
fmt.Sprintf("%s your_app your_container your/docker:image", c.clusterName),
c.Func3())
}
// Func3 returns the async handler for the !deploy command. It validates the
// syntax, verifies the requested image tag exists on Docker Hub, forces a
// redeploy by first setting a throwaway image, and later reports pod status.
//
// Fixes: the original evaluated r.StatusCode before checking the request
// error, panicking on a nil *http.Response when the Docker Hub call failed,
// and it never closed the response body (leaked connection).
func (c *deployCommand) Func3() func(*bot.Cmd) (bot.CmdResultV3, error) {
	commandString := "kubectl set image %s %s %s=%s"
	statusCommandString := "kubectl get pods --selector=app=%s"
	return func(cmd *bot.Cmd) (s bot.CmdResultV3, e error) {
		res := bot.CmdResultV3{
			Message: make(chan string, 1),
			Done:    make(chan bool, 1),
		}
		go func() {
			// Always signal completion so the bot framework is not left waiting.
			defer func() {
				res.Done <- true
			}()
			msg, isWrong := wrongClusterName(cmd, c.clusterName)
			if isWrong {
				res.Message <- msg
				return
			}
			if len(cmd.Args) != 4 {
				res.Message <- fmt.Sprintf(invalidDeploySyntax, c.clusterName, strings.Join(cmd.Args, " "))
				return
			}
			app := cmd.Args[1]
			container := cmd.Args[2]
			image := cmd.Args[3]
			imageParts := strings.Split(image, ":")
			if len(imageParts) != 2 {
				res.Message <- fmt.Sprintf(invalidImageFormat, image)
				return
			}
			imageRepo := imageParts[0]
			imageTag := imageParts[1]
			// Check the error before touching the response; dereferencing a
			// nil *http.Response was a panic in the original code.
			r, err := c.client.Get(fmt.Sprintf(
				"https://index.docker.io/v1/repositories/%s/tags/%s",
				imageRepo, imageTag))
			if err != nil {
				res.Message <- fmt.Sprintf(invalidImage, imageTag, imageRepo)
				return
			}
			r.Body.Close()
			if r.StatusCode != http.StatusOK {
				res.Message <- fmt.Sprintf(invalidImage, imageTag, imageRepo)
				return
			}
			output := ""
			// Set a dummy image first so kubernetes redeploys even when the
			// requested tag matches the one currently running.
			for _, imageName := range []string{"dummy", image} {
				for _, entityType := range []string{"deployment", "statefulset"} {
					output = execute(fmt.Sprintf(commandString, entityType, app, container, imageName))
					if !isNotFound(output) {
						break
					}
				}
			}
			if isNotFound(output) {
				res.Message <- fmt.Sprintf(appNotFound, app)
				return
			}
			res.Message <- fmt.Sprintf(cmdResponse, output)
			// Give the rollout time to progress, then report pod status.
			time.Sleep(time.Second * 20)
			res.Message <- fmt.Sprintf(cmdResponse, execute(fmt.Sprintf(statusCommandString, app)))
		}()
		return res, nil
	}
}
type resetCommand struct {
lock sync.Locker
clusterName string
}
func (c *resetCommand) Func3() func(*bot.Cmd) (bot.CmdResultV3, error) {
panic("stub")
}
func NewResetCommand(clusterName string) Command {
return &resetCommand{
lock: &sync.Mutex{},
clusterName: clusterName,
}
}
func (c *resetCommand) Func() func(*bot.Cmd) (string, error) {
return func(cmd *bot.Cmd) (s string, e error) {
c.lock.Lock()
defer c.lock.Unlock()
msg, isWrong := wrongClusterName(cmd, c.clusterName)
if isWrong {
return msg, nil
}
if len(cmd.Args) != 2 {
return fmt.Sprintf(invalidResetSyntax, c.clusterName, strings.Join(cmd.Args, " ")), nil
}
app := cmd.Args[1]
output, err := c.executeSequence(app)
if err != nil {
return "", err
}
if isNotFound(output) {
return fmt.Sprintf(appNotFound, app), nil
}
return fmt.Sprintf(cmdResponse, output), nil
}
}
// executeSequence dumps the app's stateful set to a scratch file, deletes the
// stateful set and its persistent volume claims, then re-applies the saved
// definition. It returns the concatenated kubectl output, or the raw lookup
// output unchanged when the stateful set does not exist.
//
// Fix: the original never closed the file created by os.Create, leaking the
// descriptor and risking unflushed content before "kubectl apply -f" read it.
func (c *resetCommand) executeSequence(app string) (string, error) {
	filename := "/tmp/st.yaml"
	pvcDeleteCmd := "kubectl delete pvc -l app=%s"
	statefulSetStoreCmd := "kubectl get statefulsets.apps %s -o=yaml"
	statefulSetDeleteCmd := "kubectl delete -f /tmp/st.yaml"
	statefulSetApplyCmd := "kubectl apply -f /tmp/st.yaml"
	// Remove the scratch file regardless of the outcome.
	defer func() {
		_ = os.Remove(filename)
	}()
	statefulSetYaml := execute(fmt.Sprintf(statefulSetStoreCmd, app))
	if isNotFound(statefulSetYaml) {
		return statefulSetYaml, nil
	}
	f, err := os.Create(filename)
	if err != nil {
		return "", err
	}
	_, err = f.WriteString(statefulSetYaml)
	closeErr := f.Close() // flush before kubectl apply reads the file
	if err != nil {
		return "", err
	}
	if closeErr != nil {
		return "", closeErr
	}
	output := make([]string, 0, 3)
	output = append(output, execute(statefulSetDeleteCmd))
	output = append(output, execute(fmt.Sprintf(pvcDeleteCmd, app)))
	output = append(output, execute(statefulSetApplyCmd))
	return strings.Join(output, "\n"), nil
}
func (c *resetCommand) Register() {
bot.RegisterCommand(
"reset",
"Kubectl reset abstraction to allow removing pvc for stateful sets by app label and recreating them",
fmt.Sprintf("%s your_app", c.clusterName),
c.Func())
}
type p2pCommand struct {
lock sync.Locker
clusterName string
}
func (c *p2pCommand) Func3() func(*bot.Cmd) (bot.CmdResultV3, error) {
panic("stub")
}
func NewP2PCommand(clusterName string) Command {
return &p2pCommand{
lock: &sync.Mutex{},
clusterName: clusterName,
}
}
func (c *p2pCommand) Func() func(*bot.Cmd) (string, error) {
cmdStr := "kubectl get service %s-p2p -o jsonpath={@.status.loadBalancer.ingress[0].ip}"
return func(cmd *bot.Cmd) (s string, e error) {
c.lock.Lock()
defer c.lock.Unlock()
msg, isWrong := wrongClusterName(cmd, c.clusterName)
if isWrong {
return msg, nil
}
if len(cmd.Args) != 2 {
return fmt.Sprintf(invalidP2PSyntax, c.clusterName, strings.Join(cmd.Args, " ")), nil
}
app := cmd.Args[1]
output := execute(fmt.Sprintf(cmdStr, app))
if isNotFound(output) {
return fmt.Sprintf(appNotFound, app), nil
}
return fmt.Sprintf(cmdResponse, output), nil
}
}
// executeSequence deletes and recreates the app's stateful set together with
// its persistent volume claims, using /tmp/st.yaml as a scratch file.
//
// NOTE(review): this method is byte-for-byte identical to
// (*resetCommand).executeSequence and is not called by any p2pCommand handler
// in this file — it looks like copy-paste leftover. Confirm it is unused
// elsewhere in the package and consider removing it.
// NOTE(review): the file created by os.Create is never closed before
// "kubectl apply -f" reads it; if this code is kept, add an f.Close().
func (c *p2pCommand) executeSequence(app string) (string, error) {
	filename := "/tmp/st.yaml"
	pvcDeleteCmd := "kubectl delete pvc -l app=%s"
	statefulSetStoreCmd := "kubectl get statefulsets.apps %s -o=yaml"
	statefulSetDeleteCmd := "kubectl delete -f /tmp/st.yaml"
	statefulSetApplyCmd := "kubectl apply -f /tmp/st.yaml"
	// Remove the scratch file regardless of the outcome.
	defer func() {
		_ = os.Remove(filename)
	}()
	output := make([]string, 0)
	statefulSetYaml := execute(fmt.Sprintf(statefulSetStoreCmd, app))
	if isNotFound(statefulSetYaml) {
		return statefulSetYaml, nil
	}
	f, err := os.Create(filename)
	if err != nil {
		return "", err
	}
	_, err = f.WriteString(statefulSetYaml)
	if err != nil {
		return "", err
	}
	output = append(output, execute(statefulSetDeleteCmd))
	output = append(output, execute(fmt.Sprintf(pvcDeleteCmd, app)))
	output = append(output, execute(statefulSetApplyCmd))
	return strings.Join(output, "\n"), nil
}
func (c *p2pCommand) Register() {
bot.RegisterCommand(
"p2p",
"Kubectl p2p allows to view external p2p ip for your app",
fmt.Sprintf("%s your_app", c.clusterName),
c.Func())
} | commands.go | 0.537527 | 0.529203 | commands.go | starcoder |
package machine
import (
"errors"
"github.com/offchainlabs/arbitrum/packages/arb-util/protocol"
)
type AssertionDefender struct {
assertion *protocol.Assertion
precondition *protocol.Precondition
initState Machine
}
func NewAssertionDefender(assertion *protocol.Assertion, precondition *protocol.Precondition, initState Machine) AssertionDefender {
return AssertionDefender{assertion, precondition, initState.Clone()}
}
func (ad AssertionDefender) NumSteps() uint32 {
return ad.assertion.NumSteps
}
func (ad AssertionDefender) GetAssertion() *protocol.Assertion {
return ad.assertion
}
func (ad AssertionDefender) GetPrecondition() *protocol.Precondition {
return ad.precondition
}
func (ad AssertionDefender) GetMachineState() Machine {
return ad.initState
}
// NBisect splits the defended assertion into up to `slices` consecutive
// sub-assertions by re-executing the machine from the defender's initial
// state. When the assertion has fewer steps than the requested slice count,
// one defender per step is produced. The remainder of steps/slices is spread
// as one extra step over each of the first (steps % slices) segments, so the
// segment lengths sum to the original step count. Each segment's generated
// postcondition becomes the next segment's precondition.
//
// NOTE(review): slices == 0 with a non-zero step count would divide by zero
// below — callers presumably always pass a positive count; confirm.
func (ad AssertionDefender) NBisect(slices uint32) []AssertionDefender {
	nsteps := ad.NumSteps()
	if nsteps < slices {
		slices = nsteps
	}
	sliceSize := nsteps / slices
	defenders := make([]AssertionDefender, 0, slices)
	m := ad.initState.Clone()
	pre := ad.precondition
	for i := uint32(0); i < slices; i++ {
		// Snapshot the machine before executing this segment; it becomes the
		// segment defender's initial state.
		initState := m.Clone()
		stepCount := sliceSize
		if i < nsteps%slices {
			stepCount++
		}
		assertion := m.ExecuteAssertion(int32(stepCount), pre.TimeBounds)
		defenders = append(defenders, NewAssertionDefender(
			assertion,
			pre,
			initState,
		))
		pre = assertion.Stub().GeneratePostcondition(pre)
	}
	return defenders
}
func (ad AssertionDefender) SolidityOneStepProof() ([]byte, error) {
return ad.initState.MarshalForProof()
}
// ChooseAssertionToChallenge re-executes each claimed assertion locally, in
// order, and returns the index of the first segment whose locally generated
// result disagrees with the claim, together with the machine state captured
// just before executing that segment. An error is returned when every segment
// checks out (there is nothing to challenge).
func ChooseAssertionToChallenge(m Machine, assertions []*protocol.AssertionStub, preconditions []*protocol.Precondition) (uint16, Machine, error) {
	for i := range assertions {
		// Snapshot before executing; this is the state a challenge starts from.
		initState := m.Clone()
		generatedAssertion := m.ExecuteAssertion(
			int32(assertions[i].NumSteps),
			preconditions[i].TimeBounds,
		)
		if !generatedAssertion.Stub().Equals(assertions[i]) {
			return uint16(i), initState, nil
		}
	}
	return 0, nil, errors.New("all segments in false Assertion are valid")
} | packages/arb-util/machine/defender.go | 0.628635 | 0.474388 | defender.go | starcoder
package gamemap
import (
"math"
"math/rand"
"time"
)
// Vector3 is a 3D vector used to represent map positions.
type Vector3 struct {
	X float64
	Y float64
	Z float64
}

// NewVector3 builds a vector from its three components.
func NewVector3(x, y, z float64) Vector3 {
	return Vector3{X: x, Y: y, Z: z}
}

// Vector3Zero returns the zero vector.
func Vector3Zero() Vector3 {
	return Vector3{}
}

// IsEqual reports whether v and r are equal component-wise, within the
// smallest representable tolerance (effectively exact equality).
func (v Vector3) IsEqual(r Vector3) bool {
	const eps = math.SmallestNonzeroFloat64
	for _, d := range [3]float64{v.X - r.X, v.Y - r.Y, v.Z - r.Z} {
		if d > eps || d < -eps {
			return false
		}
	}
	return true
}

// Add returns the component-wise sum v + o.
func (v Vector3) Add(o Vector3) Vector3 {
	return Vector3{X: v.X + o.X, Y: v.Y + o.Y, Z: v.Z + o.Z}
}

// AddS adds o to v in place.
func (v *Vector3) AddS(o Vector3) {
	*v = v.Add(o)
}

// Sub returns the component-wise difference v - o.
func (v Vector3) Sub(o Vector3) Vector3 {
	return Vector3{X: v.X - o.X, Y: v.Y - o.Y, Z: v.Z - o.Z}
}

// SubS subtracts o from v in place.
func (v *Vector3) SubS(o Vector3) {
	*v = v.Sub(o)
}

// Mul returns v scaled by o.
func (v Vector3) Mul(o float64) Vector3 {
	return Vector3{X: v.X * o, Y: v.Y * o, Z: v.Z * o}
}

// MulS scales v in place by o.
func (v *Vector3) MulS(o float64) {
	*v = v.Mul(o)
}

// Cross returns the cross product v × o.
func (v Vector3) Cross(o Vector3) Vector3 {
	return Vector3{
		X: v.Y*o.Z - v.Z*o.Y,
		Y: v.Z*o.X - v.X*o.Z,
		Z: v.X*o.Y - v.Y*o.X,
	}
}

// Dot returns the dot product v · o.
func (v Vector3) Dot(o Vector3) float64 {
	return v.X*o.X + v.Y*o.Y + v.Z*o.Z
}

// Len returns the Euclidean length of v.
func (v Vector3) Len() float64 {
	return math.Sqrt(v.Dot(v))
}

// Normalize scales v in place to unit length; vectors of (near-)zero length
// are left untouched to avoid dividing by zero.
func (v *Vector3) Normalize() {
	length := v.Len()
	if length < math.SmallestNonzeroFloat64 {
		return
	}
	v.X /= length
	v.Y /= length
	v.Z /= length
}
// RandXZ returns a random point on the XZ plane within radius r of v
// (Y offset is zero): a uniform random angle and a radius in [0, r).
//
// NOTE(review): the radius is sampled linearly, so points cluster toward the
// center rather than being uniform over the disc — confirm that is intended.
// NOTE(review): a fresh time-seeded rand.Source is built on every call; two
// calls within the same nanosecond return identical points. A shared,
// once-seeded source would avoid this.
func RandXZ(v Vector3, r float32) Vector3 {
	randSeed := rand.New(rand.NewSource(time.Now().UnixNano()))
	tarR := randSeed.Float64() * float64(r)
	angle := randSeed.Float64() * 2 * math.Pi
	pos := Vector3{}
	pos.Y = 0
	pos.X = math.Cos(angle) * tarR
	pos.Z = math.Sin(angle) * tarR
	return v.Add(pos)
} | gamemap/vector3.go | 0.583559 | 0.603581 | vector3.go | starcoder
package syntaxtree
import (
"bytes"
"github.com/manishmeganathan/tunalang/lexer"
)
// A structure that represents a Let statement token
type LetStatement struct {
// Represents the lexological token 'LET'
Token lexer.Token
// Represents the identifier in the let statement
Name *Identifier
// Represents the value in the let statement
Value Expression
}
// A method of LetStatement to satisfy the Statement interface
func (ls *LetStatement) statementNode() {}
// A method of LetStatement that returns its token literal value
func (ls *LetStatement) TokenLiteral() string { return ls.Token.Literal }
// A method of LetStatement that returns its string representation, in the
// form "<let> <name> = <value>;" (the value part is optional).
func (ls *LetStatement) String() string {
	s := ls.TokenLiteral() + " " + ls.Name.String() + " = "
	// Append the assigned value only when one was parsed.
	if ls.Value != nil {
		s += ls.Value.String()
	}
	return s + ";"
}
// A structure that represents a Return statement token
type ReturnStatement struct {
// Represents the lexological token 'RETURN'
Token lexer.Token
// Represents the value in the return statement
ReturnValue Expression
}
// A method of ReturnStatement to satisfy the Statement interface
func (rs *ReturnStatement) statementNode() {}
// A method of ReturnStatement that returns its token literal value
func (rs *ReturnStatement) TokenLiteral() string { return rs.Token.Literal }
// A method of ReturnStatement that returns its string representation, in the
// form "<return> <value>;" (the value part is optional).
func (rs *ReturnStatement) String() string {
	s := rs.TokenLiteral() + " "
	// Append the return value only when one was parsed.
	if rs.ReturnValue != nil {
		s += rs.ReturnValue.String()
	}
	return s + ";"
}
// A structure that represents a statement wrapper for an expression
type ExpressionStatement struct {
// Represents the first token of the expression
Token lexer.Token
// Represents the full Expression
Expression Expression
}
// A method of ExpressionStatement to satisfy the Statement interface
func (es *ExpressionStatement) statementNode() {}
// A method of ExpressionStatement that returns its token literal value
func (es *ExpressionStatement) TokenLiteral() string { return es.Token.Literal }
// String renders the wrapped expression, or "" when no expression is present.
func (es *ExpressionStatement) String() string {
	if es.Expression == nil {
		return ""
	}
	return es.Expression.String()
}
// BlockStatement represents a brace-delimited sequence of statements.
type BlockStatement struct {
	// Token holds the '{' token that opened the block.
	Token lexer.Token
	// Statements holds the statements inside the block, in source order.
	Statements []Statement
}

// statementNode marks BlockStatement as a Statement (interface tag method).
func (bs *BlockStatement) statementNode() {}

// TokenLiteral returns the literal text of the opening '{' token.
func (bs *BlockStatement) TokenLiteral() string { return bs.Token.Literal }
// A method of BlockStatement that returns its string representation
func (bs *BlockStatement) String() string {
// Declare the bytes buffer
var out bytes.Buffer
// Iterate over the block statements
for _, s := range bs.Statements {
// Add its string representation to the buffer
out.WriteString(s.String())
}
// Return the string from the buffer
return out.String()
} | syntaxtree/statements.go | 0.830147 | 0.459622 | statements.go | starcoder |
package iso20022
// FundSettlementParameters11 holds the parameters applied to the settlement of
// a security. Optional elements carry the ",omitempty" XML tag.
type FundSettlementParameters11 struct {
	// Date and time at which the securities are to be delivered or received.
	SettlementDate *ISODate `xml:"SttlmDt,omitempty"`
	// Place where the settlement of the transaction will take place. In the context of investment funds, the place of settlement is the transfer agent, a Central Securities Depository (CSD) or an International Central Securities Depository (ICSD).
	SettlementPlace *PartyIdentification113 `xml:"SttlmPlc"`
	// Place where the securities are safe-kept, physically or notionally. This place can be, for example, a local custodian, a Central Securities Depository or an International Central Securities Depository.
	SafekeepingPlace *SafekeepingPlaceFormat8Choice `xml:"SfkpgPlc,omitempty"`
	// Identification of a specific system or set of rules and/or processes to be applied at the settlement place.
	SecuritiesSettlementSystemIdentification *Max35Text `xml:"SctiesSttlmSysId,omitempty"`
	// Condition under which the order/trade is to be/was executed. This may be required for settlement through T2S.
	TradeTransactionCondition []*TradeTransactionCondition8Choice `xml:"TradTxCond,omitempty"`
	// Condition under which the order/trade is to be settled. This may be required for settlement through T2S.
	SettlementTransactionCondition []*SettlementTransactionCondition30Choice `xml:"SttlmTxCond,omitempty"`
	// Chain of parties involved in the settlement of a transaction resulting in the movement of a security from one account to another.
	ReceivingSideDetails *ReceivingPartiesAndAccount16 `xml:"RcvgSdDtls"`
	// Chain of parties involved in the settlement of a transaction resulting in the movement of a security from one account to another.
	DeliveringSideDetails *DeliveringPartiesAndAccount16 `xml:"DlvrgSdDtls,omitempty"`
}
// SetSettlementDate stores value as the settlement date.
func (f *FundSettlementParameters11) SetSettlementDate(value string) {
	f.SettlementDate = (*ISODate)(&value)
}

// AddSettlementPlace allocates the settlement place and returns it for population.
func (f *FundSettlementParameters11) AddSettlementPlace() *PartyIdentification113 {
	f.SettlementPlace = new(PartyIdentification113)
	return f.SettlementPlace
}

// AddSafekeepingPlace allocates the safekeeping place and returns it for population.
func (f *FundSettlementParameters11) AddSafekeepingPlace() *SafekeepingPlaceFormat8Choice {
	f.SafekeepingPlace = new(SafekeepingPlaceFormat8Choice)
	return f.SafekeepingPlace
}

// SetSecuritiesSettlementSystemIdentification stores value as the settlement system identification.
func (f *FundSettlementParameters11) SetSecuritiesSettlementSystemIdentification(value string) {
	f.SecuritiesSettlementSystemIdentification = (*Max35Text)(&value)
}

// AddTradeTransactionCondition appends a new trade transaction condition and returns it for population.
func (f *FundSettlementParameters11) AddTradeTransactionCondition() *TradeTransactionCondition8Choice {
	newValue := new(TradeTransactionCondition8Choice)
	f.TradeTransactionCondition = append(f.TradeTransactionCondition, newValue)
	return newValue
}

// AddSettlementTransactionCondition appends a new settlement transaction condition and returns it for population.
func (f *FundSettlementParameters11) AddSettlementTransactionCondition() *SettlementTransactionCondition30Choice {
	newValue := new(SettlementTransactionCondition30Choice)
	f.SettlementTransactionCondition = append(f.SettlementTransactionCondition, newValue)
	return newValue
}
func (f *FundSettlementParameters11) AddReceivingSideDetails() *ReceivingPartiesAndAccount16 {
f.ReceivingSideDetails = new(ReceivingPartiesAndAccount16)
return f.ReceivingSideDetails
}
func (f *FundSettlementParameters11) AddDeliveringSideDetails() *DeliveringPartiesAndAccount16 {
f.DeliveringSideDetails = new(DeliveringPartiesAndAccount16)
return f.DeliveringSideDetails
} | FundSettlementParameters11.go | 0.798972 | 0.444565 | FundSettlementParameters11.go | starcoder |
package plaid
import (
"encoding/json"
)
// SecurityOverride Specify the security associated with the holding or investment transaction. When inputting custom security data to the Sandbox, Plaid will perform post-data-retrieval normalization and enrichment. These processes may cause the data returned by the Sandbox to be slightly different from the data you input. An ISO-4217 currency code and a security identifier (`ticker_symbol`, `cusip`, `isin`, or `sedol`) are required.
type SecurityOverride struct {
	// 12-character ISIN, a globally unique securities identifier.
	Isin *string `json:"isin,omitempty"`
	// 9-character CUSIP, an identifier assigned to North American securities.
	Cusip *string `json:"cusip,omitempty"`
	// 7-character SEDOL, an identifier assigned to securities in the UK.
	Sedol *string `json:"sedol,omitempty"`
	// A descriptive name for the security, suitable for display.
	Name *string `json:"name,omitempty"`
	// The security’s trading symbol for publicly traded securities, and otherwise a short identifier if available.
	TickerSymbol *string `json:"ticker_symbol,omitempty"`
	// Either a valid `iso_currency_code` or `unofficial_currency_code`
	Currency *string `json:"currency,omitempty"`
}

// NewSecurityOverride instantiates a new SecurityOverride object.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed.
func NewSecurityOverride() *SecurityOverride {
	this := SecurityOverride{}
	return &this
}

// NewSecurityOverrideWithDefaults instantiates a new SecurityOverride object.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set.
func NewSecurityOverrideWithDefaults() *SecurityOverride {
	this := SecurityOverride{}
	return &this
}
// GetIsin returns the Isin field value if set, and the zero value otherwise.
func (o *SecurityOverride) GetIsin() string {
	if o == nil || o.Isin == nil {
		var ret string
		return ret
	}
	return *o.Isin
}

// GetIsinOk returns a tuple with the Isin field value if set, nil otherwise,
// and a boolean to check if the value has been set.
func (o *SecurityOverride) GetIsinOk() (*string, bool) {
	if o == nil || o.Isin == nil {
		return nil, false
	}
	return o.Isin, true
}

// HasIsin returns a boolean indicating whether the Isin field has been set.
func (o *SecurityOverride) HasIsin() bool {
	if o != nil && o.Isin != nil {
		return true
	}
	return false
}

// SetIsin gets a reference to the given string and assigns it to the Isin field.
func (o *SecurityOverride) SetIsin(v string) {
	o.Isin = &v
}

// GetCusip returns the Cusip field value if set, and the zero value otherwise.
func (o *SecurityOverride) GetCusip() string {
	if o == nil || o.Cusip == nil {
		var ret string
		return ret
	}
	return *o.Cusip
}

// GetCusipOk returns a tuple with the Cusip field value if set, nil otherwise,
// and a boolean to check if the value has been set.
func (o *SecurityOverride) GetCusipOk() (*string, bool) {
	if o == nil || o.Cusip == nil {
		return nil, false
	}
	return o.Cusip, true
}

// HasCusip returns a boolean indicating whether the Cusip field has been set.
func (o *SecurityOverride) HasCusip() bool {
	if o != nil && o.Cusip != nil {
		return true
	}
	return false
}

// SetCusip gets a reference to the given string and assigns it to the Cusip field.
func (o *SecurityOverride) SetCusip(v string) {
	o.Cusip = &v
}

// GetSedol returns the Sedol field value if set, and the zero value otherwise.
func (o *SecurityOverride) GetSedol() string {
	if o == nil || o.Sedol == nil {
		var ret string
		return ret
	}
	return *o.Sedol
}

// GetSedolOk returns a tuple with the Sedol field value if set, nil otherwise,
// and a boolean to check if the value has been set.
func (o *SecurityOverride) GetSedolOk() (*string, bool) {
	if o == nil || o.Sedol == nil {
		return nil, false
	}
	return o.Sedol, true
}

// HasSedol returns a boolean indicating whether the Sedol field has been set.
func (o *SecurityOverride) HasSedol() bool {
	if o != nil && o.Sedol != nil {
		return true
	}
	return false
}

// SetSedol gets a reference to the given string and assigns it to the Sedol field.
func (o *SecurityOverride) SetSedol(v string) {
	o.Sedol = &v
}

// GetName returns the Name field value if set, and the zero value otherwise.
func (o *SecurityOverride) GetName() string {
	if o == nil || o.Name == nil {
		var ret string
		return ret
	}
	return *o.Name
}

// GetNameOk returns a tuple with the Name field value if set, nil otherwise,
// and a boolean to check if the value has been set.
func (o *SecurityOverride) GetNameOk() (*string, bool) {
	if o == nil || o.Name == nil {
		return nil, false
	}
	return o.Name, true
}

// HasName returns a boolean indicating whether the Name field has been set.
func (o *SecurityOverride) HasName() bool {
	if o != nil && o.Name != nil {
		return true
	}
	return false
}

// SetName gets a reference to the given string and assigns it to the Name field.
func (o *SecurityOverride) SetName(v string) {
	o.Name = &v
}

// GetTickerSymbol returns the TickerSymbol field value if set, and the zero value otherwise.
func (o *SecurityOverride) GetTickerSymbol() string {
	if o == nil || o.TickerSymbol == nil {
		var ret string
		return ret
	}
	return *o.TickerSymbol
}

// GetTickerSymbolOk returns a tuple with the TickerSymbol field value if set, nil otherwise,
// and a boolean to check if the value has been set.
func (o *SecurityOverride) GetTickerSymbolOk() (*string, bool) {
	if o == nil || o.TickerSymbol == nil {
		return nil, false
	}
	return o.TickerSymbol, true
}

// HasTickerSymbol returns a boolean indicating whether the TickerSymbol field has been set.
func (o *SecurityOverride) HasTickerSymbol() bool {
	if o != nil && o.TickerSymbol != nil {
		return true
	}
	return false
}

// SetTickerSymbol gets a reference to the given string and assigns it to the TickerSymbol field.
func (o *SecurityOverride) SetTickerSymbol(v string) {
	o.TickerSymbol = &v
}

// GetCurrency returns the Currency field value if set, and the zero value otherwise.
func (o *SecurityOverride) GetCurrency() string {
	if o == nil || o.Currency == nil {
		var ret string
		return ret
	}
	return *o.Currency
}

// GetCurrencyOk returns a tuple with the Currency field value if set, nil otherwise,
// and a boolean to check if the value has been set.
func (o *SecurityOverride) GetCurrencyOk() (*string, bool) {
	if o == nil || o.Currency == nil {
		return nil, false
	}
	return o.Currency, true
}

// HasCurrency returns a boolean indicating whether the Currency field has been set.
func (o *SecurityOverride) HasCurrency() bool {
	if o != nil && o.Currency != nil {
		return true
	}
	return false
}

// SetCurrency gets a reference to the given string and assigns it to the Currency field.
func (o *SecurityOverride) SetCurrency(v string) {
	o.Currency = &v
}
// MarshalJSON serializes SecurityOverride, emitting only the fields that are set.
func (o SecurityOverride) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	// set records a field under key only when its pointer is non-nil.
	set := func(key string, value *string) {
		if value != nil {
			out[key] = value
		}
	}
	set("isin", o.Isin)
	set("cusip", o.Cusip)
	set("sedol", o.Sedol)
	set("name", o.Name)
	set("ticker_symbol", o.TickerSymbol)
	set("currency", o.Currency)
	return json.Marshal(out)
}
// NullableSecurityOverride wraps a SecurityOverride together with an
// explicit "has been set" flag, so JSON null can be distinguished from unset.
type NullableSecurityOverride struct {
	value *SecurityOverride
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableSecurityOverride) Get() *SecurityOverride {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableSecurityOverride) Set(val *SecurityOverride) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableSecurityOverride) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableSecurityOverride) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableSecurityOverride wraps val in an already-set NullableSecurityOverride.
func NewNullableSecurityOverride(val *SecurityOverride) *NullableSecurityOverride {
	return &NullableSecurityOverride{value: val, isSet: true}
}
func (v NullableSecurityOverride) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableSecurityOverride) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_security_override.go | 0.849191 | 0.499939 | model_security_override.go | starcoder |
package fn
import (
"math"
)
// NewTan returns an elementwise tangent operator over x.
func NewTan(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  tan,
		df: tanDeriv,
	}
}

// NewTanh returns an elementwise hyperbolic-tangent operator over x.
func NewTanh(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  tanh,
		df: tanhDeriv,
	}
}

// NewSigmoid returns an elementwise logistic-sigmoid operator over x.
func NewSigmoid(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  sigmoid,
		df: sigmoidDeriv,
	}
}

// NewHardSigmoid returns an elementwise piecewise-linear sigmoid operator over x.
func NewHardSigmoid(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  hardSigmoid,
		df: hardSigmoidDeriv,
	}
}

// NewHardTanh returns an elementwise clipped-linear tanh operator over x.
func NewHardTanh(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  hardTanh,
		df: hardTanhDeriv,
	}
}

// NewReLU returns an elementwise rectified-linear operator over x.
func NewReLU(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  relu,
		df: reluDeriv,
	}
}

// NewSoftsign returns an elementwise softsign operator over x.
func NewSoftsign(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  softsign,
		df: softsignDeriv,
	}
}

// NewCos returns an elementwise cosine operator over x.
func NewCos(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  func(i, j int, v float64) float64 { return math.Cos(v) },
		df: func(i, j int, v float64) float64 { return -math.Sin(v) },
	}
}

// NewSin returns an elementwise sine operator over x.
func NewSin(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  func(i, j int, v float64) float64 { return math.Sin(v) },
		df: func(i, j int, v float64) float64 { return math.Cos(v) },
	}
}

// NewExp returns an elementwise exponential operator over x (exp is its own derivative).
func NewExp(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  func(i, j int, v float64) float64 { return math.Exp(v) },
		df: func(i, j int, v float64) float64 { return math.Exp(v) },
	}
}

// NewLog returns an elementwise natural-logarithm operator over x,
// using safeLog which clamps zero inputs and panics on negatives.
func NewLog(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  safeLog,
		df: safeLogDeriv,
	}
}

// NewNeg returns an elementwise negation operator over x.
func NewNeg(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  func(i, j int, v float64) float64 { return -v },
		df: func(i, j int, v float64) float64 { return -1.0 },
	}
}

// NewReciprocal returns an elementwise 1/x operator over x.
func NewReciprocal(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  func(i, j int, v float64) float64 { return 1.0 / v },
		df: func(i, j int, v float64) float64 { return -1.0 / (v * v) },
	}
}

// NewAbs returns an elementwise absolute-value operator over x.
func NewAbs(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  func(i, j int, v float64) float64 { return math.Abs(v) },
		df: absDeriv,
	}
}

// NewMish returns an elementwise Mish activation operator over x.
func NewMish(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  mish,
		df: mishDeriv,
	}
}

// NewGeLU returns an elementwise GeLU (tanh approximation) operator over x.
func NewGeLU(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  gelu,
		df: geluDeriv,
	}
}

// NewSqrt returns an elementwise square-root operator over x.
func NewSqrt(x Operand) *UnaryElementwise {
	return &UnaryElementwise{
		x:  x,
		f:  func(i, j int, v float64) float64 { return math.Sqrt(v) },
		df: func(i, j int, v float64) float64 { return 0.5 * math.Pow(v, -0.5) },
	}
}
// absDeriv is the derivative of |v|: -1 for negative v, +1 for positive v,
// and 0 at v == 0 where the true derivative is undefined.
func absDeriv(i, j int, v float64) float64 {
	switch {
	case v < 0:
		return -1
	case v > 0:
		return 1
	default:
		return 0 // undefined at v == 0
	}
}
// safeLog is a work-around that makes math.Log safe for zero values: a zero
// input is clamped to log(1e-08). Negative values still panic.
func safeLog(i, j int, v float64) float64 {
	switch {
	case v > 0.0:
		return math.Log(v)
	case v == 0.0:
		return math.Log(1.0e-08)
	default:
		panic("ag: invalid log for negative values")
	}
}

// safeLogDeriv is the derivative of safeLog: 1/v, with the same zero clamp.
func safeLogDeriv(i, j int, v float64) float64 {
	switch {
	case v > 0.0:
		return 1.0 / v
	case v == 0.0:
		return 1.0 / 1.0e-08
	default:
		panic("ag: invalid log for negative values")
	}
}
// tan computes the trigonometric tangent of v.
func tan(i, j int, v float64) float64 {
	return math.Tan(v)
}

// tanDeriv is the derivative of tan: 1/cos^2(v).
func tanDeriv(i, j int, v float64) float64 {
	return 1.0 / square(i, j, math.Cos(v))
}

// square returns v*v (i and j are unused; kept for the elementwise signature).
func square(i, j int, v float64) float64 {
	return v * v
}

// tanh computes the hyperbolic tangent of v.
func tanh(i, j int, v float64) float64 {
	return math.Tanh(v)
}

// tanhDeriv is the derivative of tanh: 1 - tanh^2(v).
func tanhDeriv(i, j int, v float64) float64 {
	return 1.0 - math.Pow(math.Tanh(v), 2.0)
}
// sigmoid computes the logistic function 1 / (1 + e^-v).
func sigmoid(i, j int, v float64) float64 {
	denom := 1 + math.Exp(-v)
	return 1.0 / denom
}

// sigmoidDeriv is the derivative of sigmoid: s(v) * (1 - s(v)).
func sigmoidDeriv(i, j int, v float64) float64 {
	s := sigmoid(i, j, v)
	return s * (1.0 - s)
}
// hardSigmoid is a piecewise-linear sigmoid approximation:
// 0 below -2.5, 1 above 2.5, and 0.2*v + 0.5 in between.
func hardSigmoid(i, j int, v float64) float64 {
	switch {
	case v > 2.5:
		return 1.0
	case v < -2.5:
		return 0.0
	default:
		return 0.2*v + 0.5
	}
}

// hardSigmoidDeriv is the derivative of hardSigmoid: 0.2 on the linear
// segment and 0 on the saturated segments.
func hardSigmoidDeriv(i, j int, v float64) float64 {
	if -2.5 < v && v < 2.5 {
		return 0.2
	}
	return 0.0
}
// hardTanh clips v to the range [-1, 1].
func hardTanh(i, j int, v float64) float64 {
	switch {
	case v > 1.0:
		return 1.0
	case v < -1.0:
		return -1.0
	default:
		return v
	}
}

// hardTanhDeriv is the derivative of hardTanh: 1 inside (-1, 1), 0 outside.
func hardTanhDeriv(i, j int, v float64) float64 {
	if -1.0 < v && v < 1.0 {
		return 1.0
	}
	return 0.0
}
// relu computes max(0, v). Note that math.Max propagates NaN inputs.
func relu(i, j int, v float64) float64 {
	return math.Max(0.0, v)
}

// reluDeriv is the (sub)derivative of relu; the value 1 is used at v == 0.
func reluDeriv(i, j int, v float64) float64 {
	if v >= 0.0 {
		return 1.0
	}
	return 0.0
}

// softsign computes v / (1 + |v|).
func softsign(i, j int, v float64) float64 {
	return v / (1.0 + math.Abs(v))
}

// softsignDeriv is the derivative of softsign: (1 - |softsign(v)|)^2.
func softsignDeriv(i, j int, v float64) float64 {
	return math.Pow(1.0-math.Abs(softsign(i, j, v)), 2.0)
}
// celu computes the CELU activation; alpha[0] is the alpha parameter.
// Note: a NaN input falls through both branches and yields 0.
func celu(i, j int, v float64, alpha ...float64) float64 {
	if v <= 0 {
		return alpha[0] * (math.Exp(v/alpha[0]) - 1)
	} else if v > 0 {
		return v
	}
	return 0
}

// celuDeriv is the derivative of celu with respect to v.
func celuDeriv(i, j int, v float64, alpha ...float64) float64 {
	if v <= 0 {
		return math.Exp(v / alpha[0])
	} else if v > 0 {
		return 1
	}
	return 0
}

// elu computes the ELU activation; alpha[0] is the alpha parameter.
func elu(i, j int, v float64, alpha ...float64) float64 {
	if v <= 0 {
		return alpha[0] * (math.Exp(v) - 1)
	} else if v > 0 {
		return v
	}
	return 0
}

// eluDeriv is the derivative of elu with respect to v.
func eluDeriv(i, j int, v float64, alpha ...float64) float64 {
	if v <= 0 {
		return alpha[0] * math.Exp(v)
	} else if v > 0 {
		return 1
	}
	return 0
}

// leakyReLU computes the leaky ReLU activation; alpha[0] is the negative slope.
func leakyReLU(i, j int, v float64, alpha ...float64) float64 {
	if v <= 0 {
		return alpha[0] * v // slope * v
	} else if v > 0 {
		return v
	}
	return 0
}

// leakyReLUDeriv is the derivative of leakyReLU with respect to v.
func leakyReLUDeriv(i, j int, v float64, alpha ...float64) float64 {
	if v <= 0 {
		return alpha[0] // slope
	} else if v > 0 {
		return 1
	}
	return 0
}

// selu computes the scaled ELU activation.
// alpha[0] is the alpha
// alpha[1] is the scale
func selu(i, j int, v float64, alpha ...float64) float64 {
	scale := alpha[1]
	if v <= 0 {
		return scale * alpha[0] * (math.Exp(v) - 1)
	} else if v > 0 {
		return scale * v
	}
	return 0
}

// seluDeriv is the derivative of selu with respect to v.
// alpha[0] is the alpha
// alpha[1] is the scale
func seluDeriv(i, j int, v float64, alpha ...float64) float64 {
	scale := alpha[1]
	if v <= 0 {
		return scale * alpha[0] * math.Exp(v)
	} else if v > 0 {
		return scale
	}
	return 0
}

// softPlus computes the softplus activation; alpha[0] is beta and alpha[1]
// is the threshold beyond which the function reverts to identity.
func softPlus(i, j int, v float64, alpha ...float64) float64 {
	threshold := alpha[1]
	beta := alpha[0]
	if v <= threshold {
		return (1 / beta) * math.Log(1+math.Exp(beta*v))
	} else if v > threshold {
		return v
	}
	return 0
}

// softPlusDeriv is the derivative of softPlus with respect to v.
func softPlusDeriv(i, j int, v float64, alpha ...float64) float64 {
	threshold := alpha[1]
	beta := alpha[0]
	if v <= threshold {
		return math.Exp(v*beta) / (math.Exp(v*beta) + 1)
	} else if v > threshold {
		return 1
	}
	return 0
}

// softShrink shrinks v towards zero by lambda (alpha[0]); values inside
// [-lambda, lambda] collapse to 0.
func softShrink(i, j int, v float64, alpha ...float64) float64 {
	lambda := alpha[0]
	if v < -lambda {
		return v + lambda
	} else if v > lambda {
		return v - lambda
	}
	return 0
}

// softShrinkDeriv is the derivative of softShrink: 1 outside the dead zone, 0 inside.
func softShrinkDeriv(i, j int, v float64, alpha ...float64) float64 {
	lambda := alpha[0]
	if v < -lambda {
		return 1
	} else if v > lambda {
		return 1
	}
	return 0
}

// threshold replaces values at or below alpha[0] with alpha[1].
// The local variable intentionally shadows the function name.
func threshold(i, j int, v float64, alpha ...float64) float64 {
	value := alpha[1]
	threshold := alpha[0]
	if v <= threshold {
		return value
	} else if v > threshold {
		return v
	}
	return 0
}

// thresholdDeriv is the derivative of threshold with respect to v.
func thresholdDeriv(i, j int, v float64, alpha ...float64) float64 {
	threshold := alpha[0]
	if v <= threshold {
		return 0
	} else if v > threshold {
		return 1
	}
	return 0
}
// swish computes v * sigmoid(beta*v); beta[0] is the slope parameter.
func swish(i, j int, v float64, beta ...float64) float64 {
	return v * (1.0 / (1 + math.Exp(beta[0]*-v)))
}

// swishDeriv is the derivative of swish with respect to the input v.
func swishDeriv(i, j int, v float64, beta ...float64) float64 {
	prod := v * beta[0]
	exp := math.Exp(prod)
	return exp * (exp + prod + 1) / ((exp + 1) * (exp + 1))
}

// swishBetaDeriv is the derivative of swish with respect to the beta parameter.
func swishBetaDeriv(v float64, beta float64) float64 {
	prod := v * beta
	exp := math.Exp(-prod)
	return (v * v * exp) / ((exp + 1) * (exp + 1))
}
// Reference: "Mish: A Self Regularized Non-Monotonic Neural Activation Function" by <NAME>, 2019.
// (https://arxiv.org/pdf/1908.08681.pdf)
func mish(i, j int, v float64) float64 {
return v * math.Tanh(math.Log(1+math.Exp(v)))
}
func mishDeriv(i, j int, v float64) float64 {
exp := math.Exp(v)
exp2 := math.Exp(2 * v)
exp3 := math.Exp(3 * v)
omega := 4.0*(v+1.0) + 4.0*exp2 + exp3 + exp*(4.0*v+6.0)
delta := 2*exp + exp2 + 2.0
return exp * (omega / (delta * delta))
}
// gelu computes the tanh approximation of the Gaussian Error Linear Unit:
// 0.5 * v * (1 + tanh(sqrt(2/pi) * (v + 0.044715*v^3))).
func gelu(i, j int, v float64) float64 {
	inner := math.Sqrt(2/math.Pi) * (v + 0.044715*math.Pow(v, 3.0))
	return 0.5 * v * (1.0 + math.Tanh(inner))
}

// geluDeriv is the derivative of the gelu tanh approximation.
func geluDeriv(i, j int, x float64) float64 {
	x3 := math.Pow(x, 3)
	u := 0.0356774*x3 + 0.797885*x
	sech := 1.0 / math.Cosh(u)
	return 0.5*math.Tanh(u) + (0.0535161*x3+0.398942*x)*math.Pow(sech, 2) + 0.5
}
package namegen
// NameGenerator is a set of name pools to draw random names from.
type NameGenerator struct {
	// MaleFirstNames holds the pool of male first names.
	MaleFirstNames []string
	// FemaleFirstNames holds the pool of female first names.
	FemaleFirstNames []string
	// LastNames holds the pool of surnames.
	LastNames []string
}
// NameGeneratorFromType returns the name pools for the given origin; gender
// only influences origins with gendered surnames (dwarf, icelandic).
// An unknown origin yields the zero-value NameGenerator (empty pools).
// Note: the whole lookup map is rebuilt on every call.
func NameGeneratorFromType(origin, gender string) NameGenerator {
	nameGenerators := map[string]NameGenerator{
		"anglosaxon": {anglosaxonMaleFirstNames, anglosaxonFemaleFirstNames, anglosaxonLastNames},
		"dutch":      {dutchMaleFirstNames, dutchFemaleFirstNames, dutchLastNames},
		"dwarf":      {dwarfMaleFirstNames, dwarfFemaleFirstNames, getDwarfLastNames(gender)},
		"elf":        {elfMaleFirstNames, elfFemaleFirstNames, elfLastNames},
		"english":    {englishMaleFirstNames, englishFemaleFirstNames, englishLastNames},
		"estonian":   {estonianMaleFirstNames, estonianFemaleFirstNames, estonianLastNames},
		"fantasy":    {fantasyMaleFirstNames, fantasyFemaleFirstNames, fantasyLastNames},
		"finnish":    {finnishMaleFistNames, finnishFemaleFirstNames, finnishLastNames},
		"german":     {germanMaleFirstNames, germanFemaleFirstNames, germanLastNames},
		"greek":      {greekMaleFirstNames, greekFemaleFirstNames, greekLastNames},
		"hindu":      {hinduMaleFirstNames, hinduFemaleFirstNames, hinduLastNames},
		"icelandic":  {getIcelandicFirstNames(), getIcelandicFirstNames(), getIcelandicLastNames(gender)},
		"indonesian": {indonesianMaleFirstNames, indonesianFemaleFirstNames, indonesianLastNames},
		"italian":    {italianMaleFirstNames, italianFemaleFirstNames, italianLastNames},
		"japanese":   {japaneseMaleFirstNames, japaneseFemaleFirstNames, japaneseLastNames},
		"korean":     {koreanMaleFirstNames, koreanFemaleFirstNames, koreanLastNames},
		"nepalese":   {nepaleseMaleFirstNames, nepaleseFemaleFirstNames, nepaleseLastNames},
		"norwegian":  {norwegianMaleFirstNames, norwegianFemaleFirstNames, norwegianLastNames},
		"portuguese": {portugueseMaleFirstNames, portugueseFemaleFirstNames, portugueseLastNames},
		"russian":    {russianMaleFirstNames, russianFemaleFirstNames, russianLastNames},
		"spanish":    {spanishMaleFirstNames, spanishFemaleFirstNames, spanishLastNames},
		"swedish":    {swedishMaleFirstNames, swedishFemaleFirstNames, swedishLastNames},
		"thai":       {thaiMaleFirstNames, thaiFemaleFirstNames, thaiLastNames},
	}
	return nameGenerators[origin]
}
// LastName returns a random last name; errors come from RandomItem
// (presumably when the pool is empty — TODO confirm RandomItem's contract).
func (gen NameGenerator) LastName() (string, error) {
	return RandomItem(gen.LastNames)
}
// FirstName returns a random first name.
// gender selects the pool: "female" uses the female names, "both" draws from
// the combined male and female pools, anything else uses the male names.
func (gen NameGenerator) FirstName(gender string) (string, error) {
	var firstNames []string
	switch gender {
	case "female":
		firstNames = gen.FemaleFirstNames
	case "both":
		// Combine into a fresh slice: appending directly to gen.MaleFirstNames
		// could write into its backing array (when spare capacity exists) and
		// corrupt the generator's male name pool.
		firstNames = make([]string, 0, len(gen.MaleFirstNames)+len(gen.FemaleFirstNames))
		firstNames = append(firstNames, gen.MaleFirstNames...)
		firstNames = append(firstNames, gen.FemaleFirstNames...)
	default:
		firstNames = gen.MaleFirstNames
	}
	return RandomItem(firstNames)
}
// CompleteName returns a complete name
func (gen NameGenerator) CompleteName(gender string) (string, error) {
firstName, err := gen.FirstName(gender)
if err != nil {
return "", err
}
lastName, err := gen.LastName()
if err != nil {
return "", err
}
fullname := firstName + " " + lastName
return fullname, nil
} | namegen.go | 0.526343 | 0.450964 | namegen.go | starcoder |
package tt
import (
"fmt"
"path"
"reflect"
"regexp"
"runtime"
"testing"
)
// isEqual returns whether val1 is equal to val2 taking into account Pointers,
// Interfaces and their underlying types. Pointers are dereferenced, and typed
// nils (nil slice, nil map, ...) are normalized so they compare equal to nil.
func isEqual(val1, val2 interface{}) bool {
	v1 := reflect.ValueOf(val1)
	v2 := reflect.ValueOf(val2)

	// Dereference pointers so *T and T compare by pointed-to value.
	if v1.Kind() == reflect.Ptr {
		v1 = v1.Elem()
	}
	if v2.Kind() == reflect.Ptr {
		v2 = v2.Elem()
	}

	// Both sides were nil (or nil pointers): equal.
	if !v1.IsValid() && !v2.IsValid() {
		return true
	}

	// Normalize typed nils to the invalid reflect.Value.
	switch v1.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if v1.IsNil() {
			v1 = reflect.ValueOf(nil)
		}
	}
	switch v2.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if v2.IsNil() {
			v2 = reflect.ValueOf(nil)
		}
	}

	// Comparing each side against a zero Value of its own (reflect.Value) type
	// tells us whether it still wraps a usable value; the CASE labels pick the
	// right pairing of .Interface() calls for reflect.DeepEqual.
	v1Underlying := reflect.Zero(reflect.TypeOf(v1)).Interface()
	v2Underlying := reflect.Zero(reflect.TypeOf(v2)).Interface()

	if v1 == v1Underlying {
		if v2 == v2Underlying {
			goto CASE4
		} else {
			goto CASE3
		}
	} else {
		if v2 == v2Underlying {
			goto CASE2
		} else {
			goto CASE1
		}
	}

CASE1:
	// Both sides hold concrete values.
	return reflect.DeepEqual(v1.Interface(), v2.Interface())
CASE2:
	// Only val1 holds a concrete value.
	return reflect.DeepEqual(v1.Interface(), v2)
CASE3:
	// Only val2 holds a concrete value.
	return reflect.DeepEqual(v1, v2.Interface())
CASE4:
	// Neither side holds a concrete value; compare the reflect.Values directly.
	return reflect.DeepEqual(v1, v2)
}
// notMatchRegexSkip validates that value does NOT match the regex (either a
// string pattern or a *regexp.Regexp) and fails the test with the caller's
// file:line otherwise; skip tells it how far back on the stack to report.
// A pattern that fails to compile is also reported as a failure.
// This is a building block to creating your own more complex validation functions.
func notMatchRegexSkip(t *testing.T, skip int, value string, regex interface{}) {
	if r, ok, err := regexMatches(regex, value); ok || err != nil {
		_, file, line, _ := runtime.Caller(skip)
		if err != nil {
			// r is nil when a string pattern failed to compile, so report the
			// raw pattern instead of calling r.String() (which would panic).
			fmt.Printf("%s:%d %v error compiling regex %v\n", path.Base(file), line, value, regex)
		} else {
			fmt.Printf("%s:%d %v matches regex %v\n", path.Base(file), line, value, r.String())
		}
		t.FailNow()
	}
}
// matchRegexSkip validates that value matches the regex (either a string
// pattern or a *regexp.Regexp) and fails the test with the caller's file:line
// otherwise; skip tells it how far back on the stack to report.
// This is a building block to creating your own more complex validation functions.
func matchRegexSkip(t *testing.T, skip int, value string, regex interface{}) {
	if r, ok, err := regexMatches(regex, value); !ok {
		_, file, line, _ := runtime.Caller(skip)
		if err != nil {
			// r is nil when a string pattern failed to compile, so report the
			// raw pattern instead of calling r.String() (which would panic).
			fmt.Printf("%s:%d %v error compiling regex %v\n", path.Base(file), line, value, regex)
		} else {
			fmt.Printf("%s:%d %v does not match regex %v\n", path.Base(file), line, value, r.String())
		}
		t.FailNow()
	}
}
func regexMatches(regex interface{}, value string) (*regexp.Regexp, bool, error) {
var err error
r, ok := regex.(*regexp.Regexp)
// must be a string
if !ok {
if r, err = regexp.Compile(regex.(string)); err != nil {
return r, false, err
}
}
return r, r.MatchString(value), err
}
// equalSkip validates that expected is equal to actual and fails the test
// with the caller's file:line otherwise; skip tells equalSkip how far back on
// the stack to report the error.
// This is a building block to creating your own more complex validation functions.
func equalSkip(t *testing.T, skip int, expected, actual interface{}) {
	if !isEqual(expected, actual) {
		_, file, line, _ := runtime.Caller(skip)
		fmt.Printf("%s:%d %v does not equal %v\n", path.Base(file), line, expected, actual)
		t.FailNow()
	}
}

// notEqualSkip validates that unexpected is not equal to actual and fails the
// test with the caller's file:line otherwise; skip tells notEqualSkip how far
// back on the stack to report the error.
// This is a building block to creating your own more complex validation functions.
func notEqualSkip(t *testing.T, skip int, unexpected, actual interface{}) {
	if isEqual(unexpected, actual) {
		_, file, line, _ := runtime.Caller(skip)
		fmt.Printf("%s:%d %v should not be equal %v\n", path.Base(file), line, unexpected, actual)
		t.FailNow()
	}
}
// panicSkip validates that fn panics, failing the test otherwise; skip tells
// it how far back on the stack to report. Note that runtime.Caller is
// resolved at entry, before fn runs, so the reported location is the caller's.
func panicSkip(t *testing.T, skip int, fn func()) {
	_, file, line, _ := runtime.Caller(skip)
	defer func() {
		// recover() returning nil means fn completed without panicking.
		if r := recover(); r == nil {
			fmt.Printf("%s:%d Panic Expected, none found", path.Base(file), line)
			t.FailNow()
		}
	}()
	fn()
}
// panicMatchesSkip validates that the panic output of running fn matches the
// supplied string exactly (after %s formatting of the panic value); skip
// tells panicMatchesSkip how far back on the stack to report the error.
// This is a building block to creating your own more complex validation functions.
func panicMatchesSkip(t *testing.T, skip int, fn func(), matches string) {
	_, file, line, _ := runtime.Caller(skip)
	defer func() {
		if r := recover(); r != nil {
			// Format the recovered value so it can be compared as a string.
			err := fmt.Sprintf("%s", r)
			if err != matches {
				fmt.Printf("%s:%d Panic... expected [%s] received [%s]", path.Base(file), line, matches, err)
				t.FailNow()
			}
		} else {
			fmt.Printf("%s:%d Panic Expected, none found... expected [%s]", path.Base(file), line, matches)
			t.FailNow()
		}
	}()
	fn()
}
// getError formats a "file:line reason" message for the caller layer frames
// above this function (the +1 skips getError's own frame).
func getError(layer int, reasonFormat string, v ...interface{}) string {
	_, file, line, _ := runtime.Caller(layer + 1)
	reason := fmt.Sprintf(reasonFormat, v...)
	return fmt.Sprintf("%s:%d %s", file, line, reason)
}
func inMapSkip(t *testing.T, layer int, m interface{}, key interface{}) {
v := reflect.ValueOf(m)
if v.Kind() != reflect.Map {
fmt.Println(getError(layer, "argument passed is not a map"))
t.FailNow()
}
for _, k := range v.MapKeys() {
if k.Interface() == key {
return
}
}
fmt.Println(getError(layer, "key %v does not in map %#+v", key, m))
t.FailNow()
} | utils.go | 0.590543 | 0.418697 | utils.go | starcoder |
package measurements
import (
"fmt"
"math"
"sync"
)
// SimpleExponentialMovingAverage implements a simple exponential moving average.
// This implementation only uses a single alpha value to determine warm-up time
// and provides a mean approximation.
type SimpleExponentialMovingAverage struct {
	// alpha is the smoothing factor applied once warmed up.
	alpha float64
	// initialAlpha preserves the constructor's alpha so Reset can restore it.
	initialAlpha float64
	// minSamples is the number of warm-up samples, derived from alpha.
	minSamples int
	// seenSamples counts observed samples, capped at minSamples.
	seenSamples int
	// value is the current average.
	value float64

	// mu guards all of the fields above.
	mu sync.RWMutex
}
// NewSimpleExponentialMovingAverage creates a new simple moving average.
// alpha is the smoothing factor and must be in (0, 1]; ceil(1/alpha) samples
// are used as warm-up, during which a plain cumulative mean is produced.
func NewSimpleExponentialMovingAverage(
	alpha float64,
) (*SimpleExponentialMovingAverage, error) {
	// alpha == 0 must be rejected: 1/0 is +Inf and converting that to int for
	// minSamples is undefined, leaving the average permanently in warm-up.
	if alpha <= 0 || alpha > 1 {
		return nil, fmt.Errorf("alpha must be in (0, 1]")
	}
	// math.Ceil already yields an integral value, so no extra truncation is needed.
	minSamples := int(math.Ceil(1 / alpha))
	return &SimpleExponentialMovingAverage{
		alpha:        alpha,
		initialAlpha: alpha,
		minSamples:   minSamples,
	}, nil
}
// Add a single sample and update the internal state.
// It returns the current value and whether the internal state was changed.
func (m *SimpleExponentialMovingAverage) Add(value float64) (float64, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.add(value)
}

// add implements Add without locking; callers must hold m.mu.
// During warm-up (fewer than minSamples samples) the effective alpha is
// 1/seenSamples, which makes the estimate a plain cumulative mean; afterwards
// the configured alpha is used.
func (m *SimpleExponentialMovingAverage) add(value float64) (float64, bool) {
	changed := false
	if m.seenSamples < m.minSamples {
		m.seenSamples++
	}
	var alpha float64
	if m.seenSamples >= m.minSamples {
		alpha = m.alpha
	} else {
		alpha = 1 / float64(m.seenSamples)
	}
	// Standard EMA update: v' = (1-a)*v + a*sample.
	newValue := (1-alpha)*m.value + alpha*value
	if newValue != m.value {
		changed = true
	}
	m.value = newValue
	return m.value, changed
}
// Get the current value under a read lock.
func (m *SimpleExponentialMovingAverage) Get() float64 {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.value
}

// Reset the internal state as if no samples were ever added, restoring the
// alpha supplied at construction time.
func (m *SimpleExponentialMovingAverage) Reset() {
	m.mu.Lock()
	m.seenSamples = 0
	m.value = 0
	m.alpha = m.initialAlpha
	m.mu.Unlock()
}
// Update will update the value given an operation function
func (m *SimpleExponentialMovingAverage) Update(operation func(value float64) float64) {
m.mu.Lock()
defer m.mu.Unlock()
newValue, _ := m.add(m.value)
m.value = operation(newValue)
} | measurements/moving_average.go | 0.879755 | 0.494568 | moving_average.go | starcoder |
package p336
/**
Given a list of unique words, find all pairs of distinct indices (i, j) in the given list, so that the concatenation of the two words, i.e. words[i] + words[j] is a palindrome.
Example 1:
Given words = ["bat", "tab", "cat"]
Return [[0, 1], [1, 0]]
The palindromes are ["battab", "tabbat"]
Example 2:
Given words = ["abcd", "dcba", "lls", "s", "sssll"]
Return [[0, 1], [1, 0], [3, 2], [2, 4]]
The palindromes are ["dcbaabcd", "abcddcba", "slls", "llssssll"]
*/
// trieNode is a node of the reversed-word trie. word holds the 1-based index
// of the word ending at this node; 0 means no word ends here. children is
// indexed by letter offset from 'a' (lowercase input only).
type trieNode struct {
	word     int
	children [26]*trieNode
}

// Trie stores every input word reversed, rooted at root.
type Trie struct {
	root *trieNode
}
// dfsFind walks the entire subtree under n, collecting the indices of stored
// words for which the accumulated suffix s is itself a palindrome.
func (n *trieNode) dfsFind(s []byte) []int {
	matches := make([]int, 0)
	if n.word > 0 && isPalindrome(s) {
		matches = append(matches, n.word-1)
	}
	for c := byte(0); c < 26; c++ {
		child := n.children[c]
		if child == nil {
			continue
		}
		matches = append(matches, child.dfsFind(append(s, 'a'+c))...)
	}
	return matches
}
// insert stores ix at the node reached by spelling word from the root,
// creating intermediate nodes as needed. Callers pass index+1 so that 0 can
// mean "no word ends here".
func (t *Trie) insert(word []byte, ix int) {
	node := t.root
	for _, ch := range word {
		slot := ch - 'a'
		if node.children[slot] == nil {
			node.children[slot] = new(trieNode)
		}
		node = node.children[slot]
	}
	node.word = ix
}
// search returns the indices of all stored (reversed) words that pair with
// word to form a palindrome. While walking word through the trie, any stored
// word ending mid-walk matches if the remaining suffix of word is a
// palindrome; if word is fully consumed, every stored word in the remaining
// subtree whose leftover letters form a palindrome also matches.
func (t *Trie) search(word []byte) []int {
	res := make([]int, 0)
	node := t.root
	ix := 0
	for ix < len(word) {
		if node.word > 0 && isPalindrome(word[ix:]) {
			res = append(res, node.word-1)
		}
		next := node.children[word[ix]-'a']
		if next == nil {
			return res
		}
		node = next
		ix++
	}
	return append(res, node.dfsFind([]byte{})...)
}
// isPalindrome reports whether w reads the same forwards and backwards.
// The empty slice is a palindrome.
func isPalindrome(w []byte) bool {
	for i, j := 0, len(w)-1; i < j; i, j = i+1, j-1 {
		if w[i] != w[j] {
			return false
		}
	}
	return true
}
// reverse returns the bytes of s in reverse order.
func reverse(s string) []byte {
	n := len(s)
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = s[n-1-i]
	}
	return out
}
func palindromePairs(words []string) [][]int {
trie := Trie{root: &trieNode{}}
for i, v := range words {
trie.insert(reverse(v), i+1)
}
res := make([][]int, 0)
for i, v := range words {
pairs := trie.search([]byte(v))
for _, p := range pairs {
if p != i {
res = append(res, []int{i, p})
}
}
}
return res
} | algorithms/p336/336.go | 0.779616 | 0.474996 | 336.go | starcoder |
package yamlpath
import (
"errors"
"strings"
"unicode/utf8"
"github.com/dprotaso/go-yit"
"gopkg.in/yaml.v3"
)
// Path is a compiled YAML path expression.
// f maps a node, together with the document root (which filter expressions
// may reference), to an iterator over the matching subnodes.
type Path struct {
	f func(node, root *yaml.Node) yit.Iterator
}
// Find applies the Path to a YAML node and returns the addresses of the
// subnodes which match the Path.
func (p *Path) Find(node *yaml.Node) ([]*yaml.Node, error) {
	// Errors are currently impossible, but the signature reserves one.
	return p.find(node, node), nil
}

func (p *Path) find(node, root *yaml.Node) []*yaml.Node {
	matches := p.f(node, root)
	return matches.ToArray()
}
// NewPath constructs a Path from a string expression.
func NewPath(path string) (*Path, error) {
	l := lex("Path lexer", path)
	return newPath(l)
}
// newPath consumes lexemes from l and compiles them into a Path, recursing so
// that each matcher wraps the Path compiled from the remainder of the
// expression.
func newPath(l *lexer) (*Path, error) {
	lx := l.nextLexeme()
	switch lx.typ {
	case lexemeError:
		return nil, errors.New(lx.val)
	case lexemeIdentity, lexemeEOF:
		// End of expression: match the current node itself.
		return new(identity), nil
	case lexemeRoot:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		return new(func(node, root *yaml.Node) yit.Iterator {
			// "$" starts from the document's single content node.
			if node.Kind == yaml.DocumentNode {
				node = node.Content[0]
			}
			return compose(yit.FromNode(node), subPath, root)
		}), nil
	case lexemeRecursiveDescent:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		childName := strings.TrimPrefix(lx.val, "..")
		switch childName {
		case "*":
			// includes all nodes, not just mapping nodes
			return new(func(node, root *yaml.Node) yit.Iterator {
				return compose(yit.FromNode(node).RecurseNodes(), allChildrenThen(subPath), root)
			}), nil
		case "":
			// "..": every node in the subtree.
			return new(func(node, root *yaml.Node) yit.Iterator {
				return compose(yit.FromNode(node).RecurseNodes(), subPath, root)
			}), nil
		default:
			// "..name": the named child of every node in the subtree.
			return new(func(node, root *yaml.Node) yit.Iterator {
				return compose(yit.FromNode(node).RecurseNodes(), childThen(childName, subPath), root)
			}), nil
		}
	case lexemeDotChild:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		childName := strings.TrimPrefix(lx.val, ".")
		return childThen(childName, subPath), nil
	case lexemeUndottedChild:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		return childThen(lx.val, subPath), nil
	case lexemeBracketChild:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		// Strip the surrounding brackets and whitespace; the names inside may
		// still be quoted and comma-separated.
		childNames := strings.TrimSpace(lx.val)
		childNames = strings.TrimSuffix(strings.TrimPrefix(childNames, "["), "]")
		childNames = strings.TrimSpace(childNames)
		return bracketChildThen(childNames, subPath), nil
	case lexemeArraySubscript:
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		subscript := strings.TrimSuffix(strings.TrimPrefix(lx.val, "["), "]")
		return arraySubscriptThen(subscript, subPath), nil
	case lexemeFilterBegin:
		// Collect the filter's lexemes up to its matching end, tracking the
		// nesting level so inner filters are kept intact.
		filterLexemes := []lexeme{}
		filterNestingLevel := 1
	f:
		for {
			lx := l.nextLexeme()
			switch lx.typ {
			case lexemeFilterBegin:
				filterNestingLevel++
			case lexemeFilterEnd:
				filterNestingLevel--
				if filterNestingLevel == 0 {
					break f
				}
			case lexemeError:
				return nil, errors.New(lx.val)
			case lexemeEOF:
				// should never happen as lexer should have detected an error
				return nil, errors.New("missing end of filter")
			}
			filterLexemes = append(filterLexemes, lx)
		}
		subPath, err := newPath(l)
		if err != nil {
			return nil, err
		}
		return filterThen(filterLexemes, subPath), nil
	}
	return nil, errors.New("invalid path syntax")
}
// identity yields the node itself, or nothing for a zero-valued node.
func identity(node, root *yaml.Node) yit.Iterator {
	if node.Kind != 0 {
		return yit.FromNode(node)
	}
	return yit.FromNodes()
}

// empty yields no nodes at all.
func empty(node, root *yaml.Node) yit.Iterator {
	return yit.FromNodes()
}
// compose applies p to every node produced by i and concatenates the
// resulting iterators.
func compose(i yit.Iterator, p *Path, root *yaml.Node) yit.Iterator {
	its := []yit.Iterator{}
	for node, ok := i(); ok; node, ok = i() {
		its = append(its, p.f(node, root))
	}
	return yit.FromIterators(its...)
}
// new wraps a node-matcher function in a Path.
// NOTE(review): this declaration shadows Go's builtin new() throughout the
// package; consider renaming (e.g. newPathFn) in a follow-up — callers across
// the package would need updating together.
func new(f func(node, root *yaml.Node) yit.Iterator) *Path {
	return &Path{f: f}
}
// childThen matches the mapping child named childName, then applies p to its
// value. "*" selects every child instead.
func childThen(childName string, p *Path) *Path {
	if childName == "*" {
		return allChildrenThen(p)
	}
	name := unescape(childName)
	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.MappingNode {
			return empty(node, root)
		}
		// Mapping content alternates key, value, key, value, ...
		for i := 0; i < len(node.Content); i += 2 {
			if node.Content[i].Value == name {
				return compose(yit.FromNode(node.Content[i+1]), p, root)
			}
		}
		return empty(node, root)
	})
}
// bracketChildNames splits a bracket-selector body (e.g. `'a','b,c'`) into
// its individual, unquoted child names. The input is naively split on commas
// first; segments are then re-joined while the running accumulator holds an
// odd number of quotes, so commas embedded in quoted names survive.
func bracketChildNames(childNames string) []string {
	s := strings.Split(childNames, ",")
	// reconstitute child names with embedded commas
	children := []string{}
	accum := ""
	for _, c := range s {
		if balanced(c, '\'') && balanced(c, '"') {
			if accum != "" {
				// Still inside an open quote: this comma was part of the name.
				accum += "," + c
			} else {
				children = append(children, c)
				accum = ""
			}
		} else {
			if accum == "" {
				// A quote opened in this segment; start accumulating.
				accum = c
			} else {
				// The open quote closes in this segment; flush the name.
				accum += "," + c
				children = append(children, accum)
				accum = ""
			}
		}
	}
	if accum != "" {
		children = append(children, accum)
	}
	// Trim whitespace, strip one layer of single or double quotes, and remove
	// backslash escapes.
	unquotedChildren := []string{}
	for _, c := range children {
		c = strings.TrimSpace(c)
		if strings.HasPrefix(c, "'") {
			c = strings.TrimSuffix(strings.TrimPrefix(c, "'"), "'")
		} else {
			c = strings.TrimSuffix(strings.TrimPrefix(c, `"`), `"`)
		}
		c = unescape(c)
		unquotedChildren = append(unquotedChildren, c)
	}
	return unquotedChildren
}
// balanced reports whether the quote character q occurs an even number of
// times in c, ignoring occurrences escaped by a preceding backslash.
// Renamed the loop variable from `rune` (which shadowed the builtin type) to
// r, and dropped the dead `i > 0` guard: i has always been advanced by at
// least one byte before the check.
func balanced(c string, q rune) bool {
	bal := true
	prev := eof
	for i := 0; i < len(c); {
		r, width := utf8.DecodeRuneInString(c[i:])
		i += width
		if r == q && prev != '\\' {
			bal = !bal
		}
		prev = r
	}
	return bal
}
// bracketChildThen matches every mapping value whose key appears in the
// comma-separated, possibly-quoted childNames list, then applies p to each.
func bracketChildThen(childNames string, p *Path) *Path {
	names := bracketChildNames(childNames)
	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.MappingNode {
			return empty(node, root)
		}
		its := []yit.Iterator{}
		for _, name := range names {
			// Mapping content alternates key, value, key, value, ...
			for i := 0; i < len(node.Content); i += 2 {
				if node.Content[i].Value == name {
					its = append(its, yit.FromNode(node.Content[i+1]))
				}
			}
		}
		return compose(yit.FromIterators(its...), p, root)
	})
}
// unescape removes backslash escapes from raw: `\x` becomes `x` and `\\`
// becomes a single `\`. A trailing unmatched backslash is dropped.
// Renamed the loop variable from `rune` (shadowing the builtin type) to r and
// switched from quadratic string concatenation to strings.Builder.
func unescape(raw string) string {
	var b strings.Builder
	b.Grow(len(raw))
	escaped := false
	for i := 0; i < len(raw); {
		r, width := utf8.DecodeRuneInString(raw[i:])
		i += width
		if r == '\\' && !escaped {
			// First backslash of a pair (or before any rune): swallow it.
			escaped = true
			continue
		}
		b.WriteRune(r)
		escaped = false
	}
	return b.String()
}
// allChildrenThen applies p to every value of a mapping node or every element
// of a sequence node; all other node kinds match nothing.
func allChildrenThen(p *Path) *Path {
	return new(func(node, root *yaml.Node) yit.Iterator {
		switch node.Kind {
		case yaml.MappingNode:
			its := []yit.Iterator{}
			// Values sit at odd indices; keys at even indices are skipped.
			for i := 1; i < len(node.Content); i += 2 {
				its = append(its, compose(yit.FromNode(node.Content[i]), p, root))
			}
			return yit.FromIterators(its...)
		case yaml.SequenceNode:
			its := []yit.Iterator{}
			for _, c := range node.Content {
				its = append(its, compose(yit.FromNode(c), p, root))
			}
			return yit.FromIterators(its...)
		default:
			return empty(node, root)
		}
	})
}
// arraySubscriptThen applies p to the sequence elements selected by subscript
// (an index, slice expression, or "*"). As a special case, "*" on a mapping
// node selects every value. Renamed the result of the slice helper from
// `slice` — which confusingly shadowed the function itself — to indices.
func arraySubscriptThen(subscript string, p *Path) *Path {
	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind == yaml.MappingNode && subscript == "*" {
			its := []yit.Iterator{}
			for i, n := range node.Content {
				if i%2 == 0 {
					continue // skip child names
				}
				its = append(its, compose(yit.FromNode(n), p, root))
			}
			return yit.FromIterators(its...)
		}
		if node.Kind != yaml.SequenceNode {
			return empty(node, root)
		}
		indices, err := slice(subscript, len(node.Content))
		if err != nil {
			panic(err) // should not happen: the lexer validates subscripts
		}
		its := []yit.Iterator{}
		for _, idx := range indices {
			// Out-of-range indices are silently skipped.
			if idx >= 0 && idx < len(node.Content) {
				its = append(its, compose(yit.FromNode(node.Content[idx]), p, root))
			}
		}
		return yit.FromIterators(its...)
	})
}
func filterThen(filterLexemes []lexeme, p *Path) *Path {
filter := newFilter(newFilterNode(filterLexemes))
return new(func(node, root *yaml.Node) yit.Iterator {
if node.Kind != yaml.SequenceNode {
return empty(node, root)
}
its := []yit.Iterator{}
for _, c := range node.Content {
if filter(c, root) {
its = append(its, compose(yit.FromNode(c), p, root))
}
}
return yit.FromIterators(its...)
})
} | pkg/yamlpath/path.go | 0.631367 | 0.425128 | path.go | starcoder |
package cal
import (
"math"
"strconv"
"strings"
"time"
"github.com/kudrykv/latex-yearly-planner/app/components/header"
"github.com/kudrykv/latex-yearly-planner/app/components/hyper"
"github.com/kudrykv/latex-yearly-planner/app/tex"
)
// Weeks is a list of weeks.
type Weeks []*Week

// Week is one calendar week of seven days.
// Days entries may be zero-valued where the week only partially overlaps the
// month being laid out. Weekday is the weekday the week starts on. Months and
// Quarters hold the month(s) and quarter(s) the week touches — two entries
// when it straddles a boundary.
type Week struct {
	Days [7]Day
	Weekday time.Weekday
	Year *Year
	Months Months
	Quarters Quarters
}
// NewWeeksForMonth builds the weeks covering the given month, with weeks
// starting on weekday wd. Day slots outside the month are left zero-valued.
func NewWeeksForMonth(wd time.Weekday, year *Year, qrtr *Quarter, month *Month) Weeks {
	ptr := time.Date(year.Number, month.Month, 1, 0, 0, 0, 0, time.Local)
	// Offset of the month's first day within a week that starts on wd.
	shift := (7 + ptr.Weekday() - wd) % 7

	week := &Week{Weekday: wd, Year: year, Months: Months{month}, Quarters: Quarters{qrtr}}
	for i := shift; i < 7; i++ {
		week.Days[i] = Day{Time: ptr}
		ptr = ptr.AddDate(0, 0, 1)
	}
	weeks := Weeks{week}

	for ptr.Month() == month.Month {
		// FIX: subsequent weeks previously recorded the weekday of the
		// month's first day instead of the configured week start wd,
		// inconsistent with the first week above and with FillWeekly.
		week = &Week{Weekday: wd, Year: year, Months: Months{month}, Quarters: Quarters{qrtr}}
		for i := 0; i < 7 && ptr.Month() == month.Month; i++ {
			week.Days[i] = Day{Time: ptr}
			ptr = ptr.AddDate(0, 0, 1)
		}
		weeks = append(weeks, week)
	}
	return weeks
}
// NewWeeksForYear builds every week of the year, with weeks starting on wd.
// The first week may begin in the previous December; iteration continues
// while each week's first day still falls in the target year.
func NewWeeksForYear(wd time.Weekday, year *Year) Weeks {
	ptr := selectStartWeek(year.Number, wd)
	qrtr1 := NewQuarter(wd, year, 1)
	mon1 := NewMonth(wd, year, qrtr1, time.January)
	// The first week is filled manually and pinned to Q1 / January.
	week := &Week{Weekday: wd, Year: year, Quarters: Quarters{qrtr1}, Months: Months{mon1}}
	weeks := make(Weeks, 0, 53)
	for i := 0; i < 7; i++ {
		week.Days[i] = ptr
		ptr = ptr.Add(1)
	}
	weeks = append(weeks, week)
	for ptr.Time.Year() == year.Number {
		weeks = append(weeks, FillWeekly(wd, year, ptr))
		ptr = ptr.Add(7)
	}
	// The final week may spill into the next year, in which case FillWeekly
	// appended a second quarter/month; keep only this year's entries.
	weeks[len(weeks)-1].Quarters = weeks[len(weeks)-1].Quarters[:1]
	weeks[len(weeks)-1].Months = weeks[len(weeks)-1].Months[:1]
	return weeks
}
// FillWeekly builds the week whose first day is ptr, attaching the quarter(s)
// and month(s) it touches. When the week straddles a quarter or month
// boundary, a second quarter/month entry is appended.
func FillWeekly(wd time.Weekday, year *Year, ptr Day) *Week {
	// Quarter number = ceil(month / 3).
	qrtr := NewQuarter(wd, year, int(math.Ceil(float64(ptr.Time.Month())/3.)))
	month := NewMonth(wd, year, qrtr, ptr.Time.Month())
	week := &Week{Weekday: wd, Year: year, Quarters: Quarters{qrtr}, Months: Months{month}}
	for i := 0; i < 7; i++ {
		week.Days[i] = ptr
		ptr = ptr.Add(1)
	}
	if week.quarterOverlap() {
		qrtr = NewQuarter(wd, year, week.rightQuarter())
		week.Quarters = append(week.Quarters, qrtr)
	}
	if week.MonthOverlap() {
		month = NewMonth(wd, year, qrtr, week.rightMonth())
		week.Months = append(week.Months, month)
	}
	return week
}
// selectStartWeek returns the day on which the year's first week begins: the
// latest day with weekday weekStart that is on or before January 1st.
func selectStartWeek(year int, weekStart time.Weekday) Day {
	sow := time.Date(year, time.January, 1, 0, 0, 0, 0, time.Local)
	for sow.Weekday() != weekStart {
		sow = sow.AddDate(0, 0, 1)
	}
	// If we walked forward past January 1st, the week containing the 1st
	// actually started seven days earlier (possibly in the prior December).
	if sow.Year() == year && sow.Day() > 1 {
		sow = sow.AddDate(0, 0, -7)
	}
	return Day{Time: sow}
}
// WeekNumber renders a hyperlink to this week's page. When large is true the
// label is rotated for use as a monthly-calendar side cell.
func (w *Week) WeekNumber(large interface{}) string {
	number := strconv.Itoa(w.weekNumber())
	target := w.ref()
	if isLarge, _ := large.(bool); isLarge {
		rotated := `\rotatebox[origin=tr]{90}{\makebox[\myLenMonthlyCellHeight][c]{Week ` + number + `}}`
		return hyper.Link(target, rotated)
	}
	return hyper.Link(target, number)
}
// weekNumber returns the week's ISO week number. If a later (non-zero) day
// reports a different ISO week than the first slot, that later number wins —
// this handles weeks with zero-valued leading days.
func (w *Week) weekNumber() int {
	_, first := w.Days[0].Time.ISOWeek()
	for _, d := range w.Days {
		if d.Time.IsZero() {
			continue
		}
		if _, wn := d.Time.ISOWeek(); wn != first {
			return wn
		}
	}
	return first
}
// Extra returns the additional header items for this week: its label.
func (w *Week) Extra() header.Items {
	label := "Week " + strconv.Itoa(w.weekNumber())
	return header.Items{header.NewTextItem(label)}
}

// Breadcrumb renders the year / quarter(s) / month(s) / week navigation table.
func (w *Week) Breadcrumb() string {
	items := header.Items{
		header.NewIntItem(w.Year.Number),
		w.QuartersBreadcrumb(),
		w.MonthsBreadcrumb(),
		header.NewTextItem("Week " + strconv.Itoa(w.weekNumber())).RefText(w.ref()).Ref(true),
	}
	return items.Table(true)
}
// MonthOverlap reports whether the week's first and last days fall in
// different months.
func (w *Week) MonthOverlap() bool {
	return w.Days[0].Time.Month() != w.Days[6].Time.Month()
}

// quarterOverlap reports whether the week spans two quarters.
func (w *Week) quarterOverlap() bool {
	return w.leftQuarter() != w.rightQuarter()
}

// leftQuarter is the 1-based quarter of the week's first day.
// (m+2)/3 in integer arithmetic equals ceil(m/3) for months 1..12.
func (w *Week) leftQuarter() int {
	return (int(w.Days[0].Time.Month()) + 2) / 3
}

// rightQuarter is the 1-based quarter of the week's last day.
func (w *Week) rightQuarter() int {
	return (int(w.Days[6].Time.Month()) + 2) / 3
}
// rightMonth returns the month of the last non-zero day, or -1 when every
// day slot is zero-valued.
func (w *Week) rightMonth() time.Month {
	for i := len(w.Days) - 1; i >= 0; i-- {
		if !w.Days[i].Time.IsZero() {
			return w.Days[i].Time.Month()
		}
	}
	return -1
}
// PrevNext lists header items for the adjacent weeks that exist within this
// year: previous first, then next.
func (w *Week) PrevNext() header.Items {
	items := header.Items{}
	if w.PrevExists() {
		label := "Week " + strconv.Itoa(w.Prev().weekNumber())
		items = append(items, header.NewTextItem(label))
	}
	if w.NextExists() {
		label := "Week " + strconv.Itoa(w.Next().weekNumber())
		items = append(items, header.NewTextItem(label))
	}
	return items
}
// NextExists reports whether a following week still belongs to this year:
// the week's last day is in this year and its first day is not December 31st.
func (w *Week) NextExists() bool {
	stillThisYear := w.Days[6].Time.Year() == w.Year.Number
	isntTheLastDayOfTheYear := w.Days[0].Time.Month() != time.December || w.Days[0].Time.Day() != 31
	return stillThisYear && isntTheLastDayOfTheYear
}

// PrevExists reports whether a preceding week still belongs to this year:
// the week's first day is in this year and is not January 1st.
// (Also fixes the "stilThisYear" typo so the name matches NextExists.)
func (w *Week) PrevExists() bool {
	stillThisYear := w.Days[0].Time.Year() == w.Year.Number
	isntTheFirstDayOfTheYear := w.Days[0].Time.Month() != time.January || w.Days[0].Time.Day() != 1
	return stillThisYear && isntTheFirstDayOfTheYear
}
// Next returns the week starting seven days after this week's first day.
func (w *Week) Next() *Week {
	return FillWeekly(w.Weekday, w.Year, w.Days[0].Add(7))
}

// Prev returns the week starting seven days before this week's first day.
func (w *Week) Prev() *Week {
	return FillWeekly(w.Weekday, w.Year, w.Days[0].Add(-7))
}
// QuartersBreadcrumb renders "Q1 / Q2"-style items, one per quarter the week
// touches.
func (w *Week) QuartersBreadcrumb() header.ItemsGroup {
	group := header.ItemsGroup{}.Delim(" / ")
	for _, q := range w.Quarters {
		group.Items = append(group.Items, header.NewTextItem("Q"+strconv.Itoa(q.Number)))
	}
	return group
}

// MonthsBreadcrumb renders one month item per month the week touches.
func (w *Week) MonthsBreadcrumb() header.ItemsGroup {
	group := header.ItemsGroup{}.Delim(" / ")
	for _, m := range w.Months {
		group.Items = append(group.Items, header.NewMonthItem(m.Month))
	}
	return group
}
// ref returns the hyperlink target name for this week.
// A week numbered above 50 whose last day is already in January of this year
// is the year's first (partial) week carrying the prior ISO year's week
// number; it gets an "fw" prefix — presumably to avoid colliding with the
// same-numbered week at the end of this year (TODO confirm).
func (w *Week) ref() string {
	prefix := ""
	wn := w.weekNumber()
	rm := w.rightMonth()
	ry := w.rightYear()
	if wn > 50 && rm == time.January && ry == w.Year.Number {
		prefix = "fw"
	}
	return prefix + "Week " + strconv.Itoa(wn)
}
// leftMonth returns the month of the first non-zero day, or -1 when every
// day slot is zero-valued.
func (w *Week) leftMonth() time.Month {
	for i := range w.Days {
		if !w.Days[i].Time.IsZero() {
			return w.Days[i].Time.Month()
		}
	}
	return -1
}

// rightYear returns the year of the last non-zero day, or -1 when every day
// slot is zero-valued.
func (w *Week) rightYear() int {
	for i := len(w.Days) - 1; i >= 0; i-- {
		if !w.Days[i].Time.IsZero() {
			return w.Days[i].Time.Year()
		}
	}
	return -1
}
// HeadingMOS renders the week header row: an optional "<" link to the
// previous week, the week's own hypertarget, and an optional ">" link to the
// next week, laid out as a LaTeX tabular.
func (w *Week) HeadingMOS() string {
	cells := []string{}
	if w.PrevExists() {
		arrow := tex.ResizeBoxW(`\myLenHeaderResizeBox`, `$\langle$`)
		cells = append(cells, tex.Hyperlink(w.Prev().ref(), arrow))
	}
	cells = append(cells, tex.ResizeBoxW(`\myLenHeaderResizeBox`, w.Target()))
	if w.NextExists() {
		arrow := tex.ResizeBoxW(`\myLenHeaderResizeBox`, `$\rangle$`)
		cells = append(cells, tex.Hyperlink(w.Next().ref(), arrow))
	}
	return tex.Tabular("@{}"+strings.Repeat("l", len(cells)), strings.Join(cells, ` & `))
}
func (w *Week) Name() string {
return "Week " + strconv.Itoa(w.weekNumber())
}
func (w *Week) Target() string {
return tex.Hypertarget(w.ref(), w.Name())
} | app/components/cal/week.go | 0.579162 | 0.415314 | week.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.