code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package pure
import (
"context"
"fmt"
"time"
"github.com/benthosdev/benthos/v4/internal/bundle"
"github.com/benthosdev/benthos/v4/internal/component/input"
"github.com/benthosdev/benthos/v4/internal/component/input/processors"
"github.com/benthosdev/benthos/v4/internal/docs"
"github.com/benthosdev/benthos/v4/internal/log"
"github.com/benthosdev/benthos/v4/internal/message"
)
// init registers the "resource" input plugin with the global input bundle.
// The constructor validates, at config-parse time, that the referenced input
// resource actually exists in the manager before returning a proxy to it.
func init() {
	err := bundle.AllInputs.Add(processors.WrapConstructor(func(c input.Config, nm bundle.NewManagement) (input.Streamed, error) {
		// Fail fast if no input resource with this name is registered.
		if !nm.ProbeInput(c.Resource) {
			return nil, fmt.Errorf("input resource '%v' was not found", c.Resource)
		}
		return &resourceInput{
			mgr:  nm,
			name: c.Resource,
			log:  nm.Logger(),
		}, nil
	}), docs.ComponentSpec{
		Name:    "resource",
		Summary: `Resource is an input type that channels messages from a resource input, identified by its name.`,
		Description: `Resources allow you to tidy up deeply nested configs. For example, the config:
` + "```yaml" + `
input:
broker:
inputs:
- kafka:
addresses: [ TODO ]
topics: [ foo ]
consumer_group: foogroup
- gcp_pubsub:
project: bar
subscription: baz
` + "```" + `
Could also be expressed as:
` + "```yaml" + `
input:
broker:
inputs:
- resource: foo
- resource: bar
input_resources:
- label: foo
kafka:
addresses: [ TODO ]
topics: [ foo ]
consumer_group: foogroup
- label: bar
gcp_pubsub:
project: bar
subscription: baz
` + "```" + `
Resources also allow you to reference a single input in multiple places, such as multiple streams mode configs, or multiple entries in a broker input. However, when a resource is referenced more than once the messages it produces are distributed across those references, so each message will only be directed to a single reference, not all of them.
You can find out more about resources [in this document.](/docs/configuration/resources)`,
		Categories: []string{
			"Utility",
		},
		Config: docs.FieldString("", "").HasDefault(""),
	})
	if err != nil {
		// Registration failures are programmer errors (duplicate names etc.),
		// so a panic during package init is appropriate.
		panic(err)
	}
}
//------------------------------------------------------------------------------
// resourceInput is a thin proxy around a named input resource held by the
// bundle manager, allowing configs to reference it via `resource: <name>`.
type resourceInput struct {
	mgr  bundle.NewManagement // manager used to look up the named resource on each call
	name string               // name of the input resource being referenced
	log  log.Modular          // logger for reporting resource access failures
}
// TransactionChan returns the transaction channel of the underlying resource
// input. If the resource cannot be accessed the error is logged and the
// returned channel is nil (the zero value of the named result).
func (r *resourceInput) TransactionChan() (tChan <-chan message.Transaction) {
	if err := r.mgr.AccessInput(context.Background(), r.name, func(i input.Streamed) {
		// The callback captures the named result so the channel survives the
		// AccessInput call.
		tChan = i.TransactionChan()
	}); err != nil {
		r.log.Errorf("Failed to obtain input resource '%v': %v", r.name, err)
	}
	return
}
// Connected reports whether the underlying resource input is connected. If
// the resource cannot be accessed the error is logged and false is returned.
func (r *resourceInput) Connected() (isConnected bool) {
	if err := r.mgr.AccessInput(context.Background(), r.name, func(i input.Streamed) {
		isConnected = i.Connected()
	}); err != nil {
		r.log.Errorf("Failed to obtain input resource '%v': %v", r.name, err)
	}
	return
}
func (r *resourceInput) CloseAsync() {
}
func (r *resourceInput) WaitForClose(timeout time.Duration) error {
return nil
} | internal/impl/pure/input_resource.go | 0.538741 | 0.444927 | input_resource.go | starcoder |
package advent
// Compile-time assertion that *sonorSweep satisfies the Problem interface.
var _ Problem = &sonorSweep{}

// sonorSweep solves Advent of Code day 1 ("Sonar Sweep").
// NOTE(review): "sonor" looks like a typo for "sonar"; kept because the
// exported constructor name NewSonorSweep depends on it.
type sonorSweep struct {
	dailyProblem
}
// NewSonorSweep constructs the day-1 problem solver.
func NewSonorSweep() Problem {
	solver := sonorSweep{dailyProblem{day: 1}}
	return &solver
}
// Solve reads the day's input as integers and returns both part answers as
// a two-element slice: [part 1, part 2].
func (s *sonorSweep) Solve() interface{} {
	depths := IntsFromStrings(s.GetInputLines())
	return []int{
		s.countDepthIncreases(depths),
		s.count3WideDepthIncreases(depths),
	}
}
/*
You're minding your own business on a ship at sea when the overboard alarm goes off! You rush to see if you can help. Apparently, one of the Elves tripped and accidentally sent the sleigh keys flying into the ocean!
Before you know it, you're inside a submarine the Elves keep ready for situations like this. It's covered in Christmas lights (because of course it is), and it even has an experimental antenna that should be able to track the keys if you can boost its signal strength high enough; there's a little meter that indicates the antenna's signal strength by displaying 0-50 stars.
Your instincts tell you that in order to save Christmas, you'll need to get all fifty stars by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
As the submarine drops below the surface of the ocean, it automatically performs a sonar sweep of the nearby sea floor. On a small screen, the sonar sweep report (your puzzle input) appears: each line is a measurement of the sea floor depth as the sweep looks further and further away from the submarine.
For example, suppose you had the following report:
199
200
208
210
200
207
240
269
260
263
This report indicates that, scanning outward from the submarine, the sonar sweep found depths of 199, 200, 208, 210, and so on.
The first order of business is to figure out how quickly the depth increases, just so you know what you're dealing with - you never know if the keys will get carried into deeper water by an ocean current or a fish or something.
To do this, count the number of times a depth measurement increases from the previous measurement. (There is no measurement before the first measurement.) In the example above, the changes are as follows:
199 (N/A - no previous measurement)
200 (increased)
208 (increased)
210 (increased)
200 (decreased)
207 (increased)
240 (increased)
269 (increased)
260 (decreased)
263 (increased)
In this example, there are 7 measurements that are larger than the previous measurement.
How many measurements are larger than the previous measurement?
*/
// countDepthIncreases returns how many readings are strictly greater than
// the reading immediately before them.
func (s *sonorSweep) countDepthIncreases(input []int) (increases int) {
	// A single reading (or none) can never register an increase.
	if len(input) < 2 {
		return
	}
	prev := input[0]
	for _, depth := range input[1:] {
		if depth > prev {
			increases++
		}
		prev = depth
	}
	return
}
/*
Considering every single measurement isn't as useful as you expected: there's just too much noise in the data.
Instead, consider sums of a three-measurement sliding window. Again considering the above example:
199 A
200 A B
208 A B C
210 B C D
200 E C D
207 E F D
240 E F G
269 F G H
260 G H
263 H
Start by comparing the first and second three-measurement windows. The measurements in the first window are marked A (199, 200, 208); their sum is 199 + 200 + 208 = 607. The second window is marked B (200, 208, 210); its sum is 618. The sum of measurements in the second window is larger than the sum of the first, so this first comparison increased.
Your goal now is to count the number of times the sum of measurements in this sliding window increases from the previous sum. So, compare A with B, then compare B with C, then C with D, and so on. Stop when there aren't enough measurements left to create a new three-measurement sum.
In the above example, the sum of each three-measurement window is as follows:
A: 607 (N/A - no previous sum)
B: 618 (increased)
C: 618 (no change)
D: 617 (decreased)
E: 647 (increased)
F: 716 (increased)
G: 769 (increased)
H: 792 (increased)
In this example, there are 5 sums that are larger than the previous sum.
Consider sums of a three-measurement sliding window. How many sums are larger than the previous sum?
*/
func (s *sonorSweep) count3WideDepthIncreases(input []int) (increases int) {
if len(input) < 4 {
return //similar situation as above
}
summedDepth := input[0] + input[1] + input[2]
nextSummedDepth := 0
for i := 3; i < len(input); i++ {
nextSummedDepth = summedDepth + input[i] - input[i-3]
if nextSummedDepth > summedDepth {
increases++
}
summedDepth = nextSummedDepth
}
return
} | internal/advent/day1.go | 0.733261 | 0.473779 | day1.go | starcoder |
package influxql
import (
"fmt"
)
// Language is the root of the InfluxQL statement parse tree; handlers for
// each top level statement are registered in init below.
var Language = &ParseTree{}

// ParseTree is a prefix tree over tokens: each node either dispatches a
// matched token to a statement handler or descends into a subtree keyed by
// that token.
type ParseTree struct {
	Handlers map[Token]func(*Parser) (Statement, error) // terminal handlers keyed by token
	Tokens   map[Token]*ParseTree                       // subtrees keyed by token prefix
	Keys     []string                                   // tokens in registration order, used in parse error messages
}
// With passes the current parse tree to a function to allow nested
// registrations without repeating the common prefix (see init for usage).
func (t *ParseTree) With(fn func(*ParseTree)) {
	fn(t)
}
// Group groups together a set of related handlers with a common token
// prefix, creating intermediate subtrees as needed and returning the
// deepest one. It panics if a token already has a terminal handler at the
// same level, since that would make the grammar ambiguous.
func (t *ParseTree) Group(tokens ...Token) *ParseTree {
	for _, tok := range tokens {
		// Look for the parse tree for this token.
		if subtree := t.Tokens[tok]; subtree != nil {
			t = subtree
			continue
		}
		// No subtree exists yet. Verify that we don't have a conflicting
		// statement.
		if _, conflict := t.Handlers[tok]; conflict {
			panic(fmt.Sprintf("conflict for token %s", tok))
		}
		// Create the new parse tree and register it inside of this one for
		// later reference.
		newT := &ParseTree{}
		if t.Tokens == nil {
			t.Tokens = make(map[Token]*ParseTree)
		}
		t.Tokens[tok] = newT
		t.Keys = append(t.Keys, tok.String())
		t = newT
	}
	return t
}
// Handle registers a handler to be invoked when seeing the given token.
// It panics if the token already names a subtree or another handler at this
// level, since that would make the grammar ambiguous.
func (t *ParseTree) Handle(tok Token, fn func(*Parser) (Statement, error)) {
	// Verify that there is no conflict for this token in this parse tree.
	if _, conflict := t.Tokens[tok]; conflict {
		panic(fmt.Sprintf("conflict for token %s", tok))
	}
	if _, conflict := t.Handlers[tok]; conflict {
		panic(fmt.Sprintf("conflict for token %s", tok))
	}
	if t.Handlers == nil {
		t.Handlers = make(map[Token]func(*Parser) (Statement, error))
	}
	t.Handlers[tok] = fn
	t.Keys = append(t.Keys, tok.String())
}
// Parse parses a statement using the language defined in the parse tree.
// It scans tokens one at a time, descending into subtrees until a terminal
// handler is found; the handler then parses the remainder of the statement.
func (t *ParseTree) Parse(p *Parser) (Statement, error) {
	for {
		tok, pos, lit := p.ScanIgnoreWhitespace()
		if subtree := t.Tokens[tok]; subtree != nil {
			t = subtree
			continue
		}
		if stmt := t.Handlers[tok]; stmt != nil {
			return stmt(p)
		}
		// There were no registered handlers. Return the valid tokens in the order they were added.
		return nil, newParseError(tokstr(tok, lit), t.Keys, pos)
	}
}
// Clone returns a deep copy of the parse tree. Handler functions are shared
// by reference, while the token map, its subtrees, and the ordered key list
// are duplicated so that mutations of the clone do not affect the original.
func (t *ParseTree) Clone() *ParseTree {
	newT := &ParseTree{}
	if t.Handlers != nil {
		newT.Handlers = make(map[Token]func(*Parser) (Statement, error), len(t.Handlers))
		for tok, handler := range t.Handlers {
			newT.Handlers[tok] = handler
		}
	}
	if t.Tokens != nil {
		newT.Tokens = make(map[Token]*ParseTree, len(t.Tokens))
		for tok, subtree := range t.Tokens {
			newT.Tokens[tok] = subtree.Clone()
		}
	}
	// Copy the ordered key list as well; previously it was dropped, leaving
	// clones unable to report valid tokens in parse errors (see Parse).
	if t.Keys != nil {
		newT.Keys = make([]string, len(t.Keys))
		copy(newT.Keys, t.Keys)
	}
	return newT
}
// init registers every top level InfluxQL statement with the Language parse
// tree: single-token statements via Handle, multi-token prefixes (SHOW ...,
// CREATE ..., DROP ..., etc.) via Group/With.
func init() {
	Language.Handle(SELECT, func(p *Parser) (Statement, error) {
		return p.parseSelectStatement(targetNotRequired)
	})
	Language.Handle(DELETE, func(p *Parser) (Statement, error) {
		return p.parseDeleteStatement()
	})
	// SHOW has the largest family of sub-statements.
	Language.Group(SHOW).With(func(show *ParseTree) {
		show.Group(CONTINUOUS).Handle(QUERIES, func(p *Parser) (Statement, error) {
			return p.parseShowContinuousQueriesStatement()
		})
		show.Handle(DATABASES, func(p *Parser) (Statement, error) {
			return p.parseShowDatabasesStatement()
		})
		show.Handle(SERVERS, func(p *Parser) (Statement, error) {
			return p.parseShowServersStatement()
		})
		show.Handle(DIAGNOSTICS, func(p *Parser) (Statement, error) {
			return p.parseShowDiagnosticsStatement()
		})
		show.Group(FIELD).With(func(field *ParseTree) {
			field.Handle(KEY, func(p *Parser) (Statement, error) {
				return p.parseShowFieldKeyCardinalityStatement()
			})
			field.Handle(KEYS, func(p *Parser) (Statement, error) {
				return p.parseShowFieldKeysStatement()
			})
		})
		show.Group(GRANTS).Handle(FOR, func(p *Parser) (Statement, error) {
			return p.parseGrantsForUserStatement()
		})
		show.Group(MEASUREMENT).Handle(EXACT, func(p *Parser) (Statement, error) {
			return p.parseShowMeasurementCardinalityStatement(true)
		})
		show.Group(MEASUREMENT).Handle(CARDINALITY, func(p *Parser) (Statement, error) {
			return p.parseShowMeasurementCardinalityStatement(false)
		})
		show.Handle(MEASUREMENTS, func(p *Parser) (Statement, error) {
			return p.parseShowMeasurementsStatement()
		})
		show.Handle(QUERIES, func(p *Parser) (Statement, error) {
			return p.parseShowQueriesStatement()
		})
		show.Group(RETENTION).Handle(POLICIES, func(p *Parser) (Statement, error) {
			return p.parseShowRetentionPoliciesStatement()
		})
		show.Handle(SERIES, func(p *Parser) (Statement, error) {
			return p.parseShowSeriesStatement()
		})
		show.Group(SHARD).Handle(GROUPS, func(p *Parser) (Statement, error) {
			return p.parseShowShardGroupsStatement()
		})
		show.Handle(SHARDS, func(p *Parser) (Statement, error) {
			return p.parseShowShardsStatement()
		})
		show.Handle(STATS, func(p *Parser) (Statement, error) {
			return p.parseShowStatsStatement()
		})
		show.Handle(SUBSCRIPTIONS, func(p *Parser) (Statement, error) {
			return p.parseShowSubscriptionsStatement()
		})
		show.Group(TAG).With(func(tag *ParseTree) {
			tag.Handle(KEY, func(p *Parser) (Statement, error) {
				return p.parseShowTagKeyCardinalityStatement()
			})
			tag.Handle(KEYS, func(p *Parser) (Statement, error) {
				return p.parseShowTagKeysStatement()
			})
			tag.Handle(VALUES, func(p *Parser) (Statement, error) {
				return p.parseShowTagValuesStatement()
			})
		})
		show.Handle(USERS, func(p *Parser) (Statement, error) {
			return p.parseShowUsersStatement()
		})
	})
	Language.Group(CREATE).With(func(create *ParseTree) {
		create.Group(CONTINUOUS).Handle(QUERY, func(p *Parser) (Statement, error) {
			return p.parseCreateContinuousQueryStatement()
		})
		create.Handle(DATABASE, func(p *Parser) (Statement, error) {
			return p.parseCreateDatabaseStatement()
		})
		create.Handle(USER, func(p *Parser) (Statement, error) {
			return p.parseCreateUserStatement()
		})
		create.Group(RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) {
			return p.parseCreateRetentionPolicyStatement()
		})
		create.Handle(SUBSCRIPTION, func(p *Parser) (Statement, error) {
			return p.parseCreateSubscriptionStatement()
		})
	})
	Language.Group(DROP).With(func(drop *ParseTree) {
		drop.Group(CONTINUOUS).Handle(QUERY, func(p *Parser) (Statement, error) {
			return p.parseDropContinuousQueryStatement()
		})
		drop.Handle(DATABASE, func(p *Parser) (Statement, error) {
			return p.parseDropDatabaseStatement()
		})
		drop.Handle(MEASUREMENT, func(p *Parser) (Statement, error) {
			return p.parseDropMeasurementStatement()
		})
		drop.Group(RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) {
			return p.parseDropRetentionPolicyStatement()
		})
		drop.Handle(SERIES, func(p *Parser) (Statement, error) {
			return p.parseDropSeriesStatement()
		})
		drop.Handle(SHARD, func(p *Parser) (Statement, error) {
			return p.parseDropShardStatement()
		})
		drop.Handle(SUBSCRIPTION, func(p *Parser) (Statement, error) {
			return p.parseDropSubscriptionStatement()
		})
		drop.Handle(USER, func(p *Parser) (Statement, error) {
			return p.parseDropUserStatement()
		})
	})
	Language.Handle(EXPLAIN, func(p *Parser) (Statement, error) {
		return p.parseExplainStatement()
	})
	Language.Handle(GRANT, func(p *Parser) (Statement, error) {
		return p.parseGrantStatement()
	})
	Language.Handle(REVOKE, func(p *Parser) (Statement, error) {
		return p.parseRevokeStatement()
	})
	Language.Group(ALTER, RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) {
		return p.parseAlterRetentionPolicyStatement()
	})
	Language.Group(SET, PASSWORD).Handle(FOR, func(p *Parser) (Statement, error) {
		return p.parseSetPasswordUserStatement()
	})
	Language.Group(KILL).Handle(QUERY, func(p *Parser) (Statement, error) {
		return p.parseKillQueryStatement()
	})
}
package iso20022
// Cash movements from or to a fund as a result of investment funds transactions, eg, subscriptions or redemptions.
type FundCashForecast7 struct {
// Unique technical identifier for an instance of a fund cash forecast within a fund cash forecast report as assigned by the issuer of the report.
Identification *Max35Text `xml:"Id"`
// Date and, if required, the time, at which the price has been applied.
TradeDateTime *DateAndDateTimeChoice `xml:"TradDtTm"`
// Previous date and time at which the price was applied.
PreviousTradeDateTime *DateAndDateTimeChoice `xml:"PrvsTradDtTm,omitempty"`
// Investment fund class to which a cash flow is related.
FinancialInstrumentDetails *FinancialInstrument9 `xml:"FinInstrmDtls"`
// Total value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
TotalNAV []*ActiveOrHistoricCurrencyAndAmount `xml:"TtlNAV,omitempty"`
// Previous value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
PreviousTotalNAV []*ActiveOrHistoricCurrencyAndAmount `xml:"PrvsTtlNAV,omitempty"`
// Total number of investment fund class units that have been issued.
TotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"TtlUnitsNb,omitempty"`
// Previous total number of investment fund class units that have been issued.
PreviousTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"PrvsTtlUnitsNb,omitempty"`
// Rate of change of the net asset value.
TotalNAVChangeRate *PercentageRate `xml:"TtlNAVChngRate,omitempty"`
// Currency of the investment fund class.
InvestmentCurrency []*ActiveOrHistoricCurrencyCode `xml:"InvstmtCcy,omitempty"`
// Information about the designation of the share class currency, that is, whether it is for onshore or offshore purposes and other information that may be required. This is typically only required for CNY funds.
CurrencyStatus *CurrencyDesignation1 `xml:"CcySts,omitempty"`
// Indicates whether the net cash flow is exceptional.
ExceptionalNetCashFlowIndicator *YesNoIndicator `xml:"XcptnlNetCshFlowInd"`
// Price per unit of the trade date.
Price *UnitPrice19 `xml:"Pric,omitempty"`
// Foreign exchange rate.
ForeignExchangeRate *ForeignExchangeTerms19 `xml:"FXRate,omitempty"`
// Net cash flow expressed as a percentage of the total NAV for the share class.
PercentageOfShareClassTotalNAV *PercentageRate `xml:"PctgOfShrClssTtlNAV,omitempty"`
// Cash movements into the fund as a result of transactions in shares in an investment fund, for example, subscriptions or switch-ins.
CashInForecastDetails []*CashInForecast6 `xml:"CshInFcstDtls,omitempty"`
// Cash movements out of the fund as a result of transactions in shares in an investment fund, for example, redemptions or switch-outs.
CashOutForecastDetails []*CashOutForecast6 `xml:"CshOutFcstDtls,omitempty"`
// Net cash as a result of the cash-in and cash-out flows.
NetCashForecastDetails []*NetCashForecast4 `xml:"NetCshFcstDtls,omitempty"`
}
func (f *FundCashForecast7) SetIdentification(value string) {
f.Identification = (*Max35Text)(&value)
}
func (f *FundCashForecast7) AddTradeDateTime() *DateAndDateTimeChoice {
f.TradeDateTime = new(DateAndDateTimeChoice)
return f.TradeDateTime
}
func (f *FundCashForecast7) AddPreviousTradeDateTime() *DateAndDateTimeChoice {
f.PreviousTradeDateTime = new(DateAndDateTimeChoice)
return f.PreviousTradeDateTime
}
func (f *FundCashForecast7) AddFinancialInstrumentDetails() *FinancialInstrument9 {
f.FinancialInstrumentDetails = new(FinancialInstrument9)
return f.FinancialInstrumentDetails
}
func (f *FundCashForecast7) AddTotalNAV(value, currency string) {
f.TotalNAV = append(f.TotalNAV, NewActiveOrHistoricCurrencyAndAmount(value, currency))
}
func (f *FundCashForecast7) AddPreviousTotalNAV(value, currency string) {
f.PreviousTotalNAV = append(f.PreviousTotalNAV, NewActiveOrHistoricCurrencyAndAmount(value, currency))
}
func (f *FundCashForecast7) AddTotalUnitsNumber() *FinancialInstrumentQuantity1 {
f.TotalUnitsNumber = new(FinancialInstrumentQuantity1)
return f.TotalUnitsNumber
}
func (f *FundCashForecast7) AddPreviousTotalUnitsNumber() *FinancialInstrumentQuantity1 {
f.PreviousTotalUnitsNumber = new(FinancialInstrumentQuantity1)
return f.PreviousTotalUnitsNumber
}
func (f *FundCashForecast7) SetTotalNAVChangeRate(value string) {
f.TotalNAVChangeRate = (*PercentageRate)(&value)
}
func (f *FundCashForecast7) AddInvestmentCurrency(value string) {
f.InvestmentCurrency = append(f.InvestmentCurrency, (*ActiveOrHistoricCurrencyCode)(&value))
}
func (f *FundCashForecast7) AddCurrencyStatus() *CurrencyDesignation1 {
f.CurrencyStatus = new(CurrencyDesignation1)
return f.CurrencyStatus
}
func (f *FundCashForecast7) SetExceptionalNetCashFlowIndicator(value string) {
f.ExceptionalNetCashFlowIndicator = (*YesNoIndicator)(&value)
}
func (f *FundCashForecast7) AddPrice() *UnitPrice19 {
f.Price = new(UnitPrice19)
return f.Price
}
func (f *FundCashForecast7) AddForeignExchangeRate() *ForeignExchangeTerms19 {
f.ForeignExchangeRate = new(ForeignExchangeTerms19)
return f.ForeignExchangeRate
}
func (f *FundCashForecast7) SetPercentageOfShareClassTotalNAV(value string) {
f.PercentageOfShareClassTotalNAV = (*PercentageRate)(&value)
}
func (f *FundCashForecast7) AddCashInForecastDetails() *CashInForecast6 {
newValue := new (CashInForecast6)
f.CashInForecastDetails = append(f.CashInForecastDetails, newValue)
return newValue
}
func (f *FundCashForecast7) AddCashOutForecastDetails() *CashOutForecast6 {
newValue := new (CashOutForecast6)
f.CashOutForecastDetails = append(f.CashOutForecastDetails, newValue)
return newValue
}
func (f *FundCashForecast7) AddNetCashForecastDetails() *NetCashForecast4 {
newValue := new (NetCashForecast4)
f.NetCashForecastDetails = append(f.NetCashForecastDetails, newValue)
return newValue
} | FundCashForecast7.go | 0.862988 | 0.42173 | FundCashForecast7.go | starcoder |
package lexicon
import (
"fmt"
"strings"
"github.com/pkg/errors"
)
// _Trie is an ordinary implementation of a trie with one space-saving
// twist: a leaf can store the remainder of a key as a compressed "suffix"
// instead of a chain of single-byte children.
type _Trie struct {
	hasSuffix bool            // true when this node stores a compressed suffix
	suffix    []byte          // remaining key bytes; valid only when hasSuffix
	hasValue  bool            // true when a key terminates exactly at this node
	value     int32           // payload for the terminating key or the suffix
	children  map[byte]*_Trie // child nodes keyed by next key byte; nil until needed
}
// newTrie returns an empty trie node with all zero-valued fields.
func newTrie() *_Trie {
	return &_Trie{}
}
// isEmpty reports whether this node carries nothing at all: no children,
// no value, and no compressed suffix.
func (t *_Trie) isEmpty() bool {
	if t.hasValue || t.hasSuffix {
		return false
	}
	return len(t.children) == 0
}
// convertSuffix expands a compressed suffix node into a regular child node:
// the first suffix byte becomes a child edge and the remaining bytes (with
// the stored value) are re-inserted beneath it. The suffix fields are then
// cleared.
func (t *_Trie) convertSuffix() {
	assert(t.hasSuffix, "unexpected call of convertSuffix()")
	if t.children == nil {
		t.children = make(map[byte]*_Trie)
	}
	child := newTrie()
	// Re-insert everything after the first byte under the new child.
	child.add(t.suffix[1:], t.value)
	t.children[t.suffix[0]] = child
	t.hasSuffix = false
	t.suffix = nil
	t.value = 0
}
// add inserts a key/value pair into the trie rooted at t. Empty nodes store
// the whole remaining key as a compressed suffix; a suffix node is expanded
// into regular children the moment anything else is inserted beneath it.
func (t *_Trie) add(key []byte, value int32) {
	// We will put some thing into this trie-node now. So, if the node has
	// suffix, we need to convert it to normal child-node first
	if t.hasSuffix {
		t.convertSuffix()
	}
	if len(key) == 0 {
		// Reaches the node to put value
		t.hasValue = true
		t.value = value
	} else if t.isEmpty() {
		// If it's a empty node, we put key-value as suffix here
		t.suffix = key
		t.hasSuffix = true
		t.value = value
	} else {
		// Put the key recursively
		if t.children == nil {
			t.children = make(map[byte]*_Trie)
		}
		if _, ok := t.children[key[0]]; !ok {
			t.children[key[0]] = newTrie()
		}
		t.children[key[0]].add(key[1:], value)
	}
}
// buildTrie constructs the trie from a string->int map. It returns an error
// if any key is empty or contains the reserved '\x00' byte.
func buildTrie(dict map[string]int32) (trie *_Trie, err error) {
	trie = newTrie()
	for key, value := range dict {
		// Check the key variable itself; previously the literal string
		// "key" was checked, so NUL bytes were never rejected.
		if strings.Contains(key, "\x00") {
			err = errors.Errorf("unexpected character '\\x00' in key: %s", key)
			return nil, err
		}
		if key == "" {
			err = errors.New("unexpected empty key")
			return nil, err
		}
		trie.add([]byte(key), value)
	}
	return
}
// countNode returns the number of trie nodes in the subtree rooted at t,
// including t itself.
func (t *_Trie) countNode() int {
	total := 1 // this node
	for _, child := range t.children {
		total += child.countNode()
	}
	return total
}
// printTree prints the structure of the trie to stdout, starting with a
// synthetic "ROOT" label (debugging aid).
func (t *_Trie) printTree() {
	fmt.Println("ROOT")
	t.print("")
}
// print prints the current trie node (just for debugging). prefix carries
// the accumulated ASCII-art indentation for this depth.
func (t *_Trie) print(prefix string) {
	if t.hasSuffix {
		// A suffix node is a leaf by construction: it must have no children.
		assert(t.children == nil, "unexpected _Trie node")
		fmt.Printf(
			"%s+- SUFFIX('%s', %d)\n",
			prefix,
			t.suffix,
			t.value)
	} else {
		assert(t.suffix == nil, "unexpected _Trie node")
		childList := []byte{}
		for child := range t.children {
			childList = append(childList, child)
		}
		// NOTE(review): childList follows map iteration order, so output
		// ordering varies between runs — confirm that is acceptable for a
		// debug printer.
		for i, child := range childList {
			medium := "|-"
			nextPrefix := prefix + "| "
			// The last branch uses the corner glyph unless a value entry
			// still follows below.
			if i == len(childList)-1 && !t.hasValue {
				medium = "+-"
				nextPrefix = prefix + " "
			}
			fmt.Printf("%s%s %c\n", prefix, medium, child)
			t.children[child].print(nextPrefix)
		}
		// The value node
		if t.hasValue {
			fmt.Printf("%s+- VALUE(%d)\n", prefix, t.value)
		}
	}
}
package utils
import (
"errors"
"math"
"strconv"
"strings"
"github.com/microsoft/go-cidr-manager/ipv4cidr/consts"
)
// GetNetmask takes the mask number as input and creates the netmask from it
// @input mask uint8: The mask for the CIDR range
// @returns uint32: The integer representation of the netmask
// NOTE(review): assumes mask <= consts.MaxBits (32); larger values would
// over-shift — confirm callers validate the mask.
func GetNetmask(mask uint8) uint32 {
	// Netmask = 32-bit number with all bits set, shifted left by (32-mask)
	return consts.MaxUInt32 << (consts.MaxBits - mask)
}
// GetCIDRRangeLength calculates the number of IP addresses in that CIDR range
// @input mask uint8: The mask for the CIDR range
// @returns uint32: The length of the CIDR range
// NOTE(review): an integer shift (1 << (MaxBits - mask)) would avoid the
// float round-trip; also mask == 0 yields 2^32, which overflows uint32 —
// confirm the valid mask range.
func GetCIDRRangeLength(mask uint8) uint32 {
	// Length of CIDR range = 2^(32-mask)
	return uint32(math.Pow(float64(2), float64((consts.MaxBits - mask))))
}
// Standardize masks off the host bits of an IP address, yielding the network
// address (the first IP) of the CIDR range.
// @input ip uint32: The IP address in integer representation
// @input netmask uint32: The netmask of the CIDR range
// @returns uint32: First IP in CIDR range
func Standardize(ip uint32, netmask uint32) uint32 {
	networkAddress := ip & netmask
	return networkAddress
}
// CheckStandardized checks if the IP stored in object is the first IP in range or not
// @input ip uint32: The IP address in integer representation
// @input netmask uint32: The netmask of the CIDR range
// @returns error: If not the first IP in range, an error is returned. Else, return value is nil
func CheckStandardized(ip uint32, netmask uint32) error {
	// If IP stored in object is same as the standardized representation, then the check passes
	if ip == Standardize(ip, netmask) {
		return nil
	}
	// If above check fails, return an error
	return errors.New(consts.NonStandardizedIPError)
}
// ConvertIPToString converts an integer IP address to its string representation
// @param ip uint32: IP address in integer representation
// @returns string: IP address in string representation (dotted quad "a.b.c.d")
func ConvertIPToString(ip uint32) string {
	// IP addresses consist of 4 sections (a.b.c.d)
	ipSections := make([]string, 4)
	for i := 3; i >= 0; i-- {
		// To generate each section, we go in reverse order (right to left)
		// 1. Pull the least significant 8 bits into another var
		// 2. Convert to int and save in the corresponding section
		// 3. Shift the IP by 8 bits to the right
		sectionInt := int(ip & consts.EightBits)
		ipSections[i] = strconv.Itoa(sectionInt)
		ip = ip >> consts.GroupSize
	}
	return strings.Join(ipSections, ".")
}
package models
import (
"strconv"
t "time"
)
const (
	csvColumnRow   = 2  // 1 Header row + 1 Data row (2)
	csvColumnCount = 15 // 12 Months of the year + Closed, Accepted and Rejected (15)
)

// StatisticsReport holds statistical data formed from Transaction data.
type StatisticsReport struct {
	ClosedTransactions   int
	AcceptedTransactions int
	RejectedTransactions int
	// Monthly accepted-filing counts keyed by calendar month. Only the
	// first-year map is exported by ToCSV; the second-year map is carried
	// but not serialised here.
	FirstYearAcceptedMonthlyFilings  map[t.Month]int
	SecondYearAcceptedMonthlyFilings map[t.Month]int
}

// NewStatisticsReport returns a newly constructed StatisticsReport with default values.
func NewStatisticsReport() *StatisticsReport {
	return &StatisticsReport{
		ClosedTransactions:               0,
		AcceptedTransactions:             0,
		RejectedTransactions:             0,
		FirstYearAcceptedMonthlyFilings:  initialiseMap(),
		SecondYearAcceptedMonthlyFilings: initialiseMap(),
	}
}

// initialiseMap returns a map with all twelve months mapped to 0 values,
// ready to be used.
func initialiseMap() map[t.Month]int {
	monthly := make(map[t.Month]int, 12)
	for m := t.January; m <= t.December; m++ {
		monthly[m] = 0
	}
	return monthly
}

// ToCSV returns a [][]string version of the data within the StatisticsReport
// struct provided: one header row followed by one data row.
func (sr *StatisticsReport) ToCSV() [][]string {
	csv := make([][]string, csvColumnRow)
	csv[0] = sr.constructHeaders()
	csv[1] = sr.getValues()
	return csv
}

// constructHeaders retrieves the headers for the CSV document: the twelve
// month names in calendar order followed by the three totals columns.
func (sr *StatisticsReport) constructHeaders() []string {
	headers := make([]string, csvColumnCount)
	for m := t.January; m <= t.December; m++ {
		headers[int(m)-1] = m.String()
	}
	headers[12] = "Total Closed"
	headers[13] = "Total Accepted"
	headers[14] = "Total Rejected"
	return headers
}

// getValues retrieves the data points which sit under the headers in the new
// CSV file, in the same column order as constructHeaders.
func (sr *StatisticsReport) getValues() []string {
	values := make([]string, csvColumnCount)
	// Iterate months in calendar order so each value lines up with its
	// header. Ranging over the map directly would emit the counts in
	// random order (Go map iteration order is randomised), misaligning
	// values against the month columns.
	for m := t.January; m <= t.December; m++ {
		values[int(m)-1] = strconv.Itoa(sr.FirstYearAcceptedMonthlyFilings[m])
	}
	values[12] = strconv.Itoa(sr.ClosedTransactions)
	values[13] = strconv.Itoa(sr.AcceptedTransactions)
	values[14] = strconv.Itoa(sr.RejectedTransactions)
	return values
}
package path
import (
"sort"
"github.com/xlucas/heap"
)
// Graph represents an arranged set of vertexes.
type Graph struct {
	remaining *heap.Heap         // vertexes not yet visited by the shortest-path search (ordering via vertexComparator)
	vertexMap map[string]*Vertex // vertexes indexed by their ID
}
// NewGraph creates a graph from a collection of vertexes. The remaining-set
// heap is built lazily by ShortestPath.
func NewGraph(vertexes []*Vertex) *Graph {
	return &Graph{
		vertexMap: prepareMap(vertexes),
	}
}
// pathTo traces back the path computed by the shortest path algorithm and
// returns it as an ordered list of vertexes, starting from origin.
func (g *Graph) pathTo(dst string) []*Vertex {
	var path []*Vertex
	// Walk predecessor links from dst back to the origin (whose prev stays
	// nil), prepending so the result reads origin -> dst.
	for vertex := g.vertexMap[dst]; vertex != nil; vertex = vertex.prev {
		path = append([]*Vertex{vertex}, path...)
	}
	return path
}
// prepareOrigin seeds the search: the origin's distance is set to 0 and each
// of its direct neighbours gets the distance of the arc leading to it, with
// the origin recorded as predecessor.
func (g *Graph) prepareOrigin(origin string) {
	src := g.vertexMap[origin]
	src.distance = 0
	// NOTE(review): assumes all other distances start at infinity and at
	// most one arc per destination — a second arc to the same destination
	// would overwrite rather than keep the minimum. Confirm.
	for _, arc := range src.Arcs {
		g.vertexMap[arc.Dst].distance = arc.Distance
		g.vertexMap[arc.Dst].prev = src
	}
}
// prepareRemainingSet is used to create the set of vertexes that needs to be
// visited by the shortest path algorithm, heapified with vertexComparator
// (presumably ordering by distance — see that type's definition).
func (g *Graph) prepareRemainingSet() {
	var slice []interface{}
	for _, v := range g.vertexMap {
		slice = append(slice, v)
	}
	g.remaining = heap.Heapify(slice, new(vertexComparator))
}
// ShortestPath computes the shortest path from src to dst within the graph,
// returning the path as an ordered vertex list or nil if dst is unreachable.
//
// NOTE(review): distances mutated during relaxation are not re-sifted into
// the heap here — confirm the heap implementation re-evaluates priorities on
// Pop; otherwise pop order can be stale (missing decrease-key).
// NOTE(review): the src == dst case never hits the early return below —
// confirm the intended result for that input.
func (g *Graph) ShortestPath(src, dst string) []*Vertex {
	g.prepareOrigin(src)
	g.prepareRemainingSet()
	// The first vertex is the origin of the graph.
	minVertex := g.remaining.Pop().(*Vertex)
	for minVertex != nil {
		// The origin popped above is never relaxed in this loop; its
		// neighbours were already seeded by prepareOrigin.
		if v, ok := g.remaining.Pop().(*Vertex); ok {
			minVertex = v
		} else {
			// Heap exhausted without reaching dst.
			break
		}
		if minVertex.ID == dst {
			return g.pathTo(dst)
		}
		sort.Sort(minVertex.Arcs)
		for _, arc := range minVertex.Arcs {
			dist := minVertex.distance + arc.Distance
			// Relax the arc if it improves on the best known distance.
			if neighbor := g.vertexMap[arc.Dst]; neighbor.distance == infinity || dist < neighbor.distance {
				neighbor.distance = dist
				neighbor.prev = minVertex
			}
		}
	}
	return nil
}
// prepareMap creates a vertex map indexed by vertex ID, from a slice of vertex.
func prepareMap(vertexes []*Vertex) map[string]*Vertex {
m := make(map[string]*Vertex)
for _, v := range vertexes {
m[v.ID] = v
}
return m
} | graph.go | 0.865253 | 0.560794 | graph.go | starcoder |
package stdlib
import (
"fmt"
"reflect"
)
// math is the registration table mapping interpreter symbols to the Go
// implementations below, with their help strings.
// NOTE(review): this package-level variable shadows the stdlib "math"
// package name within this package — confirm nothing here needs stdlib math.
var math = []mapEntry{
	// functions and operators
	entry("+", Add,
		"Returns sum of all number arguments",
		"Usage: (+ num1 num2 ...)",
	),
	entry("-", Sub,
		"Returns result of subtracting all the right-most args from first arg.",
	),
	entry("*", Mul,
		"Returns result of multiplying all number arguments",
	),
	entry("/", Div,
		"Returns result of continuously dividing first arg by remaining args",
	),
	entry(">", Gt,
		"Returns true if 1st arg is greater than the 2nd",
	),
	entry("<", Lt,
		"Returns true if 1st arg is less than second arg",
	),
	entry("==", Eq,
		"Returns true if all arguments are equal to each other",
	),
	entry("not", Not,
		"Returns true if argument is falsy, false otherwise",
	),
}
// Add returns the sum of all number arguments; with no arguments it returns 0.
func Add(vals ...float64) float64 {
	var sum float64
	for _, term := range vals {
		sum += term
	}
	return sum
}
// Sub returns result of subtracting from left-to-right.
func Sub(vals ...float64) float64 {
if len(vals) == 1 {
if vals[0] == 0 {
return 0
}
return -1 * vals[0]
}
for i := 1; i < len(vals); i++ {
vals[i] = -1 * vals[i]
}
return Add(vals...)
}
// Mul multiplies all numbers.
func Mul(vals ...float64) float64 {
result := 1.0
for _, val := range vals {
result = result * val
}
return result
}
// Div divides from left to right.
func Div(vals ...float64) float64 {
if len(vals) < 2 {
panic(fmt.Errorf("division requires at least 2 arguments, got %d", len(vals)))
}
result := vals[0]
for i := 1; i < len(vals); i++ {
result = result / vals[i]
}
return result
}
// Gt checks if lval is greater than rval
func Gt(lval, rval float64) bool {
return lval > rval
}
// Lt checks if lval is lesser than rval
func Lt(lval, rval float64) bool {
return lval < rval
}
// Eq checks if lval is same as rval
func Eq(vals ...interface{}) bool {
if len(vals) <= 1 {
return true
}
lval := vals[0]
for i := 1; i < len(vals); i++ {
if !reflect.DeepEqual(lval, vals[i]) {
return false
}
}
return true
}
// Not returns true if val is nil or false value and false
// otherwise.
func Not(val interface{}) bool {
if b, ok := val.(bool); ok {
return !b
}
if val == nil {
return true
}
return false
} | stdlib/math.go | 0.704262 | 0.461563 | math.go | starcoder |
package wal
// Copyright 2015 MediaMath <http://www.mediamath.com>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
import (
"fmt"
"time"
"unsafe"
)
// Entry contains the data extracted from insert/update/delete/commit records
type Entry struct {
Type RecordType
ReadFrom Location
Previous Location
TimelineID uint32
LogID uint32
TransactionID uint32
TablespaceID uint32
DatabaseID uint32
RelationID uint32
FromBlock uint32
FromOffset uint16
ToBlock uint32
ToOffset uint16
ParseTime int64
}
//EntryBytesSize is the size of the entries.
const EntryBytesSize = 61
// ToBytes converts an entry to a slice of bytes
func (e Entry) ToBytes() []byte {
timePtr := (*uint64)(unsafe.Pointer(&e.ParseTime))
return []byte{
byte(e.Type),
byte(e.ReadFrom.offset >> 56),
byte(e.ReadFrom.offset >> 48),
byte(e.ReadFrom.offset >> 40),
byte(e.ReadFrom.offset >> 32),
byte(e.ReadFrom.offset >> 24),
byte(e.ReadFrom.offset >> 16),
byte(e.ReadFrom.offset >> 8),
byte(e.ReadFrom.offset),
byte(e.Previous.offset >> 56),
byte(e.Previous.offset >> 48),
byte(e.Previous.offset >> 40),
byte(e.Previous.offset >> 32),
byte(e.Previous.offset >> 24),
byte(e.Previous.offset >> 16),
byte(e.Previous.offset >> 8),
byte(e.Previous.offset),
byte(e.TimelineID >> 24),
byte(e.TimelineID >> 16),
byte(e.TimelineID >> 8),
byte(e.TimelineID),
byte(e.LogID >> 24),
byte(e.LogID >> 16),
byte(e.LogID >> 8),
byte(e.LogID),
byte(e.TransactionID >> 24),
byte(e.TransactionID >> 16),
byte(e.TransactionID >> 8),
byte(e.TransactionID),
byte(e.TablespaceID >> 24),
byte(e.TablespaceID >> 16),
byte(e.TablespaceID >> 8),
byte(e.TablespaceID),
byte(e.DatabaseID >> 24),
byte(e.DatabaseID >> 16),
byte(e.DatabaseID >> 8),
byte(e.DatabaseID),
byte(e.RelationID >> 24),
byte(e.RelationID >> 16),
byte(e.RelationID >> 8),
byte(e.RelationID),
byte(e.FromBlock >> 24),
byte(e.FromBlock >> 16),
byte(e.FromBlock >> 8),
byte(e.FromBlock),
byte(e.FromOffset >> 8),
byte(e.FromOffset),
byte(e.ToBlock >> 24),
byte(e.ToBlock >> 16),
byte(e.ToBlock >> 8),
byte(e.ToBlock),
byte(e.ToOffset >> 8),
byte(e.ToOffset),
byte(*timePtr >> 56),
byte(*timePtr >> 48),
byte(*timePtr >> 40),
byte(*timePtr >> 32),
byte(*timePtr >> 24),
byte(*timePtr >> 16),
byte(*timePtr >> 8),
byte(*timePtr),
}
}
// EntryFromBytes reconstructs an entry from a slice of bytes
func EntryFromBytes(bs []byte) Entry {
parseTime := uint64(bs[53])<<56 + uint64(bs[54])<<48 + uint64(bs[55])<<40 + uint64(bs[56])<<32 + uint64(bs[57])<<24 + uint64(bs[58])<<16 + uint64(bs[59])<<8 + uint64(bs[60])
return Entry{
Type: RecordType(bs[0]),
ReadFrom: NewLocationWithDefaults(uint64(bs[1])<<56 + uint64(bs[2])<<48 + uint64(bs[3])<<40 + uint64(bs[4])<<32 + uint64(bs[5])<<24 + uint64(bs[6])<<16 + uint64(bs[7])<<8 + uint64(bs[8])),
Previous: NewLocationWithDefaults(uint64(bs[9])<<56 + uint64(bs[10])<<48 + uint64(bs[11])<<40 + uint64(bs[12])<<32 + uint64(bs[13])<<24 + uint64(bs[14])<<16 + uint64(bs[15])<<8 + uint64(bs[16])),
TimelineID: uint32(bs[17])<<24 + uint32(bs[18])<<16 + uint32(bs[19])<<8 + uint32(bs[20]),
LogID: uint32(bs[21])<<24 + uint32(bs[22])<<16 + uint32(bs[23])<<8 + uint32(bs[24]),
TransactionID: uint32(bs[25])<<24 + uint32(bs[26])<<16 + uint32(bs[27])<<8 + uint32(bs[28]),
TablespaceID: uint32(bs[29])<<24 + uint32(bs[30])<<16 + uint32(bs[31])<<8 + uint32(bs[32]),
DatabaseID: uint32(bs[33])<<24 + uint32(bs[34])<<16 + uint32(bs[35])<<8 + uint32(bs[36]),
RelationID: uint32(bs[37])<<24 + uint32(bs[38])<<16 + uint32(bs[39])<<8 + uint32(bs[40]),
FromBlock: uint32(bs[41])<<24 + uint32(bs[42])<<16 + uint32(bs[43])<<8 + uint32(bs[44]),
FromOffset: uint16(bs[45])<<8 + uint16(bs[46]),
ToBlock: uint32(bs[47])<<24 + uint32(bs[48])<<16 + uint32(bs[49])<<8 + uint32(bs[50]),
ToOffset: uint16(bs[51])<<8 + uint16(bs[52]),
ParseTime: int64(parseTime),
}
}
// NewEntries builds an entry from a page, record header, record body and a location
func NewEntries(page *Page, recordHeader *RecordHeader, recordBody *RecordBody) (entries []Entry) {
var now = time.Now().UnixNano()
heapData := recordBody.HeapData()
if len(heapData) > 0 {
for _, heapData := range heapData {
entries = append(entries, Entry{
Type: recordHeader.Type(),
ReadFrom: recordHeader.readFrom,
Previous: recordHeader.Previous(),
TimelineID: page.TimelineID(),
LogID: page.Location().LogID(),
TransactionID: recordHeader.TransactionID(),
TablespaceID: heapData.TablespaceID(),
DatabaseID: heapData.DatabaseID(),
RelationID: heapData.RelationID(),
FromBlock: heapData.FromBlock(),
FromOffset: heapData.FromOffset(),
ToBlock: heapData.ToBlock(),
ToOffset: heapData.ToOffset(),
ParseTime: now,
})
}
} else {
entries = append(entries, Entry{
Type: recordHeader.Type(),
ReadFrom: recordHeader.readFrom,
Previous: recordHeader.Previous(),
TimelineID: page.TimelineID(),
LogID: page.Location().LogID(),
TransactionID: recordHeader.TransactionID(),
ParseTime: now,
})
}
return
}
func (e Entry) String() string {
switch e.Type {
case Insert:
return fmt.Sprintf("Insert into %v/%v/%v::(%v,%v) on transaction id %v read from %v/%v",
e.TablespaceID, e.DatabaseID, e.RelationID, e.ToBlock, e.ToOffset, e.TransactionID, e.TimelineID, e.ReadFrom)
case Update:
return fmt.Sprintf("Update in %v/%v/%v::(%v,%v)->(%v,%v) on transaction id %v read from %v/%v",
e.TablespaceID, e.DatabaseID, e.RelationID, e.FromBlock, e.FromOffset, e.ToBlock, e.ToOffset, e.TransactionID, e.TimelineID, e.ReadFrom)
case Delete:
return fmt.Sprintf("Delete from %v/%v/%v::(%v,%v) on transaction id %v read from %v/%v",
e.TablespaceID, e.DatabaseID, e.RelationID, e.FromBlock, e.FromOffset, e.TransactionID, e.TimelineID, e.ReadFrom)
case Commit:
return fmt.Sprintf("Commit of transaction id %v read from %v/%v", e.TransactionID, e.TimelineID, e.ReadFrom)
case Abort:
return fmt.Sprintf("Abort of transaction id %v read from %v/%v", e.TransactionID, e.TimelineID, e.ReadFrom)
}
return fmt.Sprintf("Unknown WAL Entry read from %v/%v", e.TimelineID, e.ReadFrom)
} | pg/wal/entry.go | 0.561215 | 0.41052 | entry.go | starcoder |
package effects
import (
"image"
"runtime"
)
// CTOpts options to pass to the Cartoon effect
type CTOpts struct {
// BlurKernelSize is the gaussian blur kernel size. You might need to blur
// the original input image to reduce the amount of noise you get in the edge
// detection phase. Set to 0 to skip blur, otherwise the number must be an
// odd number, the bigger the number the more blur
BlurKernelSize int
// EdgeThreshold is a number between 0 and 255 that specifies a cutoff point to
// determine if an intensity change is an edge. Make smaller to include more details
// as edges
EdgeThreshold int
// OilFilterSize specifies how bold the simulated strokes will be when turning the
// style towards a painting, something around 5,10,15 should work well
OilFilterSize int
// OilLevels is the number of levels that the oil painting style will bucket colors in
// to. Larger number to get more detail.
OilLevels int
// DebugPath is not empty is assumed to be a path where intermediate debug files can
// be written to, such as the gaussian blured image and the sobel edge detection. This
// can be useful for tweaking parameters
DebugPath string
}
type cartoon struct {
opts CTOpts
}
// Apply runs the image through the cartoon filter
func (c *cartoon) Apply(img *Image, numRoutines int) (*Image, error) {
if numRoutines == 0 {
numRoutines = runtime.GOMAXPROCS(0)
}
pipeline := Pipeline{}
if c.opts.BlurKernelSize > 0 {
pipeline.Add(NewGaussian(c.opts.BlurKernelSize, 1), nil)
}
pipeline.Add(NewGrayscale(GSLUMINOSITY), nil)
pipeline.Add(NewSobel(c.opts.EdgeThreshold, false), nil)
edgeImg, err := pipeline.Run(img, numRoutines)
if err != nil {
return nil, err
}
edgePix := edgeImg.img.Pix
pf := func(ri, x, y, offset, inStride int, inPix, outPix []uint8) {
r := inPix[offset]
g := inPix[offset+1]
b := inPix[offset+2]
rEdge := edgePix[offset]
if rEdge == 255 {
r = 0
b = 0
g = 0
}
outPix[offset] = r
outPix[offset+1] = g
outPix[offset+2] = b
outPix[offset+3] = 255
}
oil := NewOilPainting(c.opts.OilFilterSize, c.opts.OilLevels)
oilImg, err := oil.Apply(img, numRoutines)
if err != nil {
return nil, err
}
out := &Image{
img: image.NewRGBA(image.Rectangle{
Min: image.Point{X: 0, Y: 0},
Max: image.Point{X: img.Width, Y: img.Height},
}),
Width: img.Width,
Height: img.Height,
// Have to take in to account pixels are lost in some of the effects around the edges,
// so so only have the area where the two rections intersect from the edge detection and
// the oil painting effect
Bounds: oilImg.Bounds.Intersect(edgeImg.Bounds),
}
runParallel(numRoutines, oilImg, out.Bounds, out, pf, 0)
return out, nil
}
// NewCartoon returns an effect that renders images as if they are drawn like a cartoon.
// It works by rendering the input image using the OilPainting effect, then drawing lines
// ontop of the image based on the Sobel edge detection method. You will probably have to
// play with the opts values to get a good result. Some starting values are:
// BlurKernelSize: 21
// EdgeThreshold: 40
// OilFilterSize: 15
// OilLevels: 15
func NewCartoon(opts CTOpts) Effect {
return &cartoon{
opts: opts,
}
} | pkg/effects/cartoon.go | 0.73077 | 0.503418 | cartoon.go | starcoder |
package setop
import (
"bytes"
"fmt"
)
type Skipper interface {
// skip returns a value matching the min and inclusive criteria.
// If the last yielded value matches the criteria the same value will be returned again.
Skip(min []byte, inc bool) (result *SetOpResult, err error)
}
func createSkippersAndWeights(r RawSourceCreator, sources []SetOpSource) (skippers []Skipper, weights []float64) {
skippers = make([]Skipper, len(sources))
weights = make([]float64, len(sources))
for index, source := range sources {
if source.Key != nil {
skippers[index] = r(source.Key)
} else {
skippers[index] = createSkipper(r, source.SetOp)
}
if source.Weight != nil {
weights[index] = *source.Weight
} else {
weights[index] = 1
}
}
return
}
func createSkipper(r RawSourceCreator, op *SetOp) (result Skipper) {
skippers, weights := createSkippersAndWeights(r, op.Sources)
switch op.Type {
case Union:
result = &unionOp{
skippers: skippers,
weights: weights,
merger: getMerger(op.Merge),
}
case Intersection:
result = &interOp{
skippers: skippers,
weights: weights,
merger: getMerger(op.Merge),
}
case Difference:
result = &diffOp{
skippers: skippers,
weights: weights,
merger: getMerger(op.Merge),
}
case Xor:
result = &xorOp{
skippers: skippers,
weights: weights,
merger: getMerger(op.Merge),
}
default:
panic(fmt.Errorf("Unknown SetOp Type %v", op.Type))
}
return
}
type xorOp struct {
skippers []Skipper
weights []float64
curr *SetOpResult
merger mergeFunc
}
func (self *xorOp) Skip(min []byte, inc bool) (result *SetOpResult, err error) {
gt := 0
if inc {
gt = -1
}
if self.curr != nil && bytes.Compare(self.curr.Key, min) > gt {
result = self.curr
return
}
newSkippers := make([]Skipper, 0, len(self.skippers))
var res *SetOpResult
var cmp int
var multi bool
for result == nil {
for index, thisSkipper := range self.skippers {
if res, err = thisSkipper.Skip(min, inc); err != nil {
result = nil
self.curr = nil
return
}
if res != nil {
newSkippers = append(newSkippers, thisSkipper)
if result == nil {
result = res.ShallowCopy()
result.Values = self.merger(nil, result.Values, self.weights[index])
multi = false
} else {
cmp = bytes.Compare(res.Key, result.Key)
if cmp < 0 {
multi = false
result = res.ShallowCopy()
result.Values = self.merger(nil, result.Values, self.weights[index])
} else if cmp == 0 {
multi = true
}
}
}
}
if len(newSkippers) == 0 {
result = nil
self.curr = nil
return
}
if result != nil && multi {
min = result.Key
inc = false
result = nil
}
self.skippers = newSkippers
newSkippers = newSkippers[:0]
}
self.curr = result
return
}
type unionOp struct {
skippers []Skipper
weights []float64
curr *SetOpResult
merger mergeFunc
}
func (self *unionOp) Skip(min []byte, inc bool) (result *SetOpResult, err error) {
gt := 0
if inc {
gt = -1
}
if self.curr != nil && bytes.Compare(self.curr.Key, min) > gt {
result = self.curr
return
}
newSkippers := make([]Skipper, 0, len(self.skippers))
var cmp int
var res *SetOpResult
for index, thisSkipper := range self.skippers {
if res, err = thisSkipper.Skip(min, inc); err != nil {
result = nil
self.curr = nil
return
}
if res != nil {
newSkippers = append(newSkippers, thisSkipper)
if result == nil {
result = res.ShallowCopy()
result.Values = self.merger(nil, result.Values, self.weights[index])
} else {
cmp = bytes.Compare(res.Key, result.Key)
if cmp < 0 {
result = res.ShallowCopy()
result.Values = self.merger(nil, result.Values, self.weights[index])
} else if cmp == 0 {
result.Values = self.merger(result.Values, res.Values, self.weights[index])
}
}
}
}
self.skippers = newSkippers
self.curr = result
return
}
type interOp struct {
skippers []Skipper
weights []float64
curr *SetOpResult
merger mergeFunc
}
func (self *interOp) Skip(min []byte, inc bool) (result *SetOpResult, err error) {
gt := 0
if inc {
gt = -1
}
if self.curr != nil && bytes.Compare(self.curr.Key, min) > gt {
result = self.curr
return
}
var maxKey []byte
var res *SetOpResult
var cmp int
for result == nil {
maxKey = nil
for index, thisSkipper := range self.skippers {
if res, err = thisSkipper.Skip(min, inc); res == nil || err != nil {
result = nil
self.curr = nil
return
}
if maxKey == nil {
maxKey = res.Key
result = res.ShallowCopy()
result.Values = self.merger(nil, result.Values, self.weights[index])
} else {
cmp = bytes.Compare(res.Key, maxKey)
if cmp != 0 {
if cmp > 0 {
maxKey = res.Key
}
result = nil
} else {
result.Values = self.merger(result.Values, res.Values, self.weights[index])
}
}
}
min = maxKey
inc = true
}
self.curr = result
return
}
type diffOp struct {
skippers []Skipper
weights []float64
curr *SetOpResult
merger mergeFunc
}
func (self *diffOp) Skip(min []byte, inc bool) (result *SetOpResult, err error) {
gt := 0
if inc {
gt = -1
}
if self.curr != nil && bytes.Compare(self.curr.Key, min) > gt {
result = self.curr
return
}
var newSkippers = make([]Skipper, 0, len(self.skippers))
var res *SetOpResult
for result == nil {
for index, thisSkipper := range self.skippers {
if res, err = thisSkipper.Skip(min, inc); err != nil {
result = nil
self.curr = nil
return
}
if index == 0 {
if res == nil {
result = nil
self.curr = nil
return
}
result = res.ShallowCopy()
result.Values = self.merger(nil, result.Values, self.weights[0])
newSkippers = append(newSkippers, thisSkipper)
min = res.Key
inc = true
} else {
if res != nil {
newSkippers = append(newSkippers, thisSkipper)
if bytes.Compare(min, res.Key) == 0 {
result = nil
break
}
}
}
}
self.skippers = newSkippers
newSkippers = newSkippers[:0]
inc = false
}
self.curr = result
return
} | setop/operations.go | 0.744656 | 0.456228 | operations.go | starcoder |
package server
import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TrafficFlowType defines allowed direction of the traffic in the rule
type TrafficFlowType int
const (
// TrafficFlowBidirect allows traffic to both direction
TrafficFlowBidirect TrafficFlowType = iota
)
// Rule of ACL for groups
type Rule struct {
// ID of the rule
ID string
// Name of the rule visible in the UI
Name string
// Source list of groups IDs of peers
Source []string
// Destination list of groups IDs of peers
Destination []string
// Flow of the traffic allowed by the rule
Flow TrafficFlowType
}
func (r *Rule) Copy() *Rule {
return &Rule{
ID: r.ID,
Name: r.Name,
Source: r.Source[:],
Destination: r.Destination[:],
Flow: r.Flow,
}
}
// GetRule of ACL from the store
func (am *DefaultAccountManager) GetRule(accountID, ruleID string) (*Rule, error) {
am.mux.Lock()
defer am.mux.Unlock()
account, err := am.Store.GetAccount(accountID)
if err != nil {
return nil, status.Errorf(codes.NotFound, "account not found")
}
rule, ok := account.Rules[ruleID]
if ok {
return rule, nil
}
return nil, status.Errorf(codes.NotFound, "rule with ID %s not found", ruleID)
}
// SaveRule of ACL in the store
func (am *DefaultAccountManager) SaveRule(accountID string, rule *Rule) error {
am.mux.Lock()
defer am.mux.Unlock()
account, err := am.Store.GetAccount(accountID)
if err != nil {
return status.Errorf(codes.NotFound, "account not found")
}
account.Rules[rule.ID] = rule
return am.Store.SaveAccount(account)
}
// DeleteRule of ACL from the store
func (am *DefaultAccountManager) DeleteRule(accountID, ruleID string) error {
am.mux.Lock()
defer am.mux.Unlock()
account, err := am.Store.GetAccount(accountID)
if err != nil {
return status.Errorf(codes.NotFound, "account not found")
}
delete(account.Rules, ruleID)
return am.Store.SaveAccount(account)
}
// ListRules of ACL from the store
func (am *DefaultAccountManager) ListRules(accountID string) ([]*Rule, error) {
am.mux.Lock()
defer am.mux.Unlock()
account, err := am.Store.GetAccount(accountID)
if err != nil {
return nil, status.Errorf(codes.NotFound, "account not found")
}
rules := make([]*Rule, 0, len(account.Rules))
for _, item := range account.Rules {
rules = append(rules, item)
}
return rules, nil
} | management/server/rule.go | 0.574992 | 0.401248 | rule.go | starcoder |
package main
import (
"fmt"
"strconv"
"github.com/budavariam/advent_of_code/2018/utils"
)
func main() {
data := utils.LoadInput("11_2")
x, y, size, _ := GetLargestPowerLevelOfAnySize(data[0], 300, 300)
fmt.Printf("%d,%d,%d\n", x, y, size)
}
// GetLargestPowerLevelOfAnySize returns the largest power level in the grid with the grid size
func GetLargestPowerLevelOfAnySize(input string, gridY, gridX int) (int, int, int, int) {
serialNumber, _ := strconv.Atoi(input)
matrix := generatePowerLevelGrid(serialNumber, gridY, gridX)
maxSumGridSize, maxSumX, maxSumY, maxPower := 0, 0, 0, 0
for convGridSize := 1; convGridSize <= 300; convGridSize++ {
// fmt.Println("Gridsize", convGridSize)
result, x, y := findLargestGridSumCoordinate(matrix, gridY, gridX, convGridSize, convGridSize)
if result > maxPower {
maxSumGridSize = convGridSize
maxSumX = x
maxSumY = y
maxPower = result
}
}
return maxSumX, maxSumY, maxSumGridSize, maxPower
}
func generatePowerLevelGrid(serialNumber, height, width int) [][]int {
matrix := make([][]int, height)
for y := range matrix {
matrix[y] = make([]int, width)
for x := range matrix[y] {
matrix[y][x] = CalculatePowerLevel(serialNumber, y+1, x+1)
}
}
return matrix
}
func findLargestGridSumCoordinate(matrix [][]int, height, width, convGridY, convGridX int) (int, int, int) {
max, maxTopLeftY, maxTopLeftX := 0, 0, 0
for y := 0; y <= height-convGridY; y++ {
for x := 0; x <= width-convGridX; x++ {
sum := 0
for convY := 0; convY < convGridY; convY++ {
for convX := 0; convX < convGridX; convX++ {
sum += matrix[y+convY][x+convX]
}
}
if sum > max {
max = sum
maxTopLeftX = x + 1
maxTopLeftY = y + 1
}
}
}
return max, maxTopLeftX, maxTopLeftY
}
/*CalculatePowerLevel counts the result by the task definition
- Find the fuel cell's rack ID, which is its X coordinate plus 10.
- Begin with a power level of the rack ID times the Y coordinate.
- Increase the power level by the value of the grid serial number (your puzzle input).
- Set the power level to itself multiplied by the rack ID.
- Keep only the hundreds digit of the power level (so 12345 becomes 3; numbers with no hundreds digit become 0).
- Subtract 5 from the power level
*/
func CalculatePowerLevel(serialNumber, y, x int) int {
rackID := x + 10
return ((((rackID*y)+serialNumber)*rackID)%1000)/100 - 5
} | 2018/11_2/solution.go | 0.690246 | 0.448909 | solution.go | starcoder |
package rtree
import (
"math"
"sort"
)
type Feature interface {
Mbr() Mbr
Equals(f Feature) bool
}
type Rtree struct {
dim int
fan int
halfFan int
root *node
size int32
height int8
}
func NewRtree(dim int, fan int, features ...Feature) *Rtree {
t := &Rtree{
dim: dim,
fan: fan,
halfFan: (fan / 2),
root: &node{
objs: []*object{},
leaf: true,
level: 1,
},
size: 0,
height: 1,
}
if len(features) <= fan {
for _, feature := range features {
t.Insert(feature)
}
} else {
t.bulkLoad(features)
}
return t
}
func (t *Rtree) Dim() int {
return t.dim
}
func (t *Rtree) Size() int32 {
return t.size
}
func (t *Rtree) Height() int8 {
return t.height
}
func (t *Rtree) Insert(feature Feature) {
obj := &object{
mbr: feature.Mbr(),
feature: feature,
}
t.insertObj(obj, 1)
t.size++
}
func (t *Rtree) insertObj(e *object, level int8) {
leaf := t.chooseNode(t.root, e, level)
leaf.objs = append(leaf.objs, e)
if e.node != nil {
e.node.parent = leaf
}
var split *node
if len(leaf.objs) > t.fan {
leaf, split = leaf.split(t.halfFan)
}
root, splitRoot := t.adjustTree(leaf, split)
if splitRoot != nil {
oldRoot := root
t.height++
t.root = &node{
parent: nil,
level: t.height,
objs: []*object{
&object{
mbr: oldRoot.computeMbr(),
node: oldRoot,
},
&object{
mbr: splitRoot.computeMbr(),
node: splitRoot,
},
},
}
oldRoot.parent = t.root
splitRoot.parent = t.root
}
}
func (t *Rtree) bulkLoad(features []Feature) {
n := len(features)
objs := make([]*object, n)
for i, feature := range features {
objs[i] = &object{
mbr: feature.Mbr(),
feature: feature,
}
}
t.root.leaf = false
t.size = int32(n)
t.height = int8(math.Ceil(math.Log(float64(n)) / float64(math.Log(float64(t.fan)))))
t.root.level = t.height
nsub := int(math.Pow(float64(t.fan), float64(t.height-1)))
s := int(math.Floor(math.Sqrt(math.Ceil(float64(n) / float64(nsub)))))
sortByDim(0, objs)
t.root.objs = make([]*object, s)
for i, part := range splitInS(s, objs) {
node := t.omt(t.root.level-1, part, t.fan)
node.parent = t.root
t.root.objs[i] = &object{
mbr: node.computeMbr(),
node: node,
}
}
}
func (t *Rtree) omt(level int8, objs []*object, m int) *node {
if len(objs) <= m {
return &node{
leaf: true,
objs: objs,
level: level,
}
}
sortByDim(int(t.height-level)%t.dim, objs)
n := &node{
level: level,
objs: make([]*object, 0, m),
}
for _, part := range splitByM(m, objs) {
node := t.omt(level-1, part, m)
node.parent = n
n.objs = append(n.objs, &object{
mbr: node.computeMbr(),
node: node,
})
}
return n
}
func (t *Rtree) chooseNode(n *node, obj *object, level int8) *node {
if n.leaf || n.level == level {
return n
}
diff := math.MaxFloat64
var chosen *object
for _, en := range n.objs {
mbr := MergeMbrs(en.mbr, obj.mbr)
d := mbr.size() - en.mbr.size()
if d < diff || (d == diff && en.mbr.size() < chosen.mbr.size()) {
diff = d
chosen = en
}
}
return t.chooseNode(chosen.node, obj, level)
}
func (t *Rtree) adjustTree(n, nn *node) (*node, *node) {
if n == t.root {
return n, nn
}
en := n.getObject()
en.mbr = n.computeMbr()
if nn == nil {
return t.adjustTree(n.parent, nil)
}
enn := &object{
mbr: nn.computeMbr(),
node: nn,
feature: nil,
}
n.parent.objs = append(n.parent.objs, enn)
if len(n.parent.objs) > t.fan {
return t.adjustTree(n.parent.split(t.halfFan))
}
return t.adjustTree(n.parent, nil)
}
func (t *Rtree) Search(mbr Mbr) []Feature {
return t.searchIntersect([]Feature{}, t.root, mbr)
}
func (t *Rtree) searchIntersect(results []Feature, n *node, mbr Mbr) []Feature {
for _, e := range n.objs {
if !mbr.Intersects(e.mbr) {
continue
}
if !n.leaf {
results = t.searchIntersect(results, e.node, mbr)
continue
}
results = append(results, e.feature)
}
return results
}
func (t *Rtree) Remove(feature Feature) bool {
n := t.findLeaf(t.root, feature)
if n == nil {
return false
}
ind := -1
for i, e := range n.objs {
if e.feature.Equals(feature) {
ind = i
}
}
if ind < 0 {
return false
}
n.objs = append(n.objs[:ind], n.objs[ind+1:]...)
t.condenseTree(n)
t.size--
if !t.root.leaf && len(t.root.objs) == 1 {
t.root = t.root.objs[0].node
}
t.height = t.root.level
return true
}
func (t *Rtree) findLeaf(n *node, feature Feature) *node {
if n.leaf {
return n
}
for _, e := range n.objs {
if e.mbr.Contains(feature.Mbr()) {
leaf := t.findLeaf(e.node, feature)
if leaf == nil {
continue
}
for _, leafEntry := range leaf.objs {
if leafEntry.feature.Equals(feature) {
return leaf
}
}
}
}
return nil
}
func (t *Rtree) condenseTree(n *node) {
deleted := []*node{}
for n != t.root {
if len(n.objs) < t.halfFan {
objs := []*object{}
for _, obj := range n.parent.objs {
if obj.node != n {
objs = append(objs, obj)
}
}
n.parent.objs = objs
if len(n.objs) > 0 {
deleted = append(deleted, n)
}
} else {
n.getObject().mbr = n.computeMbr()
}
n = n.parent
}
for _, node := range deleted {
obj := &object{
mbr: node.computeMbr(),
node: node,
feature: nil,
}
t.insertObj(obj, node.level+1)
}
}
type node struct {
parent *node
leaf bool
objs []*object
level int8
}
func (n *node) getObject() *object {
var e *object
for i := range n.parent.objs {
if n.parent.objs[i].node == n {
e = n.parent.objs[i]
break
}
}
return e
}
func (n *node) computeMbr() Mbr {
mbrs := make([]Mbr, len(n.objs))
for i, obj := range n.objs {
mbrs[i] = obj.mbr
}
return MergeMbrs(mbrs...)
}
func (n *node) split(minGroupSize int) (left, right *node) {
l, r := n.pickSeeds()
leftSeed, rightSeed := n.objs[l], n.objs[r]
remaining := append(n.objs[:l], n.objs[l+1:r]...)
remaining = append(remaining, n.objs[r+1:]...)
left = n
left.objs = []*object{leftSeed}
right = &node{
parent: n.parent,
leaf: n.leaf,
level: n.level,
objs: []*object{rightSeed},
}
// TODO
if rightSeed.node != nil {
rightSeed.node.parent = right
}
if leftSeed.node != nil {
leftSeed.node.parent = left
}
for len(remaining) > 0 {
next := pickNext(left, right, remaining)
e := remaining[next]
if len(remaining)+len(left.objs) <= minGroupSize {
assign(e, left)
} else if len(remaining)+len(right.objs) <= minGroupSize {
assign(e, right)
} else {
assignGroup(e, left, right)
}
remaining = append(remaining[:next], remaining[next+1:]...)
}
return
}
func (n *node) pickSeeds() (int, int) {
left, right := 0, 1
maxWastedSpace := -1.0
for i, obj1 := range n.objs {
for j, obj2 := range n.objs[i+1:] {
d := MergeMbrs(obj1.mbr, obj2.mbr).size() - obj1.mbr.size() - obj2.mbr.size()
if d > maxWastedSpace {
maxWastedSpace = d
left, right = i, j+i+1
}
}
}
return left, right
}
func pickNext(left *node, right *node, objs []*object) (next int) {
maxDiff := -1.0
leftMbr := left.computeMbr()
rightMbr := right.computeMbr()
for i, obj := range objs {
d1 := MergeMbrs(leftMbr, obj.mbr).size() - leftMbr.size()
d2 := MergeMbrs(rightMbr, obj.mbr).size() - rightMbr.size()
d := math.Abs(d1 - d2)
if d > maxDiff {
maxDiff = d
next = i
}
}
return
}
type object struct {
mbr Mbr
node *node
feature Feature
}
type dimSorter struct {
dim int
objs []*object
}
func (s *dimSorter) Len() int {
return len(s.objs)
}
func (s *dimSorter) Swap(i, j int) {
s.objs[i], s.objs[j] = s.objs[j], s.objs[i]
}
func (s *dimSorter) Less(i, j int) bool {
m1 := s.objs[i].mbr
m2 := s.objs[j].mbr
switch m1.Type() {
case MbrTypeInt32:
a, aok := m1.(*MbrInt32)
b, bok := m2.(*MbrInt32)
if aok && bok {
return (*a)[s.dim*2] < (*b)[s.dim*2]
}
case MbrTypeFloat64:
a, aok := m1.(*MbrFloat64)
b, bok := m2.(*MbrFloat64)
if aok && bok {
return a.mins[s.dim] < b.mins[s.dim]
}
}
return false
}
// splitByM splits objects into slices of maximum m objects.
// Split 10 in to 3 will yield 3 + 3 + 3 + 1
func splitByM(m int, objs []*object) [][]*object {
perSlice := len(objs) / m
numSlices := m
if len(objs)%m != 0 {
numSlices++
}
split := make([][]*object, numSlices)
for i := 0; i < numSlices; i++ {
if i == numSlices-1 {
split[i] = objs[i*perSlice:]
break
}
split[i] = objs[i*perSlice : i*perSlice+perSlice]
}
return split
}
func splitInS(s int, objs []*object) [][]*object {
split := splitByM(s, objs)
if len(split) < 2 {
return split
}
last := split[len(split)-1]
secondLast := split[len(split)-2]
if len(last) < len(secondLast) {
merged := append(secondLast, last...)
split = split[:len(split)-1]
split[len(split)-1] = merged
}
return split
}
func sortByDim(dim int, objs []*object) {
sort.Sort(&dimSorter{dim, objs})
}
func assign(obj *object, group *node) {
if obj.node != nil {
obj.node.parent = group
}
group.objs = append(group.objs, obj)
}
func assignGroup(obj *object, left, right *node) {
leftMbr := left.computeMbr()
rightMbr := right.computeMbr()
leftEnlarged := MergeMbrs(leftMbr, obj.mbr)
rightEnlarged := MergeMbrs(rightMbr, obj.mbr)
leftDiff := leftEnlarged.size() - leftMbr.size()
rightDiff := rightEnlarged.size() - rightMbr.size()
if diff := leftDiff - rightDiff; diff < 0 {
assign(obj, left)
return
} else if diff > 0 {
assign(obj, right)
return
}
if diff := leftMbr.size() - rightMbr.size(); diff < 0 {
assign(obj, left)
return
} else if diff > 0 {
assign(obj, right)
return
}
if diff := len(left.objs) - len(right.objs); diff <= 0 {
assign(obj, left)
return
}
assign(obj, right)
} | rtree.go | 0.587943 | 0.464476 | rtree.go | starcoder |
package channels
// BatchingChannel implements the Channel interface, with the change that instead of producing individual elements
// on Out(), it batches together the entire internal buffer each time. Trying to construct an unbuffered batching channel
// will panic, that configuration is not supported (and provides no benefit over an unbuffered NativeChannel).
type BatchingChannel struct {
	input, output chan interface{} // caller-facing channels returned by In() and Out()
	length chan int // serves Len() queries from the buffering goroutine
	buffer []interface{} // the batch currently being accumulated
	size BufferCap // maximum batch size (Infinity for unbounded)
}
// NewBatchingChannel constructs a BatchingChannel with the given buffer
// capacity and starts its internal buffering goroutine. An unbuffered size
// (None) is not supported and panics, as does any negative size other than
// Infinity.
func NewBatchingChannel(size BufferCap) *BatchingChannel {
	switch {
	case size == None:
		panic("channels: BatchingChannel does not support unbuffered behaviour")
	case size < 0 && size != Infinity:
		panic("channels: invalid negative size in NewBatchingChannel")
	}
	ch := &BatchingChannel{
		input:  make(chan interface{}),
		output: make(chan interface{}),
		length: make(chan int),
		size:   size,
	}
	go ch.batchingBuffer()
	return ch
}
// In returns the write-only channel used to send individual elements into
// the BatchingChannel for batching.
func (ch *BatchingChannel) In() chan<- interface{} {
	return ch.input
}
// Out returns a <-chan interface{} in order that BatchingChannel conforms to the standard Channel interface provided
// by this package, however each output value is guaranteed to be of type []interface{} - a slice collecting the most
// recent batch of values sent on the In channel. The slice is guaranteed to not be empty or nil. In practice the net
// result is that you need an additional type assertion to access the underlying values.
// After Close() the final batch (if any) is delivered and the channel is then closed.
func (ch *BatchingChannel) Out() <-chan interface{} {
	return ch.output
}
// Len reports the number of elements currently held in the internal buffer.
// The value is served by the buffering goroutine; once that goroutine exits
// and closes the length channel, this returns 0.
func (ch *BatchingChannel) Len() int {
	return <-ch.length
}
// Cap returns the buffer capacity this BatchingChannel was constructed with.
func (ch *BatchingChannel) Cap() BufferCap {
	return ch.size
}
// Close signals that no further values will be sent. The buffering goroutine
// delivers any remaining batch and then closes the Out() channel.
func (ch *BatchingChannel) Close() {
	close(ch.input)
}
// batchingBuffer is the goroutine that shuttles values from the input channel
// into the internal buffer and offers the accumulated batch on the output
// channel. Each iteration it enables or disables the input/output select
// cases (a nil channel makes a case never ready) based on buffer state:
//   - empty buffer: only receive input (nothing to emit yet)
//   - full buffer (bounded size reached): only offer output
//   - otherwise: both receive and offer
// When the input channel is closed it drains by emitting the remaining batch,
// then closes the output and length channels.
func (ch *BatchingChannel) batchingBuffer() {
	// nextInput remembers the live input channel; input is set to it or to
	// nil (disabled) by the state logic below.
	var input, output, nextInput chan interface{}
	nextInput = ch.input
	input = nextInput
	for input != nil || output != nil {
		select {
		case elem, open := <-input:
			if open {
				ch.buffer = append(ch.buffer, elem)
			} else {
				// Input closed: permanently disable the input case.
				input = nil
				nextInput = nil
			}
		case output <- ch.buffer:
			// The batch was consumed; start accumulating a fresh one.
			ch.buffer = nil
		case ch.length <- len(ch.buffer):
			// Serve a Len() query.
		}
		if len(ch.buffer) == 0 {
			// Nothing to emit: only wait for input (nil if already closed).
			input = nextInput
			output = nil
		} else if ch.size != Infinity && len(ch.buffer) >= int(ch.size) {
			// Batch is full: stop accepting input until it is consumed.
			input = nil
			output = ch.output
		} else {
			input = nextInput
			output = ch.output
		}
	}
	close(ch.output)
	close(ch.length)
}
// Package cpu provides an emulator for the unnamed time travel control computer/wearable from AoC 2018.
package cpu
import (
"fmt"
)
// Op represents one of the device's 16 opcodes.
type Op int

const (
	// AddR (add register), rC = rA + rB.
	AddR Op = iota
	// AddI (add immediate), rC = rA + #B.
	AddI
	// MulR (multiply register), rC = rA * rB.
	MulR
	// MulI (multiply immediate), rC = rA * #B.
	MulI
	// BanR (bitwise AND register), rC = rA & rB.
	BanR
	// BanI (bitwise AND immediate), rC = rA & #B.
	BanI
	// BorR (bitwise OR register), rC = rA | rB.
	BorR
	// BorI (bitwise OR immediate), rC = rA | #B.
	BorI
	// SetR (set register), rC = rA, B ignored.
	SetR
	// SetI (set immediate), rC = #A, B ignored.
	SetI
	// GtIR (greater-than immediate/register), rC = #A > rB.
	GtIR
	// GtRI (greater-than register/immediate), rC = rA > #B.
	GtRI
	// GtRR (greater-than register/register), rC = rA > rB.
	GtRR
	// EqIR (equal immediate/register), rC = #A == rB.
	EqIR
	// EqRI (equal register/immediate), rC = rA == #B.
	EqRI
	// EqRR (equal register/register), rC = rA == rB.
	EqRR
	// FirstOp is the numerically first opcode; you can iterate from it to LastOp.
	FirstOp = AddR
	// LastOp is the numerically last opcode; you can iterate to it from FirstOp.
	LastOp = EqRR
)

var (
	// opToName maps each opcode to its assembler mnemonic.
	opToName = [...]string{
		AddR: "addr", AddI: "addi", MulR: "mulr", MulI: "muli", BanR: "banr", BanI: "bani", BorR: "borr", BorI: "bori",
		SetR: "setr", SetI: "seti", GtIR: "gtir", GtRI: "gtri", GtRR: "gtrr", EqIR: "eqir", EqRI: "eqri", EqRR: "eqrr",
	}
	// nameToOp is the inverse mapping (mnemonic -> opcode), built in init.
	nameToOp map[string]Op
)

// init populates nameToOp from opToName.
func init() {
	nameToOp = make(map[string]Op)
	for op := FirstOp; op <= LastOp; op++ {
		nameToOp[opToName[op]] = op
	}
}
// String returns the opcode's assembler mnemonic. For values outside the
// valid opcode range it returns a "Op(N)" placeholder instead of panicking
// on the out-of-range array index, following fmt.Stringer convention.
func (op Op) String() string {
	if op < FirstOp || op > LastOp {
		return fmt.Sprintf("Op(%d)", int(op))
	}
	return opToName[op]
}
// OpNamed returns the opcode with the given mnemonic, or false as the second argument if the mnemonic is not valid.
func OpNamed(s string) (op Op, ok bool) {
	op, ok = nameToOp[s]
	if !ok {
		return 0, false
	}
	return op, true
}
// Inst represents a single instruction: an opcode, and the A, B and C operands.
type Inst struct {
	Op      Op
	A, B, C int
}

// ParseInst converts the textual format of an instruction (e.g., "seti 5 0 1") to an instruction.
// It returns ok == false if the line does not scan as "<mnemonic> <A> <B> <C>"
// or the mnemonic is not a known opcode.
func ParseInst(s string) (i Inst, ok bool) {
	var op string
	// %s stops at whitespace, so this reads the mnemonic plus three integers.
	if _, err := fmt.Sscanf(s, "%s %d %d %d", &op, &i.A, &i.B, &i.C); err != nil {
		return Inst{}, false
	} else if i.Op, ok = OpNamed(op); !ok {
		return Inst{}, false
	}
	return i, true
}
// Prog represents an entire program: a sequence of instructions, together with IP binding instructions.
type Prog struct {
	Code    []Inst // the instructions, in order
	IPBound bool   // whether the instruction pointer is bound to a register
	IPR     int    // register index the IP is bound to (meaningful when IPBound)
}

// ParseProg reads the text of a program (one symbolic instruction per line, plus an optional IP binding header
// of the form "#ip N" on the first line). It returns an error for the first
// line that fails to parse as an instruction.
func ParseProg(lines []string) (p Prog, err error) {
	if len(lines) == 0 {
		return Prog{}, nil
	}
	// An optional "#ip N" header binds the instruction pointer to register N.
	if _, err := fmt.Sscanf(lines[0], "#ip %d", &p.IPR); err == nil {
		p.IPBound = true
		lines = lines[1:]
	}
	p.Code = make([]Inst, len(lines))
	for i, line := range lines {
		var ok bool
		p.Code[i], ok = ParseInst(line)
		if !ok {
			return Prog{}, fmt.Errorf("invalid instruction: %s", line)
		}
	}
	return p, nil
}
// State holds the entire CPU state: 6 registers and the instruction pointer,
// as well as the current IP/register binding state.
type State struct {
	R       [6]int // general-purpose registers r0..r5
	IP      int    // instruction pointer
	IPBound bool   // whether IP is mirrored into a register during each step
	IPR     int    // register the IP is bound to (meaningful when IPBound)
}

// Run executes an entire program until it halts. The IP binding state is reset to be that of the program,
// but the IP (and other registers) are themselves not reset to zero.
// The program halts when the IP leaves the range of valid instruction indices.
func (s *State) Run(p Prog) {
	s.IPBound, s.IPR = p.IPBound, p.IPR
	for s.IP >= 0 && s.IP < len(p.Code) {
		i := p.Code[s.IP]
		//fmt.Printf("ip=%d %v %v %d %d %d", s.IP, s.R, i.Op, i.A, i.B, i.C)
		s.Step(i.Op, i.A, i.B, i.C)
		//fmt.Printf(" %v\n", s.R)
	}
}

// Step executes one CPU cycle, given the operation to execute.
// When IP binding is active, the IP value is copied into the bound register
// before the operation runs and read back from it afterwards; the IP is then
// incremented, so an instruction that writes the bound register effects a jump.
func (s *State) Step(op Op, a, b, c int) {
	if s.IPBound {
		s.R[s.IPR] = s.IP
	}
	switch op {
	case AddR:
		s.R[c] = s.R[a] + s.R[b]
	case AddI:
		s.R[c] = s.R[a] + b
	case MulR:
		s.R[c] = s.R[a] * s.R[b]
	case MulI:
		s.R[c] = s.R[a] * b
	case BanR:
		s.R[c] = s.R[a] & s.R[b]
	case BanI:
		s.R[c] = s.R[a] & b
	case BorR:
		s.R[c] = s.R[a] | s.R[b]
	case BorI:
		s.R[c] = s.R[a] | b
	case SetR:
		s.R[c] = s.R[a]
	case SetI:
		s.R[c] = a
	case GtIR:
		s.R[c] = asInt(a > s.R[b])
	case GtRI:
		s.R[c] = asInt(s.R[a] > b)
	case GtRR:
		s.R[c] = asInt(s.R[a] > s.R[b])
	case EqIR:
		s.R[c] = asInt(a == s.R[b])
	case EqRI:
		s.R[c] = asInt(s.R[a] == b)
	case EqRR:
		s.R[c] = asInt(s.R[a] == s.R[b])
	}
	if s.IPBound {
		s.IP = s.R[s.IPR]
	}
	s.IP++
}

// asInt converts a boolean comparison result to the 1/0 value the CPU stores.
func asInt(b bool) int {
	if b {
		return 1
	}
	return 0
} | 2018/cpu/cpu.go | 0.587233 | 0.413536 | cpu.go | starcoder
package wiki
import (
"sort"
"strings"
"time"
)
// Sortable is the interface that allows quiki to sort wiki resources.
type Sortable interface {
	// SortInfo returns the metadata used by the Sort* comparison functions.
	SortInfo() SortInfo
}

// SortInfo is the data returned from Sortable items for sorting wiki resources.
type SortInfo struct {
	Title      string    // display title
	Author     string    // author name
	Created    time.Time // creation time
	Modified   time.Time // last modification time
	Dimensions []int     // image dimensions; used by SortDimensions
}

// SortFunc is a type for functions that can sort items.
// It reports whether p orders before q.
type SortFunc func(p, q Sortable) bool
// SortTitle is a SortFunc for sorting items alphabetically by title
// (case-insensitive).
func SortTitle(p, q Sortable) bool {
	a, b := p.SortInfo(), q.SortInfo()
	return strings.ToLower(a.Title) < strings.ToLower(b.Title)
}

// SortAuthor is a SortFunc for sorting items alphabetically by author
// (case-insensitive).
func SortAuthor(p, q Sortable) bool {
	a, b := p.SortInfo(), q.SortInfo()
	return strings.ToLower(a.Author) < strings.ToLower(b.Author)
}

// SortCreated is a SortFunc for sorting items by creation time, oldest first.
func SortCreated(p, q Sortable) bool {
	a, b := p.SortInfo(), q.SortInfo()
	return a.Created.Before(b.Created)
}

// SortModified is a SortFunc for sorting items by modification time, oldest first.
func SortModified(p, q Sortable) bool {
	a, b := p.SortInfo(), q.SortInfo()
	return a.Modified.Before(b.Modified)
}
// SortDimensions is a SortFunc for sorting images by their dimensions,
// comparing the products of width and height.
// Items with missing or malformed dimension data (nil or fewer than two
// entries) compare as "not less"; the original only checked for nil, so a
// zero- or one-element Dimensions slice caused an index panic.
func SortDimensions(p, q Sortable) bool {
	d1, d2 := p.SortInfo().Dimensions, q.SortInfo().Dimensions
	if len(d1) < 2 || len(d2) < 2 {
		return false
	}
	return d1[0]*d1[1] < d2[0]*d2[1]
}
// itemSorter implements sort.Interface, sorting the items within by a
// prioritized list of comparison functions.
type itemSorter struct {
	items []Sortable // the slice being sorted
	less  []SortFunc // comparison functions, applied in priority order
}

// Sort sorts the argument slice according to the less functions passed to sorter.
func (ps *itemSorter) Sort(items []Sortable) {
	ps.items = items
	sort.Sort(ps)
}

// sorter returns a Sorter that sorts using the less functions, in order.
// Call its Sort method to sort the data.
func sorter(items []Sortable, less ...SortFunc) *itemSorter {
	return &itemSorter{items, less}
}

// Len is part of sort.Interface.
func (ps *itemSorter) Len() int {
	return len(ps.items)
}

// Swap is part of sort.Interface.
func (ps *itemSorter) Swap(i, j int) {
	ps.items[i], ps.items[j] = ps.items[j], ps.items[i]
}

// Less is part of sort.Interface. It is implemented by looping along the
// less functions until it finds a comparison that discriminates between
// the two items (one is less than the other). Note that it can call the
// less functions twice per call. We could change the functions to return
// -1, 0, 1 and reduce the number of calls for greater efficiency: an
// exercise for the reader.
func (ps *itemSorter) Less(i, j int) bool {
	p, q := ps.items[i], ps.items[j]
	// Try all but the last comparison.
	var k int
	for k = 0; k < len(ps.less)-1; k++ {
		less := ps.less[k]
		switch {
		case less(p, q):
			// p < q, so we have a decision.
			return true
		case less(q, p):
			// p > q, so we have a decision.
			return false
		}
		// p == q; try the next comparison.
	}
	// All comparisons to here said "equal", so just return whatever
	// the final comparison reports.
	return ps.less[k](p, q)
} | wiki/sort.go | 0.630799 | 0.438004 | sort.go | starcoder
package cmdapm
// Help text for the "deployment apm create" command.
// Fixes over the previous revision: "limitting" -> "limiting", the missing
// "## " comment marker on the third example line, and the inconsistent
// "dev-cli" binary name in the last example (the tool is ecctl throughout).
const (
	apmCreateLong = `Creates an APM deployment, limiting the creation scope to APM resources.
There are a few ways to create an APM deployment, sane default values are provided, making
the command work out of the box even when no parameters are set. When version is not specified,
the matching elasticsearch deployment version will be used. These are the available options:
* Simplified flags: --zones <zone count> --size <node memory in MB>
* File definition: --file=<file path> (shorthand: -f). The definition can be found in:
https://www.elastic.co/guide/en/cloud-enterprise/current/definitions.html#ApmPayload
As an option, "--generate-payload" can be used in order to obtain the generated ApmPayload
that would be sent as a request, save it, update or extend the topology and create an Apm
deployment using the saved payload with the "--file" flag.`
	apmCreateExample = `## Create a single APM server. The command will exit after the API response has been returned,
## without waiting until the deployment resources have been created. To make the command wait until
## the resources have been created use the "--track" flag.
$ ecctl deployment apm create --id=a57f8b7ce54c4afb90ce3755d1e94000 --track
{
"id": "a57f8b7ce54c4afb90ce3755d1e94000",
"name": "a57f8b7ce54c4afb90ce3755d1e94000",
"resources": [
{
"elasticsearch_cluster_ref_id": "elasticsearch",
"id": "53d104a432a648f68ec76d52ecb521d5",
"kind": "apm",
"ref_id": "apm",
"region": "ece-region"
},
{
"elasticsearch_cluster_ref_id": "elasticsearch",
"id": "39e4a65fc2b14651b666aaff18a13b8f",
"kind": "kibana",
"ref_id": "kibana",
"region": "ece-region"
},
{
"cloud_id": "a57f8b7ce54c4afb90ce3755d1e94000:MTkyLjE2OC40NC4xMC5pcC5lcy5pbzo5MjQzJGQzODIwOWU4ZTYwYzRlYTliY2UzMDc1OThhMTljNGI3JDM5ZTRhNjVmYzJiMTQ2NTFiNjY2YWFmZjE4YTEzYjhm",
"id": "d38209e8e60c4ea9bce307598a19c4b7",
"kind": "elasticsearch",
"ref_id": "elasticsearch",
"region": "ece-region"
}
]
}
Cluster [53d104a432a648f68ec76d52ecb521d5][Apm]: running step "wait-until-running" (Plan duration 1.38505959s)...
Cluster [39e4a65fc2b14651b666aaff18a13b8f][Kibana]: finished running all the plan steps (Total plan duration: 1.73493053s)
Cluster [d38209e8e60c4ea9bce307598a19c4b7][Elasticsearch]: finished running all the plan steps (Total plan duration: 1.849794895s)
Cluster [53d104a432a648f68ec76d52ecb521d5][Apm]: running step "set-maintenance" (Plan duration 11.162178491s)...
Cluster [53d104a432a648f68ec76d52ecb521d5][Apm]: finished running all the plan steps (Total plan duration: 16.677195277s)
## Save the definition to a file for later use.
$ ecctl deployment apm create --generate-payload --id a57f8b7ce54c4afb90ce3755d1e94000 --zones 2 --size 2048 > apm_create_example.json
## Create the deployment piping through the file contents tracking the creation progress
$ cat apm_create_example.json | ecctl deployment apm create --track --id a57f8b7ce54c4afb90ce3755d1e94000
[...]`
)
package script
import (
"go/ast"
"go/token"
"reflect"
)
// compileBinaryExpr compiles a binary expression x 'op' y into an Expr node.
// Arithmetic and comparison operators operate on float64 operands, the
// logical operators on bool operands (conversion happens in the node
// constructors). Any other operator is reported as a compile error via panic.
func (w *World) compileBinaryExpr(n *ast.BinaryExpr) Expr {
	switch n.Op {
	default:
		panic(err(n.Pos(), "not allowed:", n.Op))
	case token.ADD:
		return &add{w.newBinExpr(n)}
	case token.SUB:
		return &sub{w.newBinExpr(n)}
	case token.MUL:
		return &mul{w.newBinExpr(n)}
	case token.QUO:
		return &quo{w.newBinExpr(n)}
	case token.LSS:
		return &lss{w.newComp(n)}
	case token.GTR:
		return &gtr{w.newComp(n)}
	case token.LEQ:
		return &leq{w.newComp(n)}
	case token.GEQ:
		return &geq{w.newComp(n)}
	case token.EQL:
		return &eql{w.newComp(n)}
	case token.NEQ:
		return &neq{w.newComp(n)}
	case token.LAND:
		return &and{w.newBoolOp(n)}
	case token.LOR:
		return &or{w.newBoolOp(n)}
	}
}
// binaryExpr is the abstract superclass for all binary expressions: two
// child expressions x and y.
type binaryExpr struct{ x, y Expr }

// newBinExpr compiles both operands of n and converts them to float64,
// producing the shared state for the arithmetic node types.
func (w *World) newBinExpr(n *ast.BinaryExpr) binaryExpr {
	x := typeConv(n.Pos(), w.compileExpr(n.X), float64_t)
	y := typeConv(n.Pos(), w.compileExpr(n.Y), float64_t)
	return binaryExpr{x, y}
}

func (b *binaryExpr) Type() reflect.Type { return float64_t }
func (b *binaryExpr) Child() []Expr      { return []Expr{b.x, b.y} }

// Arithmetic nodes: float64 -> float64.
type add struct{ binaryExpr }
type sub struct{ binaryExpr }
type mul struct{ binaryExpr }
type quo struct{ binaryExpr }

func (b *add) Eval() interface{} { return b.x.Eval().(float64) + b.y.Eval().(float64) }
func (b *sub) Eval() interface{} { return b.x.Eval().(float64) - b.y.Eval().(float64) }
func (b *mul) Eval() interface{} { return b.x.Eval().(float64) * b.y.Eval().(float64) }
func (b *quo) Eval() interface{} { return b.x.Eval().(float64) / b.y.Eval().(float64) }

// comp shares binaryExpr's operand handling but reports a bool result type.
type comp binaryExpr

func (w *World) newComp(n *ast.BinaryExpr) comp {
	return comp(w.newBinExpr(n))
}

func (b *comp) Type() reflect.Type { return bool_t }
func (b *comp) Child() []Expr      { return []Expr{b.x, b.y} }

// Comparison nodes: float64 x float64 -> bool.
type lss struct{ comp }
type gtr struct{ comp }
type leq struct{ comp }
type geq struct{ comp }
type eql struct{ comp }
type neq struct{ comp }

func (b *lss) Eval() interface{} { return b.x.Eval().(float64) < b.y.Eval().(float64) }
func (b *gtr) Eval() interface{} { return b.x.Eval().(float64) > b.y.Eval().(float64) }
func (b *leq) Eval() interface{} { return b.x.Eval().(float64) <= b.y.Eval().(float64) }
func (b *geq) Eval() interface{} { return b.x.Eval().(float64) >= b.y.Eval().(float64) }
func (b *eql) Eval() interface{} { return b.x.Eval().(float64) == b.y.Eval().(float64) }
func (b *neq) Eval() interface{} { return b.x.Eval().(float64) != b.y.Eval().(float64) }

// boolOp holds two bool-typed child expressions for the logical operators.
// Note: both operands are always evaluated (no short-circuiting at compile
// time), but Go's && / || in the Eval bodies short-circuit at run time.
type boolOp struct{ x, y Expr }

func (w *World) newBoolOp(n *ast.BinaryExpr) boolOp {
	x := typeConv(n.Pos(), w.compileExpr(n.X), bool_t)
	y := typeConv(n.Pos(), w.compileExpr(n.Y), bool_t)
	return boolOp{x, y}
}

func (b *boolOp) Child() []Expr      { return []Expr{b.x, b.y} }
func (b *boolOp) Type() reflect.Type { return bool_t }

// Logical nodes: bool x bool -> bool.
type and struct{ boolOp }
type or struct{ boolOp }

func (b *and) Eval() interface{} { return b.x.Eval().(bool) && b.y.Eval().(bool) }
func (b *or) Eval() interface{} { return b.x.Eval().(bool) || b.y.Eval().(bool) } | script/binaryexpr.go | 0.599485 | 0.461623 | binaryexpr.go | starcoder
package strings
import (
"fmt"
"math"
)
// Challenge:
// Implement atoi which converts a string to an integer.
// The function first discards as many whitespace characters as necessary until the first non-whitespace character is found.
// Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
// The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
// If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.
// If no valid conversion could be performed, a zero value is returned.
// Note:
// Only the space character ' ' is considered as whitespace character.
// Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−231, 231 − 1].
// If the numerical value is out of the range of representable values, INT_MAX (231 − 1) or INT_MIN (−231) is returned.
//Solution:
// Time complexity is O(n)
// myAtoi converts the leading integer of str to an int, mimicking C's atoi
// with LeetCode semantics: leading spaces (only ' ') are skipped, an optional
// '+'/'-' sign is consumed, then as many digits as possible are read; any
// trailing characters are ignored. Values outside the 32-bit signed range are
// clamped to math.MaxInt32 / math.MinInt32. Invalid input yields 0.
func myAtoi(str string) int {
	// Skip leading spaces.
	start := 0
	for start < len(str) && str[start] == ' ' {
		start++
	}
	str = str[start:]
	if len(str) == 0 {
		return 0
	}

	// Consume an optional sign.
	negative := false
	switch str[0] {
	case '-':
		negative = true
		str = str[1:]
	case '+':
		str = str[1:]
	}

	// Count the run of leading digits.
	digits := 0
	for digits < len(str) && isNumber(str[digits]) {
		digits++
	}
	if digits == 0 {
		return 0 // no digits after the (optional) sign
	}

	// Accumulate, clamping to the 32-bit signed range as soon as we exceed it.
	result := 0
	for _, c := range str[:digits] {
		result = result*10 + int(c-'0')
		if result > math.MaxInt32 {
			result = math.MaxInt32
			if negative {
				result++ // |MinInt32| == MaxInt32 + 1
			}
			break
		}
	}
	if negative {
		return -result
	}
	return result
}

// isNumber reports whether char is an ASCII decimal digit.
func isNumber(char byte) bool {
	return char >= '0' && char <= '9'
}
// main exercises myAtoi with a set of valid, invalid, and overflow inputs
// and prints each result. (Demo driver, not a test harness.)
func main() {
	// fmt.Println(math.MaxInt32)
	// fmt.Println(math.MaxInt64)
	// fmt.Println(math.MinInt32)
	// fmt.Println(math.MinInt64)
	fmt.Println(myAtoi("123"))                              // valid
	fmt.Println(myAtoi("1234567 is a number!"))             // valid
	fmt.Println(myAtoi("   1234567 is a number too!"))      // valid
	fmt.Println(myAtoi("+1234567"))                         // valid
	fmt.Println(myAtoi("-1234567"))                         // valid
	fmt.Println(myAtoi("Number 1234567 is not!"))           // invalid
	fmt.Println(myAtoi("1234567889897987"))                 // too big
	fmt.Println(myAtoi("-1234567889897987"))                // too small
	fmt.Println(myAtoi(""))                                 // invalid
	fmt.Println(myAtoi(" "))                                // invalid
	fmt.Println(myAtoi("-91283472332"))
	fmt.Println(myAtoi("2147483648"))
	fmt.Println(myAtoi("-2147483648"))
	fmt.Println(myAtoi("9223372036854775808"))
	fmt.Println(myAtoi("-9223372036854775808"))
	fmt.Println(myAtoi("-2147483649"))
	fmt.Println(myAtoi("-9223372036854775809"))
	fmt.Println(myAtoi("-6147483648"))
	fmt.Println(myAtoi("-2147483647"))
} | easy/strings/atoi.go | 0.628179 | 0.555616 | atoi.go | starcoder
package util
import (
"github.com/ElvertMora/quasar-fire-app/controllers/request"
"math"
)
// Satellites lists the names of the known satellites; entries with other
// names are ignored by the map-building helpers below.
var Satellites = []string{"Kenobi", "Skywalker", "Sato"}
// GetMapDistances builds a satellite-name -> distance map from the request,
// skipping any satellite whose name is not in Satellites.
func GetMapDistances(request *request.SatellitesRequest) map[string]float64 {
	distances := map[string]float64{}
	for _, sat := range request.Satellites {
		if !SatelliteExist(sat.Name) {
			continue
		}
		distances[sat.Name] = sat.Distance
	}
	return distances
}

// GetMapDistancesFromSatellitesRecord builds a satellite-name -> distance map
// from a slice of satellite records, skipping unknown satellite names.
func GetMapDistancesFromSatellitesRecord(satellites []*request.Satellite) map[string]float64 {
	distances := map[string]float64{}
	for _, sat := range satellites {
		if !SatelliteExist(sat.Name) {
			continue
		}
		distances[sat.Name] = sat.Distance
	}
	return distances
}

// GetCoordinatesSatellites builds a satellite-name -> position map from a
// slice of satellite records, skipping unknown satellite names.
func GetCoordinatesSatellites(satellites []*request.Satellite) map[string]Point {
	coordinates := map[string]Point{}
	for _, sat := range satellites {
		if !SatelliteExist(sat.Name) {
			continue
		}
		coordinates[sat.Name] = Point{X: sat.X, Y: sat.Y}
	}
	return coordinates
}
// SatelliteExist reports whether e is one of the known satellite names.
func SatelliteExist(e string) bool {
	for _, known := range Satellites {
		if known == e {
			return true
		}
	}
	return false
}
// GetPart merges two partially-received messages word by word, preferring
// the word from message1 and falling back to message2 where message1 has an
// empty slot.
// NOTE(review): assumes len(message2) >= len(message1); a shorter message2
// would panic inside selectMessage — confirm against callers.
func GetPart(message1, message2 []string) []string {
	tmp := make([]string, len(message1))
	for i := range message1 {
		tmp[i] = selectMessage(message1, message2, i)
	}
	return tmp
}
// selectMessage returns the word at index, taking message1's entry when it
// is non-empty and message2's entry otherwise.
func selectMessage(message1 []string, message2 []string, index int) string {
	if message1[index] != "" {
		return message1[index]
	}
	return message2[index]
}
// Point is a 2D cartesian coordinate.
type Point struct {
	X float64
	Y float64
}
// norm returns the Euclidean length of the vector (p.X, p.Y).
// math.Hypot replaces the previous Pow(x,2)+Pow(y,2) then Pow(.,0.5)
// formulation: it computes sqrt(x*x + y*y) while avoiding intermediate
// overflow/underflow and is the idiomatic stdlib call for this.
func norm(p Point) float64 {
	return math.Hypot(p.X, p.Y)
}
// Trilateration computes the (x, y) position whose distances from point1,
// point2 and point3 are r1, r2 and r3 respectively, using the standard 2D
// trilateration construction: build an orthonormal frame (ex, ey) with
// point1 at the origin and point2 on the x axis, solve for the local
// coordinates, then map back to world coordinates.
func Trilateration(point1 Point, point2 Point, point3 Point, r1 float64, r2 float64, r3 float64) (float64, float64) {
	//unit vector in a direction from point1 to point 2
	p2p1Distance := math.Pow(math.Pow(point2.X-point1.X, 2)+math.Pow(point2.Y-point1.Y, 2), 0.5)
	ex := Point{(point2.X - point1.X) / p2p1Distance, (point2.Y - point1.Y) / p2p1Distance}
	aux := Point{point3.X - point1.X, point3.Y - point1.Y}
	//signed magnitude of the X component
	i := ex.X*aux.X + ex.Y*aux.Y
	//the unit vector in the y direction.
	aux2 := Point{point3.X - point1.X - i*ex.X, point3.Y - point1.Y - i*ex.Y}
	ey := Point{aux2.X / norm(aux2), aux2.Y / norm(aux2)}
	//the signed magnitude of the y component
	j := ey.X*aux.X + ey.Y*aux.Y
	//coordinates in the local (ex, ey) frame
	x := (math.Pow(r1, 2) - math.Pow(r2, 2) + math.Pow(p2p1Distance, 2)) / (2 * p2p1Distance)
	y := (math.Pow(r1, 2)-math.Pow(r3, 2)+math.Pow(i, 2)+math.Pow(j, 2))/(2*j) - i*x/j
	//result coordinates, mapped back to the world frame
	// NOTE(review): degenerate inputs (coincident point1/point2, or three
	// collinear points, i.e. j == 0) divide by zero — confirm callers
	// guarantee non-degenerate satellite positions.
	finalX := point1.X + x*ex.X + y*ey.X
	finalY := point1.Y + x*ex.Y + y*ey.Y
	return finalX, finalY
} | util/util.go | 0.618435 | 0.501282 | util.go | starcoder
package gocudnn
/*
#include <cudnn.h>
*/
import "C"
import (
"runtime"
"unsafe"
"github.com/dereklstinson/cutil"
)
// SpatialTransformerD holds a cudnnSpatialTransformerDescriptor_t and its
// associated bookkeeping.
type SpatialTransformerD struct {
	descriptor C.cudnnSpatialTransformerDescriptor_t // the underlying cudnn descriptor
	dims       C.int                                 // number of dimensions
	gogc       bool                                  // whether Go GC (finalizers) manages destruction
}
// GridGeneratorForward generates a grid of coordinates in the input tensor
// corresponding to each pixel from the output tensor.
//
// theta - Input. Affine transformation matrix. It should be of size n*2*3
// for a 2d transformation, where n is the number of images.
// grid - Output. A grid of coordinates. It is of size n*h*w*2 for a 2d
// transformation, where n, h, w are specified in the descriptor. In the 4th
// dimension, the first coordinate is x and the second coordinate is y.
func (s *SpatialTransformerD) GridGeneratorForward(
	handle *Handle,
	theta cutil.Mem,
	grid cutil.Mem,
) error {
	// Write the cgo call once; route it through the handle's worker
	// goroutine when one is configured, otherwise invoke it directly.
	// (Previously the whole call expression was duplicated in both branches.)
	call := func() error {
		return Status(C.cudnnSpatialTfGridGeneratorForward(
			handle.x,
			s.descriptor,
			theta.Ptr(),
			grid.Ptr(),
		)).error("(s *SpatialTransformerD) GridGeneratorForward")
	}
	if handle.w != nil {
		return handle.w.Work(call)
	}
	return call()
}
// GridGeneratorForwardUS is like GridGeneratorForward but uses
// unsafe.Pointer instead of cutil.Mem.
func (s *SpatialTransformerD) GridGeneratorForwardUS(
	handle *Handle,
	theta unsafe.Pointer, // Input. Affine transformation matrix, n*2*3 for a 2d transformation.
	grid unsafe.Pointer, // Output. Grid of coordinates, n*h*w*2 for a 2d transformation.
) error {
	// Single cgo call, routed through the handle's worker when configured.
	call := func() error {
		return Status(C.cudnnSpatialTfGridGeneratorForward(
			handle.x,
			s.descriptor,
			theta,
			grid,
		)).error("(s *SpatialTransformerD) GridGeneratorForwardUS")
	}
	if handle.w != nil {
		return handle.w.Work(call)
	}
	return call()
}
// GridGeneratorBackward backpropagates through the grid generation step:
// given the gradient with respect to the generated grid (grid), it computes
// the gradient with respect to the affine transformation matrix (theta).
// (The previous doc comment was copy-pasted from the forward method; see the
// cuDNN cudnnSpatialTfGridGeneratorBackward documentation.)
func (s *SpatialTransformerD) GridGeneratorBackward(
	handle *Handle,
	grid cutil.Mem,
	theta cutil.Mem,
) error {
	// Single cgo call, routed through the handle's worker when configured.
	call := func() error {
		return Status(C.cudnnSpatialTfGridGeneratorBackward(
			handle.x,
			s.descriptor,
			grid.Ptr(),
			theta.Ptr(),
		)).error("(s *SpatialTransformerD) GridGeneratorBackward")
	}
	if handle.w != nil {
		return handle.w.Work(call)
	}
	return call()
}
// GridGeneratorBackwardUS is like GridGeneratorBackward but uses
// unsafe.Pointer instead of cutil.Mem.
func (s *SpatialTransformerD) GridGeneratorBackwardUS(
	handle *Handle,
	grid unsafe.Pointer,
	theta unsafe.Pointer,
) error {
	// Single cgo call, routed through the handle's worker when configured.
	// Note: the stray "(" at the end of the previous error tag was dropped.
	call := func() error {
		return Status(C.cudnnSpatialTfGridGeneratorBackward(
			handle.x,
			s.descriptor,
			grid,
			theta,
		)).error("(s *SpatialTransformerD) GridGeneratorBackwardUS")
	}
	if handle.w != nil {
		return handle.w.Work(call)
	}
	return call()
}
// SamplerForward performs the spatial transformer sampler forward operation:
// it samples the input tensor x at the coordinates given by grid and blends
// the result into y using the usual cuDNN alpha/beta scaling.
func (s *SpatialTransformerD) SamplerForward(
	handle *Handle,
	alpha float64,
	xD *TensorD, x cutil.Mem,
	grid cutil.Mem,
	beta float64,
	yD *TensorD, y cutil.Mem,
) error {
	// Convert the scaling factors to the C scalar type matching each tensor.
	a := cscalarbydatatype(xD.dtype, alpha)
	b := cscalarbydatatype(yD.dtype, beta)
	// Single cgo call, routed through the handle's worker when configured.
	call := func() error {
		return Status(C.cudnnSpatialTfSamplerForward(
			handle.x,
			s.descriptor,
			a.CPtr(),
			xD.descriptor,
			x.Ptr(),
			grid.Ptr(),
			b.CPtr(),
			yD.descriptor,
			y.Ptr(),
		)).error("(s *SpatialTransformerD) SamplerForward")
	}
	if handle.w != nil {
		return handle.w.Work(call)
	}
	return call()
}
// SamplerForwardUS is like SamplerForward but uses unsafe.Pointer instead
// of cutil.Mem.
func (s *SpatialTransformerD) SamplerForwardUS(
	handle *Handle,
	alpha float64,
	xD *TensorD, x unsafe.Pointer,
	grid unsafe.Pointer,
	beta float64,
	yD *TensorD, y unsafe.Pointer,
) error {
	a := cscalarbydatatype(xD.dtype, alpha)
	b := cscalarbydatatype(yD.dtype, beta)
	// Single cgo call, routed through the handle's worker when configured.
	call := func() error {
		return Status(C.cudnnSpatialTfSamplerForward(
			handle.x,
			s.descriptor,
			a.CPtr(),
			xD.descriptor, x,
			grid,
			b.CPtr(),
			yD.descriptor, y,
		)).error("(s *SpatialTransformerD) SamplerForwardUS")
	}
	if handle.w != nil {
		return handle.w.Work(call)
	}
	return call()
}
// SamplerBackward performs the spatial transformer sampler backward
// operation: it produces the input gradient dx (blended with alpha/beta) and
// the grid gradient dGrid (blended with alphaDgrid/betaDgrid) from the
// output gradient dy.
func (s *SpatialTransformerD) SamplerBackward(
	handle *Handle,
	alpha float64,
	xD *TensorD, x cutil.Mem,
	beta float64,
	dxD *TensorD, dx cutil.Mem,
	alphaDgrid float64,
	dyD *TensorD, dy cutil.Mem,
	grid cutil.Mem,
	betaDgrid float64,
	dGrid cutil.Mem,
) error {
	// Convert the four scaling factors to C scalars of the matching dtypes.
	a := cscalarbydatatype(dyD.dtype, alpha)
	b := cscalarbydatatype(dxD.dtype, beta)
	ad := cscalarbydatatype(xD.dtype, alphaDgrid)
	bd := cscalarbydatatype(dxD.dtype, betaDgrid)
	// Single cgo call, routed through the handle's worker when configured.
	call := func() error {
		return Status(C.cudnnSpatialTfSamplerBackward(
			handle.x,
			s.descriptor,
			a.CPtr(),
			xD.descriptor, x.Ptr(),
			b.CPtr(),
			dxD.descriptor, dx.Ptr(),
			ad.CPtr(),
			dyD.descriptor, dy.Ptr(),
			grid.Ptr(),
			bd.CPtr(),
			dGrid.Ptr(),
		)).error("(s *SpatialTransformerD) SamplerBackward")
	}
	if handle.w != nil {
		return handle.w.Work(call)
	}
	return call()
}
// SamplerBackwardUS is like SamplerBackward but uses unsafe.Pointer instead
// of cutil.Mem.
func (s *SpatialTransformerD) SamplerBackwardUS(
	handle *Handle,
	alpha float64,
	xD *TensorD, x unsafe.Pointer,
	beta float64,
	dxD *TensorD, dx unsafe.Pointer,
	alphaDgrid float64,
	dyD *TensorD, dy unsafe.Pointer,
	grid unsafe.Pointer,
	betaDgrid float64,
	dGrid unsafe.Pointer,
) error {
	a := cscalarbydatatype(dyD.dtype, alpha)
	b := cscalarbydatatype(dxD.dtype, beta)
	ad := cscalarbydatatype(xD.dtype, alphaDgrid)
	bd := cscalarbydatatype(dxD.dtype, betaDgrid)
	// Single cgo call, routed through the handle's worker when configured.
	call := func() error {
		return Status(C.cudnnSpatialTfSamplerBackward(
			handle.x,
			s.descriptor,
			a.CPtr(),
			xD.descriptor, x,
			b.CPtr(),
			dxD.descriptor, dx,
			ad.CPtr(),
			dyD.descriptor, dy,
			grid,
			bd.CPtr(),
			dGrid,
		)).error("(s *SpatialTransformerD) SamplerBackwardUS")
	}
	if handle.w != nil {
		return handle.w.Work(call)
	}
	return call()
}
/* APIs for spatial transformer network*/

// SamplerType wraps cudnnSamplerType_t and is used for flags. Obtain the
// supported value via the Bilinear method.
type SamplerType C.cudnnSamplerType_t

// Bilinear sets s to SamplerType(C.CUDNN_SAMPLER_BILINEAR) and returns the new value of s.
func (s *SamplerType) Bilinear() SamplerType { *s = SamplerType(C.CUDNN_SAMPLER_BILINEAR); return *s }
// String implements fmt.Stringer for SamplerType.
// Fixes the "Unssuported" typo and drops the previous trick of mutating a
// copy via the pointer-receiver Bilinear method just to obtain the constant.
func (s SamplerType) String() string {
	var st string
	switch s {
	case SamplerType(C.CUDNN_SAMPLER_BILINEAR):
		st = "Bilinear"
	default:
		st = "Unsupported Type"
	}
	return "SamplerType" + st
}
// c converts s back to the raw cudnn enum value for cgo calls.
func (s SamplerType) c() C.cudnnSamplerType_t { return C.cudnnSamplerType_t(s) }

// CreateSpatialTransformerDescriptor creates a new spatial transformer
// descriptor. When finalizers are enabled, the descriptor is destroyed
// automatically when garbage collected.
func CreateSpatialTransformerDescriptor() (*SpatialTransformerD, error) {
	x := new(SpatialTransformerD)
	err := Status(C.cudnnCreateSpatialTransformerDescriptor(&x.descriptor)).error("CreateSpatialTransformerDescriptor()")
	if setfinalizer {
		runtime.SetFinalizer(x, cudnnDestroySpatialTransformerDescriptor)
	}
	return x, err
}

// Set configures the spatial transformer as an Nd descriptor with the given
// sampler type, data type, and dimensions.
// NOTE(review): an empty dimA would panic on &cdimA[0] — confirm callers
// always pass at least one dimension.
func (s *SpatialTransformerD) Set(sampler SamplerType, data DataType, dimA []int32) error {
	dims := C.int(len(dimA))
	cdimA := int32Tocint(dimA)
	return Status(C.cudnnSetSpatialTransformerNdDescriptor(
		s.descriptor,
		sampler.c(),
		data.c(),
		dims,
		&cdimA[0],
	)).error("(s *SpatialTransformerD) Set")
}

// Destroy destroys the spatial Transformer Desctiptor. If GC is enable this function won't delete transformer. It will only return nil
// Since gc is automatically enabled this function is not functional.
func (s *SpatialTransformerD) Destroy() error {
	if s.gogc || setfinalizer {
		return nil
	}
	return cudnnDestroySpatialTransformerDescriptor(s)
}

// cudnnDestroySpatialTransformerDescriptor releases the underlying cudnn
// descriptor; it is also registered as the GC finalizer for the type.
func cudnnDestroySpatialTransformerDescriptor(s *SpatialTransformerD) error {
	return Status(C.cudnnDestroySpatialTransformerDescriptor(s.descriptor)).error("DestroyDescriptor")
} | cudnnSpatial.go | 0.717111 | 0.421195 | cudnnSpatial.go | starcoder
package cp
import (
"math"
"fmt"
)
// BB is an axis-aligned 2D bounding box with left, bottom, right, and top
// edge coordinates.
type BB struct {
	L, B, R, T float64
}

// String formats the box edges.
// NOTE(review): the print order is L, T, R, B — not the L, B, R, T field
// order — confirm whether consumers depend on this ordering.
func (bb BB) String() string {
	return fmt.Sprintf("%v %v %v %v", bb.L, bb.T, bb.R, bb.B)
}
// NewBBForExtents constructs a BB centered on c with half-width hw and
// half-height hh.
func NewBBForExtents(c Vector, hw, hh float64) BB {
	return BB{
		L: c.X - hw,
		B: c.Y - hh,
		R: c.X + hw,
		T: c.Y + hh,
	}
}

// NewBBForCircle constructs a BB fitting a circle of radius r centered at p.
func NewBBForCircle(p Vector, r float64) BB {
	return NewBBForExtents(p, r, r)
}
// Intersects reports whether the two boxes overlap (edge contact counts).
func (a BB) Intersects(b BB) bool {
	return a.L <= b.R && b.L <= a.R && a.B <= b.T && b.B <= a.T
}

// Contains reports whether bb fully encloses other (shared edges count).
func (bb BB) Contains(other BB) bool {
	return bb.L <= other.L && bb.R >= other.R && bb.B <= other.B && bb.T >= other.T
}

// ContainsVect reports whether the point v lies inside bb (edges inclusive).
func (bb BB) ContainsVect(v Vector) bool {
	return bb.L <= v.X && bb.R >= v.X && bb.B <= v.Y && bb.T >= v.Y
}
// Merge returns the smallest box containing both a and b.
func (a BB) Merge(b BB) BB {
	return BB{
		math.Min(a.L, b.L),
		math.Min(a.B, b.B),
		math.Max(a.R, b.R),
		math.Max(a.T, b.T),
	}
}

// Expand returns the smallest box containing both bb and the point v.
func (bb BB) Expand(v Vector) BB {
	return BB{
		math.Min(bb.L, v.X),
		math.Min(bb.B, v.Y),
		math.Max(bb.R, v.X),
		math.Max(bb.T, v.Y),
	}
}

// Center returns the midpoint of the box.
func (bb BB) Center() Vector {
	return Vector{bb.L, bb.B}.Lerp(Vector{bb.R, bb.T}, 0.5)
}

// Area returns the box's width times height.
func (bb BB) Area() float64 {
	return (bb.R - bb.L) * (bb.T - bb.B)
}

// MergedArea returns the area of the smallest box containing both a and b,
// without materializing the merged box.
func (a BB) MergedArea(b BB) float64 {
	return (math.Max(a.R, b.R) - math.Min(a.L, b.L)) * (math.Max(a.T, b.T) - math.Min(a.B, b.B))
}
// SegmentQuery returns the fraction t in [0, 1] along the segment a->b at
// which it first touches bb, using the slab intersection method on each
// axis. It returns INFINITY when the segment misses the box entirely.
func (bb BB) SegmentQuery(a, b Vector) float64 {
	delta := b.Sub(a)
	tmin := -INFINITY
	tmax := INFINITY
	// X slab: a vertical segment must already lie between L and R.
	if delta.X == 0 {
		if a.X < bb.L || bb.R < a.X {
			return INFINITY
		}
	} else {
		t1 := (bb.L - a.X) / delta.X
		t2 := (bb.R - a.X) / delta.X
		tmin = math.Max(tmin, math.Min(t1, t2))
		tmax = math.Min(tmax, math.Max(t1, t2))
	}
	// Y slab: same test on the vertical extent.
	if delta.Y == 0 {
		if a.Y < bb.B || bb.T < a.Y {
			return INFINITY
		}
	} else {
		t1 := (bb.B - a.Y) / delta.Y
		t2 := (bb.T - a.Y) / delta.Y
		tmin = math.Max(tmin, math.Min(t1, t2))
		tmax = math.Min(tmax, math.Max(t1, t2))
	}
	// The slabs overlap within the segment's [0, 1] range iff there is a hit.
	if tmin <= tmax && 0 <= tmax && tmin <= 1.0 {
		return math.Max(tmin, 0.0)
	} else {
		return INFINITY
	}
}

// IntersectsSegment reports whether the segment a->b touches bb.
func (bb BB) IntersectsSegment(a, b Vector) bool {
	return bb.SegmentQuery(a, b) != INFINITY
}
// ClampVect returns v clamped componentwise into bb.
// NOTE(review): the pointer parameter is only read, never written — a value
// parameter would express the intent better; confirm no caller relies on
// the pointer signature.
func (bb BB) ClampVect(v *Vector) Vector {
	return Vector{Clamp(v.X, bb.L, bb.R), Clamp(v.Y, bb.B, bb.T)}
}

// WrapVect wraps v into bb, treating the box as a torus: coordinates that
// leave one edge re-enter from the opposite edge.
// NOTE(review): a coordinate exactly on the lower edge (mod == 0) maps to
// the upper edge (bb.L -> bb.R), matching the original Chipmunk behaviour.
func (bb BB) WrapVect(v Vector) Vector {
	dx := math.Abs(bb.R - bb.L)
	modx := math.Mod(v.X-bb.L, dx)
	var x float64
	if modx > 0 {
		x = modx
	} else {
		x = modx + dx
	}
	dy := math.Abs(bb.T - bb.B)
	mody := math.Mod(v.Y-bb.B, dy)
	var y float64
	if mody > 0 {
		y = mody
	} else {
		y = mody + dy
	}
	return Vector{x + bb.L, y + bb.B}
}

// Offset returns bb translated by v.
func (bb BB) Offset(v Vector) BB {
	return BB{
		bb.L + v.X,
		bb.B + v.Y,
		bb.R + v.X,
		bb.T + v.Y,
	}
}

// Proximity returns a cheap "distance" metric between the two boxes: the sum
// of the absolute differences of their center coordinates, each scaled by 2.
func (a BB) Proximity(b BB) float64 {
	return math.Abs(a.L+a.R-b.L-b.R) + math.Abs(a.B+a.T-b.B-b.T)
} | bb.go | 0.810366 | 0.538801 | bb.go | starcoder
package region
import (
"context"
"math"
"github.com/ironarachne/world/pkg/geometry"
"github.com/ironarachne/world/pkg/random"
)
// Region is a geographic area.
type Region struct {
	Description               string `json:"description"`
	Altitude                  int    `json:"altitude"`    // -99-99, 0 is sea level
	Humidity                  int    `json:"humidity"`    // 0-99
	Temperature               int    `json:"temperature"` // 0-99
	NearestOceanDistance      int    `json:"nearest_ocean_distance"`
	NearestOceanDirection     int    `json:"nearest_ocean_direction"` // a geometry direction constant
	NearestMountainsDistance  int    `json:"nearest_mountains_distance"`
	NearestMountainsDirection int    `json:"nearest_mountains_direction"` // a geometry direction constant
	DistanceToEquator         int    `json:"distance_to_equator"` // 0 is on equator, -99 is south pole, 99 is north pole
}
// Generate procedurally generates a random region suitable for life.
func Generate(ctx context.Context) Region {
	return RandomTemperate(ctx)
}
// GenerateSpecific generates a region based on specific characteristics: the
// climate values are taken verbatim from the arguments, while the ocean and
// mountain geography is still randomized.
func GenerateSpecific(ctx context.Context, temperature int, humidity int, altitude int, distance int) Region {
	r := Region{
		DistanceToEquator: distance,
		Temperature:       temperature,
		Humidity:          humidity,
		Altitude:          altitude,
	}
	// TODO: Replace the following with real data gleaned from the world.
	// NOTE: the random draws below are kept in this exact order so seeded
	// generation stays reproducible.
	r.NearestOceanDistance = random.Intn(ctx, 100)
	r.NearestOceanDirection = geometry.RandomDirection(ctx)
	r.NearestMountainsDirection = geometry.OppositeDirection(r.NearestOceanDirection)
	r.NearestMountainsDistance = random.Intn(ctx, 100)
	r.Description = r.Describe()
	return r
}
// RandomTemperate returns a random region that is appropriate for life:
// moderate latitude and altitude, with temperature and humidity derived from
// those via GetTemperature and GetHumidity.
func RandomTemperate(ctx context.Context) Region {
	var r Region
	// The random draws are kept in this exact order so seeded generation
	// stays reproducible.
	r.DistanceToEquator = random.Intn(ctx, 100) - 50 // within 50 units of the equator
	r.Altitude = random.Intn(ctx, 50) + 10           // always above sea level
	r.NearestOceanDistance = random.Intn(ctx, 100)
	r.NearestOceanDirection = geometry.RandomDirection(ctx)
	r.NearestMountainsDirection = geometry.OppositeDirection(r.NearestOceanDirection)
	r.NearestMountainsDistance = random.Intn(ctx, 100)
	r.Temperature = GetTemperature(r.DistanceToEquator, r.Altitude)
	r.Humidity = GetHumidity(r.Altitude, r.NearestOceanDistance)
	r.Description = r.Describe()
	return r
}
// GetHumidity calculates a region's humidity based on its altitude and its
// distance from the nearest ocean. Sitting directly on the ocean always
// yields maximum humidity; otherwise a 100-point budget loses half a point
// per unit of altitude and half a point per unit of ocean distance (integer
// division), and the result is clamped to [0, 100].
func GetHumidity(altitude int, oceanDistance int) int {
	if oceanDistance == 0 {
		return 100
	}
	h := 100 - altitude/2 - oceanDistance/2
	switch {
	case h > 100:
		return 100
	case h < 0:
		return 0
	default:
		return h
	}
}
// GetTemperature calculates a temperature for a region given its distance
// from the equator and its altitude. A 100-point budget loses one point per
// unit of distance from the equator (either hemisphere) and half a point per
// unit of altitude, clamped to the 0-99 range used by Region.Temperature.
//
// Fix: the original closing line carried non-Go trailer text that broke
// compilation; this emits the clean function body.
func GetTemperature(distanceToEquator int, altitude int) int {
	temperature := 100 - int(math.Abs(float64(distanceToEquator))) - (altitude / 2)
	if temperature < 0 {
		temperature = 0
	}
	if temperature > 99 {
		temperature = 99
	}
	return temperature
}
package fetch
import (
"strconv"
flatbuffers "github.com/google/flatbuffers/go"
)
// Kind classifies the logical content of a series column (numeric samples,
// histograms, cumulative histograms, or text). Generated flatbuffers enum;
// the wire representation is the raw int8 value.
type Kind int8

const (
	KindUNKNOWN Kind = 0
	KindNUMERIC Kind = 1
	KindHIST Kind = 2
	KindHIST_CUMULATIVE Kind = 3
	KindTEXT Kind = 4
)

// EnumNamesKind maps Kind values to their schema names, for display.
var EnumNamesKind = map[Kind]string{
	KindUNKNOWN: "UNKNOWN",
	KindNUMERIC: "NUMERIC",
	KindHIST: "HIST",
	KindHIST_CUMULATIVE: "HIST_CUMULATIVE",
	KindTEXT: "TEXT",
}

// EnumValuesKind is the inverse of EnumNamesKind: schema name to value.
var EnumValuesKind = map[string]Kind{
	"UNKNOWN": KindUNKNOWN,
	"NUMERIC": KindNUMERIC,
	"HIST": KindHIST,
	"HIST_CUMULATIVE": KindHIST_CUMULATIVE,
	"TEXT": KindTEXT,
}

// String returns the schema name of v, or "Kind(<n>)" for unknown values.
func (v Kind) String() string {
	if s, ok := EnumNamesKind[v]; ok {
		return s
	}
	return "Kind(" + strconv.FormatInt(int64(v), 10) + ")"
}
// SeriesT is the natural (object API) form of the Series flatbuffers union:
// Type selects the member and Value holds the matching *XxxSeriesT pointer.
type SeriesT struct {
	Type Series
	Value interface{}
}

// SeriesPack serializes the union member selected by t.Type into builder and
// returns its table offset; nil input or a NONE/unrecognized discriminant
// yields 0.
func SeriesPack(builder *flatbuffers.Builder, t *SeriesT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	switch t.Type {
	case SeriesNumericSeries:
		return NumericSeriesPack(builder, t.Value.(*NumericSeriesT))
	case SeriesHistSeries:
		return HistSeriesPack(builder, t.Value.(*HistSeriesT))
	case SeriesTextSeries:
		return TextSeriesPack(builder, t.Value.(*TextSeriesT))
	}
	return 0
}

// SeriesUnPack materializes the union member of discriminant t from the
// given flatbuffers table; NONE/unrecognized discriminants yield nil.
func SeriesUnPack(t Series, table flatbuffers.Table) *SeriesT {
	switch t {
	case SeriesNumericSeries:
		x := NumericSeries{_tab: table}
		return &SeriesT{Type: SeriesNumericSeries, Value: x.UnPack()}
	case SeriesHistSeries:
		x := HistSeries{_tab: table}
		return &SeriesT{Type: SeriesHistSeries, Value: x.UnPack()}
	case SeriesTextSeries:
		x := TextSeries{_tab: table}
		return &SeriesT{Type: SeriesTextSeries, Value: x.UnPack()}
	}
	return nil
}
// Series is the generated flatbuffers union discriminant selecting which
// concrete series table a SeriesContainer carries.
type Series byte

const (
	SeriesNONE Series = 0
	SeriesNumericSeries Series = 1
	SeriesHistSeries Series = 2
	SeriesTextSeries Series = 3
)

// EnumNamesSeries maps Series values to their schema names, for display.
var EnumNamesSeries = map[Series]string{
	SeriesNONE: "NONE",
	SeriesNumericSeries: "NumericSeries",
	SeriesHistSeries: "HistSeries",
	SeriesTextSeries: "TextSeries",
}

// EnumValuesSeries is the inverse of EnumNamesSeries: schema name to value.
var EnumValuesSeries = map[string]Series{
	"NONE": SeriesNONE,
	"NumericSeries": SeriesNumericSeries,
	"HistSeries": SeriesHistSeries,
	"TextSeries": SeriesTextSeries,
}

// String returns the schema name of v, or "Series(<n>)" for unknown values.
func (v Series) String() string {
	if s, ok := EnumNamesSeries[v]; ok {
		return s
	}
	return "Series(" + strconv.FormatInt(int64(v), 10) + ")"
}
// HistSeriesT is the natural (object API) form of the HistSeries table: a
// plain slice of histogram objects.
type HistSeriesT struct {
	Values []*HistogramT
}

// HistSeriesPack serializes t into builder and returns the table offset
// (0 when t is nil). Child tables are packed before the vector, which is
// prepended in reverse order as flatbuffers requires.
func HistSeriesPack(builder *flatbuffers.Builder, t *HistSeriesT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	valuesOffset := flatbuffers.UOffsetT(0)
	if t.Values != nil {
		valuesLength := len(t.Values)
		valuesOffsets := make([]flatbuffers.UOffsetT, valuesLength)
		for j := 0; j < valuesLength; j++ {
			valuesOffsets[j] = HistogramPack(builder, t.Values[j])
		}
		HistSeriesStartValuesVector(builder, valuesLength)
		for j := valuesLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(valuesOffsets[j])
		}
		valuesOffset = builder.EndVector(valuesLength)
	}
	HistSeriesStart(builder)
	HistSeriesAddValues(builder, valuesOffset)
	return HistSeriesEnd(builder)
}

// UnPack deep-copies the flatbuffers-backed table into a freshly allocated
// HistSeriesT object tree; a nil receiver yields nil.
func (rcv *HistSeries) UnPack() *HistSeriesT {
	if rcv == nil {
		return nil
	}
	t := &HistSeriesT{}
	valuesLength := rcv.ValuesLength()
	t.Values = make([]*HistogramT, valuesLength)
	for j := 0; j < valuesLength; j++ {
		x := Histogram{}
		rcv.Values(&x, j)
		t.Values[j] = x.UnPack()
	}
	return t
}

// HistSeries is the zero-copy read accessor over an encoded HistSeries table.
type HistSeries struct {
	_tab flatbuffers.Table
}

// GetRootAsHistSeries positions an accessor on the root table stored in buf
// at the given offset.
func GetRootAsHistSeries(buf []byte, offset flatbuffers.UOffsetT) *HistSeries {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &HistSeries{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *HistSeries) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *HistSeries) Table() flatbuffers.Table {
	return rcv._tab
}

// Values initializes obj on the j-th element of the `values` vector (vtable
// slot 4); it reports false when the vector is absent.
func (rcv *HistSeries) Values(obj *Histogram, j int) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		x := rcv._tab.Vector(o)
		x += flatbuffers.UOffsetT(j) * 4
		x = rcv._tab.Indirect(x)
		obj.Init(rcv._tab.Bytes, x)
		return true
	}
	return false
}

// ValuesLength returns the length of the `values` vector, or 0 when absent.
func (rcv *HistSeries) ValuesLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// HistSeriesStart begins a HistSeries table with one field slot.
func HistSeriesStart(builder *flatbuffers.Builder) {
	builder.StartObject(1)
}

// HistSeriesAddValues stores the offset of the `values` vector in slot 0.
func HistSeriesAddValues(builder *flatbuffers.Builder, values flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(values), 0)
}

// HistSeriesStartValuesVector begins a vector of 4-byte table offsets.
func HistSeriesStartValuesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}

// HistSeriesEnd finishes the table under construction and returns its offset.
func HistSeriesEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// TextEntryT is the natural (object API) form of the TextEntry table: a text
// value plus an internal offset (milliseconds, per the field name).
type TextEntryT struct {
	InternalOffsetMs uint64
	Value string
}

// TextEntryPack serializes t into builder and returns the table offset
// (0 when t is nil).
func TextEntryPack(builder *flatbuffers.Builder, t *TextEntryT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	valueOffset := builder.CreateString(t.Value)
	TextEntryStart(builder)
	TextEntryAddInternalOffsetMs(builder, t.InternalOffsetMs)
	TextEntryAddValue(builder, valueOffset)
	return TextEntryEnd(builder)
}

// UnPack copies the flatbuffers-backed table into a fresh TextEntryT; a nil
// receiver yields nil.
func (rcv *TextEntry) UnPack() *TextEntryT {
	if rcv == nil {
		return nil
	}
	t := &TextEntryT{}
	t.InternalOffsetMs = rcv.InternalOffsetMs()
	t.Value = string(rcv.Value())
	return t
}

// TextEntry is the zero-copy read accessor over an encoded TextEntry table.
type TextEntry struct {
	_tab flatbuffers.Table
}

// GetRootAsTextEntry positions an accessor on the root table stored in buf
// at the given offset.
func GetRootAsTextEntry(buf []byte, offset flatbuffers.UOffsetT) *TextEntry {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &TextEntry{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *TextEntry) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *TextEntry) Table() flatbuffers.Table {
	return rcv._tab
}

// InternalOffsetMs reads the offset field (vtable slot 4); 0 when absent.
func (rcv *TextEntry) InternalOffsetMs() uint64 {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.GetUint64(o + rcv._tab.Pos)
	}
	return 0
}

// MutateInternalOffsetMs overwrites the field in place; it reports false
// when the field was not stored (i.e. held its default).
func (rcv *TextEntry) MutateInternalOffsetMs(n uint64) bool {
	return rcv._tab.MutateUint64Slot(4, n)
}

// Value returns the raw bytes of the text value (vtable slot 6), or nil.
func (rcv *TextEntry) Value() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// TextEntryStart begins a TextEntry table with two field slots.
func TextEntryStart(builder *flatbuffers.Builder) {
	builder.StartObject(2)
}

// TextEntryAddInternalOffsetMs stores the offset value in slot 0.
func TextEntryAddInternalOffsetMs(builder *flatbuffers.Builder, internalOffsetMs uint64) {
	builder.PrependUint64Slot(0, internalOffsetMs, 0)
}

// TextEntryAddValue stores the string offset in slot 1.
func TextEntryAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0)
}

// TextEntryEnd finishes the table under construction and returns its offset.
func TextEntryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// TextMultiValueT is the natural (object API) form of the TextMultiValue
// table: the set of text entries recorded for a single sample slot.
type TextMultiValueT struct {
	Entries []*TextEntryT
}

// TextMultiValuePack serializes t into builder and returns the table offset
// (0 when t is nil). Child tables are packed before the vector, which is
// prepended in reverse order as flatbuffers requires.
func TextMultiValuePack(builder *flatbuffers.Builder, t *TextMultiValueT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	entriesOffset := flatbuffers.UOffsetT(0)
	if t.Entries != nil {
		entriesLength := len(t.Entries)
		entriesOffsets := make([]flatbuffers.UOffsetT, entriesLength)
		for j := 0; j < entriesLength; j++ {
			entriesOffsets[j] = TextEntryPack(builder, t.Entries[j])
		}
		TextMultiValueStartEntriesVector(builder, entriesLength)
		for j := entriesLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(entriesOffsets[j])
		}
		entriesOffset = builder.EndVector(entriesLength)
	}
	TextMultiValueStart(builder)
	TextMultiValueAddEntries(builder, entriesOffset)
	return TextMultiValueEnd(builder)
}

// UnPack deep-copies the flatbuffers-backed table into a fresh
// TextMultiValueT; a nil receiver yields nil.
func (rcv *TextMultiValue) UnPack() *TextMultiValueT {
	if rcv == nil {
		return nil
	}
	t := &TextMultiValueT{}
	entriesLength := rcv.EntriesLength()
	t.Entries = make([]*TextEntryT, entriesLength)
	for j := 0; j < entriesLength; j++ {
		x := TextEntry{}
		rcv.Entries(&x, j)
		t.Entries[j] = x.UnPack()
	}
	return t
}

// TextMultiValue is the zero-copy read accessor over an encoded
// TextMultiValue table.
type TextMultiValue struct {
	_tab flatbuffers.Table
}

// GetRootAsTextMultiValue positions an accessor on the root table stored in
// buf at the given offset.
func GetRootAsTextMultiValue(buf []byte, offset flatbuffers.UOffsetT) *TextMultiValue {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &TextMultiValue{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *TextMultiValue) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *TextMultiValue) Table() flatbuffers.Table {
	return rcv._tab
}

// Entries initializes obj on the j-th element of the `entries` vector
// (vtable slot 4); it reports false when the vector is absent.
func (rcv *TextMultiValue) Entries(obj *TextEntry, j int) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		x := rcv._tab.Vector(o)
		x += flatbuffers.UOffsetT(j) * 4
		x = rcv._tab.Indirect(x)
		obj.Init(rcv._tab.Bytes, x)
		return true
	}
	return false
}

// EntriesLength returns the length of the `entries` vector, or 0 when absent.
func (rcv *TextMultiValue) EntriesLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// TextMultiValueStart begins a TextMultiValue table with one field slot.
func TextMultiValueStart(builder *flatbuffers.Builder) {
	builder.StartObject(1)
}

// TextMultiValueAddEntries stores the vector offset in slot 0.
func TextMultiValueAddEntries(builder *flatbuffers.Builder, entries flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(entries), 0)
}

// TextMultiValueStartEntriesVector begins a vector of 4-byte table offsets.
func TextMultiValueStartEntriesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}

// TextMultiValueEnd finishes the table and returns its offset.
func TextMultiValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// TextSeriesT is the natural (object API) form of the TextSeries table: one
// TextMultiValue per sample slot.
type TextSeriesT struct {
	Values []*TextMultiValueT
}

// TextSeriesPack serializes t into builder and returns the table offset
// (0 when t is nil). Child tables are packed before the vector, which is
// prepended in reverse order as flatbuffers requires.
func TextSeriesPack(builder *flatbuffers.Builder, t *TextSeriesT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	valuesOffset := flatbuffers.UOffsetT(0)
	if t.Values != nil {
		valuesLength := len(t.Values)
		valuesOffsets := make([]flatbuffers.UOffsetT, valuesLength)
		for j := 0; j < valuesLength; j++ {
			valuesOffsets[j] = TextMultiValuePack(builder, t.Values[j])
		}
		TextSeriesStartValuesVector(builder, valuesLength)
		for j := valuesLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(valuesOffsets[j])
		}
		valuesOffset = builder.EndVector(valuesLength)
	}
	TextSeriesStart(builder)
	TextSeriesAddValues(builder, valuesOffset)
	return TextSeriesEnd(builder)
}

// UnPack deep-copies the flatbuffers-backed table into a fresh TextSeriesT;
// a nil receiver yields nil.
func (rcv *TextSeries) UnPack() *TextSeriesT {
	if rcv == nil {
		return nil
	}
	t := &TextSeriesT{}
	valuesLength := rcv.ValuesLength()
	t.Values = make([]*TextMultiValueT, valuesLength)
	for j := 0; j < valuesLength; j++ {
		x := TextMultiValue{}
		rcv.Values(&x, j)
		t.Values[j] = x.UnPack()
	}
	return t
}

// TextSeries is the zero-copy read accessor over an encoded TextSeries table.
type TextSeries struct {
	_tab flatbuffers.Table
}

// GetRootAsTextSeries positions an accessor on the root table stored in buf
// at the given offset.
func GetRootAsTextSeries(buf []byte, offset flatbuffers.UOffsetT) *TextSeries {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &TextSeries{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *TextSeries) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *TextSeries) Table() flatbuffers.Table {
	return rcv._tab
}

// Values initializes obj on the j-th element of the `values` vector (vtable
// slot 4); it reports false when the vector is absent.
func (rcv *TextSeries) Values(obj *TextMultiValue, j int) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		x := rcv._tab.Vector(o)
		x += flatbuffers.UOffsetT(j) * 4
		x = rcv._tab.Indirect(x)
		obj.Init(rcv._tab.Bytes, x)
		return true
	}
	return false
}

// ValuesLength returns the length of the `values` vector, or 0 when absent.
func (rcv *TextSeries) ValuesLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// TextSeriesStart begins a TextSeries table with one field slot.
func TextSeriesStart(builder *flatbuffers.Builder) {
	builder.StartObject(1)
}

// TextSeriesAddValues stores the vector offset in slot 0.
func TextSeriesAddValues(builder *flatbuffers.Builder, values flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(values), 0)
}

// TextSeriesStartValuesVector begins a vector of 4-byte table offsets.
func TextSeriesStartValuesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}

// TextSeriesEnd finishes the table and returns its offset.
func TextSeriesEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// NumericSeriesT is the natural (object API) form of the NumericSeries
// table: a plain slice of float64 samples.
type NumericSeriesT struct {
	Values []float64
}

// NumericSeriesPack serializes t into builder and returns the table offset
// (0 when t is nil). The scalar vector is prepended in reverse order as
// flatbuffers requires.
func NumericSeriesPack(builder *flatbuffers.Builder, t *NumericSeriesT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	valuesOffset := flatbuffers.UOffsetT(0)
	if t.Values != nil {
		valuesLength := len(t.Values)
		NumericSeriesStartValuesVector(builder, valuesLength)
		for j := valuesLength - 1; j >= 0; j-- {
			builder.PrependFloat64(t.Values[j])
		}
		valuesOffset = builder.EndVector(valuesLength)
	}
	NumericSeriesStart(builder)
	NumericSeriesAddValues(builder, valuesOffset)
	return NumericSeriesEnd(builder)
}

// UnPack copies the flatbuffers-backed table into a fresh NumericSeriesT; a
// nil receiver yields nil.
func (rcv *NumericSeries) UnPack() *NumericSeriesT {
	if rcv == nil {
		return nil
	}
	t := &NumericSeriesT{}
	valuesLength := rcv.ValuesLength()
	t.Values = make([]float64, valuesLength)
	for j := 0; j < valuesLength; j++ {
		t.Values[j] = rcv.Values(j)
	}
	return t
}

// NumericSeries is the zero-copy read accessor over an encoded NumericSeries
// table.
type NumericSeries struct {
	_tab flatbuffers.Table
}

// GetRootAsNumericSeries positions an accessor on the root table stored in
// buf at the given offset.
func GetRootAsNumericSeries(buf []byte, offset flatbuffers.UOffsetT) *NumericSeries {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &NumericSeries{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *NumericSeries) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *NumericSeries) Table() flatbuffers.Table {
	return rcv._tab
}

// Values reads the j-th float64 of the `values` vector (vtable slot 4);
// 0 when the vector is absent. Elements are 8 bytes wide.
func (rcv *NumericSeries) Values(j int) float64 {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.GetFloat64(a + flatbuffers.UOffsetT(j*8))
	}
	return 0
}

// ValuesLength returns the length of the `values` vector, or 0 when absent.
func (rcv *NumericSeries) ValuesLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// MutateValues overwrites the j-th element in place; it reports false when
// the vector is absent.
func (rcv *NumericSeries) MutateValues(j int, n float64) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.MutateFloat64(a+flatbuffers.UOffsetT(j*8), n)
	}
	return false
}

// NumericSeriesStart begins a NumericSeries table with one field slot.
func NumericSeriesStart(builder *flatbuffers.Builder) {
	builder.StartObject(1)
}

// NumericSeriesAddValues stores the vector offset in slot 0.
func NumericSeriesAddValues(builder *flatbuffers.Builder, values flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(values), 0)
}

// NumericSeriesStartValuesVector begins a vector of 8-byte float64 elements.
func NumericSeriesStartValuesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(8, numElems, 8)
}

// NumericSeriesEnd finishes the table and returns its offset.
func NumericSeriesEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// SeriesContainerT is the natural (object API) form of the SeriesContainer
// table: a Kind classification plus the Series union payload.
type SeriesContainerT struct {
	Kind Kind
	Data *SeriesT
}

// SeriesContainerPack serializes t into builder and returns the table offset
// (0 when t is nil). The union payload is packed first; its discriminant is
// written only when a payload is present.
func SeriesContainerPack(builder *flatbuffers.Builder, t *SeriesContainerT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	dataOffset := SeriesPack(builder, t.Data)
	SeriesContainerStart(builder)
	SeriesContainerAddKind(builder, t.Kind)
	if t.Data != nil {
		SeriesContainerAddDataType(builder, t.Data.Type)
	}
	SeriesContainerAddData(builder, dataOffset)
	return SeriesContainerEnd(builder)
}

// UnPack deep-copies the flatbuffers-backed table into a fresh
// SeriesContainerT, resolving the union via its discriminant; a nil receiver
// yields nil.
func (rcv *SeriesContainer) UnPack() *SeriesContainerT {
	if rcv == nil {
		return nil
	}
	t := &SeriesContainerT{}
	t.Kind = rcv.Kind()
	dataTable := flatbuffers.Table{}
	if rcv.Data(&dataTable) {
		t.Data = SeriesUnPack(rcv.DataType(), dataTable)
	}
	return t
}

// SeriesContainer is the zero-copy read accessor over an encoded
// SeriesContainer table.
type SeriesContainer struct {
	_tab flatbuffers.Table
}

// GetRootAsSeriesContainer positions an accessor on the root table stored in
// buf at the given offset.
func GetRootAsSeriesContainer(buf []byte, offset flatbuffers.UOffsetT) *SeriesContainer {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &SeriesContainer{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *SeriesContainer) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *SeriesContainer) Table() flatbuffers.Table {
	return rcv._tab
}

// Kind reads the kind field (vtable slot 4); KindUNKNOWN (0) when absent.
func (rcv *SeriesContainer) Kind() Kind {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return Kind(rcv._tab.GetInt8(o + rcv._tab.Pos))
	}
	return 0
}

// MutateKind overwrites the kind field in place; it reports false when the
// field was not stored.
func (rcv *SeriesContainer) MutateKind(n Kind) bool {
	return rcv._tab.MutateInt8Slot(4, int8(n))
}

// DataType reads the union discriminant (vtable slot 6); SeriesNONE when
// absent.
func (rcv *SeriesContainer) DataType() Series {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		return Series(rcv._tab.GetByte(o + rcv._tab.Pos))
	}
	return 0
}

// MutateDataType overwrites the discriminant in place; it reports false when
// the field was not stored.
func (rcv *SeriesContainer) MutateDataType(n Series) bool {
	return rcv._tab.MutateByteSlot(6, byte(n))
}

// Data initializes obj on the union payload table (vtable slot 8); it
// reports false when the payload is absent.
func (rcv *SeriesContainer) Data(obj *flatbuffers.Table) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
	if o != 0 {
		rcv._tab.Union(obj, o)
		return true
	}
	return false
}

// SeriesContainerStart begins a SeriesContainer table with three field slots.
func SeriesContainerStart(builder *flatbuffers.Builder) {
	builder.StartObject(3)
}

// SeriesContainerAddKind stores the kind value in slot 0.
func SeriesContainerAddKind(builder *flatbuffers.Builder, kind Kind) {
	builder.PrependInt8Slot(0, int8(kind), 0)
}

// SeriesContainerAddDataType stores the union discriminant in slot 1.
func SeriesContainerAddDataType(builder *flatbuffers.Builder, dataType Series) {
	builder.PrependByteSlot(1, byte(dataType), 0)
}

// SeriesContainerAddData stores the union payload offset in slot 2.
func SeriesContainerAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(data), 0)
}

// SeriesContainerEnd finishes the table and returns its offset.
func SeriesContainerEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// KVPairT is the natural (object API) form of the KVPair table: a simple
// string key/value pair used in metadata lists.
type KVPairT struct {
	Key string
	Value string
}

// KVPairPack serializes t into builder and returns the table offset (0 when
// t is nil).
func KVPairPack(builder *flatbuffers.Builder, t *KVPairT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	keyOffset := builder.CreateString(t.Key)
	valueOffset := builder.CreateString(t.Value)
	KVPairStart(builder)
	KVPairAddKey(builder, keyOffset)
	KVPairAddValue(builder, valueOffset)
	return KVPairEnd(builder)
}

// UnPack copies the flatbuffers-backed table into a fresh KVPairT; a nil
// receiver yields nil.
func (rcv *KVPair) UnPack() *KVPairT {
	if rcv == nil {
		return nil
	}
	t := &KVPairT{}
	t.Key = string(rcv.Key())
	t.Value = string(rcv.Value())
	return t
}

// KVPair is the zero-copy read accessor over an encoded KVPair table.
type KVPair struct {
	_tab flatbuffers.Table
}

// GetRootAsKVPair positions an accessor on the root table stored in buf at
// the given offset.
func GetRootAsKVPair(buf []byte, offset flatbuffers.UOffsetT) *KVPair {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &KVPair{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *KVPair) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *KVPair) Table() flatbuffers.Table {
	return rcv._tab
}

// Key returns the raw bytes of the key (vtable slot 4), or nil when absent.
func (rcv *KVPair) Key() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// Value returns the raw bytes of the value (vtable slot 6), or nil when
// absent.
func (rcv *KVPair) Value() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// KVPairStart begins a KVPair table with two field slots.
func KVPairStart(builder *flatbuffers.Builder) {
	builder.StartObject(2)
}

// KVPairAddKey stores the key string offset in slot 0.
func KVPairAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0)
}

// KVPairAddValue stores the value string offset in slot 1.
func KVPairAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0)
}

// KVPairEnd finishes the table and returns its offset.
func KVPairEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// GlobalMetaDataT is the natural (object API) form of the GlobalMetaData
// table: document-wide timing fields (start/period in ms, per the names),
// a sample count, error/warning strings, and free-form key/value metadata.
type GlobalMetaDataT struct {
	StartMs uint64
	PeriodMs uint32
	Count uint32
	Error []string
	Warning []string
	Meta []*KVPairT
}

// GlobalMetaDataPack serializes t into builder and returns the table offset
// (0 when t is nil). All strings, child tables, and vectors are packed
// before the table itself, with each vector prepended in reverse order as
// flatbuffers requires.
func GlobalMetaDataPack(builder *flatbuffers.Builder, t *GlobalMetaDataT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	errorOffset := flatbuffers.UOffsetT(0)
	if t.Error != nil {
		errorLength := len(t.Error)
		errorOffsets := make([]flatbuffers.UOffsetT, errorLength)
		for j := 0; j < errorLength; j++ {
			errorOffsets[j] = builder.CreateString(t.Error[j])
		}
		GlobalMetaDataStartErrorVector(builder, errorLength)
		for j := errorLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(errorOffsets[j])
		}
		errorOffset = builder.EndVector(errorLength)
	}
	warningOffset := flatbuffers.UOffsetT(0)
	if t.Warning != nil {
		warningLength := len(t.Warning)
		warningOffsets := make([]flatbuffers.UOffsetT, warningLength)
		for j := 0; j < warningLength; j++ {
			warningOffsets[j] = builder.CreateString(t.Warning[j])
		}
		GlobalMetaDataStartWarningVector(builder, warningLength)
		for j := warningLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(warningOffsets[j])
		}
		warningOffset = builder.EndVector(warningLength)
	}
	metaOffset := flatbuffers.UOffsetT(0)
	if t.Meta != nil {
		metaLength := len(t.Meta)
		metaOffsets := make([]flatbuffers.UOffsetT, metaLength)
		for j := 0; j < metaLength; j++ {
			metaOffsets[j] = KVPairPack(builder, t.Meta[j])
		}
		GlobalMetaDataStartMetaVector(builder, metaLength)
		for j := metaLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(metaOffsets[j])
		}
		metaOffset = builder.EndVector(metaLength)
	}
	GlobalMetaDataStart(builder)
	GlobalMetaDataAddStartMs(builder, t.StartMs)
	GlobalMetaDataAddPeriodMs(builder, t.PeriodMs)
	GlobalMetaDataAddCount(builder, t.Count)
	GlobalMetaDataAddError(builder, errorOffset)
	GlobalMetaDataAddWarning(builder, warningOffset)
	GlobalMetaDataAddMeta(builder, metaOffset)
	return GlobalMetaDataEnd(builder)
}

// UnPack deep-copies the flatbuffers-backed table into a fresh
// GlobalMetaDataT; a nil receiver yields nil.
func (rcv *GlobalMetaData) UnPack() *GlobalMetaDataT {
	if rcv == nil {
		return nil
	}
	t := &GlobalMetaDataT{}
	t.StartMs = rcv.StartMs()
	t.PeriodMs = rcv.PeriodMs()
	t.Count = rcv.Count()
	errorLength := rcv.ErrorLength()
	t.Error = make([]string, errorLength)
	for j := 0; j < errorLength; j++ {
		t.Error[j] = string(rcv.Error(j))
	}
	warningLength := rcv.WarningLength()
	t.Warning = make([]string, warningLength)
	for j := 0; j < warningLength; j++ {
		t.Warning[j] = string(rcv.Warning(j))
	}
	metaLength := rcv.MetaLength()
	t.Meta = make([]*KVPairT, metaLength)
	for j := 0; j < metaLength; j++ {
		x := KVPair{}
		rcv.Meta(&x, j)
		t.Meta[j] = x.UnPack()
	}
	return t
}

// GlobalMetaData is the zero-copy read accessor over an encoded
// GlobalMetaData table.
type GlobalMetaData struct {
	_tab flatbuffers.Table
}

// GetRootAsGlobalMetaData positions an accessor on the root table stored in
// buf at the given offset.
func GetRootAsGlobalMetaData(buf []byte, offset flatbuffers.UOffsetT) *GlobalMetaData {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &GlobalMetaData{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *GlobalMetaData) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *GlobalMetaData) Table() flatbuffers.Table {
	return rcv._tab
}

// StartMs reads the start field (vtable slot 4); 0 when absent.
func (rcv *GlobalMetaData) StartMs() uint64 {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.GetUint64(o + rcv._tab.Pos)
	}
	return 0
}

// MutateStartMs overwrites the field in place; false if not stored.
func (rcv *GlobalMetaData) MutateStartMs(n uint64) bool {
	return rcv._tab.MutateUint64Slot(4, n)
}

// PeriodMs reads the period field (vtable slot 6); 0 when absent.
func (rcv *GlobalMetaData) PeriodMs() uint32 {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		return rcv._tab.GetUint32(o + rcv._tab.Pos)
	}
	return 0
}

// MutatePeriodMs overwrites the field in place; false if not stored.
func (rcv *GlobalMetaData) MutatePeriodMs(n uint32) bool {
	return rcv._tab.MutateUint32Slot(6, n)
}

// Count reads the count field (vtable slot 8); 0 when absent.
func (rcv *GlobalMetaData) Count() uint32 {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
	if o != 0 {
		return rcv._tab.GetUint32(o + rcv._tab.Pos)
	}
	return 0
}

// MutateCount overwrites the field in place; false if not stored.
func (rcv *GlobalMetaData) MutateCount(n uint32) bool {
	return rcv._tab.MutateUint32Slot(8, n)
}

// Error returns the j-th error string as raw bytes (vtable slot 10), or nil
// when the vector is absent.
func (rcv *GlobalMetaData) Error(j int) []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
	}
	return nil
}

// ErrorLength returns the length of the error vector, or 0 when absent.
func (rcv *GlobalMetaData) ErrorLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// Warning returns the j-th warning string as raw bytes (vtable slot 12), or
// nil when the vector is absent.
func (rcv *GlobalMetaData) Warning(j int) []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
	}
	return nil
}

// WarningLength returns the length of the warning vector, or 0 when absent.
func (rcv *GlobalMetaData) WarningLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// Meta initializes obj on the j-th KVPair of the meta vector (vtable slot
// 14); it reports false when the vector is absent.
func (rcv *GlobalMetaData) Meta(obj *KVPair, j int) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
	if o != 0 {
		x := rcv._tab.Vector(o)
		x += flatbuffers.UOffsetT(j) * 4
		x = rcv._tab.Indirect(x)
		obj.Init(rcv._tab.Bytes, x)
		return true
	}
	return false
}

// MetaLength returns the length of the meta vector, or 0 when absent.
func (rcv *GlobalMetaData) MetaLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// GlobalMetaDataStart begins a GlobalMetaData table with six field slots.
func GlobalMetaDataStart(builder *flatbuffers.Builder) {
	builder.StartObject(6)
}

// GlobalMetaDataAddStartMs stores the start value in slot 0.
func GlobalMetaDataAddStartMs(builder *flatbuffers.Builder, startMs uint64) {
	builder.PrependUint64Slot(0, startMs, 0)
}

// GlobalMetaDataAddPeriodMs stores the period value in slot 1.
func GlobalMetaDataAddPeriodMs(builder *flatbuffers.Builder, periodMs uint32) {
	builder.PrependUint32Slot(1, periodMs, 0)
}

// GlobalMetaDataAddCount stores the count value in slot 2.
func GlobalMetaDataAddCount(builder *flatbuffers.Builder, count uint32) {
	builder.PrependUint32Slot(2, count, 0)
}

// GlobalMetaDataAddError stores the error vector offset in slot 3.
func GlobalMetaDataAddError(builder *flatbuffers.Builder, error flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(error), 0)
}

// GlobalMetaDataStartErrorVector begins a vector of 4-byte string offsets.
func GlobalMetaDataStartErrorVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}

// GlobalMetaDataAddWarning stores the warning vector offset in slot 4.
func GlobalMetaDataAddWarning(builder *flatbuffers.Builder, warning flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(warning), 0)
}

// GlobalMetaDataStartWarningVector begins a vector of 4-byte string offsets.
func GlobalMetaDataStartWarningVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}

// GlobalMetaDataAddMeta stores the meta vector offset in slot 5.
func GlobalMetaDataAddMeta(builder *flatbuffers.Builder, meta flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(meta), 0)
}

// GlobalMetaDataStartMetaVector begins a vector of 4-byte table offsets.
func GlobalMetaDataStartMetaVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}

// GlobalMetaDataEnd finishes the table and returns its offset.
func GlobalMetaDataEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// ColumnMetaDataT is the natural (object API) form of the ColumnMetaData
// table: a column label plus free-form key/value metadata.
type ColumnMetaDataT struct {
	Label string
	Meta []*KVPairT
}

// ColumnMetaDataPack serializes t into builder and returns the table offset
// (0 when t is nil). Strings and child tables are packed before the table;
// the vector is prepended in reverse order as flatbuffers requires.
func ColumnMetaDataPack(builder *flatbuffers.Builder, t *ColumnMetaDataT) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	labelOffset := builder.CreateString(t.Label)
	metaOffset := flatbuffers.UOffsetT(0)
	if t.Meta != nil {
		metaLength := len(t.Meta)
		metaOffsets := make([]flatbuffers.UOffsetT, metaLength)
		for j := 0; j < metaLength; j++ {
			metaOffsets[j] = KVPairPack(builder, t.Meta[j])
		}
		ColumnMetaDataStartMetaVector(builder, metaLength)
		for j := metaLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(metaOffsets[j])
		}
		metaOffset = builder.EndVector(metaLength)
	}
	ColumnMetaDataStart(builder)
	ColumnMetaDataAddLabel(builder, labelOffset)
	ColumnMetaDataAddMeta(builder, metaOffset)
	return ColumnMetaDataEnd(builder)
}

// UnPack deep-copies the flatbuffers-backed table into a fresh
// ColumnMetaDataT; a nil receiver yields nil.
func (rcv *ColumnMetaData) UnPack() *ColumnMetaDataT {
	if rcv == nil {
		return nil
	}
	t := &ColumnMetaDataT{}
	t.Label = string(rcv.Label())
	metaLength := rcv.MetaLength()
	t.Meta = make([]*KVPairT, metaLength)
	for j := 0; j < metaLength; j++ {
		x := KVPair{}
		rcv.Meta(&x, j)
		t.Meta[j] = x.UnPack()
	}
	return t
}

// ColumnMetaData is the zero-copy read accessor over an encoded
// ColumnMetaData table.
type ColumnMetaData struct {
	_tab flatbuffers.Table
}

// GetRootAsColumnMetaData positions an accessor on the root table stored in
// buf at the given offset.
func GetRootAsColumnMetaData(buf []byte, offset flatbuffers.UOffsetT) *ColumnMetaData {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &ColumnMetaData{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *ColumnMetaData) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *ColumnMetaData) Table() flatbuffers.Table {
	return rcv._tab
}

// Label returns the raw bytes of the label (vtable slot 4), or nil.
func (rcv *ColumnMetaData) Label() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// Meta initializes obj on the j-th KVPair of the meta vector (vtable slot
// 6); it reports false when the vector is absent.
func (rcv *ColumnMetaData) Meta(obj *KVPair, j int) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		x := rcv._tab.Vector(o)
		x += flatbuffers.UOffsetT(j) * 4
		x = rcv._tab.Indirect(x)
		obj.Init(rcv._tab.Bytes, x)
		return true
	}
	return false
}

// MetaLength returns the length of the meta vector, or 0 when absent.
func (rcv *ColumnMetaData) MetaLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// ColumnMetaDataStart begins a ColumnMetaData table with two field slots.
func ColumnMetaDataStart(builder *flatbuffers.Builder) {
	builder.StartObject(2)
}

// ColumnMetaDataAddLabel stores the label string offset in slot 0.
func ColumnMetaDataAddLabel(builder *flatbuffers.Builder, label flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(label), 0)
}

// ColumnMetaDataAddMeta stores the meta vector offset in slot 1.
func ColumnMetaDataAddMeta(builder *flatbuffers.Builder, meta flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(meta), 0)
}

// ColumnMetaDataStartMetaVector begins a vector of 4-byte table offsets.
func ColumnMetaDataStartMetaVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}

// ColumnMetaDataEnd finishes the table and returns its offset.
func ColumnMetaDataEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
// DF4T is the natural (object API) form of the DF4 root table: a format
// version, global header metadata, per-column metadata, and the column data.
type DF4T struct {
	Version uint32
	Head *GlobalMetaDataT
	Meta []*ColumnMetaDataT
	Columns []*SeriesContainerT
}

// DF4Pack serializes t into builder and returns the root table offset
// (0 when t is nil). All children are packed before the table itself, and
// each vector is prepended in reverse order as flatbuffers requires.
func DF4Pack(builder *flatbuffers.Builder, t *DF4T) flatbuffers.UOffsetT {
	if t == nil {
		return 0
	}
	headOffset := GlobalMetaDataPack(builder, t.Head)
	metaOffset := flatbuffers.UOffsetT(0)
	if t.Meta != nil {
		metaLength := len(t.Meta)
		metaOffsets := make([]flatbuffers.UOffsetT, metaLength)
		for j := 0; j < metaLength; j++ {
			metaOffsets[j] = ColumnMetaDataPack(builder, t.Meta[j])
		}
		DF4StartMetaVector(builder, metaLength)
		for j := metaLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(metaOffsets[j])
		}
		metaOffset = builder.EndVector(metaLength)
	}
	columnsOffset := flatbuffers.UOffsetT(0)
	if t.Columns != nil {
		columnsLength := len(t.Columns)
		columnsOffsets := make([]flatbuffers.UOffsetT, columnsLength)
		for j := 0; j < columnsLength; j++ {
			columnsOffsets[j] = SeriesContainerPack(builder, t.Columns[j])
		}
		DF4StartColumnsVector(builder, columnsLength)
		for j := columnsLength - 1; j >= 0; j-- {
			builder.PrependUOffsetT(columnsOffsets[j])
		}
		columnsOffset = builder.EndVector(columnsLength)
	}
	DF4Start(builder)
	DF4AddVersion(builder, t.Version)
	DF4AddHead(builder, headOffset)
	DF4AddMeta(builder, metaOffset)
	DF4AddColumns(builder, columnsOffset)
	return DF4End(builder)
}

// UnPack deep-copies the flatbuffers-backed root table into a fresh DF4T;
// a nil receiver yields nil. Note: Head(nil) may return nil when the head
// field is absent, in which case GlobalMetaData.UnPack returns nil too.
func (rcv *DF4) UnPack() *DF4T {
	if rcv == nil {
		return nil
	}
	t := &DF4T{}
	t.Version = rcv.Version()
	t.Head = rcv.Head(nil).UnPack()
	metaLength := rcv.MetaLength()
	t.Meta = make([]*ColumnMetaDataT, metaLength)
	for j := 0; j < metaLength; j++ {
		x := ColumnMetaData{}
		rcv.Meta(&x, j)
		t.Meta[j] = x.UnPack()
	}
	columnsLength := rcv.ColumnsLength()
	t.Columns = make([]*SeriesContainerT, columnsLength)
	for j := 0; j < columnsLength; j++ {
		x := SeriesContainer{}
		rcv.Columns(&x, j)
		t.Columns[j] = x.UnPack()
	}
	return t
}

// DF4 is the zero-copy read accessor over an encoded DF4 root table.
type DF4 struct {
	_tab flatbuffers.Table
}

// GetRootAsDF4 positions an accessor on the root table stored in buf at the
// given offset.
func GetRootAsDF4(buf []byte, offset flatbuffers.UOffsetT) *DF4 {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &DF4{}
	x.Init(buf, n+offset)
	return x
}

// Init points the accessor at table position i within buf.
func (rcv *DF4) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table exposes the underlying flatbuffers table.
func (rcv *DF4) Table() flatbuffers.Table {
	return rcv._tab
}

// Version reads the version field (vtable slot 4); 0 when absent.
func (rcv *DF4) Version() uint32 {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.GetUint32(o + rcv._tab.Pos)
	}
	return 0
}

// MutateVersion overwrites the field in place; false if not stored.
func (rcv *DF4) MutateVersion(n uint32) bool {
	return rcv._tab.MutateUint32Slot(4, n)
}

// Head initializes obj (allocating one when nil) on the head sub-table
// (vtable slot 6); it returns nil when the field is absent.
func (rcv *DF4) Head(obj *GlobalMetaData) *GlobalMetaData {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		x := rcv._tab.Indirect(o + rcv._tab.Pos)
		if obj == nil {
			obj = new(GlobalMetaData)
		}
		obj.Init(rcv._tab.Bytes, x)
		return obj
	}
	return nil
}

// Meta initializes obj on the j-th ColumnMetaData of the meta vector (vtable
// slot 8); it reports false when the vector is absent.
func (rcv *DF4) Meta(obj *ColumnMetaData, j int) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
	if o != 0 {
		x := rcv._tab.Vector(o)
		x += flatbuffers.UOffsetT(j) * 4
		x = rcv._tab.Indirect(x)
		obj.Init(rcv._tab.Bytes, x)
		return true
	}
	return false
}

// MetaLength returns the length of the meta vector, or 0 when absent.
func (rcv *DF4) MetaLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// Columns initializes obj on the j-th SeriesContainer of the columns vector
// (vtable slot 10); it reports false when the vector is absent.
func (rcv *DF4) Columns(obj *SeriesContainer, j int) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
	if o != 0 {
		x := rcv._tab.Vector(o)
		x += flatbuffers.UOffsetT(j) * 4
		x = rcv._tab.Indirect(x)
		obj.Init(rcv._tab.Bytes, x)
		return true
	}
	return false
}

// ColumnsLength returns the length of the columns vector, or 0 when absent.
func (rcv *DF4) ColumnsLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// DF4Start begins a DF4 table with four field slots.
func DF4Start(builder *flatbuffers.Builder) {
	builder.StartObject(4)
}

// DF4AddVersion stores the version value in slot 0.
func DF4AddVersion(builder *flatbuffers.Builder, version uint32) {
	builder.PrependUint32Slot(0, version, 0)
}

// DF4AddHead stores the head table offset in slot 1.
func DF4AddHead(builder *flatbuffers.Builder, head flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(head), 0)
}

// DF4AddMeta stores the meta vector offset in slot 2.
func DF4AddMeta(builder *flatbuffers.Builder, meta flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(meta), 0)
}

// DF4StartMetaVector begins a vector of 4-byte table offsets.
func DF4StartMetaVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}

// DF4AddColumns stores the columns vector offset in slot 3.
func DF4AddColumns(builder *flatbuffers.Builder, columns flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(columns), 0)
}

// DF4StartColumnsVector begins a vector of 4-byte table offsets.
func DF4StartColumnsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(4, numElems, 4)
}
func DF4End(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
} | vendor/github.com/circonus-labs/gosnowth/fb/fetch/circonus_df4_generated.go | 0.673406 | 0.476701 | circonus_df4_generated.go | starcoder |
package types
import (
"math"
"reflect"
"strings"
"github.com/lyraproj/issue/issue"
"github.com/lyraproj/pcore/px"
"github.com/lyraproj/semver/semver"
)
const tagName = "puppet"
type reflector struct {
c px.Context
}
var pValueType = reflect.TypeOf((*px.Value)(nil)).Elem()
func NewReflector(c px.Context) px.Reflector {
return &reflector{c}
}
func Methods(t reflect.Type) []reflect.Method {
if t.Kind() == reflect.Ptr {
// Pointer may have methods
if t.NumMethod() == 0 {
t = t.Elem()
}
}
nm := t.NumMethod()
ms := make([]reflect.Method, nm)
for i := 0; i < nm; i++ {
ms[i] = t.Method(i)
}
return ms
}
func Fields(t reflect.Type) []reflect.StructField {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
nf := 0
if t.Kind() == reflect.Struct {
nf = t.NumField()
}
fs := make([]reflect.StructField, nf)
for i := 0; i < nf; i++ {
fs[i] = t.Field(i)
}
return fs
}
// NormalizeType ensures that pointers to interface is converted to interface and that struct is converted to
// pointer to struct
func NormalizeType(rt reflect.Type) reflect.Type {
switch rt.Kind() {
case reflect.Struct:
rt = reflect.PtrTo(rt)
case reflect.Ptr:
re := rt.Elem()
if re.Kind() == reflect.Interface {
rt = re
}
}
return rt
}
func (r *reflector) Methods(t reflect.Type) []reflect.Method {
return Methods(t)
}
func (r *reflector) Fields(t reflect.Type) []reflect.StructField {
return Fields(t)
}
func (r *reflector) FieldName(f *reflect.StructField) string {
return FieldName(f)
}
func FieldName(f *reflect.StructField) string {
if tagHash, ok := TagHash(f); ok {
if nv, ok := tagHash.Get4(`name`); ok {
return nv.String()
}
}
return issue.FirstToLower(f.Name)
}
func (r *reflector) Reflect(src px.Value) reflect.Value {
if sn, ok := src.(px.Reflected); ok {
return sn.Reflect(r.c)
}
panic(px.Error(px.UnreflectableValue, issue.H{`type`: src.PType()}))
}
func (r *reflector) Reflect2(src px.Value, rt reflect.Type) reflect.Value {
if rt.Kind() == reflect.Interface {
if rt.Name() == `` {
// Destination type is interface{}, derive type from source
return r.Reflect(src)
}
if rt.AssignableTo(pValueType) {
sv := reflect.ValueOf(src)
if sv.Type().AssignableTo(rt) {
return sv
}
}
}
v := reflect.New(rt).Elem()
r.ReflectTo(src, v)
return v
}
// ReflectTo assigns the native value of src to dest
func (r *reflector) ReflectTo(src px.Value, dest reflect.Value) {
dt := dest.Type()
assertSettable(&dest)
if dt.Kind() == reflect.Interface && dt.AssignableTo(pValueType) {
sv := reflect.ValueOf(src)
if !sv.Type().AssignableTo(dt) {
panic(px.Error(px.AttemptToSetWrongKind, issue.H{`expected`: sv.Type().String(), `actual`: dest.Type().String()}))
}
dest.Set(sv)
} else {
switch src := src.(type) {
case px.Reflected:
if dt.Kind() == reflect.Interface && dt.Name() == `` {
// Destination is an interface{}, derive type from source
dest.Set(src.Reflect(r.c))
} else {
src.ReflectTo(r.c, dest)
}
case px.PuppetObject:
src.PType().(px.ObjectType).ToReflectedValue(r.c, src, dest)
default:
panic(px.Error(px.InvalidSourceForSet, issue.H{`type`: src.PType()}))
}
}
}
func (r *reflector) ReflectType(src px.Type) (reflect.Type, bool) {
return ReflectType(r.c, src)
}
func ReflectType(c px.Context, src px.Type) (reflect.Type, bool) {
if sn, ok := src.(px.ReflectedType); ok {
return sn.ReflectType(c)
}
return nil, false
}
func (r *reflector) TagHash(f *reflect.StructField) (px.OrderedMap, bool) {
return TagHash(f)
}
func TagHash(f *reflect.StructField) (px.OrderedMap, bool) {
return ParseTagHash(f.Tag.Get(tagName))
}
func ParseTagHash(tag string) (px.OrderedMap, bool) {
if tag != `` {
tagExpr := Parse(`{` + tag + `}`)
return tagExpr.(px.OrderedMap), true
}
return nil, false
}
var errorType = reflect.TypeOf((*error)(nil)).Elem()
func (r *reflector) FunctionDeclFromReflect(name string, mt reflect.Type, withReceiver bool) px.OrderedMap {
returnsError := false
takesContext := false
var rt px.Type
var err error
oc := mt.NumOut()
switch oc {
case 0:
rt = DefaultAnyType()
case 1:
ot := mt.Out(0)
if ot.AssignableTo(errorType) {
returnsError = true
} else {
rt, err = wrapReflectedType(r.c, mt.Out(0))
if err != nil {
panic(err)
}
}
case 2:
rt, err = wrapReflectedType(r.c, mt.Out(0))
if err != nil {
panic(err)
}
ot := mt.Out(1)
if ot.AssignableTo(errorType) {
returnsError = true
} else {
var rt2 px.Type
rt2, err = wrapReflectedType(r.c, mt.Out(1))
if err != nil {
panic(err)
}
rt = NewTupleType([]px.Type{rt, rt2}, nil)
}
default:
ot := mt.Out(oc - 1)
if ot.AssignableTo(errorType) {
returnsError = true
oc = oc - 1
}
ts := make([]px.Type, oc)
for i := 0; i < oc; i++ {
ts[i], err = wrapReflectedType(r.c, mt.Out(i))
if err != nil {
panic(err)
}
}
rt = NewTupleType(ts, nil)
}
var pt *TupleType
pc := mt.NumIn()
ix := 0
if withReceiver {
// First argument is the receiver itself
ix = 1
}
if pc == ix {
pt = EmptyTupleType()
} else {
if mt.In(ix).AssignableTo(px.ContextType) {
// First parameter is context. It should not be included
takesContext = true
ix++
if pc == ix {
pt = EmptyTupleType()
}
}
if pt == nil {
ps := make([]px.Type, pc-ix)
for p := ix; p < pc; p++ {
ps[p-ix], err = wrapReflectedType(r.c, mt.In(p))
if err != nil {
panic(err)
}
}
var sz *IntegerType
if mt.IsVariadic() {
last := pc - ix - 1
ps[last] = ps[last].(*ArrayType).ElementType()
sz = NewIntegerType(int64(last), math.MaxInt64)
}
pt = NewTupleType(ps, sz)
}
}
ds := make([]*HashEntry, 0, 4)
ds = append(ds, WrapHashEntry2(keyType, NewCallableType(pt, rt, nil)))
ds = append(ds, WrapHashEntry2(KeyGoName, stringValue(name)))
if returnsError {
ds = append(ds, WrapHashEntry2(keyReturnsError, BooleanTrue))
}
if takesContext {
ds = append(ds, WrapHashEntry2(keyTakesContext, BooleanTrue))
}
return WrapHash(ds)
}
func (r *reflector) InitializerFromTagged(typeName string, parent px.Type, tg px.AnnotatedType) px.OrderedMap {
rf := tg.Type()
ie := make([]*HashEntry, 0, 2)
if rf.Kind() == reflect.Func {
fn := rf.Name()
if fn == `` {
fn = `do`
}
ie = append(ie, WrapHashEntry2(keyFunctions, singletonMap(`do`, r.FunctionDeclFromReflect(fn, rf, false))))
} else {
tags := tg.Tags()
otherTags := tg.OtherTags()
fs := r.Fields(rf)
nf := len(fs)
var pt reflect.Type
if nf > 0 {
es := make([]*HashEntry, 0, nf)
for i, f := range fs {
if i == 0 && f.Anonymous {
// Parent
pt = reflect.PtrTo(f.Type)
continue
}
if f.PkgPath != `` {
// Unexported
continue
}
name, decl := r.ReflectFieldTags(&f, tags[f.Name], otherTags[f.Name])
es = append(es, WrapHashEntry2(name, decl))
}
ie = append(ie, WrapHashEntry2(keyAttributes, WrapHash(es)))
}
ms := r.Methods(rf)
nm := len(ms)
if nm > 0 {
es := make([]*HashEntry, 0, nm)
for _, m := range ms {
if m.PkgPath != `` {
// Not exported struct method
continue
}
if pt != nil {
if _, ok := pt.MethodByName(m.Name); ok {
// Redeclaration's of parent method are not included
continue
}
}
es = append(es, WrapHashEntry2(issue.FirstToLower(m.Name), r.FunctionDeclFromReflect(m.Name, m.Type, rf.Kind() != reflect.Interface)))
}
ie = append(ie, WrapHashEntry2(keyFunctions, WrapHash(es)))
}
}
ats := tg.Annotations()
if ats != nil && !ats.IsEmpty() {
ie = append(ie, WrapHashEntry2(keyAnnotations, ats))
}
return WrapHash(ie)
}
func (r *reflector) TypeFromReflect(typeName string, parent px.Type, rf reflect.Type) px.ObjectType {
return r.TypeFromTagged(typeName, parent, newTaggedType(rf, nil), nil)
}
func (r *reflector) TypeFromTagged(typeName string, parent px.Type, tg px.AnnotatedType, rcFunc px.Doer) px.ObjectType {
return BuildObjectType(typeName, parent, func(obj px.ObjectType) px.OrderedMap {
obj.(*objectType).goType = tg
r.c.ImplementationRegistry().RegisterType(obj, tg.Type())
if rcFunc != nil {
rcFunc()
}
return r.InitializerFromTagged(typeName, parent, tg)
})
}
func (r *reflector) ReflectFieldTags(f *reflect.StructField, fh px.OrderedMap, otherTags map[string]string) (name string, decl px.OrderedMap) {
as := make([]*HashEntry, 0)
var val px.Value
var typ px.Type
if fh != nil {
if v, ok := fh.Get4(keyName); ok {
name = v.String()
}
if v, ok := fh.GetEntry(keyKind); ok {
as = append(as, v.(*HashEntry))
}
if v, ok := fh.GetEntry(keyValue); ok {
val = v.Value()
as = append(as, v.(*HashEntry))
}
if v, ok := fh.Get4(keyType); ok {
switch v := v.(type) {
case *DeferredType:
typ = v.Resolve(r.c)
case px.Type:
typ = v
}
}
}
if typ == nil {
var err error
if typ, err = px.WrapReflectedType(r.c, f.Type); err != nil {
panic(err)
}
}
_, optional := typ.(*OptionalType)
if optional {
if val == nil {
// If no value is declared and the type is declared as optional, then
// value is an implicit undef
as = append(as, WrapHashEntry2(keyValue, undef))
}
} else {
if val != nil && val.Equals(undef, nil) {
// Convenience. If a value is declared as being undef, then ensure that
// type accepts undef
typ = NewOptionalType(typ)
optional = true
}
}
if optional {
switch f.Type.Kind() {
case reflect.Ptr, reflect.Interface:
// OK. Can be nil
default:
// The field will always have a value (the Go zero value), so it cannot be nil.
panic(px.Error(px.ImpossibleOptional, issue.H{`name`: f.Name, `type`: typ.String()}))
}
}
as = append(as, WrapHashEntry2(keyType, typ))
as = append(as, WrapHashEntry2(KeyGoName, stringValue(f.Name)))
if len(otherTags) > 0 {
as = append(as, WrapHashEntry2(keyAnnotations, singleMap(TagsAnnotationType, WrapStringToStringMap(otherTags))))
}
if name == `` {
name = issue.FirstToLower(f.Name)
}
return name, WrapHash(as)
}
func (r *reflector) TypeSetFromReflect(typeSetName string, version semver.Version, aliases map[string]string, rTypes ...reflect.Type) px.TypeSet {
types := make([]*HashEntry, 0)
prefix := typeSetName + `::`
for _, rt := range rTypes {
var parent px.Type
fs := r.Fields(rt)
nf := len(fs)
if nf > 0 {
f := fs[0]
if f.Anonymous && f.Type.Kind() == reflect.Struct {
parent = NewTypeReferenceType(typeName(prefix, aliases, f.Type))
}
}
name := typeName(prefix, aliases, rt)
types = append(types, WrapHashEntry2(
name[strings.LastIndex(name, `::`)+2:],
r.TypeFromReflect(name, parent, rt)))
}
es := make([]*HashEntry, 0)
es = append(es, WrapHashEntry2(px.KeyPcoreUri, stringValue(string(px.PcoreUri))))
es = append(es, WrapHashEntry2(px.KeyPcoreVersion, WrapSemVer(px.PcoreVersion)))
es = append(es, WrapHashEntry2(KeyVersion, WrapSemVer(version)))
es = append(es, WrapHashEntry2(KeyTypes, WrapHash(types)))
return NewTypeSet(px.RuntimeNameAuthority, typeSetName, WrapHash(es))
}
func ParentType(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() == reflect.Struct && t.NumField() > 0 {
f := t.Field(0)
if f.Anonymous && f.Type.Kind() == reflect.Struct {
return f.Type
}
}
return nil
}
func typeName(prefix string, aliases map[string]string, rt reflect.Type) string {
if rt.Kind() == reflect.Ptr {
// Pointers have no names
rt = rt.Elem()
}
name := rt.Name()
if aliases != nil {
if alias, ok := aliases[name]; ok {
name = alias
}
}
return prefix + name
}
func assertSettable(value *reflect.Value) {
if !value.CanSet() {
panic(px.Error(px.AttemptToSetUnsettable, issue.H{`kind`: value.Type().String()}))
}
}
// isNilAndUnknown returns true if rt is reflect.Type((*interface{}(nil)).Elem()
func isNilAndUnknown(rt reflect.Type) bool {
return rt.Kind() == reflect.Interface && rt.Name() == ``
} | types/reflector.go | 0.540681 | 0.432962 | reflector.go | starcoder |
// Package summarybar provides renderers for summary bar.
package summarybar
import (
"bytes"
"errors"
"fmt"
"io"
"math"
"sort"
"strings"
"github.com/aws/copilot-cli/internal/pkg/term/progress"
)
var errTotalIsZero = errors.New("the data sums up to zero")
// summaryBarComponent returns a summary bar given data and the string representations of each data category.
type summaryBarComponent struct {
data []Datum
width int
emptyRep string
}
// Datum is the basic unit of summary bar.
// Each Datum is composed of a value and a representation. The value will be represented with the representation in the
// rendered summary bar.
type Datum struct {
Representation string
Value int
}
// Opt configures an option for summaryBarComponent.
type Opt func(*summaryBarComponent)
// WithWidth is an opt that configures the width for summaryBarComponent.
func WithWidth(width int) Opt {
return func(c *summaryBarComponent) {
c.width = width
}
}
// WithEmptyRep is an opt that configures the empty representation for summaryBarComponent.
func WithEmptyRep(representation string) Opt {
return func(c *summaryBarComponent) {
c.emptyRep = representation
}
}
// New returns a summaryBarComponent configured against opts.
func New(data []Datum, opts ...Opt) progress.Renderer {
component := &summaryBarComponent{
data: data,
}
for _, opt := range opts {
opt(component)
}
return component
}
// Render writes the summary bar to ouT without a new line.
func (c *summaryBarComponent) Render(out io.Writer) (numLines int, err error) {
if c.width <= 0 {
return 0, fmt.Errorf("invalid width %d for summary bar", c.width)
}
if hasNegativeValue(c.data) {
return 0, fmt.Errorf("input data contains negative values")
}
var data []int
var representations []string
for _, d := range c.data {
data = append(data, d.Value)
representations = append(representations, d.Representation)
}
buf := new(bytes.Buffer)
portions, err := c.calculatePortions(data)
if err != nil {
if !errors.Is(err, errTotalIsZero) {
return 0, err
}
if _, err := buf.WriteString(fmt.Sprint(strings.Repeat(c.emptyRep, c.width))); err != nil {
return 0, fmt.Errorf("write empty bar to buffer: %w", err)
}
if _, err := buf.WriteTo(out); err != nil {
return 0, fmt.Errorf("write buffer to out: %w", err)
}
return 0, nil
}
var bar string
for idx, p := range portions {
bar += fmt.Sprint(strings.Repeat(representations[idx], p))
}
if _, err := buf.WriteString(bar); err != nil {
return 0, fmt.Errorf("write bar to buffer: %w", err)
}
if _, err := buf.WriteTo(out); err != nil {
return 0, fmt.Errorf("write buffer to out: %w", err)
}
return 0, nil
}
func (c *summaryBarComponent) calculatePortions(data []int) ([]int, error) {
type estimation struct {
index int
dec float64
portion int
}
var sum int
for _, v := range data {
sum += v
}
if sum <= 0 {
return nil, errTotalIsZero
}
// We first underestimate how many units each data value would take in the summary bar of length Length.
// Then we distribute the rest of the units to each estimation.
var underestimations []estimation
for idx, v := range data {
rawFraction := (float64)(v) / (float64)(sum) * (float64)(c.width)
_, decPart := math.Modf(rawFraction)
underestimations = append(underestimations, estimation{
dec: decPart,
portion: (int)(math.Max(math.Floor(rawFraction), 0)),
index: idx,
})
}
// Calculate the sum of the underestimated units and see how far we are from filling the bar of length `Length`.
var currLength int
for _, underestimated := range underestimations {
currLength += underestimated.portion
}
unitsLeft := c.width - currLength
// Sort by decimal places from larger to smaller.
sort.SliceStable(underestimations, func(i, j int) bool {
return underestimations[i].dec > underestimations[j].dec
})
// Distribute extra values first to portions with larger decimal places.
out := make([]int, len(data))
for _, d := range underestimations {
if unitsLeft > 0 {
d.portion += 1
unitsLeft -= 1
}
out[d.index] = d.portion
}
return out, nil
}
func hasNegativeValue(data []Datum) bool {
for _, d := range data {
if d.Value < 0 {
return true
}
}
return false
} | internal/pkg/term/progress/summarybar/summarybar.go | 0.83056 | 0.455138 | summarybar.go | starcoder |
package net
import (
"encoding/csv"
"log"
"os"
"strings"
slices "github.com/fabricioism/go-text-classification/utils/slice"
str "github.com/fabricioism/go-text-classification/utils/str"
)
type data struct {
words []string
classes []string
ignore []string
}
// This function takes a file
// and read file and returns matrices with 0's and 1's
// We model the file.
func loadData(file *os.File) ([]string, []string) {
// Reading the file
reader := csv.NewReader(file)
reader.FieldsPerRecord = 2
// Read in all of the CSV records
rawCSVData, err := reader.ReadAll()
if err != nil {
log.Fatal(err)
}
// inputsData and inputClasses have
// float values that will eventually be
// used to form matrices.
inputsData := make([]string, 1*len(rawCSVData))
inputClasses := make([]string, 1*len(rawCSVData))
// inputsIndex will track the current index of inputs matrix values.
var inputsIndex int
var labelsIndex int
for idx, record := range rawCSVData {
// Skip the csv's header row
if idx == 0 {
continue
}
// Loop over the float columns.
for i, val := range record {
// Add to the inputClasses if relevant.
if i == 1 {
inputClasses[labelsIndex] = val
labelsIndex++
continue
}
// Add the float value to the slice of floats.
inputsData[inputsIndex] = val
inputsIndex++
}
}
return inputsData, inputClasses
}
// This functions takes a string
// and returns a tokenize sentence
func GetTokenizeSentence(sentence string) []string {
return strings.Split(sentence, " ")
}
// Here we store characters that we'll avoid
func SymbolsToAvoid() []string {
symbols := []string{"?", "!", "¿", "¡"}
return symbols
}
// This function takes a file
// and returns arrays with words, classes of the file.
// We'll need this function for math computations
func OrganizeData(file *os.File) ([]string, []string, [][]string, []string) {
var words, classes []string
symbolsToAvoid := SymbolsToAvoid()
inputsData, inputClasses := loadData(file)
var slicedInputData [][]string
for i, instance := range inputsData {
// We tokenize each sentence (instance)
tokens := GetTokenizeSentence(instance)
// Add words if is not in slice
for _, w := range tokens {
if !slices.SliceContains(words, str.CleanWord(w)) && !slices.SliceContains(symbolsToAvoid, w) {
words = append(words, str.CleanWord(w))
}
}
// Add class if is not in slice
if !slices.SliceContains(classes, inputClasses[i]) {
classes = append(classes, inputClasses[i])
}
slicedInputData = append(slicedInputData, slices.CleanSlice(tokens))
}
words = words[:len(words)-1]
classes = classes[:len(classes)-1]
slicedInputData = slicedInputData[:len(slicedInputData)-1]
inputClasses = inputClasses[:len(inputClasses)-1]
return words, classes, slicedInputData, inputClasses
}
// This functions takes a file
// and returns data ready for computations
func GetProcessedData(file *os.File) ([][]float64, [][]float64, int, int, []string, []string) {
// Bag of words
var bag, output []float64
var trainingData, outputs [][]float64
words, classes, slicedInputData, inputClasses := OrganizeData(file)
// iterating over inputdata
for i, instance := range slicedInputData {
bag = nil
output = nil
for _, w := range words {
if slices.SliceContains(instance, w) {
bag = append(bag, 1.0)
} else {
bag = append(bag, 0.0)
}
}
trainingData = append(trainingData, bag)
for _, c := range classes {
if inputClasses[i] == c {
output = append(output, 1.0)
} else {
output = append(output, 0.0)
}
}
outputs = append(outputs, output)
}
return trainingData, outputs, len(words), len(classes), words, classes
}
// This function takes a sentence
// and will process that string
// ready for computations
func GetTestProcessedData(sentence string, words []string) []float64 {
var processedSentence []float64
slicedSentence := GetTokenizeSentence(sentence)
for _, w := range words {
if slices.SliceContains(slicedSentence, w) {
processedSentence = append(processedSentence, 1.0)
} else {
processedSentence = append(processedSentence, 0.0)
}
}
return processedSentence
} | net/preprocessing/main.go | 0.643665 | 0.416322 | main.go | starcoder |
package token
import (
"github.com/zimmski/container/list/linkedlist"
)
// Walk traverses a token graph beginning from the given token and calls for every newly visited token the given function.
// A depth-first algorithm is used to traverse the graph. If the given walk function returns an error, the whole walk process ends by returning the error back to the caller
func Walk(root Token, walkFunc func(tok Token) error) error {
queue := linkedlist.New()
queue.Unshift(root)
walked := make(map[Token]struct{})
for !queue.Empty() {
v, _ := queue.Shift()
tok := v.(Token)
if err := walkFunc(tok); err != nil {
return err
}
switch t := tok.(type) {
case ForwardToken:
if v := t.Get(); v != nil {
if _, ok := walked[v]; !ok {
queue.Unshift(v)
}
}
case ListToken:
for i := t.Len() - 1; i >= 0; i-- {
c, _ := t.Get(i)
if _, ok := walked[c]; !ok {
queue.Unshift(c)
}
}
}
}
return nil
}
// WalkInternal traverses a internal token graph beginning from the given token and calls for every newly visited token the given function.
// A depth-first algorithm is used to traverse the graph. If the given walk function returns an error, the whole walk process ends by returning the error back to the caller
func WalkInternal(root Token, walkFunc func(tok Token) error) error {
queue := linkedlist.New()
queue.Unshift(root)
walked := make(map[Token]struct{})
for !queue.Empty() {
v, _ := queue.Shift()
tok := v.(Token)
if err := walkFunc(tok); err != nil {
return err
}
if t, ok := v.(Follow); ok && !t.Follow() {
continue
}
switch t := tok.(type) {
case ForwardToken:
if v := t.InternalGet(); v != nil {
if _, ok := walked[v]; !ok {
queue.Unshift(v)
}
}
case ListToken:
for i := t.InternalLen() - 1; i >= 0; i-- {
c, _ := t.InternalGet(i)
if _, ok := walked[c]; !ok {
queue.Unshift(c)
}
}
}
}
return nil
}
// WalkInternalTail traverses a internal token graph beginning from the given token and calls for every newly visited token the given function after it has traversed all children.
// A depth-first algorithm is used to traverse the graph. If the given walk function returns an error, the whole walk process ends by returning the error back to the caller
func WalkInternalTail(root Token, walkFunc func(tok Token) error) error {
if t, ok := root.(Follow); !ok || t.Follow() {
switch t := root.(type) {
case ForwardToken:
if v := t.InternalGet(); v != nil {
if err := WalkInternalTail(v, walkFunc); err != nil {
return err
}
}
case ListToken:
for i := 0; i < t.InternalLen(); i++ {
c, _ := t.InternalGet(i)
if err := WalkInternalTail(c, walkFunc); err != nil {
return err
}
}
}
}
if err := walkFunc(root); err != nil {
return err
}
return nil
}
// ReleaseTokens traverses the token graph and calls Release for every release token
func ReleaseTokens(root Token) {
_ = Walk(root, func(tok Token) error {
if t, ok := tok.(ReleaseToken); ok {
t.Release()
}
return nil
})
_ = WalkInternal(root, func(tok Token) error {
if t, ok := tok.(ReleaseToken); ok {
t.Release()
}
return nil
})
} | token/walk.go | 0.776623 | 0.402686 | walk.go | starcoder |
package gluamapper
import (
"errors"
"fmt"
"reflect"
"strings"
assert "github.com/arl/assertgo"
"github.com/yuin/gopher-lua"
)
var (
OutputValueIsNilError = errors.New("output value is nil")
)
// Mapper maps a Lua table to a Go struct pointer.
type Mapper struct {
// A struct tag name for Lua table keys.
TagName string
}
// NewMapper returns a new mapper.
func NewMapper() *Mapper {
return &Mapper{}
}
// NewMapperWithTagName returns a new mapper with tag name.
func NewMapperWithTagName(tagName string) *Mapper {
return &Mapper{
TagName: tagName,
}
}
// Map maps the Lua value to the given Go pointer.
func (m *Mapper) Map(lv lua.LValue, output interface{}) error {
rv := reflect.ValueOf(output)
if rv.Kind() != reflect.Ptr {
return &OutputIsNotAPointerError{outputValue: rv}
}
return m.MapValue(lv, rv.Elem())
}
// MapValue maps the Lua value to Go value.
func (m *Mapper) MapValue(lv lua.LValue, rv reflect.Value) error {
if lv != lua.LNil {
return m.mapNonNilValue(lv, rv)
}
// do not call rv.Type() if rv is zero Value
if rv.IsValid() {
rv.Set(reflect.Zero(rv.Type()))
return nil
}
return OutputValueIsNilError
}
func (m *Mapper) mapNonNilValue(lv lua.LValue, rv reflect.Value) error {
assert.True(lv != lua.LNil) // lv is not *lua.LNilType
TBI := errors.New("to be implemented")
switch rv.Kind() {
case reflect.Invalid:
return OutputValueIsNilError
case reflect.Bool:
return mapBool(lv, rv)
case reflect.Int:
return mapInt(lv, rv)
case reflect.Int8:
return mapInt8(lv, rv)
case reflect.Int16:
return mapInt16(lv, rv)
case reflect.Int32:
return mapInt32(lv, rv)
case reflect.Int64:
return mapInt64(lv, rv)
case reflect.Uint:
return mapUint(lv, rv)
case reflect.Uint8:
return mapUint8(lv, rv)
case reflect.Uint16:
return mapUint16(lv, rv)
case reflect.Uint32:
return mapUint32(lv, rv)
case reflect.Uint64:
return mapUint64(lv, rv)
case reflect.Uintptr:
return TBI
case reflect.Float32:
return mapFloat32(lv, rv)
case reflect.Float64:
return mapFloat64(lv, rv)
case reflect.Complex64:
return TBI
case reflect.Complex128:
return TBI
case reflect.Array:
return m.mapArray(lv, rv)
case reflect.Chan:
return TBI
case reflect.Func:
return TBI
case reflect.Interface:
return mapInterface(lv, rv)
case reflect.Map:
return m.mapMap(lv, rv)
case reflect.Ptr:
return m.mapPtr(lv, rv)
case reflect.Slice:
return m.mapSlice(lv, rv)
case reflect.String:
return mapString(lv, rv)
case reflect.Struct:
return m.mapStruct(lv, rv)
case reflect.UnsafePointer:
return TBI
}
return fmt.Errorf("unsupported type: %s", rv.Kind())
}
func (m *Mapper) mapArray(lv lua.LValue, rv reflect.Value) error {
assert.True(lv != lua.LNil)
assert.True(rv.Kind() == reflect.Array)
switch v := lv.(type) {
case *lua.LTable:
return m.mapLuaTableToGoArray(v, rv)
case *lua.LUserData:
return mapLuaUserDataToGoValue(v, rv)
}
return newTypeError(lv, rv)
}
func (m *Mapper) mapMap(lv lua.LValue, rv reflect.Value) error {
assert.True(lv != lua.LNil)
assert.True(rv.Kind() == reflect.Map)
switch v := lv.(type) {
case *lua.LTable:
return m.mapLuaTableToGoMap(v, rv)
case *lua.LUserData:
return mapLuaUserDataToGoValue(v, rv)
}
return newTypeError(lv, rv)
}
func (m *Mapper) mapPtr(lv lua.LValue, rv reflect.Value) error {
assert.True(lv != lua.LNil)
assert.True(rv.Kind() == reflect.Ptr)
if ud, ok := lv.(*lua.LUserData); ok {
return mapLuaUserDataToGoValue(ud, rv)
}
elemPtr := reflect.New(rv.Type().Elem())
if err := m.mapNonNilValue(lv, elemPtr.Elem()); err != nil {
return err
}
rv.Set(elemPtr)
return nil
}
func (m *Mapper) mapSlice(lv lua.LValue, rv reflect.Value) error {
assert.True(rv.Kind() == reflect.Slice)
switch v := lv.(type) {
case *lua.LTable:
return m.mapLuaTableToGoSlice(v, rv)
case *lua.LUserData:
return mapLuaUserDataToGoValue(v, rv)
}
return newTypeError(lv, rv)
}
func (m *Mapper) mapLuaTableToGoArray(tbl *lua.LTable, rv reflect.Value) error {
assert.True(tbl != nil)
assert.True(rv.Kind() == reflect.Array)
arrLen := rv.Len()
for i := 0; i < arrLen; i++ {
if err := m.MapValue(tbl.RawGetInt(i+1), rv.Index(i)); err != nil {
return fmt.Errorf("array[%d]: %w", i, err)
}
}
return nil
}
func (m *Mapper) mapLuaTableToGoSlice(tbl *lua.LTable, rv reflect.Value) error {
assert.True(tbl != nil)
assert.True(rv.Kind() == reflect.Slice)
tblLen := tbl.Len()
rvCap := rv.Cap()
if rvCap < tblLen {
// reset to a new slice if need more capacity
rv.Set(reflect.MakeSlice(rv.Type(), tblLen, tblLen))
} else if rv.Len() != tblLen {
// set len if capacity is large enough
rv.SetLen(tblLen)
}
for i := 0; i < tblLen; i++ {
if err := m.MapValue(tbl.RawGetInt(i+1), rv.Index(i)); err != nil {
return fmt.Errorf("slice[%d]: %w", i, err)
}
}
return nil
}
func (m *Mapper) mapStruct(lv lua.LValue, rv reflect.Value) error {
assert.True(lv != lua.LNil)
assert.True(rv.Kind() == reflect.Struct)
switch v := lv.(type) {
case *lua.LTable:
return m.mapLuaTableToGoStruct(v, rv)
case *lua.LUserData:
return mapLuaUserDataToGoValue(v, rv)
}
return newTypeError(lv, rv)
}
func (m *Mapper) mapLuaTableToGoStruct(tbl *lua.LTable, rv reflect.Value) error {
assert.True(tbl != nil)
assert.True(rv.Kind() == reflect.Struct)
rvType := rv.Type()
for i := 0; i < rv.NumField(); i++ {
fldVal := rv.Field(i)
if !fldVal.CanSet() {
continue // unexported field
}
field := rvType.Field(i)
fieldName := getFieldName(field, m.TagName)
if err := m.MapValue(tbl.RawGet(lua.LString(fieldName)), fldVal); err != nil {
return fmt.Errorf("%s: %w", field.Name, err)
}
}
return nil
}
// getFieldName get the struct field name.
func getFieldName(field reflect.StructField, tagName string) string {
fieldName := field.Name
if tagName == "" {
return fieldName
}
tagValue := field.Tag.Get(tagName)
tagSubValue := strings.SplitN(tagValue, ",", 2)[0]
if tagSubValue != "" {
return tagSubValue // use field name from tag value
}
return fieldName
}
// Always returns nil
func (m *Mapper) mapLuaTableToGoMap(tbl *lua.LTable, rv reflect.Value) error {
assert.True(tbl != nil)
assert.True(rv.Kind() == reflect.Map)
mapType := rv.Type()
keyType := mapType.Key()
elemType := mapType.Elem()
if rv.IsNil() || rv.Len() > 0 { // reset map
rv.Set(reflect.MakeMap(mapType))
}
tbl.ForEach(func(lKey, lVal lua.LValue) {
rvKeyPtr := reflect.New(keyType) // rvKeyPtr is a pointer to a new zero key
rvKey := rvKeyPtr.Elem()
if err := m.MapValue(lKey, rvKeyPtr.Elem()); err != nil {
return // skip field if error
}
rvElemPtr := reflect.New(elemType)
rvElem := rvElemPtr.Elem()
if err := m.MapValue(lVal, rvElemPtr.Elem()); err != nil {
return // skip field if error
}
rv.SetMapIndex(rvKey, rvElem)
})
return nil
} | mapper.go | 0.66061 | 0.441312 | mapper.go | starcoder |
package internal
import (
"math"
"reflect"
"time"
"github.com/lyraproj/dgo/dgo"
)
type (
timeType int
exactTimeType struct {
exactType
value *timeVal
}
timeVal time.Time
)
// DefaultTimeType is the unconstrainted Time type
const DefaultTimeType = timeType(0)
var reflectTimeType = reflect.TypeOf(time.Time{})
func (t timeType) Assignable(ot dgo.Type) bool {
switch ot.(type) {
case timeType, *exactTimeType:
return true
}
return CheckAssignableTo(nil, ot, t)
}
func (t timeType) Equals(v interface{}) bool {
return t == v
}
func (t timeType) HashCode() int {
return int(dgo.TiTime)
}
func (t timeType) Instance(v interface{}) bool {
switch v.(type) {
case *timeVal, *time.Time, time.Time:
return true
}
return false
}
func (t timeType) New(arg dgo.Value) dgo.Value {
return newTime(t, arg)
}
func (t timeType) ReflectType() reflect.Type {
return reflectTimeType
}
func (t timeType) String() string {
return TypeString(t)
}
func (t timeType) Type() dgo.Type {
return &metaType{t}
}
func (t timeType) TypeIdentifier() dgo.TypeIdentifier {
return dgo.TiTime
}
func (t *exactTimeType) Generic() dgo.Type {
return DefaultTimeType
}
func (t *exactTimeType) IsInstance(tv time.Time) bool {
return (*time.Time)(t.value).Equal(tv)
}
func (t *exactTimeType) New(arg dgo.Value) dgo.Value {
return newTime(t, arg)
}
func (t *exactTimeType) ReflectType() reflect.Type {
return reflectTimeType
}
func (t *exactTimeType) TypeIdentifier() dgo.TypeIdentifier {
return dgo.TiTimeExact
}
func (t *exactTimeType) ExactValue() dgo.Value {
return t.value
}
func newTime(t dgo.Type, arg dgo.Value) dgo.Time {
if args, ok := arg.(dgo.Arguments); ok {
args.AssertSize(`time`, 1, 1)
arg = args.Get(0)
}
var tv dgo.Time
switch arg := arg.(type) {
case dgo.Time:
tv = arg
case dgo.Integer:
tv = Time(time.Unix(arg.GoInt(), 0))
case dgo.Float:
s, f := math.Modf(arg.GoFloat())
tv = Time(time.Unix(int64(s), int64(f*1000000000.0)))
case dgo.String:
tv = TimeFromString(arg.GoString())
default:
panic(illegalArgument(`time`, `time|string`, []interface{}{arg}, 0))
}
if !t.Instance(tv) {
panic(IllegalAssignment(t, tv))
}
return tv
}
// Time returns the given timestamp as a dgo.Time
func Time(ts time.Time) dgo.Time {
return (*timeVal)(&ts)
}
// TimeFromString returns the given time string as a dgo.Time. The string must conform to
// the time.RFC3339 or time.RFC3339Nano format. The goFunc will panic if the given string
// cannot be parsed.
func TimeFromString(s string) dgo.Time {
ts, err := time.Parse(time.RFC3339Nano, s)
if err != nil {
panic(err)
}
return (*timeVal)(&ts)
}
func (v *timeVal) Equals(other interface{}) bool {
switch ov := other.(type) {
case *timeVal:
return (*time.Time)(v).Equal(*(*time.Time)(ov))
case time.Time:
return (*time.Time)(v).Equal(ov)
case *time.Time:
return (*time.Time)(v).Equal(*ov)
}
return false
}
func (v *timeVal) SecondsWithFraction() float64 {
t := (*time.Time)(v)
y := t.Year()
// Timestamps that represent a date before the year 1678 or after 2262 can
// be represented as nanoseconds in an int64.
if 1678 < y && y < 2262 {
return float64(t.UnixNano()) / 1000000000.0
}
// Fall back to microsecond precision
us := t.Unix()*1000000 + int64(t.Nanosecond())/1000
return float64(us) / 1000000.0
}
func (v *timeVal) GoTime() time.Time {
return *(*time.Time)(v)
}
func (v *timeVal) HashCode() int {
return int((*time.Time)(v).UnixNano())
}
func (v *timeVal) ReflectTo(value reflect.Value) {
rv := reflect.ValueOf((*time.Time)(v))
k := value.Kind()
if !(k == reflect.Ptr || k == reflect.Interface) {
rv = rv.Elem()
}
value.Set(rv)
}
func (v *timeVal) String() string {
return (*time.Time)(v).Format(time.RFC3339Nano)
}
func (v *timeVal) Type() dgo.Type {
ea := &exactTimeType{value: v}
ea.ExactType = ea
return ea
} | internal/time.go | 0.762866 | 0.460713 | time.go | starcoder |
package ksql
import (
"errors"
"io"
"strconv"
)
// LexerResult represents a token lexed result
type LexerResult struct {
token Token
end int
}
// Token represents a lexed token with value
type Token struct {
kind TokenKind
value any
}
// TokenKind is the type of token lexed.
type TokenKind uint8
const (
Identifier = iota
String
Number
Boolean
Null
Equals
Add
Subtract
Multiply
Divide
Gt
Gte
Lt
Lte
And
Or
Not
Contains
In
StartsWith
EndsWith
OpenBracket
CloseBracket
Comma
OpenParen
CloseParen
)
/// Try to lex a single token from the input stream.
func tokenizeSingleToken(data []byte) (result LexerResult, err error) {
b := data[0]
switch b {
case '=':
if len(data) > 1 && data[1] == '=' {
result = LexerResult{token: Token{kind: Equals}, end: 2}
} else {
result = LexerResult{token: Token{kind: Equals}, end: 1}
}
case '+':
result = LexerResult{token: Token{kind: Add}, end: 1}
case '-':
result = LexerResult{token: Token{kind: Subtract}, end: 1}
case '*':
result = LexerResult{token: Token{kind: Multiply}, end: 1}
case '/':
result = LexerResult{token: Token{kind: Divide}, end: 1}
case '>':
if len(data) > 1 && data[1] == '=' {
result = LexerResult{token: Token{kind: Gte}, end: 2}
} else {
result = LexerResult{token: Token{kind: Gt}, end: 1}
}
case '<':
if len(data) > 1 && data[1] == '=' {
result = LexerResult{token: Token{kind: Lte}, end: 2}
} else {
result = LexerResult{token: Token{kind: Lt}, end: 1}
}
case '(':
result = LexerResult{token: Token{kind: OpenParen}, end: 1}
case ')':
result = LexerResult{token: Token{kind: CloseParen}, end: 1}
case '[':
result = LexerResult{token: Token{kind: OpenBracket}, end: 1}
case ']':
result = LexerResult{token: Token{kind: CloseBracket}, end: 1}
case ',':
result = LexerResult{token: Token{kind: Comma}, end: 1}
case '!':
result = LexerResult{token: Token{kind: Not}, end: 1}
case '"', '\'':
result, err = tokenizeString(data, b)
case '.':
result, err = tokenizeIdentifier(data)
case 't', 'f':
result, err = tokenizeBool(data)
case '&':
if len(data) > 1 && data[1] == '&' {
result = LexerResult{token: Token{kind: And}, end: 2}
} else {
err = ErrUnsupportedCharacter{b: b}
}
case '|':
if len(data) > 1 && data[1] == '|' {
result = LexerResult{token: Token{kind: Or}, end: 2}
} else {
err = ErrUnsupportedCharacter{b: b}
}
case 'C':
result, err = tokenizeKeyword(data, "CONTAINS", Contains)
case 'I':
result, err = tokenizeKeyword(data, "IN", In)
case 'S':
result, err = tokenizeKeyword(data, "STARTSWITH", StartsWith)
case 'E':
result, err = tokenizeKeyword(data, "ENDSWITH", EndsWith)
case 'N':
result, err = tokenizeNull(data)
default:
if isDigit(b) {
result, err = tokenizeNumber(data)
} else {
err = ErrUnsupportedCharacter{b: b}
}
}
return
}
func tokenizeNumber(data []byte) (result LexerResult, err error) {
var dotSeen, badNumber bool
end := takeWhile(data, func(b byte) bool {
switch b {
case '.':
if dotSeen {
badNumber = true
return false
}
dotSeen = true
return true
case '-', '+':
return true
default:
return isAlphanumeric(b)
}
})
if end > 0 && !badNumber {
var n float64
n, err = strconv.ParseFloat(string(data[:end]), 64)
if err != nil {
err = ErrInvalidNumber{s: string(data[:end])}
} else {
result = LexerResult{
token: Token{
kind: Number,
value: n,
},
end: end,
}
}
} else {
err = ErrInvalidNumber{s: string(data)}
}
return
}
func tokenizeKeyword(data []byte, keyword string, kind TokenKind) (result LexerResult, err error) {
end := takeWhile(data, func(b byte) bool {
return !isWhitespace(b)
})
if end > 0 && string(data[:end]) == keyword && len(data) > len(keyword) {
result = LexerResult{
token: Token{
kind: kind,
},
end: end,
}
} else {
err = ErrInvalidKeyword{s: string(data)}
}
return
}
func tokenizeNull(data []byte) (result LexerResult, err error) {
end := takeWhile(data, func(b byte) bool {
return isAlphabetical(b)
})
if end > 0 && string(data[:end]) == "NULL" {
result = LexerResult{
token: Token{
kind: Null,
},
end: end,
}
} else {
err = ErrInvalidKeyword{s: string(data)}
}
return
}
func tokenizeBool(data []byte) (result LexerResult, err error) {
end := takeWhile(data, func(b byte) bool {
return isAlphabetical(b)
})
if end > 0 {
switch string(data[:end]) {
case "true":
result = LexerResult{
token: Token{
kind: Boolean,
value: true,
},
end: end,
}
case "false":
result = LexerResult{
token: Token{
kind: Boolean,
value: false,
},
end: end,
}
default:
err = ErrInvalidBool{s: string(data)}
}
} else {
err = ErrInvalidBool{s: string(data)}
}
return
}
func tokenizeIdentifier(data []byte) (result LexerResult, err error) {
end := takeWhile(data[1:], func(b byte) bool {
return !isWhitespace(b) && b != ')' && b != ']'
})
if end > 0 {
if len(data) > end {
end += 1
}
result = LexerResult{token: Token{
kind: Identifier,
value: string(data[1:end]),
}, end: end}
} else {
err = ErrInvalidIdentifier{s: string(data)}
}
return
}
func tokenizeString(data []byte, quote byte) (result LexerResult, err error) {
var lastBackslash, endedWithTerminator bool
end := takeWhile(data[1:], func(b byte) bool {
switch b {
case '\\':
lastBackslash = true
return true
case quote:
if lastBackslash {
lastBackslash = false
return true
}
endedWithTerminator = true
return false
default:
return true
}
})
if end > 0 {
if endedWithTerminator {
result = LexerResult{token: Token{
kind: String,
value: string(data[1 : end+1]),
}, end: end + 2}
} else {
err = ErrUnterminatedString{s: string(data)}
}
} else {
if !endedWithTerminator || len(data) < 2 {
err = ErrUnterminatedString{s: string(data)}
} else {
result = LexerResult{token: Token{
kind: String,
value: string(data[:0]),
}, end: 2}
}
}
return
}
/// Consumes bytes while a predicate evaluates to true.
func takeWhile(data []byte, pred func(byte) bool) (end int) {
for _, b := range data {
if !pred(b) {
break
}
end++
}
return
}
type Tokenizer struct {
current int
remaining []byte
}
func skipWhitespace(data []byte) int {
return takeWhile(data, func(b byte) bool {
return isWhitespace(b)
})
}
// NewTokenizer creates a new tokenizer for use
func newTokenizer(src []byte) *Tokenizer {
return &Tokenizer{
current: 0,
remaining: src,
}
}
func (t *Tokenizer) nextToken() (token Token, err error) {
t.skipWhitespace()
if len(t.remaining) == 0 {
err = io.EOF
return
}
return t.next()
}
func (t *Tokenizer) skipWhitespace() {
skipped := skipWhitespace(t.remaining)
t.chomp(skipped)
}
func (t *Tokenizer) next() (token Token, err error) {
var result LexerResult
result, err = tokenizeSingleToken(t.remaining)
if err != nil {
return
}
t.chomp(result.end)
return result.token, nil
}
func (t *Tokenizer) chomp(num int) {
t.remaining = t.remaining[num:]
t.current += num
}
// Tokenize tokenizes the input and returns tokens or error lexing them.
func Tokenize(src []byte) (tokens []Token, err error) {
tokenizer := newTokenizer(src)
for {
token, err := tokenizer.nextToken()
if err != nil {
if errors.Is(err, io.EOF) {
return tokens, nil
}
return tokens, err
}
tokens = append(tokens, token)
}
}
func isAlphanumeric(c byte) bool {
return isLower(c) || isUpper(c) || isDigit(c)
}
func isAlphabetical(c byte) bool {
return isLower(c) || isUpper(c)
}
func isUpper(c byte) bool {
return c >= 'A' && c <= 'Z'
}
func isLower(c byte) bool {
return c >= 'a' && c <= 'z'
}
func isDigit(c byte) bool {
return c >= '0' && c <= '9'
}
func isWhitespace(b byte) bool {
switch b {
case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0:
return true
default:
return false
}
} | lexer.go | 0.522689 | 0.483039 | lexer.go | starcoder |
package bintb
import (
"fmt"
"io"
"regexp"
"strconv"
"time"
"github.com/moisespsena-go/aorm/types"
)
var (
reDTime = regexp.MustCompile(`^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)$`)
reDTimeZ = regexp.MustCompile(`^(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+) ([+\-]\d+):(\d+)$`)
)
func (TimeZero) Zero() interface{} {
var t time.Time
return t
}
type ToolDateTimer interface {
DateTimerOf(value interface{}) time.Time
}
type DateTime struct{}
func (DateTool) DateTimerOf(value interface{}) time.Time {
return value.(time.Time)
}
func IsDateTimeTool(typ ColumnType) (ok bool) {
if tool := ColumnTypeTool.Get(typ); tool != nil {
_, ok = tool.(ToolDateTimer)
}
return
}
type CTdtime struct {
TimeZero
DateTool
DateTime
}
func (CTdtime) Description() string {
return "Date Time. (2006-12-31 15:04:05)"
}
func (CTdtime) Decode(value string) (v interface{}, err error) {
result := reDTime.FindAllStringSubmatch(value, 1)
if len(result) == 0 {
return nil, fmt.Errorf("bad date time format `%s`", value)
}
r0 := result[0][1:]
Y, _ := strconv.Atoi(r0[0])
M, _ := strconv.Atoi(r0[1])
D, _ := strconv.Atoi(r0[2])
h, _ := strconv.Atoi(r0[3])
m, _ := strconv.Atoi(r0[4])
s, _ := strconv.Atoi(r0[5])
return time.Date(Y, time.Month(M), D, h, m, s, 0, time.UTC), nil
}
func (CTdtime) Encode(value interface{}) string {
return value.(time.Time).Format("2006-01-02 15:04:05")
}
func (CTdtime) BinRead(r io.Reader) (v interface{}, err error) {
var b [8]byte
if _, err = r.Read(b[:]); err != nil {
return
}
return time.Unix(int64(binOrder.Uint64(b[:])), 0).In(time.UTC), nil
}
func (CTdtime) BinWrite(w io.Writer, v interface{}) (err error) {
t := v.(time.Time)
return bw(w, t.Unix())
}
type CTdtimeZ struct {
TimeZero
DateTool
DateTime
}
func (CTdtimeZ) Zone(value interface{}) (h, m int) {
return types.TZOfTime(value.(time.Time))
}
func (CTdtimeZ) Description() string {
return "Date Time with Zone. (2006-12-31 15:04:05 -07:00)"
}
func (CTdtimeZ) Decode(value string) (v interface{}, err error) {
result := reDTimeZ.FindAllStringSubmatch(value, 1)
if len(result) == 0 {
return nil, fmt.Errorf("bad date timez format `%s`", value)
}
r0 := result[0][1:]
Y, _ := strconv.Atoi(r0[0])
M, _ := strconv.Atoi(r0[1])
D, _ := strconv.Atoi(r0[2])
h, _ := strconv.Atoi(r0[3])
m, _ := strconv.Atoi(r0[4])
s, _ := strconv.Atoi(r0[5])
zh, _ := strconv.Atoi(r0[6])
zm, _ := strconv.Atoi(r0[7])
return time.Parse("2006-01-02 15:04:05 -07:00",
fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d %03d:%02d", Y, M, D, h, m, s, zh, zm))
}
func (CTdtimeZ) Encode(value interface{}) string {
return value.(time.Time).Format("2006-01-02 15:04:05 -07:00")
}
func (CTdtimeZ) BinRead(r io.Reader) (v interface{}, err error) {
var b [8]byte
if _, err = r.Read(b[:]); err != nil {
return
}
var zh, zm int8
if err = br(r, &zh, &zm); err != nil {
return
}
loc, _ := time.Parse("-0700", fmt.Sprintf("%03d%02d", zh, zm))
return time.Unix(int64(binOrder.Uint64(b[:])), 0).In(loc.Location()), nil
}
func (CTdtimeZ) BinWrite(w io.Writer, v interface{}) (err error) {
t := v.(time.Time)
loc := t.Format("-0700")
zh, _ := strconv.Atoi(loc[0:3])
zm, _ := strconv.Atoi(loc[3:])
return bw(w, t.Unix(), int8(zh), int8(zm))
}
const (
CtDTime ColumnType = "T"
CtDTimeZ ColumnType = "Tz"
)
func init() {
ColumnTypeTool.Set(CtDTime, CTdtime{}, "dtime", "timestamp")
ColumnTypeTool.Set(CtDTimeZ, CTdtimeZ{}, "dtimez", "timestampz")
} | ctDateTime.go | 0.652463 | 0.439447 | ctDateTime.go | starcoder |
package dtables
import (
"io"
"sort"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
)
// UnscopedDiffTable is a sql.Table implementation of a system table that shows which tables have
// changed in each commit, across all branches.
type UnscopedDiffTable struct {
ddb *doltdb.DoltDB
head *doltdb.Commit
}
// NewUnscopedDiffTable creates an UnscopedDiffTable
func NewUnscopedDiffTable(_ *sql.Context, ddb *doltdb.DoltDB, head *doltdb.Commit) sql.Table {
return &UnscopedDiffTable{ddb: ddb, head: head}
}
// Name is a sql.Table interface function which returns the name of the table which is defined by the constant
// LogTableName
func (dt *UnscopedDiffTable) Name() string {
return doltdb.DiffTableName
}
// String is a sql.Table interface function which returns the name of the table which is defined by the constant
// DiffTableName
func (dt *UnscopedDiffTable) String() string {
return doltdb.DiffTableName
}
// Schema is a sql.Table interface function that returns the sql.Schema for this system table.
func (dt *UnscopedDiffTable) Schema() sql.Schema {
return []*sql.Column{
{Name: "commit_hash", Type: sql.Text, Source: doltdb.LogTableName, PrimaryKey: true},
{Name: "committer", Type: sql.Text, Source: doltdb.LogTableName, PrimaryKey: false},
{Name: "email", Type: sql.Text, Source: doltdb.LogTableName, PrimaryKey: false},
{Name: "date", Type: sql.Datetime, Source: doltdb.LogTableName, PrimaryKey: false},
{Name: "message", Type: sql.Text, Source: doltdb.LogTableName, PrimaryKey: false},
{Name: "table_name", Type: sql.Text, Source: doltdb.LogTableName, PrimaryKey: true},
}
}
// Partitions is a sql.Table interface function that returns a partition of the data. Currently data is unpartitioned.
func (dt *UnscopedDiffTable) Partitions(*sql.Context) (sql.PartitionIter, error) {
return index.SinglePartitionIterFromNomsMap(nil), nil
}
// PartitionRows is a sql.Table interface function that gets a row iterator for a partition.
func (dt *UnscopedDiffTable) PartitionRows(ctx *sql.Context, _ sql.Partition) (sql.RowIter, error) {
return NewUnscopedDiffTableItr(ctx, dt.ddb, dt.head)
}
// UnscopedDiffTableItr is a sql.RowItr implementation which iterates over each commit as if it's a row in the table.
type UnscopedDiffTableItr struct {
ctx *sql.Context
ddb *doltdb.DoltDB
commits []*doltdb.Commit
commitIdx int
tableNames []string
tableNameIdx int
}
// NewUnscopedDiffTableItr creates a UnscopedDiffTableItr from the current environment.
func NewUnscopedDiffTableItr(ctx *sql.Context, ddb *doltdb.DoltDB, head *doltdb.Commit) (*UnscopedDiffTableItr, error) {
commits, err := actions.TimeSortedCommits(ctx, ddb, head, -1)
if err != nil {
return nil, err
}
return &UnscopedDiffTableItr{ctx, ddb, commits, 0, nil, -1}, nil
}
// HasNext returns true if this UnscopedDiffItr has more elements left.
func (itr *UnscopedDiffTableItr) HasNext() bool {
// There are more diff records to iterate over if:
// 1) there is more than one commit left to process, or
// 2) the tableNames array isn't nilled out and has data to process
return itr.commitIdx+1 < len(itr.commits) || itr.tableNames != nil
}
// Next retrieves the next row. It will return io.EOF if it's the last row.
// After retrieving the last row, Close will be automatically closed.
func (itr *UnscopedDiffTableItr) Next(*sql.Context) (sql.Row, error) {
if !itr.HasNext() {
return nil, io.EOF
}
defer func() {
// Increment the table name index, and if it's the end of the table names array,
// move to the next commit and reset the table name index
itr.tableNameIdx++
if itr.tableNameIdx >= len(itr.tableNames) {
itr.tableNameIdx = -1
itr.tableNames = nil
itr.commitIdx++
}
}()
// Load table names if we don't have them for this commit yet
for itr.tableNames == nil {
err := itr.loadTableNames(itr.commits[itr.commitIdx])
if err != nil {
return nil, err
}
}
commit := itr.commits[itr.commitIdx]
hash, err := commit.HashOf()
if err != nil {
return nil, err
}
meta, err := commit.GetCommitMeta()
if err != nil {
return nil, err
}
return sql.NewRow(hash.String(), meta.Name, meta.Email, meta.Time(),
meta.Description, itr.tableNames[itr.tableNameIdx]), nil
}
// loadTableNames loads the set of changed tables for the current commit into this iterator, taking
// care of advancing the iterator if that commit didn't mutate any tables and checking for EOF condition.
func (itr *UnscopedDiffTableItr) loadTableNames(commit *doltdb.Commit) error {
tableNames, err := itr.calculateChangedTables(commit)
if err != nil {
return err
}
// If there are no table deltas for this commit (e.g. a "dolt doc" commit),
// advance to the next commit, checking for EOF condition.
if len(tableNames) == 0 {
itr.commitIdx++
if !itr.HasNext() {
return io.EOF
}
} else {
itr.tableNames = tableNames
itr.tableNameIdx = 0
}
return nil
}
// calculateChangedTables calculates the tables that changed in the specified commit, by comparing that
// commit with its immediate ancestor commit.
func (itr *UnscopedDiffTableItr) calculateChangedTables(commit *doltdb.Commit) ([]string, error) {
toRootValue, err := commit.GetRootValue()
if err != nil {
return nil, err
}
parent, err := itr.ddb.ResolveParent(itr.ctx, commit, 0)
if err != nil {
return nil, err
}
fromRootValue, err := parent.GetRootValue()
if err != nil {
return nil, err
}
deltas, err := diff.GetTableDeltas(itr.ctx, fromRootValue, toRootValue)
if err != nil {
return nil, err
}
tablesMap := make(map[string]bool)
for _, delta := range deltas {
// Use toName by default for the table name by default, but if it's
// nil (e.g. when dropping a table), check to see if fromName is available.
tableName := delta.ToName
if len(tableName) == 0 {
if len(delta.FromName) > 0 {
tableName = delta.FromName
}
}
if len(tableName) > 0 {
tablesMap[tableName] = true
}
}
// Not all commits mutate tables (e.g. empty commits)
if len(tablesMap) == 0 {
return nil, nil
}
tables := make([]string, len(tablesMap))
i := 0
for key := range tablesMap {
tables[i] = key
i++
}
sort.Strings(tables)
return tables, nil
}
// Close closes the iterator.
func (itr *UnscopedDiffTableItr) Close(*sql.Context) error {
return nil
} | go/libraries/doltcore/sqle/dtables/unscoped_diff_table.go | 0.641984 | 0.440409 | unscoped_diff_table.go | starcoder |
package image
import (
"image"
"image/draw"
)
// And returns the result of ANDing img1 with img2, offset by offs (intersection). The images are converted to
// image.Gray if not already so. For a pair of pixels, p1 & p2 returns min(p1, p2).
func And(img1, img2 image.Image, offs image.Point) *image.Gray {
r := img1.Bounds()
res := image.NewGray(r)
rect := r.Intersect(img2.Bounds().Add(r.Min.Sub(offs)))
if rect.Empty() {
// Implicit: AND with 0 => 0
return res
}
process(img1, img2, res, offs, min)
return res
}
// Or returns the result of ORing img1 with img2, offset by offs (union). The images are converted to
// image.Gray if not already so. For a pair of pixels, p1 & p2 returns max(p1, p2).
func Or(img1, img2 image.Image, offs image.Point) *image.Gray {
r := img1.Bounds()
res := image.NewGray(r)
rect := r.Intersect(img2.Bounds().Add(r.Min.Sub(offs)))
if rect.Empty() {
// Implicit: OR with 0 => img1
draw.Draw(res, r, img1, r.Min, draw.Src)
return res
}
process(img1, img2, res, offs, max)
return res
}
// Xor returns of XORing img1 with img2, offset by offs (union - intersection). The images are converted to
// image.Gray if not already so. For a pair of pixels, p1 ^ p2 returns min(max(p1, p2), 1-min(p1, p2)).
func Xor(img1, img2 image.Image, offs image.Point) *image.Gray {
r := img1.Bounds()
res := image.NewGray(r)
process(img1, img2, res, offs, max)
return res
}
// Sub returns the result of subtracting img2 from img1, offset by offs. The images are converted to
// image.Gray if not already so. For a pair of pixels, p1 & p2 returns max(0, p1-p2).
func Sub(img1, img2 image.Image, offs image.Point) *image.Gray {
r := img1.Bounds()
res := image.NewGray(r)
rect := r.Intersect(img2.Bounds().Add(r.Min.Sub(offs)))
if rect.Empty() {
// Implicit: Sub with 0 => img1
draw.Draw(res, r, img1, r.Min, draw.Src)
return res
}
process(img1, img2, res, offs, sub)
return res
}
// utility function to perform grayscale conversion and apply f.
func process(img1, img2 image.Image, res *image.Gray, offs image.Point, f func(uint8, uint8) uint8) {
r := img1.Bounds()
// Convert to grayscale if necessary
gray1, ok := img1.(*image.Gray)
if !ok {
gray1 = image.NewGray(r)
draw.Draw(gray1, r, img1, r.Min, draw.Src)
}
// Convert to grayscale if necessary and restrict to r after offset
gray2 := image.NewGray(r)
draw.Draw(gray2, r, img2, offs, draw.Src)
// gray2 and res have the same bounds and strides, gray1 may or may not
for y := r.Min.Y; y < r.Max.Y; y++ {
g1offs := gray1.PixOffset(r.Min.X, y)
roffs := res.PixOffset(r.Min.X, y)
for x := r.Min.X; x < r.Max.X; x++ {
a := gray1.Pix[g1offs]
b := gray2.Pix[roffs]
v := f(a, b)
res.Pix[roffs] = v
g1offs++
roffs++
}
}
}
// Not returns 1 - img.
func Not(img image.Image) *image.Gray {
r := img.Bounds()
res := image.NewGray(r)
// Convert to grayscale if necessary
gray, ok := img.(*image.Gray)
if !ok {
gray = image.NewGray(r)
draw.Draw(gray, r, img, r.Min, draw.Src)
}
for y := r.Min.Y; y < r.Max.Y; y++ {
goffs := gray.PixOffset(r.Min.X, y)
roffs := res.PixOffset(r.Min.X, y)
for x := r.Min.X; x < r.Max.X; x++ {
res.Pix[roffs] = 0xff - gray.Pix[goffs]
goffs++
roffs++
}
}
return res
}
// Equal returns true if img1 within img2 at offset offs, matches.
func Equal(img1, img2 image.Image, offs image.Point) bool {
r := img1.Bounds()
// Convert to grayscale if necessary
gray1, ok := img1.(*image.Gray)
if !ok {
gray1 = image.NewGray(r)
draw.Draw(gray1, r, img1, r.Min, draw.Src)
}
// Convert to grayscale if necessary and restrict to r after offset
gray2 := image.NewGray(r)
draw.Draw(gray2, r, img2, offs, draw.Src)
// Compare
for y := r.Min.Y; y < r.Max.Y; y++ {
g1offs := gray1.PixOffset(r.Min.X, y)
g2offs := gray2.PixOffset(r.Min.X, y)
for x := r.Min.X; x < r.Max.X; x++ {
if gray1.Pix[g1offs] != gray2.Pix[g2offs] {
return false
}
g1offs++
g2offs++
}
}
return true
}
// Copy creates a grayscale copy of img
func Copy(img image.Image) *image.Gray {
r := img.Bounds()
res := image.NewGray(r)
draw.Draw(res, r, img, r.Min, draw.Src)
return res
}
// AlphaToGray does a shallow copy (vs going through the ColorModel).
func AlphaToGray(a *image.Alpha) *image.Gray {
return &image.Gray{a.Pix, a.Stride, a.Rect}
}
// GrayToAlpha does a shallow copy (vs going through the ColorModel).
func GrayToAlpha(a *image.Gray) *image.Alpha {
return &image.Alpha{a.Pix, a.Stride, a.Rect}
}
// AlphaAnd returns img1 & img2 (intersection).
// For a pair of pixels, p1 & p2 returns min(p1, p2).
func AlphaAnd(img1, img2 *image.Alpha, offs image.Point) *image.Alpha {
g1, g2 := AlphaToGray(img1), AlphaToGray(img2)
return GrayToAlpha(And(g1, g2, offs))
}
// AlphaOr returns img1 | img2 (union).
// For a pair of pixels, p1 | p2 returns max(p1, p2).
func AlphaOr(img1, img2 *image.Alpha, offs image.Point) *image.Alpha {
g1, g2 := AlphaToGray(img1), AlphaToGray(img2)
return GrayToAlpha(Or(g1, g2, offs))
}
// AlphaXor returns img1 ^ img2 (union - intersection).
// For a pair of pixels, p1 ^ p2 returns min(max(p1, p2), 1-min(p1, p2)).
func AlphaXor(img1, img2 *image.Alpha, offs image.Point) *image.Alpha {
g1, g2 := AlphaToGray(img1), AlphaToGray(img2)
return GrayToAlpha(Xor(g1, g2, offs))
}
// AlphaSub returns img1 - img2 (img1 minus the intersection of img1 and img2).
// For a pair of pixels, p1 - p2 returns min(p1, 1-p2).
func AlphaSub(img1, img2 *image.Alpha, offs image.Point) *image.Alpha {
g1, g2 := AlphaToGray(img1), AlphaToGray(img2)
return GrayToAlpha(Sub(g1, g2, offs))
}
// AlphaNot returns 1 - img.
func AlphaNot(img *image.Alpha) *image.Alpha {
g1 := AlphaToGray(img)
return GrayToAlpha(Not(g1))
}
// AlphaEqual returns true if two images are the same.
func AlphaEqual(img1, img2 *image.Alpha, offs image.Point) bool {
g1, g2 := AlphaToGray(img1), AlphaToGray(img2)
return Equal(g1, g2, offs)
}
// AlphaCopy returns a deep copy of img.
func AlphaCopy(img *image.Alpha) *image.Alpha {
g1 := AlphaToGray(img)
return GrayToAlpha(Copy(g1))
}
// vs inline?
func min(a, b uint8) uint8 {
if a > b {
return b
}
return a
}
func max(a, b uint8) uint8 {
if b > a {
return b
}
return a
}
func sub(a, b uint8) uint8 {
if b > a {
return 0
}
return a - b
}
func xor(a, b uint8) uint8 {
return min(max(a, b), sub(0xff, min(a, b)))
} | image/logical.go | 0.909335 | 0.569912 | logical.go | starcoder |
package cp
import (
"fmt"
"math"
)
const (
	// INFINITY is used as the mass/moment of immovable (static-like) bodies.
	INFINITY = math.MaxFloat64
	// MAGIC_EPSILON is a small tolerance for floating-point comparisons.
	MAGIC_EPSILON = 1e-5
	// RadianConst converts degrees to radians; DegreeConst is the inverse.
	RadianConst = math.Pi / 180
	DegreeConst = 180 / math.Pi
	// POOLED_BUFFER_SIZE is the size of pooled internal buffers.
	// NOTE(review): presumably sized for contact/arbiter pools — confirm at usage sites.
	POOLED_BUFFER_SIZE = 1024
)

// CollisionBeginFunc is invoked when two shapes with matching collision types
// begin colliding; returning false ignores the collision (see CollisionHandler).
type CollisionBeginFunc func(arb *Arbiter, space *Space, userData interface{}) bool

// CollisionPreSolveFunc runs each step before the solver; returning false
// rejects the collision for this step (see CollisionHandler).
type CollisionPreSolveFunc func(arb *Arbiter, space *Space, userData interface{}) bool

// CollisionPostSolveFunc runs each step after the solver has processed the
// collision (see CollisionHandler).
type CollisionPostSolveFunc func(arb *Arbiter, space *Space, userData interface{})

// CollisionSeparateFunc is invoked when two shapes stop colliding
// (see CollisionHandler).
type CollisionSeparateFunc func(arb *Arbiter, space *Space, userData interface{})

// CollisionType identifies a category of shapes; pairs of collision types are
// matched against CollisionHandler.TypeA/TypeB to select a handler.
type CollisionType uintptr
// CollisionHandler holds function callback pointers to configure custom collision handling.
// Collision handlers have a pair of types; when a collision occurs between two shapes that
// have these types, the collision handler functions are triggered.
type CollisionHandler struct {
	// TypeA is the collision type identifier of the first shape that this handler recognizes.
	// In the collision handler callback, the shape with this type will be the first argument. Read only.
	TypeA CollisionType
	// TypeB is the collision type identifier of the second shape that this handler recognizes.
	// In the collision handler callback, the shape with this type will be the second argument. Read only.
	TypeB CollisionType
	// BeginFunc is called when two shapes with types that match this collision handler begin colliding.
	BeginFunc CollisionBeginFunc
	// PreSolveFunc is called each step when two shapes with types that match this collision handler are colliding.
	// It's called before the collision solver runs so that you can affect a collision's outcome.
	PreSolveFunc CollisionPreSolveFunc
	// PostSolveFunc is called each step when two shapes with types that match this collision handler are colliding.
	// It's called after the collision solver runs so that you can read back information about the collision to trigger events in your game.
	PostSolveFunc CollisionPostSolveFunc
	// SeparateFunc is called when two shapes with types that match this collision handler stop colliding.
	SeparateFunc CollisionSeparateFunc
	// UserData is a user definable context pointer that is passed to all of the collision handler functions.
	UserData interface{}
}
// Arbiter states: the lifecycle of a collision pair tracked by an Arbiter.
const (
	// CP_ARBITER_STATE_FIRST_COLLISION: arbiter is active and it's the first collision.
	CP_ARBITER_STATE_FIRST_COLLISION = iota
	// CP_ARBITER_STATE_NORMAL: arbiter is active and it's not the first collision.
	CP_ARBITER_STATE_NORMAL
	// CP_ARBITER_STATE_IGNORE: collision has been explicitly ignored,
	// either by returning false from a begin collision handler or by calling cpArbiterIgnore().
	CP_ARBITER_STATE_IGNORE
	// CP_ARBITER_STATE_CACHED: collision is no longer active. A space will cache an arbiter
	// for up to cpSpace.collisionPersistence more steps.
	CP_ARBITER_STATE_CACHED
	// CP_ARBITER_STATE_INVALIDATED: collision arbiter is invalid because one of the shapes was removed.
	CP_ARBITER_STATE_INVALIDATED
)
// Contact holds the per-point solver state for a single contact point of an
// Arbiter. Points are pushed via CollisionInfo.PushContact.
type Contact struct {
	// r1, r2 are the contact point positions pushed by PushContact
	// (per names, relative to the two bodies — confirm in the solver).
	r1, r2 Vector
	// nMass, tMass: solver mass terms, presumably along the contact
	// normal and tangent — computed elsewhere.
	nMass, tMass float64
	bounce float64 // TODO: look for an alternate bounce solution
	// jnAcc, jtAcc, jBias: accumulated impulses (normal, tangent, bias per names).
	jnAcc, jtAcc, jBias float64
	bias float64
	// hash identifies the contact point for persistence across steps.
	hash HashValue
}
// Clone returns a shallow copy of the contact. Contact contains only value
// fields (floats, Vector values and a HashValue), so dereferencing the
// receiver copies every field; unlike a manual field-by-field copy this
// cannot silently miss a field added to the struct later.
func (c *Contact) Clone() Contact {
	return *c
}
// CollisionInfo accumulates the contact points produced while colliding a
// pair of shapes. count tracks how many entries of arr are valid; points are
// appended with PushContact.
type CollisionInfo struct {
	// a, b are the colliding shape pair.
	a, b *Shape
	collisionId uint32
	// n is the collision normal shared by all contact points.
	n Vector
	// count is the number of valid entries in arr.
	count int
	arr []Contact
}
// PushContact appends a contact point pair with its persistence hash to the
// collision info, growing count. Panics (via assert) when the per-arbiter
// contact limit is exceeded.
func (info *CollisionInfo) PushContact(p1, p2 Vector, hash HashValue) {
	assert(info.count < MAX_CONTACTS_PER_ARBITER, "Internal error: Tried to push too many contacts.")
	idx := info.count
	contact := &info.arr[idx]
	contact.r1, contact.r2 = p1, p2
	contact.hash = hash
	info.count = idx + 1
}
// ShapeMassInfo bundles the mass properties of a shape.
// Field names follow Chipmunk: m = mass, i = moment of inertia, area = surface
// area, cog = center of gravity (presumed from names — confirm at usage sites).
type ShapeMassInfo struct {
	m, i, area float64
	cog Vector
}
// PointQueryInfo is the result of a nearest-point query against a shape.
type PointQueryInfo struct {
	// Shape is the nearest shape, NULL if no shape was within range.
	Shape *Shape
	// Point is the closest point on the shape's surface. (in world space coordinates)
	Point Vector
	// Distance is the distance to the point. The distance is negative if the point is inside the shape.
	Distance float64
	// Gradient is the gradient of the signed distance function.
	// The value should be similar to info.p/info.d, but accurate even for very small values of info.d.
	Gradient Vector
}
// SegmentQueryInfo is the result of a raycast (segment) query against a shape.
type SegmentQueryInfo struct {
	// Shape is the shape that was hit, or NULL if no collision occurred.
	Shape *Shape
	// Point is the point of impact.
	Point Vector
	// Normal is the normal of the surface hit.
	Normal Vector
	// Alpha is the normalized distance along the query segment in the range [0, 1].
	Alpha float64
}
// SplittingPlane is a plane through v0 with normal n
// (per names — used by the polygon shape code; confirm at usage sites).
type SplittingPlane struct {
	v0, n Vector
}
var (
	// NO_GROUP is the default, non-grouping filter group value.
	NO_GROUP uint = 0
	// ALL_CATEGORIES has every category bit set.
	ALL_CATEGORIES uint = ^uint(0)
)

// SHAPE_FILTER_ALL collides with everything; SHAPE_FILTER_NONE with nothing.
var SHAPE_FILTER_ALL = ShapeFilter{NO_GROUP, ALL_CATEGORIES, ALL_CATEGORIES}
var SHAPE_FILTER_NONE = ShapeFilter{NO_GROUP, ^ALL_CATEGORIES, ^ALL_CATEGORIES}
// ShapeFilter controls which pairs of shapes are allowed to collide; see Reject.
type ShapeFilter struct {
	// Group: two objects with the same non-zero group value do not collide.
	// This is generally used to group objects in a composite object together to disable self collisions.
	Group uint
	// Categories is a bitmask of user definable categories that this object belongs to.
	// The category/mask combinations of both objects in a collision must agree for a collision to occur.
	Categories uint
	// Mask is a bitmask of user definable category types that this object collides with.
	// The category/mask combinations of both objects in a collision must agree for a collision to occur.
	Mask uint
}
// NewShapeFilter builds a ShapeFilter from a group, a category bitmask and a
// collision mask.
func NewShapeFilter(group, categories, mask uint) ShapeFilter {
	return ShapeFilter{
		Group:      group,
		Categories: categories,
		Mask:       mask,
	}
}
// Reject reports whether a collision between shapes carrying filters a and b
// should be discarded: either both belong to the same non-zero group, or one
// side's categories do not intersect the other side's mask.
func (a ShapeFilter) Reject(b ShapeFilter) bool {
	// Same non-zero group: composite objects never self-collide.
	if a.Group != 0 && a.Group == b.Group {
		return true
	}
	// Category/mask agreement must hold in both directions.
	if a.Categories&b.Mask == 0 {
		return true
	}
	return b.Categories&a.Mask == 0
}
// MomentForCircle computes the moment of inertia of a hollow circle with mass
// m, inner/outer radii r1 and r2, offset from the center of gravity by offset.
func MomentForCircle(m, r1, r2 float64, offset Vector) float64 {
	radiusTerm := 0.5 * (r1*r1 + r2*r2)
	return m * (radiusTerm + offset.LengthSq())
}
// AreaForCircle computes the area of the ring between radii r1 and r2
// (either order); with one radius zero it is the area of a solid circle.
func AreaForCircle(r1, r2 float64) float64 {
	ring := math.Abs(r1*r1 - r2*r2)
	return math.Pi * ring
}
// MomentForSegment computes the moment of inertia of a beveled segment from a
// to b with mass m and bevel radius r, about the body origin.
func MomentForSegment(m float64, a, b Vector, r float64) float64 {
	center := a.Lerp(b, 0.5)
	segLen := b.Distance(a) + 2.0*r
	return m * ((segLen*segLen+4.0*r*r)/12.0 + center.LengthSq())
}
// AreaForSegment computes the area of a beveled segment from a to b with
// bevel radius r (a "capsule": rectangle plus two半 end caps forming a circle).
func AreaForSegment(a, b Vector, r float64) float64 {
	capPerimeter := math.Pi * r
	return r * (capPerimeter + 2.0*a.Distance(b))
}
// MomentForPoly computes the moment of inertia of a solid polygon with mass m,
// vertices verts[:count] translated by offset, about the body origin.
// A two-vertex "polygon" degenerates to a zero-radius segment.
func MomentForPoly(m float64, count int, verts []Vector, offset Vector, r float64) float64 {
	if count == 2 {
		return MomentForSegment(m, verts[0], verts[1], 0)
	}
	var num, den float64
	for i := 0; i < count; i++ {
		v1 := verts[i].Add(offset)
		v2 := verts[(i+1)%count].Add(offset)
		cross := v2.Cross(v1)
		num += cross * (v1.Dot(v1) + v1.Dot(v2) + v2.Dot(v2))
		den += cross
	}
	return (m * num) / (6.0 * den)
}
// AreaForPoly computes the signed area of the polygon verts[:count] padded by
// bevel radius r (the r term adds the perimeter strip plus rounded corners).
func AreaForPoly(count int, verts []Vector, r float64) float64 {
	var twiceArea, perimeter float64
	for i := 0; i < count; i++ {
		v1 := verts[i]
		v2 := verts[(i+1)%count]
		twiceArea += v1.Cross(v2)
		perimeter += v1.Distance(v2)
	}
	return r*(math.Pi*math.Abs(r)+perimeter) + twiceArea/2.0
}
// CentroidForPoly computes the centroid (center of gravity) of the polygon
// verts[:count] using the standard cross-product weighted average.
func CentroidForPoly(count int, verts []Vector) Vector {
	var twiceArea float64
	weighted := Vector{}
	for i := 0; i < count; i++ {
		a := verts[i]
		b := verts[(i+1)%count]
		cross := a.Cross(b)
		twiceArea += cross
		weighted = weighted.Add(a.Add(b).Mult(cross))
	}
	return weighted.Mult(1.0 / (3.0 * twiceArea))
}
// MomentForBox calculates the moment of inertia for a solid box of mass m
// centered on the body's center of gravity.
func MomentForBox(m, width, height float64) float64 {
	diagSq := width*width + height*height
	return m * diagSq / 12.0
}
// MomentForBox2 calculates the moment of inertia for a solid box of mass m
// described by bounding box, adding the parallel-axis term for the box
// center's offset from the origin.
func MomentForBox2(m float64, box BB) float64 {
	width := box.R - box.L
	height := box.T - box.B
	offset := Vector{box.L + box.R, box.B + box.T}.Mult(0.5) // box center
	// TODO: NaN when offset is 0 and m is INFINITY
	return MomentForBox(m, width, height) + m*offset.LengthSq()
}
// k_scalar_body computes the scalar effective mass of body along normal n at
// contact offset r: 1/m plus the rotational contribution (r×n)²/I.
func k_scalar_body(body *Body, r, n Vector) float64 {
	rcn := r.Cross(n)
	return body.m_inv + body.i_inv*rcn*rcn
}
// k_scalar computes the combined scalar effective mass of bodies a and b
// along normal n at contact offsets r1 and r2.
func k_scalar(a, b *Body, r1, r2, n Vector) float64 {
	return k_scalar_body(a, r1, n) + k_scalar_body(b, r2, n)
}
// normal_relative_velocity returns the component of the relative velocity of
// the two contact points along the normal n.
func normal_relative_velocity(a, b *Body, r1, r2, n Vector) float64 {
	return relative_velocity(a, b, r1, r2).Dot(n)
}
// k_tensor builds the 2x2 effective-mass ("K") matrix for the pair of bodies
// at contact offsets r1 and r2 — linear inverse masses on the diagonal plus
// each body's angular contribution — and returns its inverse. Asserts if the
// matrix is singular (both bodies immovable: an unsolvable constraint).
func k_tensor(a, b *Body, r1, r2 Vector) Mat2x2 {
	m_sum := a.m_inv + b.m_inv
	// start with Identity*m_sum
	k11 := m_sum
	k12 := 0.0
	k21 := 0.0
	k22 := m_sum
	// add the influence from r1
	a_i_inv := a.i_inv
	r1xsq := r1.X * r1.X * a_i_inv
	r1ysq := r1.Y * r1.Y * a_i_inv
	r1nxy := -r1.X * r1.Y * a_i_inv
	k11 += r1ysq
	k12 += r1nxy
	k21 += r1nxy
	k22 += r1xsq
	// add the influence from r2
	b_i_inv := b.i_inv
	r2xsq := r2.X * r2.X * b_i_inv
	r2ysq := r2.Y * r2.Y * b_i_inv
	r2nxy := -r2.X * r2.Y * b_i_inv
	k11 += r2ysq
	k12 += r2nxy
	k21 += r2nxy
	k22 += r2xsq
	// invert
	det := k11*k22 - k12*k21
	assert(det != 0.0, "Unsolvable constraint")
	det_inv := 1.0 / det
	return Mat2x2{
		k22 * det_inv, -k12 * det_inv,
		-k21 * det_inv, k11 * det_inv,
	}
}
// bias_coef converts an error bias (the fraction of positional error allowed
// to remain after one second) into the correction coefficient for a timestep
// of dt seconds.
func bias_coef(errorBias, dt float64) float64 {
	remaining := math.Pow(errorBias, dt)
	return 1.0 - remaining
}
// Mat2x2 is a 2x2 matrix stored row-major:
//
//	| a b |
//	| c d |
type Mat2x2 struct {
	a, b, c, d float64
}
// Transform multiplies the matrix by the column vector v, returning m·v.
func (m *Mat2x2) Transform(v Vector) Vector {
	return Vector{v.X*m.a + v.Y*m.b, v.X*m.c + v.Y*m.d}
}
// High-water marks across all DebugInfo calls, kept at package level so the
// peak figures in the report persist between frames.
var maxArbiters, maxPoints, maxConstraints int
// DebugInfo returns a human-readable snapshot of the space's solver load:
// current and peak arbiter/contact/constraint counts plus the total kinetic
// energy of the dynamic bodies.
func DebugInfo(space *Space) string {
	arbiters := len(space.arbiters)
	points := 0
	for i := 0; i < arbiters; i++ {
		points += int(space.arbiters[i].count)
	}
	constraints := len(space.constraints) + points*int(space.Iterations)
	// Update the package-level high-water marks.
	if arbiters > maxArbiters {
		maxArbiters = arbiters
	}
	if points > maxPoints {
		maxPoints = points
	}
	if constraints > maxConstraints {
		maxConstraints = constraints
	}
	var ke float64
	for _, body := range space.dynamicBodies {
		// Skip immovable bodies so the kinetic energy stays finite.
		if body.m == INFINITY || body.i == INFINITY {
			continue
		}
		ke += body.m*body.v.Dot(body.v) + body.i*body.w*body.w
	}
	return fmt.Sprintf(`Arbiters: %d (%d) - Contact Points: %d (%d)
Other Constraints: %d, Iterations: %d
Constraints x Iterations: %d (%d)
KE: %e`, arbiters, maxArbiters,
		points, maxPoints, len(space.constraints), space.Iterations, constraints, maxConstraints, ke)
}
} | everything.go | 0.688783 | 0.522872 | everything.go | starcoder |
package main
import (
"fmt"
"strconv"
"github.com/fancxxy/algorithm/list/singlylinkedlist"
)
/*
Polynomials are stored as linked lists of terms in descending exponent order:

f1(x) = 5x^2 + 4x^1 + 2x^0
f2(x) = 5x^1 + 5x^0

f1(x) + f2(x) = 5x^2 + 9x^1 +7x^0
f1(x) * f2(x) = 25x^3 + 45x^2 + 30x^1 + 10x^0
*/
// polynomial is a single term: Coefficient * x^Exponent.
type polynomial struct {
	Coefficient int
	Exponent int
}
// String renders the term as "<coefficient>x^<exponent>".
func (p *polynomial) String() string {
	return fmt.Sprintf("%dx^%d", p.Coefficient, p.Exponent)
}
// addPolynomial returns the sum of two polynomials stored as linked lists of
// *polynomial terms sorted by descending exponent. It merges the two lists
// like a merge sort: equal exponents are combined, and any leftover terms
// from either input are appended afterwards.
func addPolynomial(poly1, poly2 *singlylinkedlist.List) *singlylinkedlist.List {
	var (
		poly3 = singlylinkedlist.New()
		node1 = poly1.First()
		node2 = poly2.First()
		node3 = poly3.Head() // insertion cursor into the result list
	)
	// Merge while both lists still have terms.
	for node1 != nil && node2 != nil {
		value1, value2 := node1.Value.(*polynomial), node2.Value.(*polynomial)
		if value1.Exponent > value2.Exponent {
			node3 = poly3.Insert(&polynomial{
				Coefficient: value1.Coefficient,
				Exponent:    value1.Exponent,
			}, node3)
			node1 = node1.Next
		} else if value1.Exponent < value2.Exponent {
			node3 = poly3.Insert(&polynomial{
				Coefficient: value2.Coefficient,
				Exponent:    value2.Exponent,
			}, node3)
			node2 = node2.Next
		} else {
			// Equal exponents: combine the coefficients into one term.
			node3 = poly3.Insert(&polynomial{
				Coefficient: value1.Coefficient + value2.Coefficient,
				Exponent:    value1.Exponent,
			}, node3)
			node1 = node1.Next
			node2 = node2.Next
		}
	}
	// Drain whichever input still has terms.
	for node1 != nil {
		value1 := node1.Value.(*polynomial)
		node3 = poly3.Insert(&polynomial{
			Coefficient: value1.Coefficient,
			Exponent:    value1.Exponent,
		}, node3)
		node1 = node1.Next
	}
	for node2 != nil {
		value2 := node2.Value.(*polynomial)
		node3 = poly3.Insert(&polynomial{
			Coefficient: value2.Coefficient,
			Exponent:    value2.Exponent,
		}, node3)
		node2 = node2.Next
	}
	return poly3
}
// multiplyPolynomial returns the product of two polynomials stored as linked
// lists of *polynomial terms. It first emits the full cross product of terms,
// then folds together terms that share an exponent.
func multiplyPolynomial(poly1, poly2 *singlylinkedlist.List) *singlylinkedlist.List {
	var (
		poly3 = singlylinkedlist.New()
		node1 = poly1.First()
		node2 = poly2.First()
		node3 = poly3.Head() // insertion cursor into the result list
	)
	// Cross-multiply every term of poly1 with every term of poly2.
	for node1 != nil {
		value1 := node1.Value.(*polynomial)
		node2 = poly2.First()
		for node2 != nil {
			value2 := node2.Value.(*polynomial)
			node3 = poly3.Insert(&polynomial{
				Coefficient: value1.Coefficient * value2.Coefficient,
				Exponent:    value1.Exponent + value2.Exponent,
			}, node3)
			node2 = node2.Next
		}
		node1 = node1.Next
	}
	// Fold duplicate exponents into curr and remove the merged nodes.
	curr := poly3.First()
	for curr != nil && curr.Next != nil {
		currValue := curr.Value.(*polynomial)
		// dup walks the nodes that may need to be merged into curr.
		dup := curr.Next
		for dup != nil {
			// Capture the successor before a possible Remove; the old code
			// left dup pointing at the removed node, re-testing it on the
			// next iteration (double-counting its coefficient or spinning
			// forever, depending on the list's Remove semantics).
			next := dup.Next
			dupValue := dup.Value.(*polynomial)
			if currValue.Exponent == dupValue.Exponent {
				currValue.Coefficient += dupValue.Coefficient
				poly3.Remove(dup)
			}
			dup = next
		}
		curr = curr.Next
	}
	return poly3
}
// main builds the two example polynomials from the header comment and prints
// their sum and product.
func main() {
	// f1(x) = 5x^2 + 4x^1 + 2x^0
	poly1 := singlylinkedlist.New()
	node := poly1.Insert(&polynomial{Coefficient: 5, Exponent: 2}, poly1.Head())
	node = poly1.Insert(&polynomial{Coefficient: 4, Exponent: 1}, node)
	node = poly1.Insert(&polynomial{Coefficient: 2, Exponent: 0}, node)
	// f2(x) = 5x^1 + 5x^0
	poly2 := singlylinkedlist.New()
	node = poly2.Insert(&polynomial{Coefficient: 5, Exponent: 1}, poly2.Head())
	node = poly2.Insert(&polynomial{Coefficient: 5, Exponent: 0}, node)
	for _, value := range addPolynomial(poly1, poly2).Values() {
		fmt.Printf("%v ", value)
	}
	fmt.Println()
	for _, value := range multiplyPolynomial(poly1, poly2).Values() {
		fmt.Printf("%v ", value)
	}
}
package quadtree
import (
vec "github.com/etic4/vecmath"
rl "github.com/gen2brain/raylib-go/raylib"
)
// Centered is an item that can be stored in the quadtree: anything with a
// center point, half-extents, and an intersection test against other
// Centered items.
type Centered interface {
	Center() vec.Vec2
	Width() float64
	Height() float64
	Intersect(Centered) bool
}
// Quadtree is a node of a region quadtree. Only the root node owns
// pointsMap; every node keeps a pointer back to the root to reach it.
type Quadtree struct {
	points []Centered
	maxPoints int
	divided bool
	rect *Rectangle
	ne *Quadtree
	nw *Quadtree
	se *Quadtree
	sw *Quadtree
	root *Quadtree
	pointsMap map[vec.Vec2][]*Quadtree // for each stored item (keyed by its center),
	// the list of quadtree nodes that hold it
}
// NewQuadtree returns a new Quadtree covering rect that holds at most
// maxPoints items per node. Pass nil as root to create a root node, which
// then owns the shared pointsMap; internal subdivisions pass the real root.
func NewQuadtree(rect *Rectangle, maxPoints int, root *Quadtree) *Quadtree {
	q := &Quadtree{}
	q.rect = rect
	q.maxPoints = maxPoints
	q.root = root
	if root == nil {
		// This node is the root: it refers to itself and owns the map.
		q.root = q
		q.pointsMap = map[vec.Vec2][]*Quadtree{}
	}
	return q
}
// Size returns the total number of entries stored in this node and all of
// its descendants. An item held by several leaves is counted once per leaf.
func (q *Quadtree) Size() int {
	total := len(q.points)
	if q.divided {
		for _, child := range []*Quadtree{q.ne, q.nw, q.se, q.sw} {
			total += child.Size()
		}
	}
	return total
}
// GetQuadtreesFor returns the list of Quadtree nodes holding an item whose
// center is v, or nil if there is none. It consults the root's bookkeeping
// map so it works from any node; the previous code read the local pointsMap,
// which is nil on every non-root node.
func (q *Quadtree) GetQuadtreesFor(v vec.Vec2) []*Quadtree {
	return q.root.pointsMap[v]
}
// Insert adds c to the quadtree. The item is stored in this node if there is
// room; otherwise the node is split into four quadrants and the item is
// offered to each child (an item overlapping quadrant boundaries may be
// stored in several leaves). Every placement is recorded in the root's
// pointsMap, keyed by the item's center, so Remove can find it later.
func (q *Quadtree) Insert(c Centered) {
	if !q.Intersect(c) {
		return
	}
	if len(q.points) < q.maxPoints {
		q.points = append(q.points, c)
		// append on a missing key yields a fresh slice, so no explicit
		// map-entry initialisation is needed.
		q.root.pointsMap[c.Center()] = append(q.root.pointsMap[c.Center()], q)
	} else {
		if !q.divided {
			// Split into four equal quadrants. Children inherit this
			// node's capacity instead of the previously hard-coded 4.
			W2 := q.rect.W / 2
			H2 := q.rect.H / 2
			rect := NewRectangleCentered(q.rect.C.Add(vec.Vec2{X: -W2, Y: -H2}), W2, H2)
			q.ne = NewQuadtree(rect, q.maxPoints, q.root)
			rect = NewRectangleCentered(q.rect.C.Add(vec.Vec2{X: W2, Y: -H2}), W2, H2)
			q.nw = NewQuadtree(rect, q.maxPoints, q.root)
			rect = NewRectangleCentered(q.rect.C.Add(vec.Vec2{X: W2, Y: H2}), W2, H2)
			q.se = NewQuadtree(rect, q.maxPoints, q.root)
			rect = NewRectangleCentered(q.rect.C.Add(vec.Vec2{X: -W2, Y: H2}), W2, H2)
			q.sw = NewQuadtree(rect, q.maxPoints, q.root)
			q.divided = true
		}
		q.ne.Insert(c)
		q.nw.Insert(c)
		q.se.Insert(c)
		q.sw.Insert(c)
	}
}
// Remove deletes every stored copy of c from the quadtree. The nodes holding
// c are found through the root's pointsMap (keyed by c's center); each
// node's matching entry is removed with a swap-delete, then the map entry is
// dropped.
func (q *Quadtree) Remove(c Centered) {
	if qtrees, ok := q.root.pointsMap[c.Center()]; ok {
		for _, qtree := range qtrees {
			i := 0
			length := len(qtree.points)
			for i < length && qtree.points[i].Center() != c.Center() {
				i++
			}
			if i < length {
				// Swap-delete: move the last element into slot i, nil the
				// tail so the GC can reclaim it, and shrink the slice.
				qtree.points[i] = qtree.points[length-1]
				qtree.points[length-1] = nil
				qtree.points = qtree.points[:length-1]
			}
		}
		// The bookkeeping map lives on the root; deleting from q.pointsMap
		// (as the old code did) is a silent no-op when Remove is called on
		// a child node, leaking the entry.
		delete(q.root.pointsMap, c.Center())
	}
}
// Clear empties this node and all descendants, keeping the subdivision
// structure intact, and resets the root's bookkeeping map.
// NOTE(review): each recursive call re-allocates root.pointsMap, so the map
// is reset once per node — redundant but harmless; confirm before
// simplifying.
func (q *Quadtree) Clear() {
	q.points = nil
	q.points = []Centered{}
	if q.divided {
		q.ne.Clear()
		q.nw.Clear()
		q.se.Clear()
		q.sw.Clear()
	}
	q.root.pointsMap = nil
	q.root.pointsMap = map[vec.Vec2][]*Quadtree{}
}
// Intersect reports whether this node's region overlaps r. W and H are
// treated as half-extents (center ± W/H), matching how Insert sizes the
// quadrants and how Draw renders them at W*2 x H*2 pixels.
func (q *Quadtree) Intersect(r Centered) bool {
	rect := q.rect
	return !(rect.Center().X+rect.W < r.Center().X-r.Width() ||
		rect.Center().X-rect.W > r.Center().X+r.Width() ||
		rect.Center().Y+rect.H < r.Center().Y-r.Height() ||
		rect.Center().Y-rect.H > r.Center().Y+r.Height())
}
// QueryRange returns all items stored in nodes whose region overlaps rect.
// Items are not filtered individually against rect, so the result is a
// superset of the exact matches (callers must do the precise test).
func (q *Quadtree) QueryRange(rect Centered) []Centered {
	res := []Centered{}
	if q.Intersect(rect) {
		res = append(res, q.points...) // no per-item check: see doc comment
		if q.divided {
			res = append(res, q.ne.QueryRange(rect)...)
			res = append(res, q.nw.QueryRange(rect)...)
			res = append(res, q.se.QueryRange(rect)...)
			res = append(res, q.sw.QueryRange(rect)...)
		}
	}
	return res
}
// Draw renders this node's outline and, recursively, every subdivision,
// as white rectangles (debug visualisation).
func (q *Quadtree) Draw() {
	pos := q.rect.Center().Sub(vec.Vec2{X: q.rect.W, Y: q.rect.H})
	rl.DrawRectangleLines(int32(pos.X), int32(pos.Y), int32(q.rect.W*2), int32(q.rect.H*2), rl.White)
	if q.divided {
		q.ne.Draw()
		q.nw.Draw()
		q.se.Draw()
		q.sw.Draw()
	}
}
// DrawOne renders only this node's outline, without recursing into children.
func (q *Quadtree) DrawOne() {
	pos := q.rect.Center().Sub(vec.Vec2{X: q.rect.W, Y: q.rect.H})
	rl.DrawRectangleLines(int32(pos.X), int32(pos.Y), int32(q.rect.W*2), int32(q.rect.H*2), rl.White)
}
package charter
import (
"fmt"
"strconv"
gochart "github.com/regorov/go-chart"
"github.com/regorov/go-chart/drawing"
)
// LabelGetterFunc extracts a display string from a curve.
type LabelGetterFunc func(c *Curve) string
// LabelFunc draws a label for curve c onto the renderer within a
// width x height pixel box.
type LabelFunc func(c *Curve, r gochart.Renderer, width, height int)
// LabelMaxMin draws the curve's minimum value (red) in the top-left corner
// and its maximum value (green) in the bottom-right corner; "-" is drawn
// when the corresponding index is unset (negative).
func LabelMaxMin(c *Curve, rend gochart.Renderer, width, height int) {
	minlabel := "-"
	maxlabel := "-"
	if c.MinIndex >= 0 {
		minlabel = strconv.FormatFloat(c.Value[c.MinIndex], 'f', 0, 64)
	}
	if c.MaxIndex >= 0 {
		maxlabel = strconv.FormatFloat(c.Value[c.MaxIndex], 'f', 0, 64)
	}
	rend.SetFontSize(9)
	//rend.SetFontColor(gochart.ColorRed) // Color{R: 0x00, G: 0x66, B: 0xff, A: 255})
	rend.SetFontColor(drawing.Color{R: 255, G: 0, B: 0, A: 255})
	rend.Text(minlabel, 4, 12)
	rend.SetFontColor(drawing.Color{R: 0x23, G: 0xd1, B: 0x60, A: 255}) // (drawing.Color{R: 0x00, G: 0xFF, B: 0x00, A: 255})
	rend.Text(maxlabel, width-2-rend.MeasureText(maxlabel).Right, height-2)
}
// LabelLastMaxMin draws min/max like LabelMaxMin, plus the most recent value
// (black) centered at the top. Nothing extra is drawn when the curve has no
// last value.
func LabelLastMaxMin(c *Curve, rend gochart.Renderer, width, height int) {
	LabelMaxMin(c, rend, width, height)
	v, ok := c.lastValue()
	if !ok {
		return
	}
	label := fmt.Sprintf("%0.2f", v)
	rend.SetFontColor(drawing.ColorBlack)
	rend.Text(label, width/2-2-rend.MeasureText(label).Right/2, 10)
}
// LabelLast draws the curve's most recent value (blue) in the top-right
// corner; it is a no-op for an empty curve.
func LabelLast(c *Curve, rend gochart.Renderer, width, height int) {
	if len(c.Value) == 0 {
		return
	}
	label := fmt.Sprintf("%0.2f", c.Value[len(c.Value)-1])
	rend.SetFontColor(drawing.ColorBlue)
	rend.SetFontSize(8)
	rend.Text(label, width-2-rend.MeasureText(label).Right, 10)
}
// NOTE(review): dead duplicate of LabelLast left over from an earlier
// revision; kept for history.
// func LabelLast(c *Curve, rend gochart.Renderer, width, height int) {
// 	if len(c.Value) == 0 {
// 		return
// 	}
// 	label := fmt.Sprintf("%0.2f", c.Value[len(c.Value)-1])
// 	rend.SetFontColor(drawing.ColorBlue)
// 	rend.SetFontSize(8)
// 	rend.Text(label, width-2-rend.MeasureText(label).Right, 10)
// }
// Label builds a LabelFunc that draws the string produced by label at (x, y)
// in the given color. Negative x anchors the text to the right edge;
// negative y anchors it to the bottom edge.
func Label(x, y int, label LabelGetterFunc, color drawing.Color) LabelFunc {
	return func(c *Curve, rend gochart.Renderer, width, height int) {
		x1, y1 := x, y
		s := label(c)
		mt := rend.MeasureText(s)
		if x < 0 {
			// Anchor to the right edge, accounting for the text width.
			x1 = width + x - mt.Width()
		}
		if y < 0 {
			// Anchor to the bottom edge.
			y1 = height + y
		}
		rend.SetFontColor(color)
		rend.Text(s, x1, y1)
	}
}
// Color wraps a fixed color in a provider function.
func Color(c drawing.Color) func() drawing.Color {
	return func() drawing.Color {
		return c
	}
}
// Last formats the curve's most recent value with two decimals, or returns
// the empty string for an empty curve.
func Last(c *Curve) string {
	if len(c.Value) == 0 {
		return ""
	}
	return fmt.Sprintf("%0.2f", c.Value[len(c.Value)-1])
}
// Min formats the curve's minimum value with two decimals, or "-" when
// MinIndex is unset (negative).
func Min(c *Curve) string {
	if c.MinIndex < 0 {
		return "-"
	}
	return fmt.Sprintf("%0.2f", c.Value[c.MinIndex])
}
// Max formats the curve's maximum value with two decimals, or "-" when
// MaxIndex is unset (negative).
func Max(c *Curve) string {
	if c.MaxIndex < 0 {
		return "-"
	}
	return fmt.Sprintf("%0.2f", c.Value[c.MaxIndex])
}
// LabelMinMaxInterval formats the curve's observed value range as
// "min...max". Either side is left empty when the corresponding index is
// unset (negative), mirroring how Min/Max treat missing values.
func LabelMinMaxInterval(c *Curve) string {
	min, max := "", ""
	if c.MinIndex >= 0 {
		min = strconv.FormatFloat(c.Value[c.MinIndex], 'f', 0, 64)
	}
	if c.MaxIndex >= 0 { // was c.MinIndex: a copy-paste bug guarded the wrong index
		max = strconv.FormatFloat(c.Value[c.MaxIndex], 'f', 0, 64)
	}
	return min + "..." + max
}
// MaxFloor formats the curve's maximum value with zero decimals, or "-" when
// MaxIndex is unset (negative).
func MaxFloor(c *Curve) string {
	if c.MaxIndex < 0 {
		return "-"
	}
	return strconv.FormatFloat(c.Value[c.MaxIndex], 'f', 0, 64)
}
package iso20022
// Amount of money associated with a service.
// The Add* methods allocate and return nested components; the Set* methods
// assign scalar fields from string values (generated ISO 20022 style).
type Fee2 struct {
	// Type of fee (charge/commission).
	Type *ChargeType5Choice `xml:"Tp"`
	// Method used to calculate the fee (charge/commission).
	Basis *ChargeBasis2Choice `xml:"Bsis,omitempty"`
	// Standard fee (charge/commission) amount as specified in the fund prospectus or agreed for the account.
	StandardAmount *ActiveCurrencyAndAmount `xml:"StdAmt,omitempty"`
	// Standard fee (charge/commission) rate used to calculate the amount of the charge or fee, as specified in the fund prospectus or agreed for the account.
	StandardRate *PercentageRate `xml:"StdRate,omitempty"`
	// Discount or waiver applied to the fee (charge/commission).
	DiscountDetails *ChargeOrCommissionDiscount1 `xml:"DscntDtls,omitempty"`
	// Fee (charge/commission) amount applied to the transaction.
	AppliedAmount *ActiveCurrencyAndAmount `xml:"ApldAmt,omitempty"`
	// Final rate used to calculate the fee (charge/commission) amount.
	AppliedRate *PercentageRate `xml:"ApldRate,omitempty"`
	// Reference to a sales agreement that overrides normal processing or the Service Level Agreement (SLA), such as a fee (charge/commission).
	NonStandardSLAReference *Max35Text `xml:"NonStdSLARef,omitempty"`
	// Party entitled to the amount of money resulting from a fee (charge/commission).
	RecipientIdentification *PartyIdentification113 `xml:"RcptId,omitempty"`
	// Indicates the information is provided for information purposes only. When the value is ‘false’ or ‘0’ the amount provided is taken into consideration in the transaction overhead. When the value is ‘true’ or ‘1’ the amount provided is not taken into consideration in the transaction overhead.
	InformativeIndicator *YesNoIndicator `xml:"InftvInd"`
}
// AddType allocates and returns the fee type choice.
func (f *Fee2) AddType() *ChargeType5Choice {
	f.Type = new(ChargeType5Choice)
	return f.Type
}
// AddBasis allocates and returns the calculation-basis choice.
func (f *Fee2) AddBasis() *ChargeBasis2Choice {
	f.Basis = new(ChargeBasis2Choice)
	return f.Basis
}
// SetStandardAmount sets the standard fee amount and its currency.
func (f *Fee2) SetStandardAmount(value, currency string) {
	f.StandardAmount = NewActiveCurrencyAndAmount(value, currency)
}
// SetStandardRate sets the standard fee rate.
func (f *Fee2) SetStandardRate(value string) {
	f.StandardRate = (*PercentageRate)(&value)
}
// AddDiscountDetails allocates and returns the discount details.
func (f *Fee2) AddDiscountDetails() *ChargeOrCommissionDiscount1 {
	f.DiscountDetails = new(ChargeOrCommissionDiscount1)
	return f.DiscountDetails
}
// SetAppliedAmount sets the applied fee amount and its currency.
func (f *Fee2) SetAppliedAmount(value, currency string) {
	f.AppliedAmount = NewActiveCurrencyAndAmount(value, currency)
}
// SetAppliedRate sets the final applied fee rate.
func (f *Fee2) SetAppliedRate(value string) {
	f.AppliedRate = (*PercentageRate)(&value)
}
// SetNonStandardSLAReference sets the overriding sales-agreement reference.
func (f *Fee2) SetNonStandardSLAReference(value string) {
	f.NonStandardSLAReference = (*Max35Text)(&value)
}
// AddRecipientIdentification allocates and returns the fee recipient.
func (f *Fee2) AddRecipientIdentification() *PartyIdentification113 {
	f.RecipientIdentification = new(PartyIdentification113)
	return f.RecipientIdentification
}
// SetInformativeIndicator sets whether the fee is informational only.
func (f *Fee2) SetInformativeIndicator(value string) {
	f.InformativeIndicator = (*YesNoIndicator)(&value)
}
// Package ui provides methods to draw a user interface onto the
// the screen and manage resizing.
package ui
const (
	// Virtual resolution used when DrawMode is Scaled: UI coordinates are
	// expressed in this 854x480 space and mapped onto the real window.
	scaledWidth, scaledHeight = 854, 480
)
var (
	// DrawMode is the scaling mode used.
	DrawMode = Scaled
	// Scale controls the scaling manually when DrawMode is Unscaled
	Scale = 1.0
	// drawables is the global draw list, in insertion order.
	drawables []drawRef
)
// ForceDraw invalidates the cached window size (via the lastWidth sentinel)
// and refreshes the contents of all Text and Formatted drawables so the
// whole UI is regenerated on the next Draw call.
func ForceDraw() {
	lastWidth = -1
	for _, d := range drawables {
		switch dd := d.Drawable.(type) {
		case *Text:
			dd.Update(dd.Value())
		case *Formatted:
			dd.Update(dd.value)
		}
	}
}
// drawRef wraps a Drawable for storage in the global draw list.
type drawRef struct {
	Drawable
}
// Region is an area for a Drawable to draw to, in scaled UI coordinates.
type Region struct {
	X, Y, W, H float64
}
// Drawable is a scalable element that can be drawn to an
// area.
type Drawable interface {
	Draw(r Region, delta float64)
	Size() (float64, float64)
	// Offset is the offset from the attachment point on
	// each axis
	Offset() (float64, float64)
	ShouldDraw() bool
	AttachedTo() Drawable
	AttachTo(d Drawable)
	Attachment() (vAttach, hAttach AttachPoint)
	Layer() int
	OnRemove(d Drawable)
	SetRemoveHook(func(Drawable))
	// Unexported dirty-tracking hooks used by the draw loop.
	isDirty() bool
	flagDirty()
	clearDirty()
}
// Interactable is a Drawable that also reacts to clicks and hover events.
type Interactable interface {
	Click(r Region, x, y float64)
	Hover(r Region, x, y float64, over bool)
}
// AddDrawable adds the drawable to the draw list and marks it dirty so it is
// rendered on the next frame.
func AddDrawable(d Drawable) {
	d.flagDirty()
	drawables = append(drawables, drawRef{Drawable: d})
}
// screen is the full virtual drawing area in scaled coordinates.
var screen = Region{W: scaledWidth, H: scaledHeight}
var (
	// Last window size seen by Draw; a change forces a relayout.
	lastWidth, lastHeight int
	forceDirty            bool
)
// Draw draws all drawables in the draw list to the screen. width and height
// are the real window size; delta is the frame time.
func Draw(width, height int, delta float64) {
	// Scale factors from real pixels to the virtual coordinate space.
	sw := scaledWidth / float64(width)
	sh := scaledHeight / float64(height)
	if DrawMode == Unscaled {
		sw, sh = Scale, Scale
	}
	for _, d := range drawables {
		if !d.ShouldDraw() {
			continue
		}
		r := getDrawRegion(d, sw, sh)
		// Cull drawables that fall entirely outside the virtual screen.
		if r.intersects(screen) {
			d.Draw(r, delta)
		}
	}
	for _, d := range drawables {
		// Handle parents that aren't drawing too
		for r := d.Drawable; r != nil; r = r.AttachedTo() {
			r.clearDirty()
		}
	}
	forceDirty = false
	if lastWidth != width || lastHeight != height {
		// Window size changed: force everything dirty for the next frame.
		forceDirty = true
		lastWidth, lastHeight = width, height
	}
}
// intersects reports whether the two regions overlap.
func (r Region) intersects(o Region) bool {
	return !(r.X+r.W < o.X ||
		r.X > o.X+o.W ||
		r.Y+r.H < o.Y ||
		r.Y > o.Y+o.H)
}
// Hover calls Hover on all interactables, passing the cursor position in
// scaled coordinates and whether the cursor is over each one. Iteration is
// back-to-front so topmost (last-added) drawables are visited first.
func Hover(x, y float64, width, height int) {
	sw := scaledWidth / float64(width)
	sh := scaledHeight / float64(height)
	if DrawMode == Unscaled {
		sw, sh = Scale, Scale
	}
	// Convert window coordinates to the scaled UI space.
	x = (x / float64(width)) * scaledWidth
	y = (y / float64(height)) * scaledHeight
	for i := range drawables {
		d := drawables[len(drawables)-1-i]
		inter, ok := d.Drawable.(Interactable)
		if !ok {
			continue
		}
		r := getDrawRegion(d, sw, sh)
		if x >= r.X && x <= r.X+r.W && y >= r.Y && y <= r.Y+r.H {
			inter.Hover(r, x, y, true)
		} else {
			inter.Hover(r, x, y, false)
		}
	}
}
// Click calls Click on the topmost interactable at the passed location and
// stops there (unlike Hover, which notifies every interactable).
func Click(x, y float64, width, height int) {
	sw := scaledWidth / float64(width)
	sh := scaledHeight / float64(height)
	if DrawMode == Unscaled {
		sw, sh = Scale, Scale
	}
	// Convert window coordinates to the scaled UI space.
	x = (x / float64(width)) * scaledWidth
	y = (y / float64(height)) * scaledHeight
	for i := range drawables {
		d := drawables[len(drawables)-1-i]
		inter, ok := d.Drawable.(Interactable)
		if !ok {
			continue
		}
		r := getDrawRegion(d, sw, sh)
		if x >= r.X && x <= r.X+r.W && y >= r.Y && y <= r.Y+r.H {
			inter.Click(r, x, y)
			break
		}
	}
}
// Intersects returns whether the point x,y (window coordinates) falls inside
// the drawable, and if so the hit position in the drawable's own coordinate
// space (its Size units).
func Intersects(d Drawable, x, y float64, width, height int) (float64, float64, bool) {
	sw := scaledWidth / float64(width)
	sh := scaledHeight / float64(height)
	if DrawMode == Unscaled {
		sw, sh = Scale, Scale
	}
	x = (x / float64(width)) * scaledWidth
	y = (y / float64(height)) * scaledHeight
	r := getDrawRegion(d, sw, sh)
	if x >= r.X && x <= r.X+r.W && y >= r.Y && y <= r.Y+r.H {
		w, h := d.Size()
		// Map the hit back into the drawable's local coordinates.
		ox := ((x - r.X) / r.W) * w
		oy := ((y - r.Y) / r.H) * h
		return ox, oy, true
	}
	return 0, 0, false
}
// getDrawRegion computes the screen-space region for d by resolving its
// attachment (alignment + offset) recursively against its parent's region,
// or against the whole screen for unattached drawables.
func getDrawRegion(d Drawable, sw, sh float64) Region {
	parent := d.AttachedTo()
	var superR Region
	if parent != nil {
		superR = getDrawRegion(parent, sw, sh)
	} else {
		superR = screen
	}
	r := Region{}
	w, h := d.Size()
	ox, oy := d.Offset()
	r.W = w * sw
	r.H = h * sh
	vAttach, hAttach := d.Attachment()
	switch hAttach {
	case Left:
		r.X = ox * sw
	case Middle:
		r.X = (superR.W / 2) - (r.W / 2) + ox*sw
	case Right:
		r.X = superR.W - ox*sw - r.W
	}
	// NOTE(review): the vertical switch reuses Right to mean "bottom" —
	// presumably the AttachPoint constants alias; confirm before renaming.
	switch vAttach {
	case Top:
		r.Y = oy * sh
	case Middle:
		r.Y = (superR.H / 2) - (r.H / 2) + oy*sh
	case Right:
		r.Y = superR.H - oy*sh - r.H
	}
	// Make the region absolute by adding the parent's origin.
	r.X += superR.X
	r.Y += superR.Y
	return r
}
// Remove removes the drawable from the screen, firing its remove hook first.
// It is a no-op if the drawable is not in the draw list.
func Remove(d Drawable) {
	d.OnRemove(d)
	for i, dd := range drawables {
		if dd.Drawable == d {
			drawables = append(drawables[:i], drawables[i+1:]...)
			return
		}
	}
}
// baseElement supplies the common Drawable plumbing (attachment, layering,
// visibility, dirty tracking) for concrete UI elements to embed.
type baseElement struct {
	parent           Drawable
	visible          bool
	vAttach, hAttach AttachPoint
	layer            int
	dirty            bool
	isNew            bool
	data             []byte
	removeHook       func(d Drawable)
}
// OnRemove fires the remove hook, if one was registered.
func (b *baseElement) OnRemove(d Drawable) {
	if b.removeHook != nil {
		b.removeHook(d)
	}
}
// SetRemoveHook registers a callback invoked when the element is removed.
func (b *baseElement) SetRemoveHook(r func(Drawable)) {
	b.removeHook = r
}
// Attachment returns the sides where this element is attached too.
func (b *baseElement) Attachment() (vAttach, hAttach AttachPoint) {
	return b.vAttach, b.hAttach
}
// ShouldDraw returns whether this should be drawn at this time.
func (b *baseElement) ShouldDraw() bool {
	return b.visible
}
// SetDraw toggles visibility, marking the element dirty on change.
func (b *baseElement) SetDraw(shouldDraw bool) {
	if shouldDraw != b.visible {
		b.visible = shouldDraw
		b.dirty = true
	}
}
// AttachedTo returns the Drawable this is attached to or nil.
func (b *baseElement) AttachedTo() Drawable {
	return b.parent
}
// AttachTo parents this element to d, marking it dirty on change.
func (b *baseElement) AttachTo(d Drawable) {
	if b.parent != d {
		b.parent = d
		b.dirty = true
	}
}
// Layer returns the element's draw layer, accumulated through its parents.
func (b *baseElement) Layer() int {
	if b.parent != nil {
		return b.layer + b.parent.Layer()
	}
	return b.layer
}
// SetLayer changes the element's own layer, marking it dirty on change.
func (b *baseElement) SetLayer(l int) {
	if b.layer != l {
		b.layer = l
		b.dirty = true
	}
}
// isDirty reports whether this element or any ancestor needs redrawing.
func (b *baseElement) isDirty() bool {
	return b.dirty || (b.parent != nil && b.parent.isDirty())
}
// flagDirty marks the element as needing a redraw.
func (b *baseElement) flagDirty() {
	b.dirty = true
}
// clearDirty resets the dirty flag after a frame has been drawn.
func (b *baseElement) clearDirty() {
	b.dirty = false
}
package lshensemble
import "errors"
var (
	// errDomainSizeOrder is returned when the input channel violates the
	// ascending-size precondition of the bootstrap functions.
	errDomainSizeOrder = errors.New("Domain records must be sorted in ascending order of size")
)
// bootstrapOptimalPartitions derives numPart optimal partitions from the
// domain size distribution and returns them with the number of distinct
// sizes observed.
func bootstrapOptimalPartitions(domains <-chan *DomainRecord, numPart int) ([]Partition, int) {
	sizes, counts := computeSizeDistribution(domains)
	partitions := optimalPartitions(sizes, counts, numPart)
	return partitions, len(sizes)
}
// bootstrapOptimal streams size-sorted domains into the pre-partitioned
// index, advancing to the next partition whenever a domain outgrows the
// current one, then builds the index. It errors if the domains are not in
// ascending size order or do not fit the existing partitions.
func bootstrapOptimal(index *LshEnsemble, sortedDomains <-chan *DomainRecord) error {
	var currPart int
	var currSize int
	for rec := range sortedDomains {
		if currSize > rec.Size {
			return errDomainSizeOrder
		}
		currSize = rec.Size
		// Move to the next partition once the current one's upper bound
		// is exceeded.
		if currSize > index.Partitions[currPart].Upper {
			currPart++
		}
		if currPart >= len(index.Partitions) ||
			!(index.Partitions[currPart].Lower <= currSize &&
				currSize <= index.Partitions[currPart].Upper) {
			return errors.New("Domain records does not match the existing partitions")
		}
		index.Add(rec.Key, rec.Signature, currPart)
	}
	index.Index()
	return nil
}
// BootstrapLshEnsembleOptimal builds an index from domains using optimal
// partitioning.
// The returned index consists of MinHash LSH implemented using LshForest.
// numPart is the number of partitions to create.
// numHash is the number of hash functions in MinHash.
// maxK is the maximum value for the MinHash parameter K - the number of hash
// functions per "band".
// sortedDomainFactory is factory function that returns a DomainRecord channel
// emitting domains in sorted order by their sizes. It is called twice: once
// to compute the partitions and once to populate the index, so it must be
// able to replay the stream.
func BootstrapLshEnsembleOptimal(numPart, numHash, maxK int,
	sortedDomainFactory func() <-chan *DomainRecord) (*LshEnsemble, error) {
	partitions, count := bootstrapOptimalPartitions(sortedDomainFactory(), numPart)
	index := NewLshEnsemble(partitions, numHash, maxK, count)
	err := bootstrapOptimal(index, sortedDomainFactory())
	if err != nil {
		return nil, err
	}
	return index, nil
}
// BootstrapLshEnsemblePlusOptimal builds an index from domains using optimal
// partitioning.
// The returned index consists of MinHash LSH implemented using LshForestArray.
// numPart is the number of partitions to create.
// numHash is the number of hash functions in MinHash.
// maxK is the maximum value for the MinHash parameter K - the number of hash
// functions per "band".
// sortedDomainFactory is factory function that returns a DomainRecord channel
// emitting domains in sorted order by their sizes. It is called twice, so it
// must be able to replay the stream.
func BootstrapLshEnsemblePlusOptimal(numPart, numHash, maxK int,
	sortedDomainFactory func() <-chan *DomainRecord) (*LshEnsemble, error) {
	partitions, count := bootstrapOptimalPartitions(sortedDomainFactory(), numPart)
	index := NewLshEnsemblePlus(partitions, numHash, maxK, count)
	err := bootstrapOptimal(index, sortedDomainFactory())
	if err != nil {
		return nil, err
	}
	return index, nil
}
// bootstrapEquiDepth streams size-sorted domains into the index, filling
// each partition with approximately totalNumDomains/numPart records and
// recording the observed size bounds on the partitions; the last partition
// absorbs the remainder. Errors if the domains are not in ascending size
// order.
func bootstrapEquiDepth(index *LshEnsemble, totalNumDomains int, sortedDomains <-chan *DomainRecord) error {
	numPart := len(index.Partitions)
	depth := totalNumDomains / numPart // integer division: remainder goes to the last partition
	var currDepth, currPart int
	var currSize int
	for rec := range sortedDomains {
		if currSize > rec.Size {
			return errDomainSizeOrder
		}
		currSize = rec.Size
		index.Add(rec.Key, rec.Signature, currPart)
		currDepth++
		index.Partitions[currPart].Upper = rec.Size
		if currDepth >= depth && currPart < numPart-1 {
			// Current partition is full: open the next one at this size.
			currPart++
			index.Partitions[currPart].Lower = rec.Size
			currDepth = 0
		}
	}
	index.Index()
	return nil
}
// BootstrapLshEnsembleEquiDepth builds an index from a channel of domains
// using equi-depth partitions -- partitions have approximately the same
// number of domains.
// The returned index consists of MinHash LSH implemented using LshForest.
// numPart is the number of partitions to create.
// numHash is the number of hash functions in MinHash.
// maxK is the maximum value for the MinHash parameter K - the number of hash functions per "band".
// sortedDomains is a DomainRecord channel emitting domains in sorted order by their sizes.
func BootstrapLshEnsembleEquiDepth(numPart, numHash, maxK, totalNumDomains int,
	sortedDomains <-chan *DomainRecord) (*LshEnsemble, error) {
	index := NewLshEnsemble(make([]Partition, numPart), numHash, maxK,
		totalNumDomains)
	err := bootstrapEquiDepth(index, totalNumDomains, sortedDomains)
	if err != nil {
		return nil, err
	}
	return index, nil
}
// BootstrapLshEnsemblePlusEquiDepth builds an index from a channel of domains
// using equi-depth partitions -- partitions have approximately the same
// number of domains.
// The returned index consists of MinHash LSH implemented using LshForestArray.
// numPart is the number of partitions to create.
// numHash is the number of hash functions in MinHash.
// maxK is the maximum value for the MinHash parameter K - the number of hash functions per "band".
// sortedDomains is a DomainRecord channel emitting domains in sorted order by their sizes.
func BootstrapLshEnsemblePlusEquiDepth(numPart, numHash, maxK,
	totalNumDomains int, sortedDomains <-chan *DomainRecord) (*LshEnsemble, error) {
	index := NewLshEnsemblePlus(make([]Partition, numPart), numHash, maxK,
		totalNumDomains)
	err := bootstrapEquiDepth(index, totalNumDomains, sortedDomains)
	if err != nil {
		return nil, err
	}
	return index, nil
}
// Recs2Chan is a utility function that converts a DomainRecord slice in memory to a DomainRecord channel.
func Recs2Chan(recs []*DomainRecord) <-chan *DomainRecord {
c := make(chan *DomainRecord, 1000)
go func() {
for _, r := range recs {
c <- r
}
close(c)
}()
return c
} | bootstrap.go | 0.682256 | 0.45175 | bootstrap.go | starcoder |
package lmath
import (
"fmt"
"math"
)
// Vec2 represents a 2D vector or point.
type Vec2 struct {
	X, Y float64
}
// String returns a string representation of this vector.
func (a Vec2) String() string {
	return fmt.Sprintf("Vec2(X=%f, Y=%f)", a.X, a.Y)
}
// AlmostEquals tells if a == b componentwise, using the specified epsilon
// tolerance.
func (a Vec2) AlmostEquals(b Vec2, epsilon float64) bool {
	return AlmostEqual(a.X, b.X, epsilon) && AlmostEqual(a.Y, b.Y, epsilon)
}
// Equals tells if a == b using the default EPSILON value.
func (a Vec2) Equals(b Vec2) bool {
	return a.AlmostEquals(b, EPSILON)
}
// Add performs a componentwise addition of the two vectors, returning a + b.
func (a Vec2) Add(b Vec2) Vec2 {
	return Vec2{a.X + b.X, a.Y + b.Y}
}
// AddScalar performs a componentwise scalar addition of a + b.
func (a Vec2) AddScalar(b float64) Vec2 {
	return Vec2{a.X + b, a.Y + b}
}
// Sub performs a componentwise subtraction of the two vectors, returning
// a - b.
func (a Vec2) Sub(b Vec2) Vec2 {
	return Vec2{a.X - b.X, a.Y - b.Y}
}
// SubScalar performs a componentwise scalar subtraction of a - b.
func (a Vec2) SubScalar(b float64) Vec2 {
	return Vec2{a.X - b, a.Y - b}
}
// Mul performs a componentwise multiplication of the two vectors, returning
// a * b.
func (a Vec2) Mul(b Vec2) Vec2 {
	return Vec2{a.X * b.X, a.Y * b.Y}
}
// MulScalar performs a componentwise scalar multiplication of a * b.
func (a Vec2) MulScalar(b float64) Vec2 {
	return Vec2{a.X * b, a.Y * b}
}
// Div performs a componentwise division of the two vectors, returning a / b.
func (a Vec2) Div(b Vec2) Vec2 {
	return Vec2{a.X / b.X, a.Y / b.Y}
}
// DivScalar performs a componentwise scalar division of a / b.
func (a Vec2) DivScalar(b float64) Vec2 {
	return Vec2{a.X / b, a.Y / b}
}
// IsNaN tells if any component of this vector is not a number.
func (a Vec2) IsNaN() bool {
	return math.IsNaN(a.X) || math.IsNaN(a.Y)
}
// Less tells if a is componentwise less than b:
//  return a.X < b.X && a.Y < b.Y
func (a Vec2) Less(b Vec2) bool {
	return a.X < b.X && a.Y < b.Y
}
// Greater tells if a is componentwise greater than b:
//  return a.X > b.X && a.Y > b.Y
func (a Vec2) Greater(b Vec2) bool {
	return a.X > b.X && a.Y > b.Y
}
// AnyLess tells if a is componentwise any less than b:
//  return a.X < b.X || a.Y < b.Y
func (a Vec2) AnyLess(b Vec2) bool {
	return a.X < b.X || a.Y < b.Y
}
// AnyGreater tells if a is componentwise any greater than b:
//  return a.X > b.X || a.Y > b.Y
func (a Vec2) AnyGreater(b Vec2) bool {
	return a.X > b.X || a.Y > b.Y
}
// Clamp clamps each value in the vector to the range of [min, max] and returns
// it.
func (a Vec2) Clamp(min, max float64) Vec2 {
	return Vec2{
		Clamp(a.X, min, max),
		Clamp(a.Y, min, max),
	}
}
// Radians converts each value in the vector from degrees to radians and
// returns it.
func (a Vec2) Radians() Vec2 {
	return Vec2{
		Radians(a.X),
		Radians(a.Y),
	}
}
// Degrees converts each value in the vector from radians to degrees and
// returns it.
func (a Vec2) Degrees() Vec2 {
	return Vec2{
		Degrees(a.X),
		Degrees(a.Y),
	}
}
// Rounded rounds each value in the vector to the nearest whole number and
// returns it.
func (a Vec2) Rounded() Vec2 {
	return Vec2{
		Rounded(a.X),
		Rounded(a.Y),
	}
}
// Dot returns the dot product of a and b.
func (a Vec2) Dot(b Vec2) float64 {
	return a.X*b.X + a.Y*b.Y
}
// Inverse returns the inverse (negated) vector -a.
func (a Vec2) Inverse() Vec2 {
	return Vec2{-a.X, -a.Y}
}
// LengthSq returns the magnitude squared of this vector, useful for comparing
// distances without the sqrt cost.
func (a Vec2) LengthSq() float64 {
	return a.X*a.X + a.Y*a.Y
}
// Length returns the magnitude of this vector. To avoid a sqrt call when
// strictly comparing distances, LengthSq can be used instead.
func (a Vec2) Length() float64 {
	return math.Sqrt(a.X*a.X + a.Y*a.Y)
}
// Normalized returns the normalized (i.e. length/magnitude == 1) vector of a.
// If the vector's length is zero (and division by zero would occur) then
// [Vec2Zero, false] is returned.
func (a Vec2) Normalized() (v Vec2, ok bool) {
	length := math.Sqrt(a.X*a.X + a.Y*a.Y)
	if Equal(length, 0) {
		return Vec2Zero, false
	}
	return Vec2{
		a.X / length,
		a.Y / length,
	}, true
}
// Proj returns a vector representing the projection of vector a onto b.
// Divides by zero if b is the zero vector.
func (a Vec2) Proj(b Vec2) Vec2 {
	return b.MulScalar(a.Dot(b) / b.LengthSq())
}
// Min returns a vector holding the smaller component of a and b per axis.
func (a Vec2) Min(b Vec2) Vec2 {
	r := b
	if a.X < b.X {
		r.X = a.X
	}
	if a.Y < b.Y {
		r.Y = a.Y
	}
	return r
}
// Max returns a vector holding the larger component of a and b per axis.
func (a Vec2) Max(b Vec2) Vec2 {
	r := b
	if a.X > b.X {
		r.X = a.X
	}
	if a.Y > b.Y {
		r.Y = a.Y
	}
	return r
}
// Lerp returns the linear interpolation between the vectors a and b. The t
// parameter is the amount to interpolate (0.0 - 1.0): t=0 yields a and t=1
// yields b.
//
// Fixes the previous implementation, which returned a.Mul(b.MulScalar(t))
// — a component-wise product a*b*t — rather than an interpolation (at t=1
// it returned a*b instead of b).
func (a Vec2) Lerp(b Vec2, t float64) Vec2 {
	return Vec2{
		a.X + (b.X-a.X)*t,
		a.Y + (b.Y-a.Y)*t,
	}
}
// Angle returns the angle in radians, measured from the positive X axis,
// of the direction vector pointing from a to b (i.e. the heading of b as
// seen from a).
//
// NOTE(review): despite the historical phrasing "angle between the two
// vectors", this is atan2 of the difference b-a, not the angle between a
// and b treated as directions from the origin — confirm callers expect
// the former.
func (a Vec2) Angle(b Vec2) float64 {
	return math.Atan2(b.Y-a.Y, b.X-a.X)
}
// TransformVec2 transforms the 2-component vector a by the matrix b
// without applying its translation component.
// This function assumes that the matrix is an affine transformation.
func (a Vec2) TransformVec2(b Mat3) Vec2 {
	x := a.X*b[0][0] + a.Y*b[1][0]
	y := a.X*b[0][1] + a.Y*b[1][1]
	return Vec2{x, y}
}

// TransformPointVec2 transforms the 2-component point a by the matrix b
// including its translation component.
// This function assumes that the matrix is an affine transformation.
func (a Vec2) TransformPointVec2(b Mat3) Vec2 {
	x := a.X*b[0][0] + a.Y*b[1][0] + b[2][0]
	y := a.X*b[0][1] + a.Y*b[1][1] + b[2][1]
	return Vec2{x, y}
}
// Common Vec2 constants.
var (
	Vec2One   = Vec2{1, 1} // both components one
	Vec2XUnit = Vec2{1, 0} // unit vector along the X axis
	Vec2YUnit = Vec2{0, 1} // unit vector along the Y axis
	Vec2Zero  = Vec2{0, 0} // zero vector
)
package flare
// GetAnimationEnd returns the AnimationEnd field if it's non-nil, zero value otherwise.
func (a *Animation) GetAnimationEnd() float64 {
	if a != nil && a.AnimationEnd != nil {
		return *a.AnimationEnd
	}
	return 0.0
}

// GetAnimationStart returns the AnimationStart field if it's non-nil, zero value otherwise.
func (a *Animation) GetAnimationStart() float64 {
	if a != nil && a.AnimationStart != nil {
		return *a.AnimationStart
	}
	return 0.0
}

// GetDuration returns the Duration field if it's non-nil, zero value otherwise.
func (a *Animation) GetDuration() float64 {
	if a != nil && a.Duration != nil {
		return *a.Duration
	}
	return 0.0
}

// GetFPS returns the FPS field if it's non-nil, zero value otherwise.
func (a *Animation) GetFPS() float64 {
	if a != nil && a.FPS != nil {
		return *a.FPS
	}
	return 0.0
}

// GetIsLooping returns the IsLooping field if it's non-nil, zero value otherwise.
func (a *Animation) GetIsLooping() bool {
	if a != nil && a.IsLooping != nil {
		return *a.IsLooping
	}
	return false
}

// GetName returns the Name field if it's non-nil, zero value otherwise.
func (a *Animation) GetName() string {
	if a != nil && a.Name != nil {
		return *a.Name
	}
	return ""
}

// GetType returns the Type field if it's non-nil, zero value otherwise.
func (a *Animation) GetType() string {
	if a != nil && a.Type != nil {
		return *a.Type
	}
	return ""
}
// GetClipContents returns the ClipContents field if it's non-nil, zero value otherwise.
func (a *Artboard) GetClipContents() bool {
	if a != nil && a.ClipContents != nil {
		return *a.ClipContents
	}
	return false
}

// GetColor returns the Color field.
func (a *Artboard) GetColor() *Color {
	if a != nil {
		return a.Color
	}
	return nil
}

// GetHeight returns the Height field if it's non-nil, zero value otherwise.
func (a *Artboard) GetHeight() float64 {
	if a != nil && a.Height != nil {
		return *a.Height
	}
	return 0.0
}

// GetName returns the Name field if it's non-nil, zero value otherwise.
func (a *Artboard) GetName() string {
	if a != nil && a.Name != nil {
		return *a.Name
	}
	return ""
}

// GetType returns the Type field if it's non-nil, zero value otherwise.
func (a *Artboard) GetType() string {
	if a != nil && a.Type != nil {
		return *a.Type
	}
	return ""
}

// GetWidth returns the Width field if it's non-nil, zero value otherwise.
func (a *Artboard) GetWidth() float64 {
	if a != nil && a.Width != nil {
		return *a.Width
	}
	return 0.0
}
// GetComponent returns the Component field if it's non-nil, zero value otherwise.
func (k *Key) GetComponent() int {
	if k != nil && k.Component != nil {
		return *k.Component
	}
	return 0
}

// GetCubicX1 returns the CubicX1 field if it's non-nil, zero value otherwise.
func (k *KeyNode) GetCubicX1() float64 {
	if k != nil && k.CubicX1 != nil {
		return *k.CubicX1
	}
	return 0.0
}

// GetCubicX2 returns the CubicX2 field if it's non-nil, zero value otherwise.
func (k *KeyNode) GetCubicX2() float64 {
	if k != nil && k.CubicX2 != nil {
		return *k.CubicX2
	}
	return 0.0
}

// GetCubicY1 returns the CubicY1 field if it's non-nil, zero value otherwise.
func (k *KeyNode) GetCubicY1() float64 {
	if k != nil && k.CubicY1 != nil {
		return *k.CubicY1
	}
	return 0.0
}

// GetCubicY2 returns the CubicY2 field if it's non-nil, zero value otherwise.
func (k *KeyNode) GetCubicY2() float64 {
	if k != nil && k.CubicY2 != nil {
		return *k.CubicY2
	}
	return 0.0
}

// GetInterpolatorType returns the InterpolatorType field.
func (k *KeyNode) GetInterpolatorType() *InterpolatorType {
	if k != nil {
		return k.InterpolatorType
	}
	return nil
}

// GetTime returns the Time field if it's non-nil, zero value otherwise.
func (k *KeyNode) GetTime() float64 {
	if k != nil && k.Time != nil {
		return *k.Time
	}
	return 0.0
}
// GetBlendMode returns the BlendMode field.
func (n *Node) GetBlendMode() *BlendModeType {
	if n != nil {
		return n.BlendMode
	}
	return nil
}

// GetCap returns the Cap field if it's non-nil, zero value otherwise.
func (n *Node) GetCap() float64 {
	if n != nil && n.Cap != nil {
		return *n.Cap
	}
	return 0.0
}

// GetClips returns the Clips field if it's non-nil, zero value otherwise.
func (n *Node) GetClips() []int {
	if n != nil && n.Clips != nil {
		return *n.Clips
	}
	return nil
}

// GetColor returns the Color field.
func (n *Node) GetColor() *Color {
	if n != nil {
		return n.Color
	}
	return nil
}

// GetDrawOrder returns the DrawOrder field if it's non-nil, zero value otherwise.
func (n *Node) GetDrawOrder() int {
	if n != nil && n.DrawOrder != nil {
		return *n.DrawOrder
	}
	return 0
}

// GetFillRule returns the FillRule field if it's non-nil, zero value otherwise.
func (n *Node) GetFillRule() int {
	if n != nil && n.FillRule != nil {
		return *n.FillRule
	}
	return 0
}

// GetHeight returns the Height field if it's non-nil, zero value otherwise.
func (n *Node) GetHeight() float64 {
	if n != nil && n.Height != nil {
		return *n.Height
	}
	return 0.0
}

// GetIsClosed returns the IsClosed field if it's non-nil, zero value otherwise.
func (n *Node) GetIsClosed() bool {
	if n != nil && n.IsClosed != nil {
		return *n.IsClosed
	}
	return false
}

// GetIsCollapsed returns the IsCollapsed field if it's non-nil, zero value otherwise.
func (n *Node) GetIsCollapsed() bool {
	if n != nil && n.IsCollapsed != nil {
		return *n.IsCollapsed
	}
	return false
}

// GetIsVisible returns the IsVisible field if it's non-nil, zero value otherwise.
func (n *Node) GetIsVisible() bool {
	if n != nil && n.IsVisible != nil {
		return *n.IsVisible
	}
	return false
}

// GetJoin returns the Join field if it's non-nil, zero value otherwise.
func (n *Node) GetJoin() float64 {
	if n != nil && n.Join != nil {
		return *n.Join
	}
	return 0.0
}

// GetName returns the Name field if it's non-nil, zero value otherwise.
func (n *Node) GetName() string {
	if n != nil && n.Name != nil {
		return *n.Name
	}
	return ""
}

// GetNumColorStops returns the NumColorStops field if it's non-nil, zero value otherwise.
func (n *Node) GetNumColorStops() int {
	if n != nil && n.NumColorStops != nil {
		return *n.NumColorStops
	}
	return 0
}

// GetOpacity returns the Opacity field if it's non-nil, zero value otherwise.
func (n *Node) GetOpacity() float64 {
	if n != nil && n.Opacity != nil {
		return *n.Opacity
	}
	return 0.0
}

// GetParent returns the Parent field if it's non-nil, zero value otherwise.
func (n *Node) GetParent() int {
	if n != nil && n.Parent != nil {
		return *n.Parent
	}
	return 0
}

// GetRotation returns the Rotation field if it's non-nil, zero value otherwise.
func (n *Node) GetRotation() float64 {
	if n != nil && n.Rotation != nil {
		return *n.Rotation
	}
	return 0.0
}

// GetSecondaryRadiusScale returns the SecondaryRadiusScale field if it's non-nil, zero value otherwise.
func (n *Node) GetSecondaryRadiusScale() float64 {
	if n != nil && n.SecondaryRadiusScale != nil {
		return *n.SecondaryRadiusScale
	}
	return 0.0
}

// GetTrim returns the Trim field if it's non-nil, zero value otherwise.
func (n *Node) GetTrim() float64 {
	if n != nil && n.Trim != nil {
		return *n.Trim
	}
	return 0.0
}

// GetType returns the Type field.
func (n *Node) GetType() *NodeType {
	if n != nil {
		return n.Type
	}
	return nil
}

// GetWidth returns the Width field if it's non-nil, zero value otherwise.
func (n *Node) GetWidth() float64 {
	if n != nil && n.Width != nil {
		return *n.Width
	}
	return 0.0
}
// GetPointType returns the PointType field.
func (p *Point) GetPointType() *PointType {
	if p != nil {
		return p.PointType
	}
	return nil
}

// GetRadius returns the Radius field if it's non-nil, zero value otherwise.
func (p *Point) GetRadius() float64 {
	if p != nil && p.Radius != nil {
		return *p.Radius
	}
	return 0.0
}
// GetVersion returns the Version field if it's non-nil, zero value otherwise.
func (r *Root) GetVersion() int {
if r == nil || r.Version == nil {
return 0
}
return *r.Version
} | flare/flare-accessors.go | 0.890416 | 0.606615 | flare-accessors.go | starcoder |
package parse
import (
"fmt"
"reflect"
)
// SemanticError reports a semantic (as opposed to syntactic) problem found
// while analyzing the parse tree.
type SemanticError struct {
	node Node   // the offending AST node
	msg  string // human-readable description of the problem
}

// Error implements the error interface.
func (s *SemanticError) Error() string {
	return fmt.Sprintf("Semantic error on `%v`: %v", s.node, s.msg)
}

// NewSemanticError returns a *SemanticError for node with message msg.
func NewSemanticError(node Node, msg string) error {
	return &SemanticError{node, msg}
}
// TranslationUnit is the parsed representation of a single source file:
// its top-level function definitions and external variable declarations.
type TranslationUnit struct {
	File  string         // source file name
	Funcs []FunctionNode // top-level function definitions
	Vars  []Node         // external variable inits (ExternVarInitNode/ExternVecInitNode — see ResolveDuplicates)
}
// String renders the translation unit as "<file>:" followed by its vars,
// a blank separator, then its functions, one per line.
func (t TranslationUnit) String() string {
	out := t.File + ":"
	for _, v := range t.Vars {
		out += fmt.Sprintln(v)
	}
	out += "\n\n"
	for _, f := range t.Funcs {
		out += fmt.Sprintln(f)
	}
	return out
}
// Verify runs all semantic checks on the translation unit: duplicate name
// resolution first, then the per-function checks in order.
func (t TranslationUnit) Verify() error {
	if err := t.ResolveDuplicates(); err != nil {
		return err
	}
	checks := []func(FunctionNode) error{
		t.VerifyFunction,
		t.VerifyAssignments,
		t.ResolveLabels,
	}
	for _, fn := range t.Funcs {
		for _, check := range checks {
			if err := check(fn); err != nil {
				return err
			}
		}
	}
	return nil
}
// expectLHS returns nil when node can appear on the left of an assignment:
// an array access, an identifier, or a pointer dereference (unary "*").
func (t TranslationUnit) expectLHS(node Node) error {
	switch n := node.(type) {
	case ArrayAccessNode, IdentNode:
		return nil
	case UnaryNode:
		if n.Oper == "*" {
			return nil
		}
	}
	return NewSemanticError(node, "expected lvalue")
}
// expectRHS returns nil when node is an expression (a valid rvalue).
func (t TranslationUnit) expectRHS(node Node) error {
	if !IsExpr(node) {
		return NewSemanticError(node, "expected rvalue")
	}
	return nil
}
// expectStatement returns nil when node is a statement; the error message
// names the actual node type for easier debugging.
func (t TranslationUnit) expectStatement(node Node) error {
	if !IsStatement(node) {
		return NewSemanticError(node, "expected statement, got "+reflect.TypeOf(node).Name())
	}
	return nil
}
// expectNodeType returns nil when node's dynamic type is exactly kind.
func (t TranslationUnit) expectNodeType(node Node, kind reflect.Type) error {
	if reflect.TypeOf(node) == kind {
		return nil
	}
	return NewSemanticError(node, "expected "+kind.Name())
}
// visitExpressions walks node recursively and calls visit on every
// expression it encounters. Expression nodes are handed to visit directly
// (sub-expressions are not descended into); statement containers are
// traversed.
//
// Idiom cleanup: the repeated node.(Type) assertions were replaced with a
// single type-switch binding.
func (t TranslationUnit) visitExpressions(node Node, visit func(Node) error) error {
	if IsExpr(node) {
		return visit(node)
	}
	switch n := node.(type) {
	case BlockNode:
		for _, child := range n.Nodes {
			if err := t.visitExpressions(child, visit); err != nil {
				return err
			}
		}
	case FunctionNode:
		if err := t.visitExpressions(n.Body, visit); err != nil {
			return err
		}
	case IfNode:
		if err := visit(n.Cond); err != nil {
			return err
		}
		if err := t.visitExpressions(n.Body, visit); err != nil {
			return err
		}
		if n.HasElse {
			if err := t.visitExpressions(n.ElseBody, visit); err != nil {
				return err
			}
		}
	case SwitchNode:
		if err := visit(n.Cond); err != nil {
			return err
		}
		for _, stmt := range n.DefaultCase {
			if err := t.visitExpressions(stmt, visit); err != nil {
				return err
			}
		}
		for _, case_ := range n.Cases {
			if err := visit(case_.Cond); err != nil {
				return err
			}
			if err := t.visitExpressions(case_, visit); err != nil {
				return err
			}
		}
	case WhileNode:
		if err := visit(n.Cond); err != nil {
			return err
		}
		if err := t.visitExpressions(n.Body, visit); err != nil {
			return err
		}
	}
	return nil
}
// visitStatements walks node recursively and calls visit on every leaf
// statement (gotos, labels, declarations, returns, breaks, expression
// statements). Containers (blocks, ifs, switches, whiles, functions) are
// traversed but not visited themselves. It errors if any encountered node
// is not a statement.
//
// Idiom cleanup: the repeated node.(Type) assertions were replaced with a
// single type-switch binding.
func (t TranslationUnit) visitStatements(node Node, visit func(Node) error) error {
	if err := t.expectStatement(node); err != nil {
		return err
	}
	switch n := node.(type) {
	case BlockNode:
		for _, child := range n.Nodes {
			if err := t.expectStatement(child); err != nil {
				return err
			}
			if err := t.visitStatements(child, visit); err != nil {
				return err
			}
		}
	case FunctionNode:
		if err := t.expectStatement(n.Body); err != nil {
			return err
		}
		if err := t.visitStatements(n.Body, visit); err != nil {
			return err
		}
	case GotoNode:
		if err := visit(node); err != nil {
			return err
		}
	case IfNode:
		if err := t.visitStatements(n.Body, visit); err != nil {
			return err
		}
		if n.HasElse {
			if err := t.visitStatements(n.ElseBody, visit); err != nil {
				return err
			}
		}
	case BreakNode, ExternVarDeclNode, ExternVarInitNode,
		ExternVecInitNode, LabelNode, ReturnNode, StatementNode, VarDeclNode:
		if err := visit(node); err != nil {
			return err
		}
	case SwitchNode:
		for _, stmt := range n.DefaultCase {
			if err := t.visitStatements(stmt, visit); err != nil {
				return err
			}
		}
		for _, case_ := range n.Cases {
			if err := t.visitStatements(case_, visit); err != nil {
				return err
			}
		}
	case WhileNode:
		if err := t.visitStatements(n.Body, visit); err != nil {
			return err
		}
	}
	return nil
}
// VerifyFunction checks that fn's body is a block and that all variable
// declarations appear before any other statement kind.
func (t TranslationUnit) VerifyFunction(fn FunctionNode) error {
	if err := t.expectNodeType(fn.Body, reflect.TypeOf(BlockNode{})); err != nil {
		return err
	}
	// Once a non-declaration statement is seen, no further declarations
	// are allowed.
	declsDone := false
	check := func(stmt Node) error {
		switch stmt.(type) {
		case ExternVarDeclNode, VarDeclNode:
			if declsDone {
				return NewSemanticError(stmt, "var declaration in middle of block")
			}
		default:
			declsDone = true
		}
		return nil
	}
	return t.visitStatements(fn.Body, check)
}
// VerifyAssignments checks that every assignment statement in fn has a
// proper lvalue on the left and an rvalue on the right.
func (t TranslationUnit) VerifyAssignments(fn FunctionNode) error {
	check := func(node Node) error {
		stmt, ok := node.(StatementNode)
		if !ok {
			return nil
		}
		bin, ok := stmt.Expr.(BinaryNode)
		if !ok || bin.Oper != "=" {
			return nil
		}
		if err := t.expectLHS(bin.Left); err != nil {
			return err
		}
		return t.expectRHS(bin.Right)
	}
	return t.visitStatements(fn.Body, check)
}
// ResolveDuplicates checks that all top-level function and external
// variable names are unique across the translation unit.
// TODO: resolve auto variable declarations within function definitions.
//
// Idiom cleanup: the repeated v.(Type) assertions were replaced with a
// type-switch binding.
func (t TranslationUnit) ResolveDuplicates() error {
	idents := map[string]Node{}
	for _, fn := range t.Funcs {
		if _, ok := idents[fn.Name]; ok {
			return NewSemanticError(fn, "Duplicate function name")
		}
		idents[fn.Name] = fn
	}
	for _, v := range t.Vars {
		var name string
		switch node := v.(type) {
		case ExternVecInitNode:
			name = node.Name
		case ExternVarInitNode:
			name = node.Name
		default:
			return NewSemanticError(v, "Not variable init")
		}
		if _, ok := idents[name]; ok {
			return NewSemanticError(v, "Duplicate variable name")
		}
		idents[name] = v
	}
	return nil
}
// Make sure all goto jump to valid places
func (t TranslationUnit) ResolveLabels(fn FunctionNode) error {
labels := map[string]bool{}
gotos := []GotoNode{}
visiter := func(node Node) error {
switch node.(type) {
case LabelNode:
if _, ok := labels[node.(LabelNode).Name]; ok {
return NewSemanticError(node, "duplicate label definition")
}
labels[node.(LabelNode).Name] = true
case GotoNode:
gotos = append(gotos, node.(GotoNode))
}
return nil
}
if err := t.visitStatements(fn, visiter); err != nil {
return err
}
for _, node := range gotos {
if _, ok := labels[node.Label]; !ok {
return NewSemanticError(node, "unresolved goto")
}
}
return nil
} | parse/analyze.go | 0.6137 | 0.42662 | analyze.go | starcoder |
package mki3d
/* data structures for textured triangles */
// Vector2dType is a 2D vector in MKI3D - used for UV texture coordinates.
type Vector2dType [2]float32

// TriangleUVType is the sequence of UV coordinates of the three endpoints
// of a textured triangle.
type TriangleUVType [3]Vector2dType

// TexturionDefType is a Texturion definition of a texture: one string per
// R/G/B/A channel plus a label.
// See: https://mki1967.github.io/texturion/
type TexturionDefType struct {
	Label string `json:"label"` // texture label
	R     string `json:"R"`     // red channel definition
	G     string `json:"G"`     // green channel definition
	B     string `json:"B"`     // blue channel definition
	A     string `json:"A"`     // alpha channel definition
}
// TexturedTriangleType is a triangle together with the UV texture
// coordinates of its endpoints.
type TexturedTriangleType struct {
	Triangle   TriangleType   `json:"triangle"`   // geometry of the triangle
	TriangleUV TriangleUVType `json:"triangleUV"` // per-vertex UV coordinates
}

// TexturedTrianglesType is a sequence of TexturedTriangleType.
type TexturedTrianglesType []TexturedTriangleType
// GetTriangles returns the plain triangles (TrianglesType) of the textured
// triangles, dropping the UV data.
func (textured TexturedTrianglesType) GetTriangles() TrianglesType {
	out := make([]TriangleType, len(textured))
	for i := range textured {
		out[i] = textured[i].Triangle
	}
	return TrianglesType(out)
}
// GetUVArrays returns a flat array of the endpoints' UV coordinates of the
// textured triangles (6 floats per triangle: u0,v0,u1,v1,u2,v2).
func (texTriangles TexturedTrianglesType) GetUVArrays() []float32 {
	data := make([]float32, 0, 6*len(texTriangles))
	for _, tt := range texTriangles {
		for _, uv := range tt.TriangleUV {
			data = append(data, uv[0], uv[1])
		}
	}
	return data
}
// TextureElementType is a texture definition together with the triangles
// textured with that texture.
type TextureElementType struct {
	Def               TexturionDefType      `json:"def"`               // the texture definition
	TexturedTriangles TexturedTrianglesType `json:"texturedTriangles"` // triangles using this texture
}

// TextureElementsType is a sequence of TextureElementType.
type TextureElementsType []TextureElementType

// TextureType is a set of textures with the triangles textured by them.
type TextureType struct {
	Elements TextureElementsType `json:"elements"` // all texture elements
	Index    int                 `json:"index"`    // index field (semantics defined by MKI3D — TODO confirm)
}
package kmeans
import (
"bytes"
"encoding/json"
"fmt"
"math"
"math/rand"
"strings"
"time"
)
// TransformedEuclideanDistance computes the Euclidean distance
// sqrt((p1-q1)^2 + (p2-q2)^2 + ... + (pn-qn)^2), applying t.Transform to
// each dimension value first when t is non-nil. It errors when the vectors
// have different lengths.
func TransformedEuclideanDistance(t Transformer, p, q Vector) (float64, error) {
	pLen, qLen := p.Len(), q.Len()
	if pLen != qLen {
		return 0, fmt.Errorf("len(p)=%d != len(q)=%d", pLen, qLen)
	}
	var fn func(interface{}) float64
	if t != nil {
		fn = t.Transform
	}
	sum := 0.0
	for i := 0; i < pLen; i++ {
		pDim, _ := p.Dimension(i)
		qDim, _ := q.Dimension(i)
		diff := quantifyAsFloat64(fn, pDim) - quantifyAsFloat64(fn, qDim)
		sum += diff * diff
	}
	return math.Sqrt(sum), nil
}
// EuclideanDistance returns the Euclidean distance between p and q with no
// value transformation applied.
func EuclideanDistance(p, q Vector) (float64, error) {
	return TransformedEuclideanDistance(nil, p, q)
}

// Cluster maps each centroid to the vectors assigned to it.
type Cluster map[Vector][]Vector

// KMean bundles the parameters of a k-means run.
type KMean struct {
	K       int      // number of clusters (must be >= 2 and < len(Vectors))
	Seed    int64    // RNG seed; values <= 0 mean "use the current time"
	Vectors []Vector // the points to cluster
}
// KMeans clusters points into k groups; it is a convenience wrapper around
// KMeanify with a time-based random seed.
func KMeans(k int, points ...Vector) (Cluster, error) {
	return KMeanify(&KMean{K: k, Vectors: points})
}
// KMeanify runs the clustering described by km and returns the resulting
// assignment of points to centroids.
//
// km.K initial centroids are chosen at random from km.Vectors (seeded by
// km.Seed, or the current time when km.Seed <= 0). The loop repeatedly
// assigns every non-centroid point to its nearest centroid until the
// clustering no longer changes between passes. Note that centroids are the
// randomly chosen points themselves and are never recomputed as means.
//
// Cleanup: removed the dead `passes` counter (incremented but never read).
func KMeanify(km *KMean) (Cluster, error) {
	k := km.K
	points := km.Vectors
	seed := km.Seed
	if k < 2 {
		return nil, fmt.Errorf("at least 2 centroids are to be picked")
	}
	// 1. Pick k distinct centroids at random as the initial centroids.
	if k >= len(points) {
		return nil, fmt.Errorf("k=%d >= len(points)=%d", k, len(points))
	}
	if seed <= 0 {
		seed = time.Now().Unix()
	}
	randSource := rand.New(rand.NewSource(seed))
	centroidIndices := randSource.Perm(len(points))[:k]
	centroidIndicesMap := make(map[int]bool)
	var centroids []Vector
	for _, i := range centroidIndices {
		centroids = append(centroids, points[i])
		centroidIndicesMap[i] = true
	}
	var lastCluster Cluster
	for {
		curCluster := make(Cluster)
		// 2. Assign each object to the centroid closest to it.
		for i, p := range points {
			if _, isCentroid := centroidIndicesMap[i]; isCentroid {
				// Self-assignment ensures every centroid appears as a
				// key even when no point is assigned to it.
				curCluster[p] = curCluster[p]
				continue
			}
			indicesToDistances := distances(nil, p, centroids...)
			minDistanceCentroidIndex := minDistanceIndex(indicesToDistances)
			closestCentroid := centroids[minDistanceCentroidIndex]
			curCluster[closestCentroid] = append(curCluster[closestCentroid], p)
		}
		if ClustersEqual(lastCluster, curCluster) { // Assignments stable.
			break
		}
		lastCluster = curCluster
	}
	return lastCluster, nil
}
// signatureMap re-keys a cluster by each centroid's Signature so that
// clusterings can be compared without relying on Vector identity.
func signatureMap(c Cluster) map[interface{}][]Vector {
	m := make(map[interface{}][]Vector, len(c))
	for centroid, members := range c {
		m[centroid.Signature()] = members
	}
	return m
}
// ClustersEqual reports whether two clusterings have the same centroids
// (compared by Signature) with equal member slices.
func ClustersEqual(cA, cB Cluster) bool {
	if len(cA) != len(cB) {
		return false
	}
	sigsA, sigsB := signatureMap(cA), signatureMap(cB)
	for sig, membersA := range sigsA {
		membersB, ok := sigsB[sig]
		if !ok || !vectorSlicesEqual(membersA, membersB) {
			return false
		}
	}
	return true
}
// vectorSlicesEqual reports whether va and vb contain the same vectors
// (compared by Signature), treated as multisets — order does not matter,
// multiplicity does. A nil slice is only equal to another nil slice.
//
// Fixes the previous set-based comparison, which ignored duplicates: with
// va=[x,y], vb=[x,x] the old code reported the slices equal because both
// elements of vb were members of {x,y}.
func vectorSlicesEqual(va, vb []Vector) bool {
	if va == nil || vb == nil {
		return va == nil && vb == nil
	}
	if len(va) != len(vb) {
		return false
	}
	counts := make(map[interface{}]int, len(va))
	for _, v := range va {
		counts[v.Signature()]++
	}
	for _, v := range vb {
		sig := v.Signature()
		if counts[sig] == 0 {
			return false
		}
		counts[sig]--
	}
	return true
}
// minDistanceIndex returns the key of the smallest distance in the map.
// Callers are expected to pass a map containing key 0 (as produced by
// distances); an empty map yields 0.
func minDistanceIndex(indicesToDistances map[int]float64) int {
	best := 0
	bestDist := indicesToDistances[best]
	for i, d := range indicesToDistances {
		if d < bestDist {
			best, bestDist = i, d
		}
	}
	return best
}
// distances returns a map from each opponent's index to its (transformed)
// Euclidean distance from subject, or nil when others is empty. Distance
// errors are ignored; callers are expected to pass same-length vectors.
func distances(t Transformer, subject Vector, others ...Vector) map[int]float64 {
	if len(others) == 0 {
		return nil
	}
	result := make(map[int]float64, len(others))
	for i, other := range others {
		d, _ := TransformedEuclideanDistance(t, subject, other)
		result[i] = d
	}
	return result
}
// MarshalJSON encodes the cluster as a JSON object: each key is the
// JSON-quoted encoding of a centroid, each value the array of member
// vectors. A nil receiver encodes as "{}".
//
// NOTE(review): map iteration order is random, so the key order of the
// output is nondeterministic between calls — confirm callers do not rely
// on byte-for-byte stable output.
func (c *Cluster) MarshalJSON() ([]byte, error) {
	if c == nil {
		return []byte("{}"), nil
	}
	var strs []string
	for centroid, elements := range *c {
		cBlob, err := json.Marshal(centroid)
		if err != nil {
			return nil, err
		}
		elemsBlob, err := json.Marshal(elements)
		if err != nil {
			return nil, err
		}
		// %q quotes the centroid's JSON so it is a valid object key.
		strs = append(strs, fmt.Sprintf("%q: %s", cBlob, elemsBlob))
	}
	buf := new(bytes.Buffer)
	buf.Write([]byte("{"))
	buf.Write([]byte(strings.Join(strs, ",")))
	buf.Write([]byte("}"))
	return buf.Bytes(), nil
}
package rect
import (
"fmt"
"math"
disc "github.com/briannoyama/bvh/discreet"
)
// DIMENSIONS is the number of spatial dimensions of an Orthotope.
const DIMENSIONS int = 2

// Orthotope is an axis-aligned box: Point is its corner and Delta its
// extent along each axis.
type Orthotope struct {
	Point [DIMENSIONS]int32
	Delta [DIMENSIONS]int32
}

// ACCURACY is the number of fractional bits in the fixed-point parameter
// returned by Intersects.
var ACCURACY uint = 13
// Overlaps reports whether o and orth intersect; touching boundaries
// count as overlapping.
func (o *Orthotope) Overlaps(orth *Orthotope) bool {
	for index, p0 := range orth.Point {
		p1 := p0 + orth.Delta[index]
		// Disjoint along any axis means no overlap.
		if o.Point[index] > p1 || p0 > o.Point[index]+o.Delta[index] {
			return false
		}
	}
	return true
}
// Contains reports whether orth lies entirely within o, boundaries
// inclusive.
func (o *Orthotope) Contains(orth *Orthotope) bool {
	for index, p0 := range o.Point {
		p1 := p0 + o.Delta[index]
		if orth.Point[index] < p0 || orth.Point[index]+orth.Delta[index] > p1 {
			return false
		}
	}
	return true
}
// Intersects treats orth as a ray — Point is the origin, Delta the
// direction — and performs a slab test against the box o. It returns the
// entry parameter t (>= 0) as a fixed-point value with ACCURACY fractional
// bits, or -1 when the ray does not hit the box.
func (orth *Orthotope) Intersects(o *Orthotope) int32 {
	inT := int32(0)              // latest entry time across all axes
	outT := int32(math.MaxInt32) // earliest exit time across all axes
	for index, p0 := range o.Point {
		p1 := o.Delta[index] + p0
		if orth.Delta[index] == 0 {
			// Ray parallel to this axis: must already lie inside the slab.
			if orth.Point[index] < p0 || p1 < orth.Point[index] {
				return -1
			}
		} else {
			if orth.Delta[index] < 0 {
				// Swap p0 and p1 for negative directions.
				p0, p1 = p1, p0
			}
			// Fixed-point division: shift before dividing to keep
			// ACCURACY fractional bits of precision.
			p0T := ((p0 - orth.Point[index]) << ACCURACY) / orth.Delta[index]
			inT = disc.Max(inT, p0T)
			p1T := ((p1 - orth.Point[index]) << ACCURACY) / orth.Delta[index]
			outT = disc.Min(outT, p1T)
		}
	}
	if inT < outT && inT >= 0 {
		return inT
	}
	return -1
}
// MinBounds sets o to the minimal axis-aligned bounding box enclosing all
// of the given orthotopes. At least one orthotope must be supplied.
//
// Fixes a bug in the minimum-corner accumulation: the old code assigned
// o.Point[index] = Min(p0, other.Point[index]) with p0 never updated
// inside the inner loop, so with three or more orthotopes only the LAST
// one could lower the minimum — intermediate minima were discarded. The
// maximum side (p1) was accumulated correctly.
func (o *Orthotope) MinBounds(others ...*Orthotope) {
	o.Point = others[0].Point
	o.Delta = others[0].Delta
	for index := range o.Point {
		p0 := o.Point[index]
		p1 := p0 + o.Delta[index]
		for _, other := range others[1:] {
			p0 = disc.Min(p0, other.Point[index])
			p1 = disc.Max(p1, other.Point[index]+other.Delta[index])
		}
		o.Point[index] = p0
		o.Delta[index] = p1 - p0
	}
}
// Score returns the sum of the orthotope's extents (a cheap size metric).
func (o *Orthotope) Score() int32 {
	var total int32
	for _, d := range o.Delta {
		total += d
	}
	return total
}
// Equals reports whether o and other have identical points and deltas.
func (o *Orthotope) Equals(other *Orthotope) bool {
	for i := range o.Point {
		if o.Point[i] != other.Point[i] || o.Delta[i] != other.Delta[i] {
			return false
		}
	}
	return true
}
// String returns a human-readable representation of this orthotope.
func (o *Orthotope) String() string {
	return fmt.Sprintf("Point %v, Delta %v", o.Point, o.Delta)
}
package gl
import (
"unsafe"
"github.com/thinkofdeath/gl/v3.2-core/gl"
)
// TextureTarget is a target where a texture can be bound to.
type TextureTarget uint32

// Valid texture targets.
const (
	Texture2D            TextureTarget = gl.TEXTURE_2D
	Texture2DMultisample TextureTarget = gl.TEXTURE_2D_MULTISAMPLE
	Texture2DArray       TextureTarget = gl.TEXTURE_2D_ARRAY
	Texture3D            TextureTarget = gl.TEXTURE_3D
)

// TextureFormat is the format of a texture, either internally or
// to be uploaded.
type TextureFormat uint32

// Valid texture formats.
const (
	Red              TextureFormat = gl.RED
	RGB              TextureFormat = gl.RGB
	RGBA             TextureFormat = gl.RGBA
	RGBA8            TextureFormat = gl.RGBA8
	RGBA16F          TextureFormat = gl.RGBA16F
	R16F             TextureFormat = gl.R16F
	DepthComponent24 TextureFormat = gl.DEPTH_COMPONENT24
	DepthComponent   TextureFormat = gl.DEPTH_COMPONENT
)

// TextureParameter is a parameter that can be read or set on a texture.
type TextureParameter uint32

// Valid texture parameters.
const (
	TextureMinFilter TextureParameter = gl.TEXTURE_MIN_FILTER
	TextureMagFilter TextureParameter = gl.TEXTURE_MAG_FILTER
	TextureWrapS     TextureParameter = gl.TEXTURE_WRAP_S
	TextureWrapT     TextureParameter = gl.TEXTURE_WRAP_T
	TextureMaxLevel  TextureParameter = gl.TEXTURE_MAX_LEVEL
)

// TextureValue is a value that can be set on a texture's parameter.
type TextureValue int32

// Valid texture values.
const (
	Nearest              TextureValue = gl.NEAREST
	Linear               TextureValue = gl.LINEAR
	LinearMipmapLinear   TextureValue = gl.LINEAR_MIPMAP_LINEAR
	LinearMipmapNearest  TextureValue = gl.LINEAR_MIPMAP_NEAREST
	NearestMipmapNearest TextureValue = gl.NEAREST_MIPMAP_NEAREST
	NearestMipmapLinear  TextureValue = gl.NEAREST_MIPMAP_LINEAR
	ClampToEdge          TextureValue = gl.CLAMP_TO_EDGE
)

// State tracking: the currently bound texture and its target, used to skip
// redundant glBindTexture calls and to assert that operations act on the
// bound texture.
var (
	currentTexture       Texture
	currentTextureTarget TextureTarget
)
// Texture is a buffer of data used by fragment shaders to produce color.
// The zero value does not name a valid GL texture; use CreateTexture.
type Texture struct {
	internal uint32 // GL texture object name
}

// CreateTexture allocates a new texture object on the GPU.
func CreateTexture() Texture {
	var texture Texture
	gl.GenTextures(1, &texture.internal)
	return texture
}
// Bind binds the texture to the passed target. If the texture is already
// bound to that target this is a no-op (tracked via package-level state).
func (t Texture) Bind(target TextureTarget) {
	if currentTexture != t || currentTextureTarget != target {
		gl.BindTexture(uint32(target), t.internal)
		currentTexture = t
		currentTextureTarget = target
	}
}
// Get reads the texture image at the given mip level back into pixels.
// Panics when t is not the currently bound texture.
func (t Texture) Get(level int, format TextureFormat, ty Type, pixels []byte) {
	if currentTexture != t {
		panic("texture not bound")
	}
	gl.GetTexImage(uint32(currentTextureTarget), int32(level), uint32(format), uint32(ty), gl.Ptr(pixels))
}
// Image3D uploads a 3D texture to the GPU. An empty pix allocates storage
// without uploading pixel data. Panics when t is not the bound texture.
func (t Texture) Image3D(level, width, height, depth int, format TextureFormat, ty Type, pix []byte) {
	if t != currentTexture {
		panic("texture not bound")
	}
	var ptr unsafe.Pointer
	if len(pix) != 0 {
		ptr = gl.Ptr(pix)
	}
	gl.TexImage3D(uint32(currentTextureTarget), int32(level), int32(format), int32(width), int32(height), int32(depth), int32(0), uint32(format), uint32(ty), ptr)
}

// SubImage3D updates a region of a 3D texture. Panics when t is not the
// bound texture.
func (t Texture) SubImage3D(level, x, y, z, width, height, depth int, format TextureFormat, ty Type, pix []byte) {
	if t != currentTexture {
		panic("texture not bound")
	}
	gl.TexSubImage3D(uint32(currentTextureTarget), int32(level), int32(x), int32(y), int32(z), int32(width), int32(height), int32(depth), uint32(format), uint32(ty), gl.Ptr(pix))
}
// Image2D uploads a 2D texture to the GPU, using format for both the
// internal format and the upload format.
func (t Texture) Image2D(level, width, height int, format TextureFormat, ty Type, pix []byte) {
	t.Image2DEx(level, width, height, format, format, ty, pix)
}
// Image2DEx uploads a 2D texture to the GPU with separate internal and
// upload formats. An empty pix allocates storage without uploading pixel
// data. Panics when t is not the bound texture.
func (t Texture) Image2DEx(level, width, height int, internalFormat, format TextureFormat, ty Type, pix []byte) {
	if t != currentTexture {
		panic("texture not bound")
	}
	var ptr unsafe.Pointer
	// Check length rather than nil-ness (consistent with Image3D) so a
	// non-nil zero-length slice never produces a pointer to no data.
	if len(pix) != 0 {
		ptr = gl.Ptr(pix)
	}
	gl.TexImage2D(
		uint32(currentTextureTarget),
		int32(level),
		int32(internalFormat),
		int32(width),
		int32(height),
		0,
		uint32(format),
		uint32(ty),
		ptr,
	)
}
// Image2DSample allocates multisample storage for a 2D texture; no pixel
// data is uploaded. fixed selects fixed sample locations.
// (The previous comment was a copy-paste of Image2DEx's.)
func (t Texture) Image2DSample(samples, width, height int, format TextureFormat, fixed bool) {
	if t != currentTexture {
		panic("texture not bound")
	}
	gl.TexImage2DMultisample(
		uint32(currentTextureTarget),
		int32(samples),
		uint32(format),
		int32(width),
		int32(height),
		fixed,
	)
}
// SubImage2D updates a region of a 2D texture. Panics when t is not the
// bound texture.
func (t Texture) SubImage2D(level int, x, y, width, height int, format TextureFormat, ty Type, pix []byte) {
	if t != currentTexture {
		panic("texture not bound")
	}
	gl.TexSubImage2D(
		uint32(currentTextureTarget),
		int32(level),
		int32(x),
		int32(y),
		int32(width),
		int32(height),
		uint32(format),
		uint32(ty),
		gl.Ptr(pix),
	)
}
// Parameter sets a texture parameter to the passed value. Panics when t is
// not the currently bound texture.
func (t Texture) Parameter(param TextureParameter, val TextureValue) {
	if currentTexture != t {
		panic("texture not bound")
	}
	gl.TexParameteri(uint32(currentTextureTarget), uint32(param), int32(val))
}
// Delete frees the texture object on the GPU. The Texture must not be
// used afterwards.
func (t Texture) Delete() {
	gl.DeleteTextures(1, &t.internal)
}
package ratingutil
import (
"fmt"
"sort"
"time"
"github.com/mashiike/rating"
"github.com/pkg/errors"
)
// ApplyStrategy is an alias for a function deciding how the result of a
// multiplayer match (final scores per element) is reflected back into each
// element's rating.
type ApplyStrategy func(map[Element]rating.Rating, map[Element]float64) error

// Match is a model that represents a battle between multiple teams/players.
type Match struct {
	scores        map[Element]float64 // running score of every participant
	applyStrategy ApplyStrategy       // how final scores update ratings
}
// Add adds score to the element's running total. It errors when the
// element has not joined this match.
func (m *Match) Add(element Element, score float64) error {
	current, ok := m.scores[element]
	if !ok {
		return errors.New("this element not join match")
	}
	m.scores[element] = current + score
	return nil
}
// Reset returns every participant's score to zero.
func (m *Match) Reset() {
	for element := range m.scores {
		m.scores[element] = 0
	}
}
// Scores returns a copy of the internal scores map.
func (m *Match) Scores() map[Element]float64 {
	copied := make(map[Element]float64, len(m.scores))
	for elem, score := range m.scores {
		copied[elem] = score
	}
	return copied
}
// Ratings returns the current rating of every joined team/player.
func (m *Match) Ratings() map[Element]rating.Rating {
	result := make(map[Element]rating.Rating, len(m.scores))
	for target := range m.scores {
		result[target] = target.Rating()
	}
	return result
}
// Apply finalizes the current scores and reflects them in the
// participants' ratings.
//
// Order matters: every participant is Prepare'd at scoresAt while the
// scores are snapshotted, then the ratings are snapshotted, the running
// scores are reset, and finally the apply strategy is invoked with both
// snapshots.
func (m *Match) Apply(scoresAt time.Time, config *Config) error {
	scores := make(map[Element]float64, len(m.scores))
	for target, score := range m.scores {
		if err := target.Prepare(scoresAt, config); err != nil {
			return errors.Wrapf(err, "failed prepare %v", target.Name())
		}
		scores[target] = score
	}
	ratings := m.Ratings()
	m.Reset()
	return m.applyStrategy(ratings, scores)
}
// WinProbs returns, for each team/player, the product of its pairwise win
// probabilities against every other participant — the probability of
// beating all of them.
func (m *Match) WinProbs() map[Element]float64 {
	ratings := m.Ratings()
	probs := make(map[Element]float64, len(ratings))
	for target, targetRating := range ratings {
		p := 1.0
		for opponent, opponentRating := range ratings {
			if opponent == target {
				continue
			}
			p *= targetRating.WinProb(opponentRating)
		}
		probs[target] = p
	}
	return probs
}
// String renders the match as "[ name(prob) ... ]", with participants
// sorted by name for stable output.
func (m *Match) String() string {
	probs := m.WinProbs()
	elems := make([]Element, 0, len(probs))
	for elem := range probs {
		elems = append(elems, elem)
	}
	sort.Slice(elems, func(i, j int) bool { return elems[i].Name() < elems[j].Name() })
	out := "["
	for _, elem := range elems {
		out += fmt.Sprintf(" %s(%0.2f) ", elem, probs[elem])
	}
	return out + "]"
}
//AsRoundrobin considers Multiplayer Matches to be a round-trip tournament ApplyStrategy
func AsRoundrobin(ratings map[Element]rating.Rating, scores map[Element]float64) error {
for target, score1 := range scores {
for opponent, score2 := range scores {
if target == opponent {
continue
}
score := rating.ScoreLose
if score1 > score2 {
score = rating.ScoreWin
}
if score1 == score2 {
score = rating.ScoreDraw
}
if err := target.ApplyMatch(ratings[opponent], score); err != nil {
return errors.Wrapf(err, "failed apply %v vs %v", target.Name(), opponent.Name())
}
}
}
return nil
} | ratingutil/match.go | 0.762336 | 0.468851 | match.go | starcoder |
package mark
import (
"bufio"
"io"
"os"
)
// parseState holds the per-line scanning state used to find reference
// links ("[text][name]") and reference definitions ("[name]: uri").
type parseState struct {
	*Document
	line string
	// index is the current scan position in line; lineNum is 1-based.
	index, lineNum int
	// start marks the '[' opening the current bracket pair; startName marks
	// the first byte of a reference name; startUri the first byte of a
	// definition URI.
	start, startName, startUri int
	// endDefName is the index of the ']' closing a definition's name.
	endDefName int
	// canDefine stays true only while a reference definition is still
	// possible on this line (the line must start with '[').
	canDefine bool
	// opened: inside '[...]'; referencing: inside the second bracket pair
	// of '[text][name]'; defining: past the ':' of a definition.
	opened, referencing, defining bool
	// code: inside a single-backtick span; multiCode: inside a ``` block.
	code, multiCode bool
}
// newParseState builds the scanner state for one line of the document.
// Reference definitions are only (possibly) allowed if they sit alone on
// a line, i.e. the line starts with '['. multi reports whether the scan
// begins inside a multi-line ``` code block.
func (d *Document) newParseState(lineNum int, line string, multi bool) *parseState {
	return &parseState{
		lineNum:  lineNum,
		line:     line,
		Document: d,
		// Guard the index so an empty line cannot panic.
		canDefine: len(line) > 0 && line[0] == '[',
		multiCode: multi,
	}
}
// chr returns the byte at the current scan position.
func (s *parseState) chr() uint8 {
	return s.line[s.index]
}

// rest returns the unscanned remainder of the line.
func (s *parseState) rest() string {
	return s.line[s.index:]
}

// eol reports whether the scan position has passed the end of the line.
func (s *parseState) eol() bool {
	return s.index >= len(s.line)
}

// isWhitespace reports whether the current byte is a space, tab or newline.
func (s *parseState) isWhitespace() bool {
	switch s.chr() {
	case ' ', '\t', '\n':
		return true
	default:
		return false
	}
}

// skipWhitespace advances past whitespace and reports whether any
// non-whitespace input remains on the line.
func (s *parseState) skipWhitespace() bool {
	for !s.eol() && s.isWhitespace() {
		s.index++
	}
	// Was there anything left after the whitespace?
	return !s.eol()
}
// startDefine is entered with the scan position on the ':' of a candidate
// "[name]: uri" definition. It records where the URI starts, or aborts if
// definitions are not allowed here or nothing follows the colon.
func (s *parseState) startDefine() {
	if !s.canDefine {
		return
	}
	s.defining = true
	// index sits on ':'; the byte before it is the ']' closing the name.
	s.endDefName = s.index - 1
	// Skip ':'.
	s.index++
	if s.skipWhitespace() {
		s.startUri = s.index
	} else {
		// Abort, nothing after the whitespace.
		s.defining = false
	}
}
// finishDefine records the completed "[name]: uri" definition on the
// Document. It runs only at end of line, so the trailing newline is
// stripped from the URI.
func (s *parseState) finishDefine() {
	// Has to be on its own line starting with '[', so the name begins at 1.
	name := s.line[1:s.endDefName]
	// Skip newline (we have to be at the end of the line to finish a define.
	uri := s.line[s.startUri : len(s.line)-1]
	s.Document.define(name, uri, Location{s.lineNum, 1})
}
// startReference is entered with the scan position on the '[' that opens
// the refname part of '[text][refname]'.
func (s *parseState) startReference() {
	// Even [foo]:[bar][baz] is theoretically valid.
	if s.defining {
		return
	}
	s.referencing = true
	// Re-open, since we're moving into 'refname' in '[text][refname]'.
	s.opened = true
	// Skip '['.
	s.startName = s.index + 1
}
// finishReference records a completed '[text][refname]' usage on the
// Document, located at the '[' that opened the text part.
func (s *parseState) finishReference() {
	s.referencing = false
	name := s.line[s.startName:s.index]
	// Columns are base-1 in Location.
	s.Document.referTo(name, Location{s.lineNum, s.start + 1})
}
// codeBlock handles a '`': a run of three backticks toggles the
// multi-line ``` state, while a lone backtick toggles the inline span
// state (only when not already inside a multi-line block).
func (s *parseState) codeBlock() {
	// Note that s.code => !s.multiCode.
	if s.code {
		// We've closed out our code section.
		s.code = false
		return
	}
	rest := s.rest()
	if len(rest) >= 3 && rest[:3] == "```" {
		// Skip remaining 2 quote chars.
		s.index += 2
		s.multiCode = !s.multiCode
		return
	}
	if !s.multiCode {
		// Otherwise we just opened a single quote code block.
		s.code = true
	}
}
// close handles a ']': it finishes the current reference if one is open,
// or peeks one character ahead to decide whether a second '[' (start of a
// reference name) or a ':' (start of a definition) follows.
func (s *parseState) close() {
	// Brackets inside code spans/blocks carry no meaning.
	if !s.opened || s.code || s.multiCode {
		return
	}
	s.opened = false
	if s.referencing {
		s.finishReference()
		return
	}
	// If we're at the end of the line, we can't peek.
	if s.index == len(s.line)-1 {
		return
	}
	// Peek.
	s.index++
	switch s.chr() {
	case '[':
		// If we do anything but start defining when it's possible,
		// defining is no longer allowed.
		s.canDefine = false
		s.startReference()
	case ':':
		s.startDefine()
	default:
		s.canDefine = false
	}
}
// open handles a '[': it marks the start of a bracketed span, unless we
// are inside a code span/block or a span is already open.
func (s *parseState) open() {
	if s.opened || s.code || s.multiCode {
		return
	}
	s.start = s.index
	s.opened = true
}
// Next consumes one character, dispatching on the only three characters
// that matter to the scanner: backtick, ']' and '['.
func (s *parseState) Next() {
	switch s.chr() {
	case '`':
		s.codeBlock()
	case ']':
		s.close()
	case '[':
		s.open()
	}
	s.index++
}
// Parse scans the whole line and records any completed definition.
func (s *parseState) Parse() {
	// newline is included, and we want to peek 1 char ahead so only look at
	// len(line)-1 chars.
	for s.index < len(s.line)-1 {
		s.Next()
	}
	// Define ends at end of line (ignoring newline.)
	if s.defining {
		s.finishDefine()
	}
}
// parseLine scans one line of the document and returns the resulting
// scanner state; its multiCode flag carries over into the next line.
func (d *Document) parseLine(lineNum int, line string, multi bool) *parseState {
	state := d.newParseState(lineNum, line, multi)
	state.Parse()
	return state
}
// ParseFile opens and parses the file at path, returning the resulting
// Document with its references, definitions and line count recorded.
func ParseFile(path string) (ret *Document, err error) {
	var file *os.File
	if file, err = os.Open(path); err != nil {
		return
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	multi := false
	lineNum := 1
	ret = newDocument(path)
	var bytes []byte
	// NOTE(review): when the final line of the file lacks a trailing
	// newline, ReadBytes returns that data together with io.EOF, and this
	// loop condition drops it unparsed — confirm whether that is intended.
	for bytes, err = reader.ReadBytes('\n'); err != io.EOF; bytes, err = reader.ReadBytes('\n') {
		if err != nil {
			return
		}
		// We intentionally include the newline.
		state := ret.parseLine(lineNum, string(bytes), multi)
		multi = state.multiCode
		lineNum++
	}
	// Clear EOF.
	err = nil
	ret.Lines = lineNum - 1
	return
}
package reflect
import (
"go/ast"
)
// Funcs is a type that represents a list of functions.
type Funcs []Func

// Len returns number of functions in the list.
func (fs Funcs) Len() int { return len(fs) }

// Swap changes positions of functions with requested indexes.
func (fs Funcs) Swap(i, j int) { fs[i], fs[j] = fs[j], fs[i] }

// Less compares the names of two functions. Note: despite appearances in
// older docs, only Name is compared — the file name is not involved.
func (fs Funcs) Less(i, j int) bool {
	return fs[i].Name < fs[j].Name
}
// Func is a type that represents information about a function or method.
type Func struct {
	Comments Comments // Comments that are located right above the function declaration.
	File string // Name of the file where the function is located. NOTE(review): not set by processFuncDecl; presumably filled in by the caller — confirm.
	Name string // Name of the function, e.g. "Index" or "About".
	Params Args // A list of arguments this function receives.
	Recv *Arg // Receiver if it is a method and nil otherwise.
	Results Args // A list of arguments the function returns.
}
// FilterGroups gets a condition function and a number of group functions.
// Functions failing cond are discarded; the survivors are tested against
// every group predicate and appended to each group they satisfy (a
// function may appear in several groups). It returns the groups plus the
// total number of functions that satisfied cond.
//
// Example:
//	res, count := myFuncs.FilterGroups(isExported, withArguments, withoutArguments)
// yields []Funcs{functions withArguments, functions withoutArguments},
// all of them satisfying isExported.
func (fs Funcs) FilterGroups(cond func(f *Func) bool, groups ...func(f *Func) bool) ([]Funcs, int) {
	grouped := make([]Funcs, len(groups))
	total := 0
	for _, fn := range fs {
		if !cond(&fn) {
			continue
		}
		total++
		for i, belongs := range groups {
			if belongs(&fn) {
				grouped[i] = append(grouped[i], fn)
			}
		}
	}
	return grouped, total
}
// processFuncDecl receives an ast function declaration and
// transforms it into Func structure that is returned.
func processFuncDecl(decl *ast.FuncDecl) *Func {
// Check whether there is a receiver.
var recv *Arg
args := processFieldList(decl.Recv)
if len(args) > 0 {
recv = &args[0]
}
return &Func{
Comments: processCommentGroup(decl.Doc),
Name: decl.Name.Name,
Params: processFieldList(decl.Type.Params),
Results: processFieldList(decl.Type.Results),
Recv: recv,
}
} | internal/reflect/func.go | 0.661704 | 0.402715 | func.go | starcoder |
package calendar
import (
"time"
)
// A CachedCalendar wraps and caches a Calendar. Only positive lookups
// (red/notable days) are cached; misses always hit the wrapped Calendar.
type CachedCalendar struct {
	cal Calendar
	cacheRed map[time.Time]string // red day description
	cacheNotable map[time.Time]string // notable day description
	cacheFlag map[time.Time]bool // flag flying day, written by both RedDay and NotableDay
}
// NewCachedCalendar creates a new CachedCalendar that wraps and caches
// the given Calendar. A CachedCalendar is also a Calendar itself, since
// it implements the Calendar interface.
func NewCachedCalendar(cal Calendar) CachedCalendar {
	return CachedCalendar{
		cal:          cal,
		cacheRed:     make(map[time.Time]string),
		cacheNotable: make(map[time.Time]string),
		cacheFlag:    make(map[time.Time]bool),
	}
}
// RedDay wraps the Calendar's RedDay function and caches positive
// results. Returns (red, description, flag-flying). The value receiver is
// fine here because the cache maps are reference types shared by copies.
func (calca CachedCalendar) RedDay(date time.Time) (bool, string, bool) {
	// Return from cache, if it's there. Presence in cacheRed implies the
	// day is red, so ok doubles as the "red" result.
	desc, ok := calca.cacheRed[date]
	if ok {
		return ok, desc, calca.cacheFlag[date]
	}
	// Get the information from the calendar
	red, desc, flag := calca.cal.RedDay(date)
	// Add red days to the cache
	// TODO: Also cache non-red days
	if red {
		calca.cacheRed[date] = desc
		calca.cacheFlag[date] = flag
	}
	return red, desc, flag
}
// NotableDay wraps the Calendar's NotableDay function and caches positive
// results. Returns (notable, description, flag-flying).
// NOTE(review): cacheFlag is shared with RedDay; if the wrapped Calendar
// ever reports different flag values for the same date, the later write
// wins — confirm this cannot happen.
func (calca CachedCalendar) NotableDay(date time.Time) (bool, string, bool) {
	// Return from cache, if it's there. Presence in cacheNotable implies
	// the day is notable, so ok doubles as the result.
	desc, ok := calca.cacheNotable[date]
	if ok {
		return ok, desc, calca.cacheFlag[date]
	}
	// Get the information from the calendar
	notable, desc, flag := calca.cal.NotableDay(date)
	// Add notable days to cache
	// TODO: Also cache non-notable days
	if notable {
		calca.cacheNotable[date] = desc
		calca.cacheFlag[date] = flag
	}
	return notable, desc, flag
}
// --- These are here just to satisfy the Calendar interface ---

// NotablePeriod wraps the underlying Calendar's NotablePeriod (uncached).
func (calca CachedCalendar) NotablePeriod(date time.Time) (bool, string) {
	return calca.cal.NotablePeriod(date)
}

// DayName wraps the underlying Calendar's DayName (uncached).
func (calca CachedCalendar) DayName(date time.Weekday) string {
	return calca.cal.DayName(date)
}

// NormalDay wraps the underlying Calendar's NormalDay (uncached).
func (calca CachedCalendar) NormalDay() string {
	return calca.cal.NormalDay()
}
package goterator
// iter drives the generator, pushing each element through the mapper
// pipeline (map/filter/take/takeWhile/skip/skipWhile) in registration
// order, and hands each surviving element to f. Iteration stops when the
// generator is exhausted, a take* mapper is satisfied, or f returns false.
// Note that takeValue/skipValue are pointers and are mutated in place, so
// the pipeline is single-use.
func (iter *Iterator) iter(f PredicateFunc) {
	// skipWhile stops skipping permanently after its predicate first fails.
	skipWhileIsOver := false
ElementLoop:
	for {
		ok := iter.generator.Next()
		if !ok {
			return
		}
		element := iter.generator.Value()
		for _, m := range iter.mappers {
			switch mapper := m.(type) {
			case mapFunc:
				element = mapper(element)
			case filterFunc:
				if !mapper(element) {
					continue ElementLoop
				}
			case takeValue:
				// Remaining-elements counter; stop once it hits zero.
				if *mapper == 0 {
					return
				}
				*mapper--
			case takeWhileFunc:
				if !mapper(element) {
					return
				}
			case skipValue:
				// Counter of elements still to drop from the front.
				if *mapper > 0 {
					*mapper--
					continue ElementLoop
				}
			case skipWhileFunc:
				if !skipWhileIsOver && mapper(element) {
					continue ElementLoop
				}
				skipWhileIsOver = true
			}
		}
		if !f(element) {
			return
		}
	}
}
// ForEach consumes the iterator, invoking f on every element. (f's return
// value, if any, is ignored; iteration always runs to exhaustion.)
func (iter *Iterator) ForEach(f IterFunc) {
	iter.iter(func(e interface{}) bool {
		f(e)
		return true
	})
}
// Collect consumes the iterator and returns all produced elements as a slice.
func (iter *Iterator) Collect() []interface{} {
	var out []interface{}
	iter.iter(func(e interface{}) bool {
		out = append(out, e)
		return true
	})
	return out
}
// Reduce consumes the iterator, folding every element into the running
// state via f, and returns the final state.
func (iter *Iterator) Reduce(initialState interface{}, f ReduceFunc) interface{} {
	state := initialState
	iter.iter(func(e interface{}) bool {
		state = f(state, e)
		return true
	})
	return state
}
// Find consumes elements until f returns true and returns that element.
// It returns (nil, false) when no element matches.
func (iter *Iterator) Find(f PredicateFunc) (interface{}, bool) {
	var found interface{}
	matched := false
	iter.iter(func(e interface{}) bool {
		if !f(e) {
			return true
		}
		found, matched = e, true
		return false
	})
	return found, matched
}
// Min consumes the iterator and returns its minimum element under f
// (nil for an empty iterator).
func (iter *Iterator) Min(f LessFunc) interface{} {
	var min interface{}
	seen := false
	iter.iter(func(e interface{}) bool {
		if !seen || f(e, min) {
			min, seen = e, true
		}
		return true
	})
	return min
}
// Max consumes the iterator and returns its maximum element under f
// (nil for an empty iterator).
func (iter *Iterator) Max(f LessFunc) interface{} {
	var max interface{}
	seen := false
	iter.iter(func(e interface{}) bool {
		if !seen || f(max, e) {
			max, seen = e, true
		}
		return true
	})
	return max
}
// All consumes the iterator and reports whether f holds for every
// element. An empty iterator yields true. Iteration stops at the first
// failing element.
func (iter *Iterator) All(f PredicateFunc) bool {
	ok := true
	iter.iter(func(e interface{}) bool {
		ok = f(e)
		return ok
	})
	return ok
}
// Any consumes the iterator and reports whether f holds for at least one
// element. An empty iterator yields false. Iteration stops at the first
// matching element.
func (iter *Iterator) Any(f PredicateFunc) bool {
	found := false
	iter.iter(func(e interface{}) bool {
		found = f(e)
		return !found
	})
	return found
}
// Last consumes the whole iterator and returns its final element
// (nil when the iterator is empty).
func (iter *Iterator) Last() interface{} {
	var tail interface{}
	iter.iter(func(e interface{}) bool {
		tail = e
		return true
	})
	return tail
}
// Nth consumes the iterator and returns the n-th element (0-indexed).
// It returns (nil, false) when the iterator holds n or fewer elements.
func (iter *Iterator) Nth(n int) (interface{}, bool) {
	var nth interface{}
	found := false
	pos := 0
	iter.iter(func(e interface{}) bool {
		if pos != n {
			pos++
			return true
		}
		nth, found = e, true
		return false
	})
	return nth, found
}
// Count consumes elements and returns the length of elements.
func (iter *Iterator) Count() int {
count := 0
iter.iter(func(element interface{}) bool {
count++
return true
})
return count
} | consumer.go | 0.760295 | 0.433562 | consumer.go | starcoder |
package doc
import (
"regexp"
"sort"
"strconv"
"strings"
)
// PropertyListToRaw converts a list of PropertyEntry to the raw original
// document object. Entries are sorted first so that parent paths are
// rebuilt before their children. The root becomes a slice or a map
// depending on whether the first path segment is in array format.
func PropertyListToRaw(properties PropertyEntryList) interface{} {
	sort.Sort(properties)
	var rawObject interface{}
	for _, property := range properties {
		// DissectKeyURI yields the path segments and the leaf value type.
		_, keys, vType := property.DissectKeyURI()
		value := property.Value
		if hasArrayFormat(keys[0]) {
			index, capacity := splitArrayFormat(keys[0])
			// Arrays case: lazily allocate the root slice.
			if rawObject == nil {
				rawObject = make([]interface{}, capacity)
			}
			propertyListToRawArrays(index, rawObject, 0, keys, vType, value)
		} else {
			// Map case: lazily allocate the root map.
			if rawObject == nil {
				rawObject = map[string]interface{}{}
			}
			propertyListToRawMap(rawObject, 0, keys, vType, value)
		}
	}
	return rawObject
}
// propertyListToRawMap recursively analyzes a PropertyEntry's Key, building
// the equivalent structure in the raw document object. This deals with the
// case where the current root element in the path being analyzed is a Map.
func propertyListToRawMap(parentObject interface{}, curKeyIndex int, keys []string, valueType string, value []byte) {
	// Leaf object: decode the raw bytes per the recorded value type.
	if len(keys) == curKeyIndex+1 {
		switch object := parentObject.(type) {
		// Leaf object is a map.
		case map[string]interface{}:
			switch valueType {
			case "nil":
				object[keys[curKeyIndex]] = nil
			case "string":
				object[keys[curKeyIndex]] = string(value)
			case "bool":
				object[keys[curKeyIndex]] = string(value) == "true"
			case "float64":
				object[keys[curKeyIndex]] = BinaryToFloat64(value)
			}
		}
		// backtrack
		return
	}
	switch object := parentObject.(type) {
	// Intermediate node object is a map.
	case map[string]interface{}:
		if hasArrayFormat(keys[curKeyIndex+1]) {
			index, capacity := splitArrayFormat(keys[curKeyIndex+1])
			// Arrays case: allocate the child slice on first visit.
			if object[keys[curKeyIndex]] == nil {
				object[keys[curKeyIndex]] = make([]interface{}, capacity)
			}
			propertyListToRawArrays(index, object[keys[curKeyIndex]], curKeyIndex+1, keys, valueType, value)
		} else {
			// Map case: allocate the child map on first visit.
			if object[keys[curKeyIndex]] == nil {
				object[keys[curKeyIndex]] = map[string]interface{}{}
			}
			propertyListToRawMap(object[keys[curKeyIndex]], curKeyIndex+1, keys, valueType, value)
		}
	}
}
// propertyListToRawArrays recursively analyzes a PropertyEntry's Key,
// building the equivalent structure in the raw document object. This deals
// with the case where the current root element in the path being analyzed
// is an Array; curArrayIndex is the slot to fill in parentArray.
func propertyListToRawArrays(curArrayIndex int, parentArray interface{}, curKeyIndex int, keys []string, valueType string, value []byte) {
	// Leaf object: decode the raw bytes per the recorded value type.
	if len(keys) == curKeyIndex+1 {
		switch object := parentArray.(type) {
		case []interface{}:
			switch valueType {
			case "nil":
				object[curArrayIndex] = nil
			case "string":
				object[curArrayIndex] = string(value)
			case "bool":
				object[curArrayIndex] = string(value) == "true"
			case "float64":
				object[curArrayIndex] = BinaryToFloat64(value)
			}
		}
		// backtrack
		return
	}
	switch object := parentArray.(type) {
	// Intermediate node object is an array.
	case []interface{}:
		if hasArrayFormat(keys[curKeyIndex+1]) {
			index, capacity := splitArrayFormat(keys[curKeyIndex+1])
			// Arrays case: allocate the child slice on first visit.
			if object[curArrayIndex] == nil {
				object[curArrayIndex] = make([]interface{}, capacity)
			}
			propertyListToRawArrays(index, object[curArrayIndex], curKeyIndex+1, keys, valueType, value)
		} else {
			// Map case: allocate the child map on first visit.
			if object[curArrayIndex] == nil {
				object[curArrayIndex] = map[string]interface{}{}
			}
			propertyListToRawMap(object[curArrayIndex], curKeyIndex+1, keys, valueType, value)
		}
	}
}
// arrayRegExp matches array path segments of the form "[index.capacity]".
var arrayRegExp = regexp.MustCompile(`^\[\d+\.\d+]$`)

// hasArrayFormat checks if the current node of the path describes an
// array element, i.e. matches "[%d.%d]" where the first number is the
// element's index and the second the array's total capacity.
func hasArrayFormat(s string) bool {
	return arrayRegExp.MatchString(s)
}
// splitArrayFormat given a current array element, returns the associates index
// and total capacity.
func splitArrayFormat(s string) (index int, capacity int) {
if !hasArrayFormat(s) {
panic("not array format")
}
indexCapStr := strings.Trim(s, "[]")
valuesStr := strings.Split(indexCapStr, ".")
index, _ = strconv.Atoi(valuesStr[0])
capacity, _ = strconv.Atoi(valuesStr[1])
return index, capacity
} | pkg/doc/unmarshall.go | 0.658088 | 0.436742 | unmarshall.go | starcoder |
package exp
import "strings"
// Comparison operators usable in Predicates and Criteria. The BEGINS,
// ENDS, CONTAINS and CONTAINED BY operators are only valid for string
// values.
const (
	// OperatorGreaterThan represents a "greater than" comparison.
	OperatorGreaterThan = ">"
	// OperatorGreaterOrEqual represents a "greater or equal" comparison.
	OperatorGreaterOrEqual = ">="
	// OperatorEqual represents an "equals" comparison.
	OperatorEqual = "="
	// OperatorNotEqual represents a "not equals" comparison.
	OperatorNotEqual = "!="
	// OperatorLessOrEqual represents a "less or equal" comparison.
	OperatorLessOrEqual = "<="
	// OperatorLessThan represents a "less than" comparison.
	OperatorLessThan = "<"
	// OperatorBeginsWith represents a "begins with" comparison (strings only).
	OperatorBeginsWith = "BEGINS"
	// OperatorEndsWith represents an "ends with" comparison (strings only).
	OperatorEndsWith = "ENDS"
	// OperatorContains represents a "contains" comparison (strings only).
	OperatorContains = "CONTAINS"
	// OperatorContainedBy represents a "contained by" comparison (strings only).
	OperatorContainedBy = "CONTAINED BY"
)

// Operator normalizes value into one of the standard operator constants.
// Matching is case-insensitive and accepts the two-letter aliases GT, GE,
// EQ, NE, LE and LT; anything unrecognized falls back to OperatorEqual.
func Operator(value string) string {
	switch strings.ToUpper(value) {
	case OperatorGreaterThan, "GT":
		return OperatorGreaterThan
	case OperatorGreaterOrEqual, "GE":
		return OperatorGreaterOrEqual
	case OperatorNotEqual, "NE":
		return OperatorNotEqual
	case OperatorLessOrEqual, "LE":
		return OperatorLessOrEqual
	case OperatorLessThan, "LT":
		return OperatorLessThan
	case OperatorBeginsWith:
		return OperatorBeginsWith
	case OperatorEndsWith:
		return OperatorEndsWith
	case OperatorContains:
		return OperatorContains
	case OperatorContainedBy:
		return OperatorContainedBy
	default:
		// Covers OperatorEqual, "EQ", and any unknown input.
		return OperatorEqual
	}
}
package main
/*
Input layer, Hidden Layer, Output layer
Each layer has activations a[0] = X, a[2] = {a1[2]
a2[2]
a3[2]
a4[2]}
Activations are the outputs of each layer l
z[l] = w[l] * x[l] + b
a[l] = activate(z[l])
If there are L layers than out layer y = a[L]
*/
import (
"fmt"
"math"
"gonum.org/v1/gonum/mat"
)
// MatrixSumKeepDims overwrites every entry of m with the sum of all of
// m's entries (keeping its dimensions) and returns m.
func MatrixSumKeepDims(m *mat.Dense) *mat.Dense {
	rows, cols := m.Dims()
	total := 0.0
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			total += m.At(i, j)
		}
	}
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			m.Set(i, j, total)
		}
	}
	return m
}
// MatrixSum returns the sum of all entries of m.
func MatrixSum(m *mat.Dense) float64 {
	rows, cols := m.Dims()
	total := 0.0
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			total += m.At(i, j)
		}
	}
	return total
}
// main is a scratch/demo entry point exercising gonum/mat: dense matrix
// construction, multiplication, element-wise Apply with the activation
// helpers, a Layer forward pass, and the sum helpers above. Output is
// printed to stdout for eyeballing; nothing is asserted.
func main() {
	fmt.Println("Hello World!")
	// var Z mat.Dense
	// Z.Mul(l.W,APrev)
	// Z.Add(Z,l.b)
	// l.A = l.Activate(Z)
	// NewDense retuns a pointer to the new Mat object
	a := mat.NewDense(4, 3, []float64{
		1, 0, 0,
		1, 0, 1,
		0, 1, 1,
		1, 1, 1,
	})
	b := mat.NewDense(3, 1, []float64{
		0,
		0,
		1,
	})
	fmt.Println("a : ", mat.Formatted(a, mat.Prefix(" "), mat.Squeeze()))
	fmt.Println("b : ", mat.Formatted(b, mat.Prefix(" "), mat.Squeeze()))
	q := &a
	w := &b
	fmt.Println("a : ", mat.Formatted(*q, mat.Prefix(" "), mat.Squeeze()))
	fmt.Println("b : ", mat.Formatted(*w, mat.Prefix(" "), mat.Squeeze()))
	// This declaration return the actual object
	var c mat.Dense
	e := &c
	e.Mul(*q, *w)
	fmt.Println("e : ", mat.Formatted(e, mat.Prefix(" "), mat.Squeeze()))
	fmt.Println("c : ", mat.Formatted(&c, mat.Prefix(" "), mat.Squeeze()))
	var row, col = e.Dims()
	fmt.Println("dims(e): ", row, col)
	// Scale by 1/rows, i.e. a column-wise mean-style normalization.
	e.Scale(1/float64(row), e)
	fmt.Println("e : ", mat.Formatted(e, mat.Prefix(" "), mat.Squeeze()))
	fmt.Println("e sum : ", mat.Formatted(MatrixSumKeepDims(e), mat.Prefix(" "), mat.Squeeze()))
	fmt.Println("ReLU(0.9) : ", NewReLUFunc()(0, 0, -1))
	e.Apply(NewReLUFunc(), e)
	fmt.Println("e : ", mat.Formatted(e, mat.Prefix(" "), mat.Squeeze()))
	e.Apply(NewSigmoidFunc(), e)
	fmt.Println("e : ", mat.Formatted(e, mat.Prefix(" "), mat.Squeeze()))
	// Forward pass through a randomly initialized 3x3 layer.
	var layer = NewLayerRandomWeight(3, 3, NewReLUFunc())
	x := mat.NewDense(3, 1, []float64{
		1,
		1,
		1,
	})
	out := layer.linearActivationForward(x)
	fmt.Println("layer matrix out : ", mat.Formatted(out))
	e.Apply(func(i, j int, v float64) float64 { return math.Log(v) }, e)
	fmt.Println("test mat log : ", mat.Formatted(e))
	var test mat.Dense
	test.Mul(out, x.T())
	fmt.Println("test : ", mat.Formatted(&test))
	row, col = test.Dims()
	fmt.Println("test.Dims : ", row, col)
	fmt.Println("Dot Product : ", MatrixSum(&test))
	tmp1 := mat.NewDense(3, 3, make([]float64, 9))
	row, col = b.Dims()
	fmt.Println("b.Dims : ", row, col)
	row, col = x.Dims()
	fmt.Println("x.Dims : ", row, col)
	row, col = b.T().Dims()
	fmt.Println("b.T().Dims : ", row, col)
	//var row, _ = Y.Dims()
	x.Apply(func(i, j int, v float64) float64 { return math.Log(v) }, x)
	tmp1.Mul(x, b.T())
	fmt.Println("tmp1 : ", mat.Formatted(tmp1))
}
package main
import (
"adventofcodego/utils/characters"
"adventofcodego/utils/inputs"
"adventofcodego/utils/utils"
"fmt"
"strings"
)
// DAY selects the puzzle day for the shared runner.
var DAY int = 13

// dotmatrix is a sparse grid of dots plus its bounding width/height.
type dotmatrix struct {
	dots []point
	width int64
	height int64
}

// point is a 2D integer coordinate; it is also reused to encode a fold
// instruction, with the unused axis left at 0.
type point struct {
	x int64
	y int64
}
// parseInput splits the puzzle input into the initial dot matrix and the
// list of fold instructions (an x-fold has y==0 and vice versa).
func parseInput(input string) (matrix dotmatrix, folds []point) {
	sections := strings.Split(input, "\n\n")
	// First section: one "x,y" dot per line; track the bounding box.
	matrix.dots = make([]point, 0)
	for _, line := range strings.Split(sections[0], "\n") {
		xy := strings.Split(line, ",")
		p := point{x: inputs.ParseDecInt64(xy[0]), y: inputs.ParseDecInt64(xy[1])}
		matrix.dots = append(matrix.dots, p)
		if p.x > matrix.width {
			matrix.width = p.x
		}
		if p.y > matrix.height {
			matrix.height = p.y
		}
	}
	// Second section: "fold along axis=value" instructions.
	folds = make([]point, 0)
	for _, line := range strings.Split(sections[1], "\n") {
		axisVal := strings.Split(strings.Split(line, " ")[2], "=")
		if axisVal[0] == "x" {
			folds = append(folds, point{x: inputs.ParseDecInt64(axisVal[1])})
		} else {
			folds = append(folds, point{y: inputs.ParseDecInt64(axisVal[1])})
		}
	}
	return matrix, folds
}
// dotExists reports whether dot is already present in matrix.
// Linear scan; adequate for puzzle-sized inputs.
func dotExists(matrix dotmatrix, dot point) bool {
	for _, existing := range matrix.dots {
		if existing == dot {
			return true
		}
	}
	return false
}
// foldOn folds the matrix along the horizontal line y (when y != 0) or
// the vertical line x (when x != 0). Dots beyond the fold line are
// mirrored onto the kept half; coinciding dots collapse into one via the
// dotExists dedup check.
func foldOn(matrix dotmatrix, x int64, y int64) dotmatrix {
	var newmatrix dotmatrix
	if y != 0 {
		// Horizontal fold: mirror dots below the line upwards.
		newmatrix.width = matrix.width
		for _, dot := range matrix.dots {
			if dot.y > y {
				dot.y = (y*2 - dot.y)
			}
			if !dotExists(newmatrix, dot) {
				newmatrix.dots = append(newmatrix.dots, dot)
			}
		}
		newmatrix.height = y
	} else {
		// Vertical fold: mirror dots right of the line leftwards.
		newmatrix.height = matrix.height
		for _, dot := range matrix.dots {
			if dot.x > x {
				dot.x = (x*2 - dot.x)
			}
			if !dotExists(newmatrix, dot) {
				newmatrix.dots = append(newmatrix.dots, dot)
			}
		}
		newmatrix.width = x
	}
	return newmatrix
}
// printMatrix renders the matrix to stdout, one 'X' per dot (debug helper).
func printMatrix(matrix dotmatrix) {
	for y := int64(0); y < matrix.height; y++ {
		for x := int64(0); x < matrix.width; x++ {
			cell := " "
			if dotExists(matrix, point{x: x, y: y}) {
				cell = "X"
			}
			fmt.Print(cell)
		}
		fmt.Print("\n")
	}
}
// ToStrings splits the matrix into width/5+1 vertical strips of 5 columns
// (one letter glyph each) and renders every strip as an X/space bitmap
// string with one newline per matrix row, ready to be matched against the
// known letter patterns.
func (matrix dotmatrix) ToStrings() []string {
	result := make([]string, matrix.width/5+1)
	for y := int64(0); y < matrix.height; y++ {
		for x := int64(0); x < matrix.width; x++ {
			// Append this cell to the strip its column belongs to.
			if dotExists(matrix, point{x: x, y: y}) {
				result[x/5] = fmt.Sprintf("%sX", result[x/5])
			} else {
				result[x/5] = fmt.Sprintf("%s ", result[x/5])
			}
		}
		// Terminate the current row in every strip.
		for i := range result {
			result[i] = fmt.Sprintf("%s\n", result[i])
		}
	}
	return result
}
// part1 applies only the first fold instruction and returns the number
// of distinct dots that remain.
func part1(input string) interface{} {
	matrix, folds := parseInput(input)
	// Part 1 only wants the first fold (the original looped and broke
	// immediately); also guard against an instruction-free input.
	if len(folds) > 0 {
		matrix = foldOn(matrix, folds[0].x, folds[0].y)
	}
	return len(matrix.dots)
}
// resultTochars decodes the folded dot pattern into text by matching each
// 5-column strip against the known letter bitmaps in characters.CHARS.
// Strips with no matching bitmap are skipped.
func resultTochars(matrix dotmatrix) (result string) {
	for _, strip := range matrix.ToStrings() {
		if ch, ok := characters.CHARS[strip]; ok {
			result += string(ch)
		}
	}
	return result
}
// part2 applies every fold instruction in order and decodes the final dot
// pattern into the puzzle's answer string.
func part2(input string) interface{} {
	matrix, folds := parseInput(input)
	for _, fold := range folds {
		matrix = foldOn(matrix, fold.x, fold.y)
	}
	return resultTochars(matrix)
}
// main delegates to the shared advent-of-code runner, wiring up both
// part solvers for this day.
func main() {
	utils.Solve(part1, part2, DAY)
}
package stats
import "sort"
// StaticCollector aggregates registered counter, gauge and histogram
// getters by name so a point-in-time Snapshot can be taken via Get.
type StaticCollector struct {
	counters map[string]Int64VectorGetter
	gauges map[string]Int64VectorGetter
	histograms map[string]HistogramVectorGetter
}
// NewStaticCollector returns an empty, ready-to-use StaticCollector.
func NewStaticCollector() *StaticCollector {
	c := &StaticCollector{}
	c.counters = make(map[string]Int64VectorGetter)
	c.gauges = make(map[string]Int64VectorGetter)
	c.histograms = make(map[string]HistogramVectorGetter)
	return c
}
// Close satisfies the collector close contract; a StaticCollector holds
// no external resources, so there is nothing to release.
func (c *StaticCollector) Close() error { return nil }

// RegisterCounter exposes the counter getter g under the name n.
// Registering an existing name replaces the previous getter.
func (c *StaticCollector) RegisterCounter(n string, g Int64VectorGetter) {
	c.counters[n] = g
}

// RegisterGauge exposes the gauge getter g under the name n.
func (c *StaticCollector) RegisterGauge(n string, g Int64VectorGetter) {
	c.gauges[n] = g
}

// RegisterHistogram exposes the histogram getter g under the name n.
func (c *StaticCollector) RegisterHistogram(n string, g HistogramVectorGetter) {
	c.histograms[n] = g
}
// Int64Snapshot is a named, labeled point-in-time copy of a single
// counter or gauge value.
type Int64Snapshot struct {
	Name string
	Labels map[string]string
	Value int64
}
// int64snapshots expands one named getter into snapshots, one per
// labeled value it currently holds (nil when it holds none).
func int64snapshots(n string, g Int64VectorGetter) []Int64Snapshot {
	var sns []Int64Snapshot
	for _, v := range g.Get() {
		sns = append(sns, Int64Snapshot{Name: n, Labels: v.Tags, Value: v.Value})
	}
	return sns
}
// HistogramSnapshot is a named point-in-time copy of one histogram value.
type HistogramSnapshot struct {
	Name string
	Value HistogramValue
}

// Snapshot is the full point-in-time view produced by StaticCollector.Get.
type Snapshot struct {
	Counters []Int64Snapshot
	Gauges []Int64Snapshot
	Histograms []HistogramSnapshot
}
// Int64Snapshots implements sort.Interface for deterministic snapshot
// ordering: by Name, then Value, then number of labels.
type Int64Snapshots []Int64Snapshot

func (ss Int64Snapshots) Len() int { return len(ss) }

// Less orders by Name, then Value, then label count. Two snapshots with
// equal Name/Value and the same number of (different) labels compare as
// unordered, so their relative order is unspecified.
func (ss Int64Snapshots) Less(i int, j int) bool {
	if ss[i].Name != ss[j].Name {
		return ss[i].Name < ss[j].Name
	}
	if ss[i].Value != ss[j].Value {
		return ss[i].Value < ss[j].Value
	}
	return len(ss[i].Labels) < len(ss[j].Labels)
}

func (ss Int64Snapshots) Swap(i int, j int) {
	ss[j], ss[i] = ss[i], ss[j]
}
func (c *StaticCollector) Get() Snapshot {
var (
counters, gauges []Int64Snapshot
histograms []HistogramSnapshot
)
for n, g := range c.counters {
counters = append(counters, int64snapshots(n, g)...)
}
for n, g := range c.gauges {
gauges = append(gauges, int64snapshots(n, g)...)
}
for n, g := range c.histograms {
for _, v := range g.Get() {
histograms = append(histograms, HistogramSnapshot{Name: n, Value: *v})
}
}
sort.Sort(Int64Snapshots(counters))
sort.Sort(Int64Snapshots(gauges))
return Snapshot{Counters: counters, Gauges: gauges, Histograms: histograms}
} | vendor/github.com/upfluence/stats/static_collector.go | 0.644561 | 0.469034 | static_collector.go | starcoder |
package config
// UnitType identifies a display unit for panel values (bytes, percent,
// seconds, currencies, …).
type UnitType string

// String returns the raw unit identifier.
func (t UnitType) String() string {
	return string(t)
}
// UnitType enums
const (
// Misc
UnitTypeNone UnitType = "none"
UnitTypeString UnitType = "string"
UnitTypeShort UnitType = "short"
UnitTypePercent0100 UnitType = "percent"
UnitTypePercent0010 UnitType = "percentunit"
UnitTypeHumidityH UnitType = "humidity"
UnitTypeDecibel UnitType = "dB"
UnitTypeHexadecimal0x UnitType = "hex0x"
UnitTypeHexadecimal UnitType = "hex"
UnitTypeScientificnotation UnitType = "sci"
UnitTypeLocaleformat UnitType = "locale"
UnitTypePixels UnitType = "pixel"
// Acceleration
UnitTypeMeterssec UnitType = "accMS2"
UnitTypeFeetsec UnitType = "accFS2"
UnitTypeGunit UnitType = "accG"
// Angle
UnitTypeDegrees UnitType = "degree"
UnitTypeRadians UnitType = "radian"
UnitTypeGradian UnitType = "grad"
UnitTypeArcMinutes UnitType = "arcmin"
UnitTypeArcSeconds UnitType = "arcsec"
// Area
UnitTypeSquareMetersm UnitType = "areaM2"
UnitTypeSquareFeetft UnitType = "areaF2"
UnitTypeSquareMilesmi UnitType = "areaMI2"
// Computation
UnitTypeFLOPs UnitType = "flops"
UnitTypeMFLOPs UnitType = "mflops"
UnitTypeGFLOPs UnitType = "gflops"
UnitTypeTFLOPs UnitType = "tflops"
UnitTypePFLOPs UnitType = "pflops"
UnitTypeEFLOPs UnitType = "eflops"
UnitTypeZFLOPs UnitType = "zflops"
UnitTypeYFLOPs UnitType = "yflops"
// Concentration
UnitTypePartspermillionppm UnitType = "ppm"
UnitTypePartsperbillionppb UnitType = "conppb"
UnitTypeNanogrampercubicmeterngm UnitType = "conngm3"
UnitTypeNanogrampernormalcubicmeterngNm UnitType = "conngNm3"
UnitTypeMicrogrampercubicmetergm UnitType = "conμgm3"
UnitTypeMicrogrampernormalcubicmetergNm UnitType = "conμgNm3"
UnitTypeMilligrampercubicmetermgm UnitType = "conmgm3"
UnitTypeMilligrampernormalcubicmetermgNm UnitType = "conmgNm3"
UnitTypeGrampercubicmetergm UnitType = "congm3"
UnitTypeGrampernormalcubicmetergNm UnitType = "congNm3"
UnitTypeMilligramsperdecilitremgdL UnitType = "conmgdL"
UnitTypeMillimolesperlitremmolL UnitType = "conmmolL"
// Currency
UnitTypeDollars UnitType = "currencyUSD"
UnitTypePounds UnitType = "currencyGBP"
UnitTypeEuro UnitType = "currencyEUR"
UnitTypeYen UnitType = "currencyJPY"
UnitTypeRubles UnitType = "currencyRUB"
UnitTypeHryvnias UnitType = "currencyUAH"
UnitTypeRealR UnitType = "currencyBRL"
UnitTypeDanishKronekr UnitType = "currencyDKK"
UnitTypeIcelandicKrnakr UnitType = "currencyISK"
UnitTypeNorwegianKronekr UnitType = "currencyNOK"
UnitTypeSwedishKronakr UnitType = "currencySEK"
UnitTypeCzechkorunaczk UnitType = "currencyCZK"
UnitTypeSwissfrancCHF UnitType = "currencyCHF"
UnitTypePolishZotyPLN UnitType = "currencyPLN"
UnitTypeBitcoin UnitType = "currencyBTC"
UnitTypeMilliBitcoin UnitType = "currencymBTC"
UnitTypeMicroBitcoin UnitType = "currencyμBTC"
UnitTypeSouthAfricanRandR UnitType = "currencyZAR"
UnitTypeIndianRupee UnitType = "currencyINR"
UnitTypeSouthKoreanWon UnitType = "currencyKRW"
UnitTypeIndonesianRupiahRp UnitType = "currencyIDR"
UnitTypePhilippinePesoPHP UnitType = "currencyPHP"
UnitTypeVietnameseDongVND UnitType = "currencyVND"
// Data
UnitTypeBytesIEC UnitType = "bytes"
UnitTypeBytesSI UnitType = "decbytes"
UnitTypeBitsIEC UnitType = "bits"
UnitTypeBitsSI UnitType = "decbits"
UnitTypeKibibytes UnitType = "kbytes"
UnitTypeKilobytes UnitType = "deckbytes"
UnitTypeMebibytes UnitType = "mbytes"
UnitTypeMegabytes UnitType = "decmbytes"
UnitTypeGibibytes UnitType = "gbytes"
UnitTypeGigabytes UnitType = "decgbytes"
UnitTypeTebibytes UnitType = "tbytes"
UnitTypeTerabytes UnitType = "dectbytes"
UnitTypePebibytes UnitType = "pbytes"
UnitTypePetabytes UnitType = "decpbytes"
// Data rate
UnitTypePacketssec UnitType = "pps"
UnitTypeBytessecIEC UnitType = "binBps"
UnitTypeBytessecSI UnitType = "Bps"
UnitTypeBitssecIEC UnitType = "binbps"
UnitTypeBitssecSI UnitType = "bps"
UnitTypeKibibytessec UnitType = "KiBs"
UnitTypeKibibitssec UnitType = "Kibits"
UnitTypeKilobytessec UnitType = "KBs"
UnitTypeKilobitssec UnitType = "Kbits"
UnitTypeMibibytessec UnitType = "MiBs"
UnitTypeMibibitssec UnitType = "Mibits"
UnitTypeMegabytessec UnitType = "MBs"
UnitTypeMegabitssec UnitType = "Mbits"
UnitTypeGibibytessec UnitType = "GiBs"
UnitTypeGibibitssec UnitType = "Gibits"
UnitTypeGigabytessec UnitType = "GBs"
UnitTypeGigabitssec UnitType = "Gbits"
UnitTypeTebibytessec UnitType = "TiBs"
UnitTypeTebibitssec UnitType = "Tibits"
UnitTypeTerabytessec UnitType = "TBs"
UnitTypeTerabitssec UnitType = "Tbits"
UnitTypePetibytessec UnitType = "PiBs"
UnitTypePetibitssec UnitType = "Pibits"
UnitTypePetabytessec UnitType = "PBs"
UnitTypePetabitssec UnitType = "Pbits"
// Date & time
UnitTypeDatetimeISO UnitType = "dateTimeAsIso"
UnitTypeDatetimeISONodateiftoday UnitType = "dateTimeAsIsoNoDateIfToday"
UnitTypeDatetimeUS UnitType = "dateTimeAsUS"
UnitTypeDatetimeUSNodateiftoday UnitType = "dateTimeAsUSNoDateIfToday"
UnitTypeDatetimelocal UnitType = "dateTimeAsLocal"
UnitTypeDatetimelocalNodateiftoday UnitType = "dateTimeAsLocalNoDateIfToday"
UnitTypeDatetimedefault UnitType = "dateTimeAsSystem"
UnitTypeFromNow UnitType = "dateTimeFromNow"
// Energy
UnitTypeWattW UnitType = "watt"
UnitTypeKilowattkW UnitType = "kwatt"
UnitTypeMegawattMW UnitType = "megwatt"
UnitTypeGigawattGW UnitType = "gwatt"
UnitTypeMilliwattmW UnitType = "mwatt"
UnitTypeWattpersquaremeterWm UnitType = "Wm2"
UnitTypeVoltampereVA UnitType = "voltamp"
UnitTypeKilovoltamperekVA UnitType = "kvoltamp"
UnitTypeVoltamperereactivevar UnitType = "voltampreact"
UnitTypeKilovoltamperereactivekvar UnitType = "kvoltampreact"
UnitTypeWatthourWh UnitType = "watth"
UnitTypeWatthourperKilogramWhkg UnitType = "watthperkg"
UnitTypeKilowatthourkWh UnitType = "kwatth"
UnitTypeKilowattminkWm UnitType = "kwattm"
UnitTypeAmperehourAh UnitType = "amph"
UnitTypeKiloamperehourkAh UnitType = "kamph"
UnitTypeMilliamperehourmAh UnitType = "mamph"
UnitTypeJouleJ UnitType = "joule"
UnitTypeElectronvolteV UnitType = "ev"
UnitTypeAmpereA UnitType = "amp"
UnitTypeKiloamperekA UnitType = "kamp"
UnitTypeMilliamperemA UnitType = "mamp"
UnitTypeVoltV UnitType = "volt"
UnitTypeKilovoltkV UnitType = "kvolt"
UnitTypeMillivoltmV UnitType = "mvolt"
UnitTypeDecibelmilliwattdBm UnitType = "dBm"
UnitTypeOhm UnitType = "ohm"
UnitTypeKiloohmk UnitType = "kohm"
UnitTypeMegaohmM UnitType = "Mohm"
UnitTypeFaradF UnitType = "farad"
UnitTypeMicrofaradF UnitType = "µfarad"
UnitTypeNanofaradnF UnitType = "nfarad"
UnitTypePicofaradpF UnitType = "pfarad"
UnitTypeFemtofaradfF UnitType = "ffarad"
UnitTypeHenryH UnitType = "henry"
UnitTypeMillihenrymH UnitType = "mhenry"
UnitTypeMicrohenryH UnitType = "µhenry"
UnitTypeLumensLm UnitType = "lumens"
// Flow
UnitTypeGallonsmingpm UnitType = "flowgpm"
UnitTypeCubicmetersseccms UnitType = "flowcms"
UnitTypeCubicfeetseccfs UnitType = "flowcfs"
UnitTypeCubicfeetmincfm UnitType = "flowcfm"
UnitTypeLitrehour UnitType = "litreh"
UnitTypeLitreminLmin UnitType = "flowlpm"
UnitTypeMilliLitreminmLmin UnitType = "flowmlpm"
UnitTypeLuxlx UnitType = "lux"
// Force
UnitTypeNewtonmetersNm UnitType = "forceNm"
UnitTypeKilonewtonmeterskNm UnitType = "forcekNm"
UnitTypeNewtonsN UnitType = "forceN"
UnitTypeKilonewtonskN UnitType = "forcekN"
// Hash rate
UnitTypeHashessec UnitType = "Hs"
UnitTypeKilohashessec UnitType = "KHs"
UnitTypeMegahashessec UnitType = "MHs"
UnitTypeGigahashessec UnitType = "GHs"
UnitTypeTerahashessec UnitType = "THs"
UnitTypePetahashessec UnitType = "PHs"
UnitTypeExahashessec UnitType = "EHs"
// Mass
UnitTypeMilligrammg UnitType = "massmg"
UnitTypeGramg UnitType = "massg"
UnitTypeKilogramkg UnitType = "masskg"
UnitTypeMetrictont UnitType = "masst"
// Length
UnitTypeMillimetermm UnitType = "lengthmm"
UnitTypeFeetft UnitType = "lengthft"
UnitTypeMeterm UnitType = "lengthm"
UnitTypeKilometerkm UnitType = "lengthkm"
UnitTypeMilemi UnitType = "lengthmi"
// Pressure
UnitTypeMillibars UnitType = "pressurembar"
UnitTypeBars UnitType = "pressurebar"
UnitTypeKilobars UnitType = "pressurekbar"
UnitTypePascals UnitType = "pressurepa"
UnitTypeHectopascals UnitType = "pressurehpa"
UnitTypeKilopascals UnitType = "pressurekpa"
UnitTypeInchesofmercury UnitType = "pressurehg"
UnitTypePSI UnitType = "pressurepsi"
// Radiation
UnitTypeBecquerelBq UnitType = "radbq"
UnitTypeCurieCi UnitType = "radci"
UnitTypeGrayGy UnitType = "radgy"
UnitTypeRad UnitType = "radrad"
UnitTypeSievertSv UnitType = "radsv"
UnitTypeMilliSievertmSv UnitType = "radmsv"
UnitTypeMicroSievertSv UnitType = "radusv"
UnitTypeRem UnitType = "radrem"
UnitTypeExposureCkg UnitType = "radexpckg"
UnitTypeRoentgenR UnitType = "radr"
UnitTypeSieverthourSvh UnitType = "radsvh"
UnitTypeMilliSieverthourmSvh UnitType = "radmsvh"
UnitTypeMicroSieverthourSvh UnitType = "radusvh"
// Rotational Speed
UnitTypeRevolutionsperminuterpm UnitType = "rotrpm"
UnitTypeHertzHz UnitType = "rothz"
UnitTypeRadianspersecondrads UnitType = "rotrads"
UnitTypeDegreesperseconds UnitType = "rotdegs"
// Temperature
UnitTypeCelsiusC UnitType = "celsius"
UnitTypeFahrenheitF UnitType = "fahrenheit"
UnitTypeKelvinK UnitType = "kelvin"
// Time
UnitTypeHertz1s UnitType = "hertz"
UnitTypeNanosecondsns UnitType = "ns"
UnitTypeMicrosecondss UnitType = "µs"
UnitTypeMillisecondsms UnitType = "ms"
UnitTypeSecondss UnitType = "s"
UnitTypeMinutesm UnitType = "m"
UnitTypeHoursh UnitType = "h"
UnitTypeDaysd UnitType = "d"
UnitTypeDurationms UnitType = "dtdurationms"
UnitTypeDurations UnitType = "dtdurations"
UnitTypeDurationhhmmss UnitType = "dthms"
UnitTypeDurationdhhmmss UnitType = "dtdhms"
UnitTypeTimetickss100 UnitType = "timeticks"
UnitTypeClockms UnitType = "clockms"
UnitTypeClocks UnitType = "clocks"
// Throughput
UnitTypeCountsseccps UnitType = "cps"
UnitTypeOpssecops UnitType = "ops"
UnitTypeRequestssecrps UnitType = "reqps"
UnitTypeReadssecrps UnitType = "rps"
UnitTypeWritessecwps UnitType = "wps"
UnitTypeIOopsseciops UnitType = "iops"
UnitTypeCountsmincpm UnitType = "cpm"
UnitTypeOpsminopm UnitType = "opm"
UnitTypeReadsminrpm UnitType = "rpm"
UnitTypeWritesminwpm UnitType = "wpm"
// Velocity
UnitTypeMeterssecondms UnitType = "velocityms"
UnitTypeKilometershourkmh UnitType = "velocitykmh"
UnitTypeMileshourmph UnitType = "velocitymph"
UnitTypeKnotkn UnitType = "velocityknot"
// Volume
UnitTypeMillilitremL UnitType = "mlitre"
UnitTypeLitreL UnitType = "litre"
UnitTypeCubicmeter UnitType = "m3"
UnitTypeNormalcubicmeter UnitType = "Nm3"
UnitTypeCubicdecimeter UnitType = "dm3"
UnitTypeGallons UnitType = "gallons"
) | pkg/hyperdash/config/enum.go | 0.57523 | 0.446857 | enum.go | starcoder |
package shape
import (
"fmt"
"io"
"math"
"github.com/gregoryv/go-design/xy"
)
// NewArrow returns an arrow from (x1,y1) to (x2,y2) with a default
// triangular head anchored at the end point and the CSS class "arrow".
func NewArrow(x1, y1, x2, y2 int) *Arrow {
	return &Arrow{
		Start: xy.Position{x1, y1},
		End: xy.Position{x2, y2},
		Head: NewTriangle(x2, y2, "arrow-head"),
		class: "arrow",
	}
}
// Arrow is a straight line from Start to End, optionally decorated
// with Tail and Head shapes that are rotated to match its direction.
type Arrow struct {
	Start xy.Position
	End xy.Position
	// Tail is drawn at Start and Head at End; either may be nil.
	Tail Shape
	Head Shape
	// class is the CSS class applied to the path and its markers.
	class string
}
// String returns a short human-readable description of the arrow.
func (a *Arrow) String() string {
	return fmt.Sprintf("Arrow from %v to %v", a.Start, a.End)
}
// WriteSvg writes the arrow to out as SVG: a line path, then the
// optional tail and head shapes, each wrapped in a rotation group so
// the marker points along the arrow.
// NOTE(review): newTagPrinter appears to hand back an error pointer
// that accumulates print failures; the final dereference returns the
// last recorded error (presumably nil on success) — confirm against
// newTagPrinter's implementation.
func (arrow *Arrow) WriteSvg(out io.Writer) error {
	w, err := newTagPrinter(out)
	x1, y1 := arrow.Start.XY()
	x2, y2 := arrow.End.XY()
	w.printf(`<path class="%s" d="M%v,%v L%v,%v" />`, arrow.class, x1, y1, x2, y2)
	w.print("\n")
	if arrow.Tail != nil {
		// Rotate the tail around the start point.
		w.printf(`<g transform="rotate(%v %v %v)">`, arrow.angle(), x1, y1)
		alignTail(arrow.Tail, x1, y1)
		arrow.Tail.SetClass(arrow.class + "-tail")
		arrow.Tail.WriteSvg(out)
		w.print("</g>\n")
	}
	if arrow.Head != nil {
		// Rotate the head around the end point.
		w.printf(`<g transform="rotate(%v %v %v)">`, arrow.angle(), x2, y2)
		arrow.Head.SetX(arrow.End.X)
		arrow.Head.SetY(arrow.End.Y)
		arrow.Head.SetClass(arrow.class + "-head")
		arrow.Head.WriteSvg(out)
		w.print("</g>\n")
	}
	return *err
}
// alignTail positions the tail shape s at (x, y). Circles are shifted
// up by their radius so the circle's rim, not its top-left anchor,
// touches the line's start point.
func alignTail(s Shape, x, y int) {
	switch s := s.(type) {
	case *Circle:
		s.SetX(x)
		s.SetY(y - s.Radius)
	default:
		s.SetX(x)
		s.SetY(y)
	}
}
// absAngle returns the absolute value of the arrow's rotation angle
// in degrees.
func (arrow *Arrow) absAngle() float64 {
	return math.Abs(float64(arrow.angle()))
}
// angle returns degrees the head of an arrow should rotate depending
// on direction.
//
// Axis-aligned arrows are handled with fixed angles; diagonal arrows
// fall into one of four quadrant cases (DirQ1..DirQ4) and compute
// atan of the legs of the right triangle spanned by the endpoints.
// A zero-length arrow yields 0.
func (arrow *Arrow) angle() int {
	var (
		start = arrow.Start
		end = arrow.End
		// straight arrows
		right = start.LeftOf(end) && start.Y == end.Y
		left = start.RightOf(end) && start.Y == end.Y
		down = start.Above(end) && start.X == end.X
		up = start.Below(end) && start.X == end.X
	)
	switch {
	case right: // most frequent arrow on top
	case left:
		return 180
	case down:
		return 90
	case up:
		return -90
	case arrow.DirQ1():
		a := float64(end.Y - start.Y)
		b := float64(end.X - start.X)
		A := math.Atan(a / b)
		return radians2degrees(A)
	case arrow.DirQ2():
		a := float64(end.Y - start.Y)
		b := float64(start.X - end.X)
		A := math.Atan(a / b)
		return 180 - radians2degrees(A)
	case arrow.DirQ3():
		a := float64(start.Y - end.Y)
		b := float64(start.X - end.X)
		A := math.Atan(a / b)
		return radians2degrees(A) + 180
	case arrow.DirQ4():
		a := float64(start.Y - end.Y)
		b := float64(end.X - start.X)
		A := math.Atan(a / b)
		return -radians2degrees(A)
	}
	return 0
}
// DirQ1 returns true if the arrow points to the bottom-right
// quadrant (screen coordinates: Y grows downwards).
func (a *Arrow) DirQ1() bool {
	start, end := a.endpoints()
	return start.LeftOf(end) && end.Below(start)
}
// DirQ2 returns true if the arrow points to the bottom-left
// quadrant.
func (a *Arrow) DirQ2() bool {
	start, end := a.endpoints()
	return start.RightOf(end) && end.Below(start)
}
// DirQ3 returns true if the arrow points to the top-left
// quadrant.
func (a *Arrow) DirQ3() bool {
	start, end := a.endpoints()
	return start.RightOf(end) && end.Above(start)
}
// DirQ4 returns true if the arrow points to the top-right
// quadrant.
func (a *Arrow) DirQ4() bool {
	start, end := a.endpoints()
	return start.LeftOf(end) && end.Above(start)
}
// endpoints returns the start and end positions of the arrow.
func (arrow *Arrow) endpoints() (xy.Position, xy.Position) {
	return arrow.Start, arrow.End
}
// radians2degrees converts an angle in radians to whole degrees,
// truncating toward zero.
func radians2degrees(A float64) int {
	degrees := A * 180 / math.Pi
	return int(degrees)
}
// Height returns the vertical extent of the arrow's bounding box.
func (arrow *Arrow) Height() int {
	return intAbs(arrow.Start.Y - arrow.End.Y)
}
// Width returns the horizontal extent of the arrow's bounding box.
func (arrow *Arrow) Width() int {
	return intAbs(arrow.Start.X - arrow.End.X)
}
// Position returns the coordinates of the arrow's start point.
func (arrow *Arrow) Position() (int, int) {
	return arrow.Start.XY()
}
// SetX moves the arrow horizontally so its start point lands at x.
// The end point is shifted by the same amount, preserving the
// arrow's length and direction.
func (arrow *Arrow) SetX(x int) {
	shift := x - arrow.Start.X
	arrow.Start.X += shift
	arrow.End.X += shift
}
// SetY moves the arrow vertically so its start point lands at y.
// The end point is shifted by the same amount, preserving the
// arrow's length and direction.
func (arrow *Arrow) SetY(y int) {
	shift := y - arrow.Start.Y
	arrow.Start.Y += shift
	arrow.End.Y += shift
}
// Direction reports whether the arrow runs left-to-right (LR) or
// right-to-left (RL), judged purely by the X coordinates.
func (arrow *Arrow) Direction() Direction {
	if arrow.Start.LeftOf(arrow.End) {
		return LR
	}
	return RL
}
// SetClass sets the CSS class used when rendering the arrow and its markers.
func (arrow *Arrow) SetClass(c string) { arrow.class = c }
// NewArrowBetween returns an arrow connecting the centers of shapes
// a and b. When either shape implements the Edge interface, the
// corresponding endpoint is pulled back to the shape's edge so the
// arrow stops at the outline rather than the center.
func NewArrowBetween(a, b Shape) *Arrow {
	ax, ay := a.Position()
	bx, by := b.Position()
	// From center to center
	x1 := ax + a.Width()/2
	y1 := ay + a.Height()/2
	x2 := bx + b.Width()/2
	y2 := by + b.Height()/2
	arrow := NewArrow(x1, y1, x2, y2)
	bs, ok := b.(Edge)
	if ok {
		p := bs.Edge(arrow.Start)
		arrow.End.X = p.X
		arrow.End.Y = p.Y
	}
	as, ok := a.(Edge)
	if ok {
		p := as.Edge(arrow.End)
		arrow.Start.X = p.X
		arrow.Start.Y = p.Y
	}
	return arrow
}
package gomcts
import (
"math"
"math/rand"
)
// node is a single node of the Monte Carlo search tree.
type node struct {
	parent *node
	// A fully expanded node will have children length equal to the initial
	// length of unexpandedMoves, and the latter will be empty
	children []*node
	// The game move which created this node
	move Move
	// The game state associated with this node
	state GameState
	// The game moves which are left to be explored
	unexpandedMoves []Move
	// The total accrued score for this node
	score float64
	// The total simulations which backpropagated through this node
	visits int
}
// newNode creates a tree node for state, recording the move that led
// to it and seeding the unexpanded-move list with all legal actions.
func newNode(parent *node, state GameState, causingMove Move) node {
	return node{
		parent: parent,
		state: state,
		move: causingMove,
		children: []*node{},
		unexpandedMoves: state.GetLegalActions(),
	}
}
// getUtc returns the UCT (Upper Confidence bound applied to Trees)
// value of n: the mean score (exploitation) plus an exploration term
// scaled by c that grows with the parent's visit count.
// Callers must ensure n has a parent and at least one visit,
// otherwise the result is NaN/Inf.
func (n *node) getUtc(c float64) float64 {
	// n.score is already a float64; the previous conversion was redundant.
	exploit := n.score / float64(n.visits)
	explore := c * math.Sqrt(math.Log(float64(n.parent.visits))/float64(n.visits))
	return exploit + explore
}
// isFullyExpanded reports whether every legal move from n's state has
// been expanded into a child node.
func (n *node) isFullyExpanded() bool {
	return len(n.unexpandedMoves) == 0
}
// isLeaf reports whether n represents a terminal (game-over) state.
func (n *node) isLeaf() bool {
	return n.state.IsGameEnded()
}
// selectNode performs the MCTS selection phase: descend from n,
// expanding the first not-fully-expanded node encountered, otherwise
// following the child with the best UCT value (exploration constant c),
// until a terminal node is reached.
func (n *node) selectNode(c float64) *node {
	for !n.isLeaf() {
		if !n.isFullyExpanded() {
			return n.expand()
		}
		n = n.getBestUtcChild(c)
	}
	return n
}
// getBestUtcChild returns the child of n with the highest UCT value,
// or nil when n has no children.
func (n *node) getBestUtcChild(c float64) *node {
	var best *node
	max := -math.MaxFloat64
	for _, child := range n.children {
		current := child.getUtc(c)
		if current > max {
			max = current
			best = child
		}
	}
	return best
}
// expand removes one unexpanded move at random, applies it to n's
// state, attaches the resulting child node to n and returns it.
// Must only be called when n.unexpandedMoves is non-empty.
func (n *node) expand() *node {
	// Pick a random remaining move and remove it, preserving order of
	// the rest.
	i := rand.Intn(len(n.unexpandedMoves))
	move := n.unexpandedMoves[i]
	n.unexpandedMoves = append(n.unexpandedMoves[:i], n.unexpandedMoves[i+1:]...)
	expandedChild := newNode(n, move.ApplyTo(n.state), move)
	n.children = append(n.children, &expandedChild)
	return &expandedChild
}
// simulate performs a random playout from n's state: legal moves are
// chosen uniformly at random until the game ends, and the final score
// from playerIndex's perspective is returned.
func (n *node) simulate(playerIndex int) float64 {
	currentState := n.state
	for !currentState.IsGameEnded() {
		moves := currentState.GetLegalActions()
		currentState = moves[rand.Intn(len(moves))].ApplyTo(currentState)
	}
	return currentState.GetScore(playerIndex)
}
// backpropagate adds score and one visit to n and every ancestor up
// to the root. The root only gets its visit count bumped — its score
// is never consulted during selection (getUtc reads scores of
// children only), so it is deliberately left untouched.
func (n *node) backpropagate(score float64) {
	for n.parent != nil {
		n.score += score
		n.visits++
		n = n.parent
	}
	n.visits++
}
package intsets
// bitsPerWord is the number of set-membership bits stored per backing word.
const bitsPerWord = 64
// Dense is an intsets representation, optimised for 64-bits.
// Bit j of words[i] records membership of the value i*bitsPerWord+j;
// only non-negative integers can be stored.
type Dense struct {
	words []uint64
}
// Copy sets s to the value of x, reusing s's backing storage when it
// is large enough.
func (s *Dense) Copy(x *Dense) {
	sz := len(x.words)
	s.ensure(sz)
	s.words = s.words[:sz]
	copy(s.words, x.words)
}
// AppendTo appends the elements of s to dst in ascending order and
// returns the extended slice.
func (s *Dense) AppendTo(dst []int) []int {
	for pos, word := range s.words {
		if word == 0 {
			continue
		}
		base := pos * bitsPerWord
		// Shift the word down one bit at a time, stopping early once
		// no set bits remain.
		for bit := 0; word != 0 && bit < bitsPerWord; bit++ {
			if word&1 != 0 {
				dst = append(dst, base+bit)
			}
			word >>= 1
		}
	}
	return dst
}
// IsEmpty reports whether the set s is empty.
func (s *Dense) IsEmpty() bool { return s.Len() == 0 }
// Len returns the number of elements in the set s (total population
// count of the backing words).
func (s *Dense) Len() int { return popcntSlice(s.words) }
// Clear removes all elements from the set, zeroing the backing words
// so capacity can be reused safely.
func (s *Dense) Clear() {
	for i := range s.words {
		s.words[i] = 0
	}
	s.words = s.words[:0]
}
// Insert adds x to the set s, growing the backing storage as needed.
// x must be non-negative.
func (s *Dense) Insert(x int) {
	pos, i := x/bitsPerWord, uint(x%bitsPerWord)
	s.ensure(pos + 1)
	// Setting the bit unconditionally is equivalent to the previous
	// test-then-set and drops the redundant branch.
	s.words[pos] |= uint64(1) << i
}
// UnionWith sets s to the union s ∪ x, and reports whether s grew.
func (s *Dense) UnionWith(x *Dense) bool {
	changed := false
	s.ensure(len(x.words))
	for pos, wx := range x.words {
		if ws := s.words[pos]; wx != ws {
			// wx differing from ws guarantees the OR adds at least one bit.
			s.words[pos] = ws | wx
			changed = true
		}
	}
	return changed
}
// IntersectionWith sets s to the intersection s ∩ x.
func (s *Dense) IntersectionWith(x *Dense) {
	sz, sx := len(s.words), len(x.words)
	if sx < sz {
		// Zero the tail before truncating so the spare capacity stays clean.
		for pos := sx; pos < sz; pos++ {
			s.words[pos] = 0
		}
		s.words = s.words[:sx]
	}
	for pos, wx := range x.words {
		if pos >= sz {
			break
		}
		s.words[pos] &= wx
	}
}
// DifferenceWith sets s to the difference s ∖ x.
func (s *Dense) DifferenceWith(x *Dense) {
	if s == x {
		// s ∖ s is empty; clearing up front also avoids aliasing in
		// the loop below.
		s.Clear()
		return
	}
	sz := len(s.words)
	for pos, wx := range x.words {
		if pos >= sz {
			break
		}
		// AND NOT: drop every bit that is present in x.
		s.words[pos] &^= wx
	}
}
// ensure grows s.words (zero-filled) so that len(s.words) >= sz.
func (s *Dense) ensure(sz int) {
	cp := cap(s.words)
	if delta := sz - cp; delta > 0 {
		// Extend through the full capacity, then append zeroed words.
		s.words = append(s.words[:cp], make([]uint64, delta)...)
	}
	if len(s.words) < sz {
		s.words = s.words[:sz]
	}
}
package dfl
import (
"fmt"
"strings"
"github.com/pkg/errors"
)
// Pipe is a BinaryOperator which represents the "|" pipe operation:
// the left node's result becomes the evaluation context of the right
// node, and the right node's result is the value of the pipe.
type Pipe struct {
	*BinaryOperator
}
// Last returns the right-most pipe in a right-leaning chain of pipes,
// or p itself when the right side is not a Pipe.
func (p Pipe) Last() Node {
	switch right := p.Right.(type) {
	case Pipe:
		return right.Last()
	}
	return p
}
// Dfl returns the DFL source form of the pipe. In pretty mode the
// right-hand side moves to a new line, except when either side is a
// simple literal (string, int, []byte or Null), which stays inline.
func (p Pipe) Dfl(quotes []string, pretty bool, tabs int) string {
	if pretty {
		switch p.Left.(type) {
		case *Literal:
			switch p.Left.(*Literal).Value.(type) {
			case string, int, []byte, Null:
				return strings.Repeat(" ", tabs) + p.Left.Dfl(quotes, pretty, tabs) + " | " + p.Right.Dfl(quotes, pretty, tabs)
			}
		}
		switch p.Right.(type) {
		case *Literal:
			switch p.Right.(*Literal).Value.(type) {
			case string, int, []byte, Null:
				return strings.Repeat(" ", tabs) + p.Left.Dfl(quotes, pretty, tabs) + " | " + p.Right.Dfl(quotes, pretty, tabs)
			}
		}
		return strings.Repeat(" ", tabs) + p.Left.Dfl(quotes, pretty, tabs) + " | " + "\n" + p.Right.Dfl(quotes, pretty, tabs)
	}
	return p.Left.Dfl(quotes, pretty, tabs) + " | " + p.Right.Dfl(quotes, pretty, tabs)
}
// Sql returns the SQL representation of this node as a string: the
// left side filters $TABLE in a subquery, the right side filters the
// subquery's rows.
// NOTE(review): the statement is assembled by string concatenation
// (hence the #nosec marker) — callers must ensure both operands
// render to safe SQL fragments.
func (p Pipe) Sql(pretty bool, tabs int) string {
	stmt := "SELECT * FROM (SELECT * FROM $TABLE " + p.Left.Sql(pretty, tabs) + ") as A" + fmt.Sprint(tabs) + " WHERE " + p.Right.Sql(pretty, tabs) + ";" // #nosec
	return stmt
}
// Map returns a map representation of the pipe node and its operands.
func (p Pipe) Map() map[string]interface{} {
	return p.BinaryOperator.Map("pipe", p.Left, p.Right)
}
// Compile returns a compiled version of this node.
// When both sides compile to literals, the pipe collapses to the
// right-hand literal (a pipe evaluates to its right side).
// Otherwise a pipe of the compiled children is returned.
// NOTE(review): the type switch matches the value type Literal, while
// sibling methods (see Dfl) assert *Literal — confirm this
// constant-folding branch is actually reachable.
func (p Pipe) Compile() Node {
	left := p.Left.Compile()
	right := p.Right.Compile()
	switch left.(type) {
	case Literal:
		switch right.(type) {
		case Literal:
			return Literal{Value: right.(Literal).Value}
		}
	}
	return Pipe{&BinaryOperator{Left: left, Right: right}}
}
// Evaluate runs the left node against ctx, then feeds its result in
// as the context for the right node; the right node's result is the
// value of the pipe. Errors from either side are wrapped with the
// pipe's source form.
func (p Pipe) Evaluate(vars map[string]interface{}, ctx interface{}, funcs FunctionMap, quotes []string) (map[string]interface{}, interface{}, error) {
	vars, lv, err := p.Left.Evaluate(vars, ctx, funcs, quotes)
	if err != nil {
		return vars, lv, errors.Wrap(err, "error processing left value of "+p.Dfl(quotes, false, 0))
	}
	vars, rv, err := p.Right.Evaluate(vars, lv, funcs, quotes)
	if err != nil {
		return vars, rv, errors.Wrap(err, "error processing right value of "+p.Dfl(quotes, false, 0))
	}
	return vars, rv, nil
}
package edwards25519
// NielsPoint is a point in precomputed (Niels) coordinates:
// (y+x, y-x, 2*d*x*y) for an affine point (x, y), as produced by
// SetExtended. This form makes mixed addition cheap.
type NielsPoint struct {
	YPlusX, YMinusX, XY2D FieldElement
}
// ScalarMultTable is a precomputed scalar multiplication table:
// 32 rows (one per radix-256 digit position) of 8 Niels points.
type ScalarMultTable [32][8]NielsPoint
// SetZero sets p to zero, the neutral element, and returns p.
// In Niels form the affine identity (0, 1) is (y+x, y-x, 2dxy) = (1, 1, 0).
func (p *NielsPoint) SetZero() *NielsPoint {
	p.YMinusX.SetOne()
	p.YPlusX.SetOne()
	p.XY2D.SetZero()
	return p
}
// Set sets p to q and returns p.
func (p *NielsPoint) Set(q *NielsPoint) *NielsPoint {
	p.YPlusX.Set(&q.YPlusX)
	p.YMinusX.Set(&q.YMinusX)
	p.XY2D.Set(&q.XY2D)
	return p
}
// ConditionalSet sets p to q if b == 1 and leaves p unchanged if
// b == 0; b must be 0 or 1. Returns p. Delegates to the field
// element's conditional set, so the operation does not branch on b.
func (p *NielsPoint) ConditionalSet(q *NielsPoint, b int32) *NielsPoint {
	p.YPlusX.ConditionalSet(&q.YPlusX, b)
	p.YMinusX.ConditionalSet(&q.YMinusX, b)
	p.XY2D.ConditionalSet(&q.XY2D, b)
	return p
}
// Neg sets p to -q and returns p. Negating an Edwards point swaps
// y+x with y-x and negates 2dxy.
func (p *NielsPoint) Neg(q *NielsPoint) *NielsPoint {
	p.YMinusX.Set(&q.YPlusX)
	p.YPlusX.Set(&q.YMinusX)
	p.XY2D.Neg(&q.XY2D)
	return p
}
// AddExtendedNiels sets p to q+r (mixed addition of an extended point
// q with a precomputed Niels point r) and returns p.
func (p *CompletedPoint) AddExtendedNiels(q *ExtendedPoint, r *NielsPoint) *CompletedPoint {
	var t0 FieldElement
	p.X.add(&q.Y, &q.X)
	p.Y.sub(&q.Y, &q.X)
	p.Z.Mul(&p.X, &r.YPlusX)
	p.Y.Mul(&p.Y, &r.YMinusX)
	p.T.Mul(&r.XY2D, &q.T)
	t0.add(&q.Z, &q.Z)
	p.X.sub(&p.Z, &p.Y)
	p.Y.add(&p.Z, &p.Y)
	p.Z.add(&t0, &p.T)
	p.T.sub(&t0, &p.T)
	return p
}
// SubExtendedNiels sets p to q-r (mixed subtraction; the mirror of
// AddExtendedNiels with the Niels factors swapped and the final Z/T
// terms exchanged) and returns p.
func (p *CompletedPoint) SubExtendedNiels(q *ExtendedPoint, r *NielsPoint) *CompletedPoint {
	var t0 FieldElement
	p.X.add(&q.Y, &q.X)
	p.Y.sub(&q.Y, &q.X)
	p.Z.Mul(&p.X, &r.YMinusX)
	p.Y.Mul(&p.Y, &r.YPlusX)
	p.T.Mul(&r.XY2D, &q.T)
	t0.add(&q.Z, &q.Z)
	p.X.sub(&p.Z, &p.Y)
	p.Y.add(&p.Z, &p.Y)
	p.Z.sub(&t0, &p.T)
	p.T.add(&t0, &p.T)
	return p
}
// SetExtended sets p to the Niels form of q, first normalising q to
// affine coordinates by multiplying through with 1/Z. Returns p.
// Note: performs a field inversion, so this is expensive and meant
// for precomputation (see ScalarMultTable.Compute).
func (p *NielsPoint) SetExtended(q *ExtendedPoint) *NielsPoint {
	var x, y, zInv FieldElement
	zInv.Inverse(&q.Z)
	x.Mul(&q.X, &zInv)
	y.Mul(&q.Y, &zInv)
	p.YPlusX.Add(&y, &x)
	p.YMinusX.Sub(&y, &x)
	p.XY2D.Mul(&x, &y)
	p.XY2D.Add(&p.XY2D, &p.XY2D)
	p.XY2D.Mul(&p.XY2D, &feD)
	return p
}
// Compute fills the table t with precomputed multiples of the point p.
// For digit position i, t[i][v-1] holds (v * 256^i) * p for v in 1..8,
// stored in Niels form so ScalarMult can add them cheaply.
func (t *ScalarMultTable) Compute(p *ExtendedPoint) {
	// Underscore-free names replace the previous c_pp/c_cp locals to
	// follow Go naming conventions; the computation is unchanged.
	var c, cp ExtendedPoint
	var cpp ProjectivePoint
	var ccp CompletedPoint
	cp.Set(p)
	for i := 0; i < 32; i++ {
		c.SetZero()
		for v := 0; v < 8; v++ {
			c.Add(&c, &cp)
			t[i][v].SetExtended(&c)
		}
		// Advance cp to the next digit position: c holds 8*cp, and five
		// doublings give 8*cp*32 = 256*cp.
		ccp.DoubleExtended(&c)
		cpp.SetCompleted(&ccp)
		ccp.DoubleProjective(&cpp)
		cpp.SetCompleted(&ccp)
		ccp.DoubleProjective(&cpp)
		cpp.SetCompleted(&ccp)
		ccp.DoubleProjective(&cpp)
		cpp.SetCompleted(&ccp)
		ccp.DoubleProjective(&cpp)
		cp.SetCompleted(&ccp)
	}
}
// computeScalarWindow4 splits the little-endian scalar s into 64
// signed radix-16 digits w, each in [-8, 8), such that
// s = sum_i w[i] * 16^i (a final carry may land in w[63]).
func computeScalarWindow4(s *[32]byte, w *[64]int8) {
	// Split every byte into its low and high nibble.
	for i, b := range s {
		w[2*i] = int8(b & 15)
		w[2*i+1] = int8((b >> 4) & 15)
	}
	// Recode the digits into a signed range by pushing a carry upwards.
	var carry int8
	for i := 0; i < 63; i++ {
		d := w[i] + carry
		carry = (d + 8) >> 4
		w[i] = d - carry<<4
	}
	w[63] += carry
}
// ScalarMult sets p to s * q, where t was computed for q using
// t.Compute(q). The scalar is processed as 64 signed 4-bit windows:
// first the sum over all high nibbles, then four doublings (×16),
// then the sum over all low nibbles. Table lookups go through
// selectPoint, which scans the whole row without branching on the
// secret digit.
func (t *ScalarMultTable) ScalarMult(p *ExtendedPoint, s *[32]byte) {
	var w [64]int8
	computeScalarWindow4(s, &w)
	p.SetZero()
	var np NielsPoint
	var cp CompletedPoint
	var pp ProjectivePoint
	// Accumulate the high nibble of each byte.
	for i := int32(0); i < 32; i++ {
		t.selectPoint(&np, i, int32(w[2*i+1]))
		cp.AddExtendedNiels(p, &np)
		p.SetCompleted(&cp)
	}
	// Multiply the accumulator by 16 (four doublings).
	cp.DoubleExtended(p)
	pp.SetCompleted(&cp)
	cp.DoubleProjective(&pp)
	pp.SetCompleted(&cp)
	cp.DoubleProjective(&pp)
	pp.SetCompleted(&cp)
	cp.DoubleProjective(&pp)
	p.SetCompleted(&cp)
	// Accumulate the low nibble of each byte.
	for i := int32(0); i < 32; i++ {
		t.selectPoint(&np, i, int32(w[2*i]))
		cp.AddExtendedNiels(p, &np)
		p.SetCompleted(&cp)
	}
}
// VarTimeScalarMult is the variable-time variant of ScalarMult: zero
// windows are skipped entirely and lookups branch on the digit's
// sign (see varTimeSelectPoint). Faster, but the running time leaks
// information about s — only use when the scalar is not secret.
func (t *ScalarMultTable) VarTimeScalarMult(p *ExtendedPoint, s *[32]byte) {
	var w [64]int8
	computeScalarWindow4(s, &w)
	p.SetZero()
	var np NielsPoint
	var cp CompletedPoint
	var pp ProjectivePoint
	for i := int32(0); i < 32; i++ {
		if t.varTimeSelectPoint(&np, i, int32(w[2*i+1])) {
			cp.AddExtendedNiels(p, &np)
			p.SetCompleted(&cp)
		}
	}
	// Multiply the accumulator by 16 (four doublings).
	cp.DoubleExtended(p)
	pp.SetCompleted(&cp)
	cp.DoubleProjective(&pp)
	pp.SetCompleted(&cp)
	cp.DoubleProjective(&pp)
	pp.SetCompleted(&cp)
	cp.DoubleProjective(&pp)
	p.SetCompleted(&cp)
	for i := int32(0); i < 32; i++ {
		if t.varTimeSelectPoint(&np, i, int32(w[2*i])) {
			cp.AddExtendedNiels(p, &np)
			p.SetCompleted(&cp)
		}
	}
}
// selectPoint sets p to the table entry for the signed window digit b
// (in [-8, 8]) of row pos: |b| == 0 yields the neutral element,
// otherwise t[pos][|b|-1], negated when b < 0. All eight entries are
// scanned with conditional moves (negative/equal30 helpers) so no
// branch or memory access depends on the secret digit.
func (t *ScalarMultTable) selectPoint(p *NielsPoint, pos int32, b int32) {
	bNegative := negative(b)
	bAbs := b - (((-bNegative) & b) << 1)
	p.SetZero()
	for i := int32(0); i < 8; i++ {
		p.ConditionalSet(&t[pos][i], equal30(bAbs, i+1))
	}
	var negP NielsPoint
	negP.Neg(p)
	p.ConditionalSet(&negP, bNegative)
}
// varTimeSelectPoint is the variable-time counterpart of selectPoint:
// it reports false for b == 0 (nothing to add) and otherwise loads —
// and for negative digits negates — the entry directly. Branches on
// the digit, so only safe for non-secret scalars.
func (t *ScalarMultTable) varTimeSelectPoint(p *NielsPoint, pos int32, b int32) bool {
	if b == 0 {
		return false
	}
	if b < 0 {
		p.Neg(&t[pos][-b-1])
	} else {
		p.Set(&t[pos][b-1])
	}
	return true
}
package copy
import (
"errors"
"fmt"
"reflect"
"unsafe"
"github.com/golang/groupcache/lru"
"github.com/modern-go/reflect2"
)
// Copier is the exported handle for a copier instance.
// NOTE(review): this is a type alias for *copier, so the zero value
// is a nil pointer — construct instances with NewCopier.
type Copier = *copier

// copier performs deep copies between values, caching parsed
// per-type-pair descriptors in an LRU cache.
type copier struct {
	// cacheSize is not read in this file; presumably set by an Option — verify.
	cacheSize int
	typeCache *lru.Cache
	fieldParser FieldParseFunc
}
// NewCopier builds a copier with a 1000-entry type cache and
// name-based field matching, applies the given options, and then
// pre-registers the time <-> int64 converters.
func NewCopier(opts ...Option) *copier {
	c := &copier{
		typeCache: lru.New(1000),
		fieldParser: ParseFiledByName,
	}
	for _, opt := range opts {
		opt(c)
	}
	c.Register(
		TimeToInt64Copier{},
		Int64ToTimeCopier{},
	)
	return c
}
// Register adds the typed copiers to the cache, one cache entry per
// (dst, src) type pair each copier declares via Pairs.
func (c *copier) Register(copiers ...TypedCopier) {
	for _, co := range copiers {
		for _, pair := range co.Pairs() {
			c.typeCache.Add(pair, co)
		}
	}
}
// Unregister removes the typed copiers' (dst, src) pairs from the cache.
func (c *copier) Unregister(copiers ...TypedCopier) {
	for _, co := range copiers {
		for _, pair := range co.Pairs() {
			c.typeCache.Remove(pair)
		}
	}
}
// Copy deep-copies src into dst. Both arguments are stripped to their
// element types (so pointers are expected for dst) and the copy is
// performed through unsafe pointers to the underlying data.
func (c *copier) Copy(dst, src interface{}) error {
	var (
		dstType = indirectType(reflect.TypeOf(dst))
		srcType = indirectType(reflect.TypeOf(src))
		dstType2 = reflect2.Type2(dstType)
		srcType2 = reflect2.Type2(srcType)
		dstPtr = reflect2.PtrOf(dst)
		srcPtr = reflect2.PtrOf(src)
	)
	return c.copy(dstType2, srcType2, dstPtr, srcPtr)
}
// copy dispatches the copy for one (dst, src) type pair: direct
// assignment, field-by-field struct copy, or a registered TypedCopier.
// Returns an error when the pair is unsupported.
func (c *copier) copy(dstType, srcType reflect2.Type, dstPtr, srcPtr unsafe.Pointer) error {
	cpr := c.parse(dstType, srcType)
	if cpr == nil {
		return errors.New("unsupported copy")
	}
	switch v := cpr.(type) {
	case *assignCopier:
		v.Copy(dstType, srcType, dstPtr, srcPtr)
	case *structDescriptor:
		for _, f := range v.FieldDescriptors {
			// Propagate per-field failures; previously these errors
			// were silently discarded.
			if err := c.copy(f.DstType, f.SrcType, unsafe.Pointer(f.DstOffset+uintptr(dstPtr)), unsafe.Pointer(f.SrcOffset+uintptr(srcPtr))); err != nil {
				return err
			}
		}
	default:
		v.(TypedCopier).Copy(dstType, srcType, dstPtr, srcPtr)
	}
	return nil
}
// parse returns the copier descriptor for the (dst, src) type pair:
// a cached entry (possibly a registered TypedCopier), a fresh
// assignCopier for directly assignable kinds, or a structDescriptor
// for struct-to-struct copies. Returns nil when unsupported.
// New descriptors are stored back into the cache.
func (c *copier) parse(dstType, srcType reflect2.Type) interface{} {
	pair := TypePair{
		DstType: dstType.RType(),
		SrcType: srcType.RType(),
	}
	if cpr, ok := c.typeCache.Get(pair); ok {
		return cpr
	}
	if d := c.parseAssignable(dstType, srcType); d != nil {
		return c.save(pair, d)
	}
	if d := c.parseStructs(dstType, srcType); d != nil {
		return c.save(pair, d)
	}
	return nil
}
// parseAssignable returns an assignCopier when srcType can be
// assigned/converted to dstType directly, otherwise nil.
func (c *copier) parseAssignable(dstType, srcType reflect2.Type) *assignCopier {
	if c.isAssignable(dstType, srcType) {
		return &assignCopier{}
	}
	return nil
}
// isAssignable reports whether a value of srcType can be copied into
// dstType by plain assignment/conversion: Go-assignable types and
// same-kind non-struct values are always accepted, and the switch
// below whitelists numeric cross-kind conversions.
// NOTE(review): the table is asymmetric and several cases allow
// narrowing (e.g. Int accepted into Int8, Int64 into Uint32) —
// presumably intentional for this library's callers, but verify
// before relying on lossless copies.
func (c *copier) isAssignable(dstType, srcType reflect2.Type) bool {
	if dstType.AssignableTo(srcType) {
		return true
	}
	if dstType.Kind() == srcType.Kind() &&
		srcType.Kind() != reflect.Struct {
		return true
	}
	switch dstType.Kind() {
	case reflect.Int:
		return (srcType.Kind() == reflect.Int8) ||
			(srcType.Kind() == reflect.Int16) ||
			(srcType.Kind() == reflect.Int32) ||
			(srcType.Kind() == reflect.Int64) ||
			(srcType.Kind() == reflect.Uint8) ||
			(srcType.Kind() == reflect.Uint16) ||
			(srcType.Kind() == reflect.Uint32) ||
			(srcType.Kind() == reflect.Uint64)
	case reflect.Int8:
		return srcType.Kind() == reflect.Int
	case reflect.Int32:
		return (srcType.Kind() == reflect.Int8) ||
			(srcType.Kind() == reflect.Int16) ||
			(srcType.Kind() == reflect.Int) ||
			(srcType.Kind() == reflect.Uint8) ||
			(srcType.Kind() == reflect.Uint16) ||
			(srcType.Kind() == reflect.Uint32)
	case reflect.Int64:
		return (srcType.Kind() == reflect.Int8) ||
			(srcType.Kind() == reflect.Int16) ||
			(srcType.Kind() == reflect.Int32) ||
			(srcType.Kind() == reflect.Int) ||
			(srcType.Kind() == reflect.Uint8) ||
			(srcType.Kind() == reflect.Uint16) ||
			(srcType.Kind() == reflect.Uint32)
	case reflect.Uint8:
		return (srcType.Kind() == reflect.Int8) ||
			(srcType.Kind() == reflect.Int)
	case reflect.Uint32:
		return (srcType.Kind() == reflect.Int8) ||
			(srcType.Kind() == reflect.Int16) ||
			(srcType.Kind() == reflect.Int32) ||
			(srcType.Kind() == reflect.Int) ||
			(srcType.Kind() == reflect.Uint8) ||
			(srcType.Kind() == reflect.Uint16)
	case reflect.Uint64:
		return (srcType.Kind() == reflect.Int8) ||
			(srcType.Kind() == reflect.Int16) ||
			(srcType.Kind() == reflect.Int32) ||
			(srcType.Kind() == reflect.Int64) ||
			(srcType.Kind() == reflect.Int) ||
			(srcType.Kind() == reflect.Uint8) ||
			(srcType.Kind() == reflect.Uint16) ||
			(srcType.Kind() == reflect.Uint32)
	}
	return false
}
// parseStructs builds a structDescriptor for a struct-to-struct copy,
// pairing fields of dst and src whose parsed names AND embedding
// depths match (the "name-depth" key below). Field copiers for the
// matched pairs are parsed — and thereby cached — eagerly.
// Returns nil unless both types are structs.
func (c *copier) parseStructs(dstType, srcType reflect2.Type) *structDescriptor {
	if dstType.Kind() != reflect.Struct || srcType.Kind() != reflect.Struct {
		return nil
	}
	sd := &structDescriptor{
		DstType: dstType,
		SrcType: srcType,
	}
	dstFields := make(map[string]StructField)
	for _, field := range deepFields(dstType.Type1(), 0) {
		name := c.fieldParser(field)
		if name != "" {
			dstFields[fmt.Sprintf("%s-%d", name, field.depth)] = field
		}
	}
	srcFields := make(map[string]StructField)
	for _, field := range deepFields(srcType.Type1(), 0) {
		name := c.fieldParser(field)
		if name != "" {
			srcFields[fmt.Sprintf("%s-%d", name, field.depth)] = field
		}
	}
	for name, dstField := range dstFields {
		if srcField, ok := srcFields[name]; ok {
			// Warm the cache for the nested pair before recording it.
			c.parse(reflect2.Type2(dstField.Type), reflect2.Type2(srcField.Type))
			sd.FieldDescriptors = append(sd.FieldDescriptors, structFieldDescriptor{
				DstType: reflect2.Type2(dstField.Type),
				SrcType: reflect2.Type2(srcField.Type),
				DstOffset: dstField.Offset,
				SrcOffset: srcField.Offset,
			})
		}
	}
	return sd
}
// save stores a non-nil descriptor d in the type cache under pair and
// returns d unchanged.
func (c *copier) save(pair TypePair, d interface{}) interface{} {
	if d != nil {
		c.typeCache.Add(pair, d)
	}
	return d
}
package main
import (
"fmt"
"path/filepath"
"github.com/derWhity/AdventOfCode/lib/input"
)
// Grid cell states. offGrid is the rune zero value, returned by
// grid.seat for coordinates outside the grid.
const (
	floor = '.'
	empty = 'L'
	occupied = '#'
	offGrid = 0
)
// grid maps row index -> column index -> cell rune.
type grid map[int]map[int]rune
// print writes the grid row by row followed by a separator line.
// Assumes dense, zero-based, rectangular indices.
func (g grid) print() {
	for x := 0; x < len(g); x++ {
		for y := 0; y < len(g[x]); y++ {
			fmt.Printf("%s", string(g[x][y]))
		}
		fmt.Println("")
	}
	fmt.Println("----------")
}
// seat returns the rune at (x, y), or offGrid (0) for coordinates
// outside the grid.
func (g grid) seat(x, y int) rune {
	if row, ok := g[x]; ok {
		return row[y] // the zero value (== offGrid) when y does not exist
	}
	return offGrid
}
// countFirst walks from (x, y) in direction (incX, incY) and reports
// whether the FIRST seat visible in that direction is occupied (1)
// or not (0). Floor tiles have no switch case, so the walk simply
// continues past them; leaving the grid or meeting an empty seat
// yields 0.
func countFirst(input grid, x, y, incX, incY int) uint {
	xPos := x
	yPos := y
	for {
		xPos += incX
		yPos += incY
		s := input.seat(xPos, yPos)
		switch s {
		case offGrid:
			return 0
		case empty:
			return 0
		case occupied:
			return 1
		}
	}
}
// numOccupiedVisible counts how many of the eight sight lines from
// (x, y) end at an occupied seat.
func numOccupiedVisible(input grid, x, y int) uint {
	directions := [8][2]int{
		{-1, -1}, {0, -1}, {1, -1},
		{-1, 0}, {1, 0},
		{-1, 1}, {0, 1}, {1, 1},
	}
	var count uint
	for _, d := range directions {
		count += countFirst(input, x, y, d[0], d[1])
	}
	return count
}
// round applies one step of the part-two seating rules and returns
// the next grid, the number of seats that changed state, and the
// total number of occupied seats afterwards.
func round(input grid) (grid, uint, uint) {
	var numChanged uint
	var numOccupied uint
	out := grid{}
	// The order is random, but for us this should not be a problem in this quiz
	for x, row := range input {
		out[x] = map[int]rune{}
		for y, seat := range row {
			if seat == empty {
				// If a seat is empty (L) and no occupied seat is visible from it, it becomes occupied.
				if numOccupiedVisible(input, x, y) == 0 {
					out[x][y] = occupied
					numChanged++
				} else {
					out[x][y] = seat
				}
			} else if seat == occupied {
				// If a seat is occupied (#) and five or more seats visible from it are occupied, it becomes empty (part-two rule).
				if numOccupiedVisible(input, x, y) >= 5 {
					out[x][y] = empty
					numChanged++
				} else {
					out[x][y] = seat
				}
			} else {
				// Otherwise, the seat's state does not change.
				out[x][y] = seat
			}
			if out[x][y] == occupied {
				numOccupied++
			}
		}
	}
	return out, numChanged, numOccupied
}
// main reads the seat layout from ../input.txt and repeatedly applies
// rounds until the layout stabilises (no seat changes), printing the
// occupied-seat count after each round; the last printed count is the
// puzzle answer.
func main() {
	seats := grid{}
	fmt.Println(">> ------------- <<")
	items, err := input.ReadString(filepath.Join("..", "input.txt"), true)
	if err != nil {
		panic(err)
	}
	// Prepare the grid: x is the line (row) index, y the column.
	for x, line := range items {
		seats[x] = map[int]rune{}
		for y, seat := range line {
			seats[x][y] = seat
		}
	}
	// Run until nothing changes
	var numChanged, numOccupied uint
	seats.print()
	for {
		seats, numChanged, numOccupied = round(seats)
		//seats.print()
		fmt.Printf("Round finished: %d changed | %d occupied\n", numChanged, numOccupied)
		if numChanged == 0 {
			return
		}
	}
}
package opcodes
import (
"bytes"
"crypto/sha1"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"fmt"
"math"
"github.com/syahrul12345/secp256k1"
"golang.org/x/crypto/ripemd160"
)
// GetOPCODELIST returns a mapping from Bitcoin Script opcode byte to
// the Go function implementing it. The op_* functions are defined
// elsewhere in this package. Note: a fresh map is built on every call.
func GetOPCODELIST() map[int]interface{} {
	OPCODELIST := map[int]interface{}{
		0: op_0,
		79: op_1negate,
		81: op_1,
		82: op_2,
		83: op_3,
		84: op_4,
		85: op_5,
		86: op_6,
		87: op_7,
		88: op_8,
		89: op_9,
		90: op_10,
		91: op_11,
		92: op_12,
		93: op_13,
		94: op_14,
		95: op_15,
		96: op_16,
		97: op_nop,
		99: op_if,
		100: op_notif,
		105: op_verify,
		106: op_return,
		107: op_toaltstack,
		108: op_fromaltstack,
		109: op_2drop,
		110: op_2dup,
		111: op_3dup,
		112: op_2over,
		113: op_2rot,
		114: op_2swap,
		115: op_ifdup,
		116: op_depth,
		117: op_drop,
		118: op_dup,
		119: op_nip,
		120: op_over,
		121: op_pick,
		122: op_roll,
		123: op_rot,
		124: op_swap,
		125: op_tuck,
		130: op_size,
		135: op_equal,
		136: op_equalverify,
		139: op_1add,
		140: op_1sub,
		143: op_negate,
		144: op_abs,
		145: op_not,
		146: op_0notequal,
		147: op_add,
		148: op_sub,
		149: op_mul,
		154: op_booland,
		155: op_boolor,
		156: op_numequal,
		157: op_numequalverify,
		158: op_numnotequal,
		159: op_lessthan,
		160: op_greaterthan,
		161: op_lessthanorequal,
		162: op_greaterthanorequal,
		163: op_min,
		164: op_max,
		165: op_within,
		166: op_ripemd160,
		167: op_sha1,
		168: op_sha256,
		169: op_hash160,
		170: op_hash256,
		172: op_checksig,
		173: op_checksigverify,
		174: op_checkmultisig,
		175: op_checkmultisigverify,
		176: op_nop,
		177: op_checklocktimeverify,
		178: op_checksequenceverify,
		179: op_nop,
		180: op_nop,
		181: op_nop,
		182: op_nop,
		183: op_nop,
		184: op_nop,
		185: op_nop,
	}
	return OPCODELIST
}
//GETOPCODENAMES will return the opcode names
//
// Maps opcode byte values to human-readable names for debugging/printing.
// Must stay in sync with GetOPCODELIST (same key set).
func GETOPCODENAMES() map[int]string {
	OPCODELIST := map[int]string{
		0:   "op_0",
		79:  "op_1negate",
		81:  "op_1",
		82:  "op_2",
		83:  "op_3",
		84:  "op_4",
		85:  "op_5",
		86:  "op_6",
		87:  "op_7",
		88:  "op_8",
		89:  "op_9",
		90:  "op_10",
		91:  "op_11",
		92:  "op_12",
		93:  "op_13",
		94:  "op_14",
		95:  "op_15",
		96:  "op_16",
		97:  "op_nop",
		99:  "op_if",
		100: "op_notif",
		105: "op_verify",
		106: "op_return",
		107: "op_toaltstack",
		108: "op_fromaltstack",
		109: "op_2drop",
		110: "op_2dup",
		111: "op_3dup",
		112: "op_2over",
		113: "op_2rot",
		114: "op_2swap",
		115: "op_ifdup",
		116: "op_depth",
		117: "op_drop",
		118: "op_dup",
		119: "op_nip",
		120: "op_over",
		121: "op_pick",
		122: "op_roll",
		123: "op_rot",
		124: "op_swap",
		125: "op_tuck",
		130: "op_size",
		135: "op_equal",
		136: "op_equalverify",
		139: "op_1add",
		140: "op_1sub",
		143: "op_negate",
		144: "op_abs",
		145: "op_not",
		146: "op_0notequal",
		147: "op_add",
		148: "op_sub",
		149: "op_mul",
		154: "op_booland",
		155: "op_boolor",
		156: "op_numequal",
		157: "op_numequalverify",
		158: "op_numnotequal",
		159: "op_lessthan",
		160: "op_greaterthan",
		161: "op_lessthanorequal",
		162: "op_greaterthanorequal",
		163: "op_min",
		164: "op_max",
		165: "op_within",
		166: "op_ripemd160",
		167: "op_sha1",
		168: "op_sha256",
		169: "op_hash160",
		170: "op_hash256",
		172: "op_checksig",
		173: "op_checksigverify",
		174: "op_checkmultisig",
		175: "op_checkmultisigverify",
		176: "op_nop",
		177: "op_checklocktimeverify",
		178: "op_checksequenceverify",
		// 179-185 are reserved NOP opcodes.
		179: "op_nop",
		180: "op_nop",
		181: "op_nop",
		182: "op_nop",
		183: "op_nop",
		184: "op_nop",
		185: "op_nop",
	}
	return OPCODELIST
}
// encodeNum encodes num as a Bitcoin script number: little-endian
// sign-and-magnitude with the sign carried in the high bit of the last
// byte. An extra byte is appended when the magnitude already uses that
// bit. Zero encodes as the empty byte string.
func encodeNum(num int) []byte {
	// Zero is the empty element by convention.
	if num == 0 {
		return []byte{}
	}
	// NOTE(review): the float round-trip loses precision for |num| >= 2^53.
	absNum := int(math.Abs(float64(num)))
	negative := num < 0
	res := []byte{}
	for absNum > 0 {
		res = append(res, byte(absNum&0xff))
		absNum >>= 8
	}
	// x&0x80 yields 0 or 0x80, never 1: the previous `== 1` comparison
	// made this branch unreachable and corrupted encodings whose top
	// byte had the high bit set.
	if res[len(res)-1]&0x80 != 0 {
		if negative {
			res = append(res, 0x80)
		} else {
			res = append(res, 0)
		}
	} else if negative {
		res[len(res)-1] |= 0x80
	}
	return res
}
// decodeNum decodes a Bitcoin script number: little-endian
// sign-and-magnitude with the sign in the high bit of the last byte.
// The empty byte string decodes to 0. The input is not modified.
//
// NOTE(review): the result is an int64, so elements longer than 8 bytes
// (e.g. 20-byte hashes) silently overflow; do not use this for raw
// byte-equality checks.
func decodeNum(element []byte) int64 {
	if len(element) == 0 {
		return 0
	}
	// Build a big-endian copy so the caller's stack element stays
	// intact (the previous version reversed the slice in place).
	be := make([]byte, len(element))
	for i, b := range element {
		be[len(element)-1-i] = b
	}
	// The sign lives in the high bit of the most significant byte;
	// x&0x80 is 0 or 0x80, so compare against 0 (the old `== 1` test
	// was never true, making every value decode as non-negative).
	negative := be[0]&0x80 != 0
	result := int64(be[0] & 0x7f)
	for _, b := range be[1:] {
		result = result<<8 + int64(b)
	}
	if negative {
		return -result
	}
	return result
}
// pushInt appends the script-number encoding of n to the stack; it is
// the shared body of the constant-push opcodes below.
func pushInt(commands *[][]byte, n int) bool {
	*commands = append(*commands, encodeNum(n))
	return true
}

// op_0 pushes the encoding of 0 (the empty byte string).
func op_0(commands *[][]byte) bool { return pushInt(commands, 0) }

// op_1negate pushes the encoding of -1.
func op_1negate(commands *[][]byte) bool { return pushInt(commands, -1) }

// op_1 through op_16 push their respective small integers.
func op_1(commands *[][]byte) bool  { return pushInt(commands, 1) }
func op_2(commands *[][]byte) bool  { return pushInt(commands, 2) }
func op_3(commands *[][]byte) bool  { return pushInt(commands, 3) }
func op_4(commands *[][]byte) bool  { return pushInt(commands, 4) }
func op_5(commands *[][]byte) bool  { return pushInt(commands, 5) }
func op_6(commands *[][]byte) bool  { return pushInt(commands, 6) }
func op_7(commands *[][]byte) bool  { return pushInt(commands, 7) }
func op_8(commands *[][]byte) bool  { return pushInt(commands, 8) }
func op_9(commands *[][]byte) bool  { return pushInt(commands, 9) }
func op_10(commands *[][]byte) bool { return pushInt(commands, 10) }
func op_11(commands *[][]byte) bool { return pushInt(commands, 11) }
func op_12(commands *[][]byte) bool { return pushInt(commands, 12) }
func op_13(commands *[][]byte) bool { return pushInt(commands, 13) }
func op_14(commands *[][]byte) bool { return pushInt(commands, 14) }
func op_15(commands *[][]byte) bool { return pushInt(commands, 15) }
func op_16(commands *[][]byte) bool { return pushInt(commands, 16) }

// op_nop does nothing and succeeds.
func op_nop(commands *[][]byte) bool { return true }
func op_if(stack, commands *[][]byte) bool {
if len(*stack) < 1 {
return false
}
trueItems := [][]byte{}
falseItems := [][]byte{}
currentArray := trueItems
found := false
numberEndifsNeeded := 1
for len(*commands) > 0 {
tempCommands := *commands
command := tempCommands[0]
*commands = tempCommands[1:]
// Conver the current coimmand into a number,it's just one byte
buf := bytes.NewBuffer(command)
number, _ := binary.ReadUvarint(buf)
if number == 99 || number == 100 {
numberEndifsNeeded = numberEndifsNeeded + 1
} else if numberEndifsNeeded == 1 && number == 103 {
currentArray = falseItems
} else if number == 104 {
if numberEndifsNeeded == 1 {
found = true
break
} else {
numberEndifsNeeded = numberEndifsNeeded - 1
currentArray = append(currentArray, command)
}
} else {
currentArray = append(currentArray, command)
}
}
if !found {
return false
}
tempCommands := *commands
element := tempCommands[len(tempCommands)-1]
*commands = tempCommands[:len(tempCommands)-1]
buf := bytes.NewBuffer(element)
number, _ := binary.ReadUvarint(buf)
//Append trueitems or falseitems to the front of command
if number == 0 {
*commands = append(trueItems, *commands...)
} else {
*commands = append(falseItems, *commands...)
}
return true
}
// op_notif implements OP_NOTIF: identical parsing to op_if, but the
// TRUE branch executes when the popped condition decodes to 0.
//
// Fixes vs the previous version: an empty stack now fails (it returned
// true); the loop consumes commands, so it can no longer spin forever;
// branch slices are accumulated through a pointer so appends are not
// lost; and the condition element is popped off the stack.
func op_notif(stack, commands *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	trueItems := [][]byte{}
	falseItems := [][]byte{}
	currentArray := &trueItems
	found := false
	numEndIfsNeeded := 1
	for len(*commands) > 0 {
		cmds := *commands
		command := cmds[0]
		*commands = cmds[1:]
		buf := bytes.NewBuffer(command)
		number, _ := binary.ReadUvarint(buf)
		switch {
		case number == 99 || number == 100:
			numEndIfsNeeded++
			*currentArray = append(*currentArray, command)
		case numEndIfsNeeded == 1 && number == 103:
			currentArray = &falseItems
		case number == 104:
			if numEndIfsNeeded == 1 {
				found = true
			} else {
				numEndIfsNeeded--
				*currentArray = append(*currentArray, command)
			}
		default:
			*currentArray = append(*currentArray, command)
		}
		if found {
			break
		}
	}
	if !found {
		return false
	}
	element := s[len(s)-1]
	*stack = s[:len(s)-1]
	// NOTIF: zero condition runs the first branch.
	if decodeNum(element) == 0 {
		*commands = append(trueItems, *commands...)
	} else {
		*commands = append(falseItems, *commands...)
	}
	return true
}
// op_verify pops the top element and succeeds only if it decodes to a
// nonzero number (OP_VERIFY).
func op_verify(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	top := s[len(s)-1]
	*stack = s[:len(s)-1]
	return decodeNum(top) != 0
}

// op_return always fails the script (OP_RETURN).
func op_return(stack [][]byte) bool {
	return false
}
// op_toaltstack moves the top element of the main stack onto the alt
// stack (OP_TOALTSTACK). The previous version copied the element but
// never removed it from the source stack.
func op_toaltstack(stack, altstack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	*altstack = append(*altstack, s[len(s)-1])
	*stack = s[:len(s)-1]
	return true
}

// op_fromaltstack moves the top element of the alt stack back onto the
// main stack (OP_FROMALTSTACK). Same missing-pop fix as op_toaltstack.
func op_fromaltstack(stack, alstack *[][]byte) bool {
	a := *alstack
	if len(a) < 1 {
		return false
	}
	*stack = append(*stack, a[len(a)-1])
	*alstack = a[:len(a)-1]
	return true
}
// op_2drop removes the top two stack elements (OP_2DROP). The previous
// version sliced with [:len(...)] twice, which is a no-op, so nothing
// was ever dropped.
func op_2drop(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	*stack = s[:len(s)-2]
	return true
}
// op_2dup duplicates the top two stack elements (OP_2DUP).
func op_2dup(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	*stack = append(s, s[len(s)-2:]...)
	return true
}

// op_3dup duplicates the top three stack elements (OP_3DUP).
func op_3dup(stack *[][]byte) bool {
	s := *stack
	if len(s) < 3 {
		return false
	}
	*stack = append(s, s[len(s)-3:]...)
	return true
}

// op_2over copies the pair two places down onto the top (OP_2OVER).
func op_2over(stack *[][]byte) bool {
	s := *stack
	if len(s) < 4 {
		return false
	}
	*stack = append(s, s[len(s)-4:len(s)-2]...)
	return true
}

// op_2rot copies the pair six/five places down onto the top.
// NOTE(review): real OP_2ROT moves that pair rather than copying it;
// this mirrors the existing (copy-only) behavior.
func op_2rot(stack *[][]byte) bool {
	s := *stack
	if len(s) < 6 {
		return false
	}
	*stack = append(s, s[len(s)-6:len(s)-4]...)
	return true
}
// op_2swap swaps the top pair with the pair below it (OP_2SWAP):
// x1 x2 x3 x4 -> x3 x4 x1 x2. The previous implementation truncated at
// the wrong index and appended through aliased sub-slices, corrupting
// the stack; an in-place element swap avoids both problems.
func op_2swap(stack *[][]byte) bool {
	s := *stack
	if len(s) < 4 {
		return false
	}
	i := len(s)
	s[i-4], s[i-3], s[i-2], s[i-1] = s[i-2], s[i-1], s[i-4], s[i-3]
	return true
}
// op_ifdup duplicates the top element only if it decodes to a nonzero
// number (OP_IFDUP).
func op_ifdup(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	top := s[len(s)-1]
	if decodeNum(top) != 0 {
		*stack = append(s, top)
	}
	return true
}

// op_depth pushes the current stack depth as a script number (OP_DEPTH).
func op_depth(stack *[][]byte) bool {
	depth := len(*stack)
	*stack = append(*stack, encodeNum(depth))
	return true
}
// op_drop removes the top stack element (OP_DROP).
func op_drop(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	*stack = s[:len(s)-1]
	return true
}

// op_dup duplicates the top stack element (OP_DUP).
func op_dup(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	*stack = append(s, s[len(s)-1])
	return true
}
// op_nip removes the element just below the top of the stack (OP_NIP):
// x1 x2 -> x2. The previous version computed the result into a local
// but never wrote it back to *stack, so it was a no-op.
func op_nip(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	// Overwrite the second-from-top slot with the top, then shrink.
	s[len(s)-2] = s[len(s)-1]
	*stack = s[:len(s)-1]
	return true
}

// op_over copies the element just below the top onto the top (OP_OVER):
// x1 x2 -> x1 x2 x1.
func op_over(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	*stack = append(s, s[len(s)-2])
	return true
}
// op_pick pops a count n, then COPIES the element n places down the
// remaining stack onto the top (OP_PICK). The previous version indexed
// the stack before the pop, selecting the wrong element (off by one).
func op_pick(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	n := decodeNum(s[len(s)-1])
	s = s[:len(s)-1]
	*stack = s
	if n < 0 || int64(len(s)) < n+1 {
		return false
	}
	*stack = append(s, s[int64(len(s))-1-n])
	return true
}

// op_roll pops a count n, then MOVES the element n places down the
// remaining stack to the top (OP_ROLL). Fixes vs the previous version:
// n == 0 is a successful no-op (it returned false); the rolled element
// is removed from its old position; and success returns true.
func op_roll(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	n := decodeNum(s[len(s)-1])
	s = s[:len(s)-1]
	*stack = s
	if n < 0 || int64(len(s)) < n+1 {
		return false
	}
	if n == 0 {
		return true
	}
	idx := int64(len(s)) - 1 - n
	rolled := s[idx]
	s = append(s[:idx], s[idx+1:]...)
	*stack = append(s, rolled)
	return true
}
// op_rot rotates the top three elements (OP_ROT): x1 x2 x3 -> x2 x3 x1.
// The previous version truncated the stack and re-appended only one
// element, silently dropping two.
func op_rot(stack *[][]byte) bool {
	s := *stack
	if len(s) < 3 {
		return false
	}
	third := s[len(s)-3]
	copy(s[len(s)-3:], s[len(s)-2:])
	s[len(s)-1] = third
	return true
}

// op_swap swaps the top two elements (OP_SWAP): x1 x2 -> x2 x1.
// The previous version dropped an element and returned false on success.
func op_swap(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	s[len(s)-1], s[len(s)-2] = s[len(s)-2], s[len(s)-1]
	return true
}

// op_tuck copies the top element below the second one (OP_TUCK):
// x1 x2 -> x2 x1 x2. The previous version built the result through
// aliased appends and returned false on success.
func op_tuck(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	top := s[len(s)-1]
	second := s[len(s)-2]
	*stack = append(s[:len(s)-2], top, second, top)
	return true
}
// op_size pushes the byte length of the top element, encoded as a
// script number, without removing the element (OP_SIZE).
func op_size(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	top := s[len(s)-1]
	*stack = append(s, encodeNum(len(top)))
	return true
}
func op_equal(stack *[][]byte) bool {
if len(*stack) < 2 {
return false
}
tempStack := *stack
//Stack.pop()
element1 := tempStack[len(tempStack)-1]
tempStack = tempStack[:len(tempStack)-1]
element2 := tempStack[len(tempStack)-1]
*stack = tempStack[:len(tempStack)-1]
// Since element1 and 2 represents a number
num1 := decodeNum(element1)
num2 := decodeNum(element2)
if num1 == num2 {
*stack = append(*stack, encodeNum(1))
} else {
*stack = append(*stack, encodeNum(0))
}
return true
}
func op_equalverify(stack *[][]byte) bool {
return op_equal(stack) && op_verify(stack)
}
// op_1add replaces the top number with itself plus one (OP_1ADD).
func op_1add(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	v := decodeNum(s[len(s)-1])
	*stack = append(s[:len(s)-1], encodeNum(int(v+1)))
	return true
}

// op_1sub replaces the top number with itself minus one (OP_1SUB).
func op_1sub(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	v := decodeNum(s[len(s)-1])
	*stack = append(s[:len(s)-1], encodeNum(int(v-1)))
	return true
}
// op_negate replaces the top number with its negation (OP_NEGATE).
// The previous version returned false on success.
func op_negate(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	v := decodeNum(s[len(s)-1])
	*stack = append(s[:len(s)-1], encodeNum(int(-v)))
	return true
}
// op_abs replaces the top number with its absolute value (OP_ABS).
func op_abs(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	v := decodeNum(s[len(s)-1])
	if v < 0 {
		v = -v
	}
	*stack = append(s[:len(s)-1], encodeNum(int(v)))
	return true
}

// op_not replaces the top number with 1 if it is 0, else 0 (OP_NOT).
func op_not(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	v := decodeNum(s[len(s)-1])
	result := encodeNum(0)
	if v == 0 {
		result = encodeNum(1)
	}
	*stack = append(s[:len(s)-1], result)
	return true
}

// op_0notequal replaces the top number with 0 if it is 0, else 1
// (OP_0NOTEQUAL).
func op_0notequal(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	v := decodeNum(s[len(s)-1])
	result := encodeNum(1)
	if v == 0 {
		result = encodeNum(0)
	}
	*stack = append(s[:len(s)-1], result)
	return true
}
// op_add pops two numbers and pushes their sum (OP_ADD).
func op_add(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	top := decodeNum(s[len(s)-1])
	under := decodeNum(s[len(s)-2])
	*stack = append(s[:len(s)-2], encodeNum(int(under+top)))
	return true
}

// op_sub pops two numbers and pushes second-minus-top (OP_SUB).
func op_sub(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	top := decodeNum(s[len(s)-1])
	under := decodeNum(s[len(s)-2])
	*stack = append(s[:len(s)-2], encodeNum(int(under-top)))
	return true
}

// op_mul pops two numbers and pushes their product (OP_MUL).
func op_mul(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	top := decodeNum(s[len(s)-1])
	under := decodeNum(s[len(s)-2])
	*stack = append(s[:len(s)-2], encodeNum(int(under*top)))
	return true
}
// op_booland pops two numbers and pushes 1 if BOTH are nonzero, else 0
// (OP_BOOLAND). Script truthiness is "nonzero", so the previous `> 0`
// tests wrongly treated negative numbers as false.
func op_booland(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	element1 := decodeNum(s[len(s)-1])
	element2 := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	if element1 != 0 && element2 != 0 {
		*stack = append(*stack, encodeNum(1))
	} else {
		*stack = append(*stack, encodeNum(0))
	}
	return true
}

// op_boolor pops two numbers and pushes 1 if EITHER is nonzero, else 0
// (OP_BOOLOR). Same negative-number fix as op_booland.
func op_boolor(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	element1 := decodeNum(s[len(s)-1])
	element2 := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	if element1 != 0 || element2 != 0 {
		*stack = append(*stack, encodeNum(1))
	} else {
		*stack = append(*stack, encodeNum(0))
	}
	return true
}
// op_numequal pops two numbers and pushes 1 if they are numerically
// equal, 0 otherwise (OP_NUMEQUAL).
func op_numequal(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	a := decodeNum(s[len(s)-1])
	b := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	result := encodeNum(0)
	if a == b {
		result = encodeNum(1)
	}
	*stack = append(*stack, result)
	return true
}

// op_numequalverify is op_numequal followed by op_verify.
func op_numequalverify(stack *[][]byte) bool {
	if !op_numequal(stack) {
		return false
	}
	return op_verify(stack)
}

// op_numnotequal pops two numbers and pushes 1 if they differ, 0 if
// they are equal (OP_NUMNOTEQUAL).
func op_numnotequal(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	a := decodeNum(s[len(s)-1])
	b := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	result := encodeNum(1)
	if a == b {
		result = encodeNum(0)
	}
	*stack = append(*stack, result)
	return true
}
// op_lessthan pushes 1 if the second-from-top number is less than the
// top number, else 0 (OP_LESSTHAN: x1 x2 -> x1 < x2).
func op_lessthan(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	top := decodeNum(s[len(s)-1])
	under := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	result := encodeNum(0)
	if under < top {
		result = encodeNum(1)
	}
	*stack = append(*stack, result)
	return true
}

// op_greaterthan pushes 1 if the second-from-top number is greater than
// the top number, else 0 (OP_GREATERTHAN).
func op_greaterthan(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	top := decodeNum(s[len(s)-1])
	under := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	result := encodeNum(0)
	if under > top {
		result = encodeNum(1)
	}
	*stack = append(*stack, result)
	return true
}

// op_lessthanorequal pushes 1 if second-from-top <= top, else 0.
func op_lessthanorequal(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	top := decodeNum(s[len(s)-1])
	under := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	result := encodeNum(0)
	if under <= top {
		result = encodeNum(1)
	}
	*stack = append(*stack, result)
	return true
}

// op_greaterthanorequal pushes 1 if second-from-top >= top, else 0.
func op_greaterthanorequal(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	top := decodeNum(s[len(s)-1])
	under := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	result := encodeNum(0)
	if under >= top {
		result = encodeNum(1)
	}
	*stack = append(*stack, result)
	return true
}
// op_min pops two numbers and pushes the smaller one (OP_MIN).
func op_min(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	a := decodeNum(s[len(s)-1])
	b := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	smallest := b
	if a < b {
		smallest = a
	}
	*stack = append(*stack, encodeNum(int(smallest)))
	return true
}

// op_max pops two numbers and pushes the larger one (OP_MAX).
func op_max(stack *[][]byte) bool {
	s := *stack
	if len(s) < 2 {
		return false
	}
	a := decodeNum(s[len(s)-1])
	b := decodeNum(s[len(s)-2])
	*stack = s[:len(s)-2]
	largest := b
	if a > b {
		largest = a
	}
	*stack = append(*stack, encodeNum(int(largest)))
	return true
}
// op_within pops max, min, and x, pushing 1 when min <= x < max (the
// upper bound is exclusive), else 0 (OP_WITHIN).
func op_within(stack *[][]byte) bool {
	s := *stack
	if len(s) < 3 {
		return false
	}
	maximum := decodeNum(s[len(s)-1])
	minimum := decodeNum(s[len(s)-2])
	element := decodeNum(s[len(s)-3])
	*stack = s[:len(s)-3]
	result := encodeNum(0)
	if element >= minimum && element < maximum {
		result = encodeNum(1)
	}
	*stack = append(*stack, result)
	return true
}
// op_ripemd160 pops the top element and pushes its RIPEMD-160 digest
// (OP_RIPEMD160). The previous version left the input on the stack,
// unlike op_sha1/op_sha256/op_hash160 in this file, which all pop it.
func op_ripemd160(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	element := s[len(s)-1]
	*stack = s[:len(s)-1]
	hasher := ripemd160.New()
	hasher.Write(element)
	*stack = append(*stack, hasher.Sum(nil))
	return true
}
// op_sha1 pops the top element and pushes its SHA-1 digest (OP_SHA1).
func op_sha1(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	element := s[len(s)-1]
	*stack = s[:len(s)-1]
	digest := sha1.Sum(element)
	*stack = append(*stack, digest[:])
	return true
}

// op_sha256 pops the top element and pushes its SHA-256 digest
// (OP_SHA256).
func op_sha256(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	element := s[len(s)-1]
	*stack = s[:len(s)-1]
	digest := sha256.Sum256(element)
	*stack = append(*stack, digest[:])
	return true
}
// op_hash160 pops the top element and pushes RIPEMD160(SHA256(x)),
// the standard Bitcoin hash160 (OP_HASH160).
func op_hash160(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	element := s[len(s)-1]
	*stack = s[:len(s)-1]
	sha := sha256.Sum256(element)
	hasher := ripemd160.New()
	hasher.Write(sha[:])
	*stack = append(*stack, hasher.Sum(nil))
	return true
}
// op_hash256 pops the top element and pushes SHA256(SHA256(x)), the
// double-SHA256 used throughout Bitcoin (OP_HASH256).
func op_hash256(stack *[][]byte) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	element := s[len(s)-1]
	*stack = s[:len(s)-1]
	first := sha256.Sum256(element)
	second := sha256.Sum256(first[:])
	*stack = append(*stack, second[:])
	return true
}
// op_checksig pops a public key (top) and a signature with its trailing
// SIGHASH byte (next), verifies the signature against the signature
// hash z via secp256k1.Verify, and pushes 1 on success or 0 on failure
// (OP_CHECKSIG).
func op_checksig(stack *[][]byte, z string) bool {
	if len(*stack) < 2 {
		return false
	}
	//Get the last two variables
	tempStack := *stack
	sec := tempStack[len(tempStack)-1]
	tempStack = tempStack[:len(tempStack)-1]
	// The signature carries a 1-byte SIGHASH type at the end; strip it
	// before handing the DER bytes to the verifier.
	der := tempStack[len(tempStack)-1]
	der = der[:len(der)-1]
	*stack = tempStack[:len(tempStack)-1]
	res, err := secp256k1.Verify(hex.EncodeToString(sec), hex.EncodeToString(der), z)
	if err != nil {
		// NOTE(review): a malformed key/signature only logs here and then
		// pushes 0 below; confirm that swallowing the error (rather than
		// failing the script) is intended.
		fmt.Println(err)
	}
	if res {
		*stack = append(*stack, encodeNum(1))
	} else {
		*stack = append(*stack, encodeNum(0))
	}
	return true
}

// op_checksigverify runs op_checksig and then op_verify, failing the
// script unless the signature check pushed a truthy value
// (OP_CHECKSIGVERIFY).
func op_checksigverify(stack *[][]byte, z string) bool {
	return op_checksig(stack, z) && op_verify(stack)
}
// op_checkmultisig implements OP_CHECKMULTISIG: it pops n and n public
// keys, then m and m signatures (each carrying a trailing SIGHASH byte),
// plus one extra dummy element (the historical Satoshi off-by-one bug).
// Signatures must verify in order against the remaining public keys.
// Pushes 1 on success.
//
// Fixes vs the previous version: a signature that matched NO public key
// was silently accepted and 1 was pushed anyway; now any unmatched
// signature fails the opcode. Negative n/m are also rejected.
func op_checkmultisig(stack *[][]byte, z string) bool {
	s := *stack
	if len(s) < 1 {
		return false
	}
	n := decodeNum(s[len(s)-1])
	s = s[:len(s)-1]
	if n < 0 || int64(len(s)) < n+1 {
		return false
	}
	secPubKeys := make([][]byte, 0, n)
	for i := int64(0); i < n; i++ {
		secPubKeys = append(secPubKeys, s[len(s)-1])
		s = s[:len(s)-1]
	}
	m := decodeNum(s[len(s)-1])
	s = s[:len(s)-1]
	if m < 0 || int64(len(s)) < m+1 {
		return false
	}
	derSignatures := make([][]byte, 0, m)
	for j := int64(0); j < m; j++ {
		der := s[len(s)-1]
		// Strip the trailing SIGHASH byte before DER parsing.
		derSignatures = append(derSignatures, der[:len(der)-1])
		s = s[:len(s)-1]
	}
	// Consume the extra element required by the off-by-one bug.
	s = s[:len(s)-1]
	// Each signature must be matched, in order, by one of the remaining
	// public keys; keys are consumed as they are tried.
	for _, sig := range derSignatures {
		verified := false
		for len(secPubKeys) > 0 && !verified {
			sec := hex.EncodeToString(secPubKeys[0])
			secPubKeys = secPubKeys[1:]
			point := secp256k1.ParseSec(sec)
			parsed := secp256k1.ParseDer(hex.EncodeToString(sig))
			res, err := point.Verify(z, parsed)
			if err != nil {
				return false
			}
			verified = res
		}
		if !verified {
			return false
		}
	}
	*stack = append(s, encodeNum(1))
	return true
}
// op_checkmultisigverify runs op_checkmultisig and then op_verify,
// failing the script unless the multisig check pushed a truthy value.
func op_checkmultisigverify(stack *[][]byte, z string) bool {
	if !op_checkmultisig(stack, z) {
		return false
	}
	return op_verify(stack)
}
// op_checklocktimeverify implements BIP65's OP_CHECKLOCKTIMEVERIFY.
// It peeks (does not pop) the top stack element and fails unless the
// transaction's locktime is of a compatible kind and at least as large.
func op_checklocktimeverify(stack *[][]byte, locktime []byte, sequence []byte) bool {
	lockTimeInt := binary.LittleEndian.Uint32(locktime)
	sequenceInt := binary.LittleEndian.Uint32(sequence)
	// A final sequence disables locktime checking entirely.
	if sequenceInt == 0xffffffff {
		return false
	}
	if len(*stack) < 1 {
		return false
	}
	tempStack := *stack
	element := decodeNum(tempStack[len(tempStack)-1])
	if element < 0 {
		return false
	}
	// Heights are < 500000000 and timestamps are >= 500000000 (BIP65);
	// the two kinds must match. The previous `> 500000000` wrongly
	// accepted a locktime of exactly 500000000 against a height element.
	if element < 500000000 && lockTimeInt >= 500000000 {
		return false
	}
	if int64(lockTimeInt) < element {
		return false
	}
	return true
}
func op_checksequenceverify(stack *[][]byte, version []byte, sequence []byte) bool {
versionInt := binary.LittleEndian.Uint32(version)
sequenceInt := binary.LittleEndian.Uint32(sequence)
if sequenceInt&(1<<31) == (1 << 31) {
return false
}
if len(*stack) < 1 {
return false
}
tempStack := *stack
element := decodeNum(tempStack[len(tempStack)-1])
if element < 0 {
return false
}
if element&(1<<31) == (1 << 31) {
if versionInt < 2 {
return false
} else if sequenceInt&(1<<31) == (1 << 31) {
return false
} else if uint32(element&(1<<22)) != sequenceInt&(1<<22) {
return false
} else if uint32(element&0xffff) > sequenceInt&0xffff {
return false
}
}
return true
} | opcodes/opcodes.go | 0.575111 | 0.439146 | opcodes.go | starcoder |
package diff
import (
"errors"
"fmt"
)
// AbsoluteDateDifference computes the absolute difference in days between the
// two dates (start, end: unparsed strings). Both are expected to be in the
// format YYYY-MM-DD. The start and end day themselves are not counted; only
// the absolute difference is considered, so flipping the argument order gives
// the same result (AbsoluteDateDifference(x, y) == AbsoluteDateDifference(y, x)).
func AbsoluteDateDifference(start, end string) (uint, error) {
	if len(start) != 10 || len(end) != 10 {
		// fmt.Errorf replaces the redundant errors.New(fmt.Sprintf(...)).
		return 0, fmt.Errorf(
			"dates must be of length 10. Start and end have lengths, %d and %d respectively", len(start), len(end),
		)
	}
	if !isCorrectStructure(start) {
		return 0, errors.New("start date has invalid structure. Expected YYYY-MM-DD")
	}
	if !isCorrectStructure(end) {
		return 0, errors.New("end date has invalid structure. Expected YYYY-MM-DD")
	}
	dateStart, err := constructDate(
		start[0:4],
		start[5:7],
		start[8:10],
	)
	if err != nil {
		return 0, fmt.Errorf("start date was invalid: %w", err)
	}
	dateEnd, err := constructDate(
		end[0:4],
		end[5:7],
		end[8:10],
	)
	if err != nil {
		return 0, fmt.Errorf("end date was invalid: %w", err)
	}
	return dateStart.AbsoluteDifference(dateEnd), nil
}
// isCorrectStructure reports whether the date string has the shape of the
// date format YYYY-MM-DD: exactly 10 characters with dashes at positions
// 4 and 7 (equivalent to the regex ^.{4}-.{2}-.{2}$).
func isCorrectStructure(date string) bool {
	if len(date) != 10 {
		return false
	}
	return date[4] == '-' && date[7] == '-'
}
// constructDate validates the year, month, day parameters and constructs a
// Date if valid. Each parse or validation failure is returned wrapped with
// context.
func constructDate(year, month, day string) (Date, error) {
	y, err := UintParse(year)
	if err != nil {
		return Date{}, fmt.Errorf("year is not a valid integer because %w", err)
	}
	m, err := UintParse(month)
	if err != nil {
		return Date{}, fmt.Errorf("month is not a valid integer because %w", err)
	}
	d, err := UintParse(day)
	if err != nil {
		return Date{}, fmt.Errorf("day is not a valid integer because %w", err)
	}
	// Validate if y, m, d construct a valid date. Previously the
	// validation error was discarded via fmt.Errorf("") — wrap it so
	// callers see why the date was rejected.
	if err := validateDate(y, m, d); err != nil {
		return Date{}, fmt.Errorf("date is not valid: %w", err)
	}
	return Date{
		year:  y,
		month: m,
		day:   d,
	}, nil
}
// validateDate returns an error if the (year, month, day) triple is not a
// valid calendar date, taking the leap-year February into account.
func validateDate(y, m, d uint) error {
	if m == 0 || m > 12 {
		return errors.New("month is not within range [1, 12]")
	}
	leap := isLeapYear(y)
	maxDay := monthDates[m-1]
	if leap {
		maxDay = monthDatesLeap[m-1]
	}
	if d == 0 || d > maxDay {
		if leap {
			return fmt.Errorf("in month %d, there are not %d days during a leap year", m, d)
		}
		return fmt.Errorf("in month %d, there are not %d days during a non-leap year", m, d)
	}
	return nil
}
// isLeapYear returns true if the year is a leap year.
//
// NOTE(review): this applies only the simplified divisible-by-4 rule;
// the Gregorian century corrections (%100, %400) are missing. The leap
// compensation in DaysFromZero uses the same simplification, so the two
// must be changed together if this is ever corrected.
func isLeapYear(year uint) bool {
	return year%4 == 0
}
// Date from the Gregorian calendar. Date structure are valid before created.
type Date struct {
	year uint // four-digit year, e.g. 2020
	month uint // 1-12
	day uint // 1-31, validated against the month by validateDate
}
// AbsoluteDifference returns the number of whole days strictly between
// d and e — the start and end days themselves are not counted, so
// adjacent or equal dates both yield 0.
func (d Date) AbsoluteDifference(e Date) uint {
	a, b := d.DaysFromZero(), e.DaysFromZero()
	if a < b {
		return b - a - 1
	} else if a > b {
		return a - b - 1
	}
	return 0
}
// DaysFromStartOfYear returns the number of days between the d Date, and the
// start of the year (i.e. YYYY-01-01). (The comment previously misnamed this
// method DaysFromZero.)
func (d Date) DaysFromStartOfYear() uint {
	result := d.day
	var daysInYear *[12]uint
	if isLeapYear(d.year) {
		daysInYear = &monthDatesLeap
	} else {
		daysInYear = &monthDates
	}
	// Sum the full months preceding d.month.
	for i := 0; i < int(d.month)-1; i++ {
		result += daysInYear[i]
	}
	return result
}
// DaysFromZero returns the number of days between the d Date, and zero (i.e. 0000-00-00)
func (d Date) DaysFromZero() uint {
	result := d.DaysFromStartOfYear()
	if d.year == 0 {
		return result
	}
	result += 365 * (d.year - 1)
	// Compensate for leap years
	// NOTE(review): (year/4)+1 matches the simplified %4 rule in
	// isLeapYear (counting year 0 as leap); it does not apply Gregorian
	// century corrections. Keep consistent with isLeapYear.
	result += (d.year / 4) + 1
	return result
}
// Days in each month of Jan, Feb, ..., Dec, for common and leap years.
var (
	monthDates     = [12]uint{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
	monthDatesLeap = [12]uint{31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
)
package value
import (
"math/big"
)
// asin implements the arcsine operator. Real arguments in [-1, 1]
// (including complex values with a zero imaginary part) take the real
// floatAsin path; anything else is evaluated as a complex arcsine.
func asin(c Context, v Value) Value {
	if u, ok := v.(Complex); ok {
		if !isZero(u.imag) || !inArcRealDomain(u.real) {
			return complexAsin(c, u)
		}
		// Purely real complex value: fall through to the real path.
		v = u.real
	} else if !inArcRealDomain(v) {
		return complexAsin(c, newComplex(v, zero))
	}
	return evalFloatFunc(c, v, floatAsin)
}
// acos implements the arccosine operator. Real arguments in [-1, 1]
// (including complex values with a zero imaginary part) take the real
// floatAcos path; anything else is evaluated as a complex arccosine.
func acos(c Context, v Value) Value {
	if u, ok := v.(Complex); ok {
		if !isZero(u.imag) || !inArcRealDomain(u.real) {
			return complexAcos(c, u)
		}
		// Purely real complex value: fall through to the real path.
		v = u.real
	} else if !inArcRealDomain(v) {
		return complexAcos(c, newComplex(v, zero))
	}
	return evalFloatFunc(c, v, floatAcos)
}
// inArcRealDomain reports whether the argument is between -1 and +1
// inclusive, the valid domain for real arcsin and arccos.
func inArcRealDomain(x Value) bool {
	return compare(x, -1) >= 0 && compare(x, 1) <= 0
}
// atan implements the arctangent operator. Complex values with a
// nonzero imaginary part use the complex arctangent; everything else
// (reals, and complex values that are purely real) uses floatAtan.
func atan(c Context, v Value) Value {
	if u, ok := v.(Complex); ok {
		if !isZero(u.imag) {
			return complexAtan(c, u)
		}
		v = u.real
	}
	return evalFloatFunc(c, v, floatAtan)
}
// floatAsin computes asin(x) using the formula asin(x) = atan(x/sqrt(1-x²)).
func floatAsin(c Context, x *big.Float) *big.Float {
	// The asin Taylor series converges very slowly near ±1, but our
	// atan implementation converges well for all values, so we use
	// the formula above to compute asin. But be careful when |x|=1,
	// where sqrt(1-x²) vanishes: return ±π/2 exactly instead.
	if x.Cmp(floatOne) == 0 {
		z := newFloat(c).Set(floatPi)
		return z.Quo(z, floatTwo)
	}
	if x.Cmp(floatMinusOne) == 0 {
		z := newFloat(c).Set(floatPi)
		z.Quo(z, floatTwo)
		return z.Neg(z)
	}
	// z = atan(x / sqrt(1 - x²))
	z := newFloat(c)
	z.Mul(x, x)
	z.Sub(floatOne, z)
	z = floatSqrt(c, z)
	z.Quo(x, z)
	return floatAtan(c, z)
}
// floatAcos computes acos(x) as π/2 - asin(x). The callers (acos) only
// invoke it for real x in [-1, 1].
func floatAcos(c Context, x *big.Float) *big.Float {
	// acos(x) = π/2 - asin(x)
	z := newFloat(c).Set(floatPi)
	z.Quo(z, newFloat(c).SetInt64(2))
	return z.Sub(z, floatAsin(c, x))
}
// floatAtan computes atan(x) using a Taylor series. There are two series,
// one for |x| < 1 and one for larger values.
func floatAtan(c Context, x *big.Float) *big.Float {
	// atan(-x) == -atan(x). Do this up top to simplify the Euler crossover calculation.
	// After this point x is non-negative.
	if x.Sign() < 0 {
		z := newFloat(c).Set(x)
		z = floatAtan(c, z.Neg(z))
		return z.Neg(z)
	}
	// The series converges very slowly near 1. atan 1.00001 takes over a million
	// iterations at the default precision. But there is hope, an Euler identity:
	//	atan(x) = atan(y) + atan((x-y)/(1+xy))
	// Note that y is a free variable. If x is near 1, we can use this formula
	// to push the computation to values that converge faster. Because
	//	tan(π/8) = √2 - 1, or equivalently atan(√2 - 1) == π/8
	// we choose y = √2 - 1 and then we only need to calculate one atan:
	//	atan(x) = π/8 + atan((x-y)/(1+xy))
	// Where do we cross over? This version converges significantly faster
	// even at 0.5, but we must be careful that (x-y)/(1+xy) never approaches 1.
	// At x = 0.5, (x-y)/(1+xy) is 0.07; at x=1 it is 0.414214; at x=1.5 it is
	// 0.66, which is as big as we dare go. With 256 bits of precision and a
	// crossover at 0.5, here are the number of iterations done by
	//	atan .1*iota 20
	// 0.1 39, 0.2 55, 0.3 73, 0.4 96, 0.5 126, 0.6 47, 0.7 59, 0.8 71, 0.9 85, 1.0 99, 1.1 116, 1.2 38, 1.3 44, 1.4 50, 1.5 213, 1.6 183, 1.7 163, 1.8 147, 1.9 135, 2.0 125
	tmp := newFloat(c).Set(floatOne)
	tmp.Sub(tmp, x)
	tmp.Abs(tmp)
	if tmp.Cmp(newFloat(c).SetFloat64(0.5)) < 0 {
		// |x - 1| < 0.5: apply the Euler identity with y = √2 - 1.
		z := newFloat(c).Set(floatPi)
		z.Quo(z, newFloat(c).SetInt64(8))
		y := floatSqrt(c, floatTwo)
		y.Sub(y, floatOne)
		num := newFloat(c).Set(x)
		num.Sub(num, y)
		den := newFloat(c).Set(x)
		den = den.Mul(den, y)
		den = den.Add(den, floatOne)
		z = z.Add(z, floatAtan(c, num.Quo(num, den)))
		return z
	}
	if x.Cmp(floatOne) > 0 {
		return floatAtanLarge(c, x)
	}
	// This is the series for small values |x| <  1.
	//	asin(x) = x - x³/3 + x⁵/5 - x⁷/7 + ...
	// First term to compute in loop will be x
	n := newFloat(c)
	term := newFloat(c)
	xN := newFloat(c).Set(x)
	xSquared := newFloat(c).Set(x)
	xSquared.Mul(x, x)
	z := newFloat(c)
	// n goes up by two each loop.
	for loop := newLoop(c.Config(), "atan", x, 4); ; {
		term.Set(xN)
		term.Quo(term, n.SetUint64(2*loop.i+1))
		z.Add(z, term)
		// Alternate the sign of the term for the next iteration.
		xN.Neg(xN)
		if loop.done(z) {
			break
		}
		// xN *= x², becoming x**(n+2).
		xN.Mul(xN, xSquared)
	}
	return z
}
// floatAtanLarge computes atan(x) for large x using a Taylor series.
// x is known to be > 1.
func floatAtanLarge(c Context, x *big.Float) *big.Float {
	// This is the series for larger values |x| >= 1.
	// For x > 0, atan(x) = +π/2 - 1/x + 1/3x³ -1/5x⁵ + 1/7x⁷ - ...
	// First term to compute in loop will be -1/x
	n := newFloat(c)
	term := newFloat(c)
	xN := newFloat(c).Set(x)
	xSquared := newFloat(c).Set(x)
	xSquared.Mul(x, x)
	z := newFloat(c).Set(floatPi)
	z.Quo(z, floatTwo) // start the sum at π/2
	// n goes up by two each loop.
	for loop := newLoop(c.Config(), "atan", x, 4); ; {
		xN.Neg(xN) // alternate the sign; first pass makes xN = -x
		term.Set(xN)
		term.Mul(term, n.SetUint64(2*loop.i+1))
		term.Quo(floatOne, term) // term = ±1 / ((2i+1)·x^(2i+1))
		z.Add(z, term)
		if loop.done(z) {
			break
		}
		// xN *= x², becoming x**(n+2).
		xN.Mul(xN, xSquared)
	}
	return z
}
// complexAsin computes asin(v) using the identity
//	asin(v) = -i * log(sqrt(1-v²) + i*v)
func complexAsin(c Context, v Complex) Complex {
	root := complexSqrt(c, newComplex(one, zero).sub(c, v.mul(c, v)))
	iv := newComplex(zero, one).mul(c, v)
	return newComplex(zero, minusOne).mul(c, complexLog(c, root.add(c, iv)))
}
// complexAcos computes acos(v) using the identity acos(v) = π/2 - asin(v),
// with π/2 taken at the context's working precision and a zero imaginary part.
func complexAcos(c Context, v Complex) Value {
	piBy2 := newComplex(BigFloat{newFloat(c).Set(floatPiBy2)}, BigFloat{floatZero})
	return piBy2.sub(c, complexAsin(c, v))
}
// complexAtan computes atan(v) using the identity
//	atan(v) = 1/(2i) * log((i-v)/(i+v))
// which is the textbook form 1/(2i) * log((1+iv)/(1-iv)) with numerator
// and denominator multiplied through by i.
func complexAtan(c Context, v Complex) Value {
	i := newComplex(zero, one)
	res := i.sub(c, v).div(c, i.add(c, v)) // (i-v)/(i+v)
	res = complexLog(c, res)
	// minusOneOverTwoI is assumed to hold (-1/2)·i, which equals 1/(2i),
	// completing the identity above — confirm against its definition.
	return res.mul(c, minusOneOverTwoI)
}
} | value/asin.go | 0.742328 | 0.559531 | asin.go | starcoder |
package core
import (
"sync"
)
// Node is a branch within the BinarySearchTree.
type Node struct {
	value int // ordering key and payload
	left, right *Node // left holds smaller values, right holds larger ones
}
// BinarySearchTree (BST) are a particular type of container: data structures that store integers.
type BinarySearchTree struct {
	lock sync.RWMutex // guards Root and every node reachable from it
	Root *Node //Root of the tree
}
// NewBinarySearchTree returns a pointer to an empty, ready-to-use BinarySearchTree.
func NewBinarySearchTree() *BinarySearchTree {
	return new(BinarySearchTree)
}
// Insert adds a new node into the tree; its position is determined by the
// int value passed. Values already present are left untouched.
// Safe for concurrent use.
func (t *BinarySearchTree) Insert(v int) {
	t.lock.Lock()
	defer t.lock.Unlock()
	if t.Root != nil {
		_insert(t.Root, v)
		return
	}
	t.Root = &Node{value: v}
}
// Find searches for the Node holding the given int value.
// It returns the Node and true when found, otherwise nil and false.
// Safe for concurrent use (takes only the read lock).
func (t *BinarySearchTree) Find(v int) (*Node, bool) {
	t.lock.RLock()
	defer t.lock.RUnlock()
	if root := t.Root; root != nil {
		return _find(root, v)
	}
	return nil, false
}
// _find walks the subtree rooted at node looking for v, descending right for
// larger values and left for smaller ones.
func _find(node *Node, v int) (*Node, bool) {
	for node != nil {
		switch {
		case v == node.value:
			return node, true
		case v > node.value:
			node = node.right
		default:
			node = node.left
		}
	}
	return nil, false
}
// _insert descends from node and attaches a fresh leaf holding v at the
// correct position. A value equal to an existing node is silently dropped.
func _insert(node *Node, v int) {
	for {
		if v > node.value {
			if node.right == nil {
				node.right = &Node{value: v}
				return
			}
			node = node.right
		} else if v < node.value {
			if node.left == nil {
				node.left = &Node{value: v}
				return
			}
			node = node.left
		} else {
			return // duplicate: nothing to do
		}
	}
}
// Delete removes the Node with the given int value from the tree.
// This is a no-op if the Node does not exist.
// Takes the write lock, matching Insert/Find: previously it mutated t.Root
// without locking, which raced with concurrent Insert and Find calls.
func (t *BinarySearchTree) Delete(v int) {
	t.lock.Lock()
	defer t.lock.Unlock()
	t.Root = _delete(t.Root, v)
}
// _delete removes v from the subtree rooted at node and returns the
// (possibly new) root of that subtree.
func _delete(node *Node, v int) *Node {
	if node == nil {
		// Value not present; nothing to do.
		return nil
	} else if v < node.value {
		node.left = _delete(node.left, v)
	} else if v > node.value {
		node.right = _delete(node.right, v)
	} else {
		// Found the node to remove.
		if node.left == nil && node.right == nil {
			// Leaf: drop it.
			node = nil
		} else if node.left == nil {
			// Only a right child: promote it.
			node = node.right
		} else if node.right == nil {
			// Only a left child: promote it.
			node = node.left
		} else {
			// Two children: copy in the in-order predecessor (max of the
			// left subtree), then delete that duplicate from the left subtree.
			maxNode := GetMaxInSubTree(node.left)
			node.value = maxNode.value
			node.left = _delete(node.left, maxNode.value)
		}
	}
	return node
}
// GetMaxInSubTree returns the node holding the greatest value in the subtree,
// i.e. the right-most node.
func GetMaxInSubTree(node *Node) *Node {
	for node.right != nil {
		node = node.right
	}
	return node
}
// GetMinInSubTree finds the lowest int within the Subtree
func GetMinInSubTree(node *Node) *Node {
if node.left == nil {
return node
}
return GetMinInSubTree(node.left)
} | vendor/github.com/vastness-io/queues/pkg/core/binary_search_tree.go | 0.7865 | 0.429968 | binary_search_tree.go | starcoder |
// Package syntheticattention provides an implementation of the Synthetic Attention described in:
// "SYNTHESIZER: Rethinking Self-Attention in Transformer Models" by Tay et al., 2020.
// (https://arxiv.org/pdf/2005.00743.pdf)
package syntheticattention
import (
"encoding/gob"
mat "github.com/nlpodyssey/spago/pkg/mat32"
"github.com/nlpodyssey/spago/pkg/ml/ag"
"github.com/nlpodyssey/spago/pkg/ml/nn"
"github.com/nlpodyssey/spago/pkg/ml/nn/activation"
"github.com/nlpodyssey/spago/pkg/ml/nn/linear"
"github.com/nlpodyssey/spago/pkg/ml/nn/stack"
)
var (
	// Compile-time assertion that *Model satisfies nn.Model.
	_ nn.Model = &Model{}
)
// Model contains the serializable parameters.
type Model struct {
	nn.BaseModel
	Config
	FFN *stack.Model // feed-forward stack (linear + ReLU) producing hidden representations
	Value *linear.Model // linear projection of inputs to value vectors
	W nn.Param `spago:"type:weights"` // learned attention weights, MaxLength x HiddenSize
	Attention *ContextProb `spago:"scope:processor"` // attention state of the most recent Forward
}
// ContextProb is a pair of Context encodings and Prob attention scores.
type ContextProb struct {
	// Context encodings, one per input position.
	Context []ag.Node
	// Prob attention scores (softmax distributions), one per input position.
	Prob []mat.Matrix
}
// Config provides configuration settings for a Synthetic Attention Model.
type Config struct {
	InputSize int // size of each input vector
	HiddenSize int // size of the FFN hidden representation; width of each W row
	ValueSize int // size of each value projection
	MaxLength int // maximum supported sequence length; number of rows in W
}
// init registers the Model with gob so it can be serialized and deserialized.
func init() {
	gob.Register(&Model{})
}
// New returns a new model with parameters initialized to zeros.
func New(config Config) *Model {
	return &Model{
		Config: config,
		// FFN maps inputs to the hidden space used to score attention.
		FFN: stack.New(
			linear.New(config.InputSize, config.HiddenSize),
			activation.New(ag.OpReLU),
		),
		// One learned weight row per possible sequence position.
		W: nn.NewParam(mat.NewEmptyDense(config.MaxLength, config.HiddenSize)),
		Value: linear.New(config.InputSize, config.ValueSize),
	}
}
// Forward performs the forward step for each input node and returns the result.
// As a side effect it stores the per-position attention distributions and
// context encodings in m.Attention.
func (m *Model) Forward(xs ...ag.Node) []ag.Node {
	g := m.Graph()
	length := len(xs)
	context := make([]ag.Node, length)
	prob := make([]mat.Matrix, length)
	values := g.Stack(m.Value.Forward(xs...)...)       // value projections, one row per input
	rectified := g.Stack(m.FFN.Forward(xs...)...)      // hidden (ReLU) representations, one row per input
	attentionWeights := m.extractAttentionWeights(length) // learned weights trimmed to the sequence length
	// Synthetic attention scores: no query/key dot products, just the learned
	// weights against the hidden representations.
	mul := g.Mul(attentionWeights, g.T(rectified))
	for i := 0; i < length; i++ {
		attProb := g.Softmax(g.ColView(mul, i))
		context[i] = g.Mul(g.T(attProb), values) // attention-weighted sum of the values
		prob[i] = attProb.Value()
	}
	m.Attention = &ContextProb{
		Context: context,
		Prob: prob,
	}
	return context
}
// extractAttentionWeights returns the attention parameters tailored to the sequence length.
func (m *Model) extractAttentionWeights(length int) ag.Node {
g := m.Graph()
attentionWeights := make([]ag.Node, length)
for i := 0; i < length; i++ {
attentionWeights[i] = g.T(g.RowView(m.W, i))
}
return g.Stack(attentionWeights...)
} | pkg/ml/nn/attention/syntheticattention/syntheticattention.go | 0.868423 | 0.409811 | syntheticattention.go | starcoder |
package hurricane
import (
	"fmt"
	"math"
	"strings"
	"time"

	"godin/utilities"
)
// TrackPointSource identifies the origin of a track point (see the constants below).
type TrackPointSource string
const (
	Unknown TrackPointSource = "unknown" // origin not known
	Best TrackPointSource = "BEST" // best-track (observed) point
	Forecasted TrackPointSource = "OFCL" // official forecast point
)
// TrackPoint is a single point (observed or forecast) along a cyclone track;
// units are encoded in the field names (degrees, knots, millibars, nautical miles).
type TrackPoint struct {
	Timestamp time.Time `json:"timestamp"`
	TrackSequence float64 `json:"track_sequence"`
	LatYDeg float64 `json:"lat_y_deg"`
	LonXDeg float64 `json:"lon_x_deg"`
	MaxWindVelocityKts float64 `json:"max_wind_velocity_kts"`
	MinCentralPressureMb float64 `json:"min_central_pressure_mb"`
	RadiusMaxWindNmi float64 `json:"radius_max_wind_nmi"`
	CycloneForwardSpeedKts float64 `json:"cyclone_forward_speed_kts"`
	CycloneHeadingDeg float64 `json:"cyclone_heading_deg"`
	GradientWindAdjustmentFactor float64 `json:"gradient_wind_adjustment_factor"`
	Source TrackPointSource `json:"source"` // BEST or OFCL (see constants)
}
// BoundingBox is an axis-aligned geographic box in whole degrees,
// described by its top-left and bottom-right corners.
type BoundingBox struct {
	// Top Left Point
	LatYTopDeg int `json:"lat_y_top_deg"`
	LonXLeftDeg int `json:"lon_x_left_deg"`
	// Bottom Right
	LatYBottomDeg int `json:"lat_y_bottom_deg"`
	LonXRightDeg int `json:"lon_x_right_deg"`
}
// GetBlockHeight returns the number of grid blocks spanning the box's
// latitude extent at the given resolution.
func (bb BoundingBox) GetBlockHeight(blocksPerDegLatY int) int {
	top, bottom := bb.LatYTopDeg, bb.LatYBottomDeg
	if bottom > top {
		top, bottom = bottom, top
	}
	return (top - bottom) * blocksPerDegLatY
}
// GetBlockWidth returns the number of grid blocks spanning the box's
// longitude extent at the given resolution.
func (bb BoundingBox) GetBlockWidth(blocksPerDegLonX int) int {
	left, right := bb.LonXLeftDeg, bb.LonXRightDeg
	if left > right {
		left, right = right, left
	}
	return (right - left) * blocksPerDegLonX
}
// toPoints rasterizes the box into a row-major grid of Coordinates with
// blocksPerDegLatY x blocksPerDegLonX cells per degree, walking rows from the
// bottom-left corner. Positions are stepped in fixed-point units of 1e-5
// degree to avoid accumulating floating-point error.
// NOTE(review): blocksY/blocksX appear as divisors — a zero-extent box or a
// zero blocks-per-degree argument would panic; callers seem to guarantee a
// non-degenerate box. Confirm before reusing elsewhere.
func (bb BoundingBox) toPoints(blocksPerDegLatY int, blocksPerDegLonX int) []Coordinate {
	precision := 100000
	deltaY := int(math.Abs(float64(bb.LatYTopDeg - bb.LatYBottomDeg))) * precision
	deltaX := int(math.Abs(float64(bb.LonXRightDeg - bb.LonXLeftDeg))) * precision
	blocksY := deltaY * blocksPerDegLatY / precision
	blocksX := deltaX * blocksPerDegLonX / precision
	stepY := int(deltaY / blocksY)
	stepX := int(deltaX / blocksX)
	var points []Coordinate
	lat := int(bb.LatYBottomDeg) * precision
	for y := 0; y < blocksY; y++ {
		lon := int(bb.LonXLeftDeg) * precision
		for x := 0; x < blocksX; x++ {
			// Convert back from fixed-point to decimal degrees.
			points = append(points, Coordinate{float64(lat) / float64(precision), float64(lon) / float64(precision)})
			lon += stepX
		}
		lat += stepY
	}
	return points
}
// EventInformation describes a named storm event: its track points and the
// geographic bounds over which wind fields are calculated.
type EventInformation struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Year int `json:"year"`
	Track []TrackPoint `json:"track"`
	Bounds BoundingBox `json:"bounds"`
}
// Coordinate is an internal lat/lon pair in decimal degrees.
type Coordinate struct {
	latYDeg float64
	lonXDeg float64
}
// CoordinateValue is a lat/lon pair carrying a scalar value — in this file,
// the maximum wind speed (knots) computed for that grid cell.
type CoordinateValue struct {
	LatYDeg float64 `json:"lat_y_deg"`
	LonXDeg float64 `json:"lon_x_deg"`
	Value float64 `json:"value"`
}
// CalculatedEvent bundles the source event with its computed wind field and
// the grid parameters the field was computed at.
type CalculatedEvent struct {
	Info EventInformation `json:"info"`
	WindField []CoordinateValue `json:"wind_field"` // one entry per grid cell
	MaxCalculationDistanceNmi float64 `json:"max_calculation_distance_nmi"` // cells beyond this distance keep value 0
	PixPerDegreeLatY int `json:"pix_per_degree_lat_y"`
	PixPerDegreeLonX int `json:"pix_per_degree_lon_x"`
}
// CalculateEventFrame computes the wind field over bbox for a single cyclone
// frame (one set of position/intensity parameters), returning one
// CoordinateValue per grid cell. Cells farther than maxCalculationDistanceNmi
// from the cyclone center are skipped and keep a wind value of 0.
func CalculateEventFrame(bbox BoundingBox,
	latYDeg float64,
	lonXDeg float64,
	maxWindVelocityKts float64,
	radiusMaxWindNmi float64,
	cycloneForwardSpeedKts float64,
	cycloneHeadingDeg float64,
	gradientWindAdjustmentFactor float64,
	pixPerDegLatY int,
	pixPerDegLonX int,
	maxCalculationDistanceNmi float64) (windField []CoordinateValue) {
	gridPoints := bbox.toPoints(pixPerDegLatY, pixPerDegLonX)
	maxDistDegApproxDeg := maxCalculationDistanceNmi / 60 // convert nmi to degrees
	maxDistDegApproxDegSq := maxDistDegApproxDeg * maxDistDegApproxDeg
	// PERF use pointers and basic iterators instead of range and assignment to dodge duffcopy
	for i := 0; i < len(gridPoints); i++ {
		var c = &gridPoints[i]
		maxWindSpeedAtCoordinate := 0.0
		// PERF use a less accurate, simpler max distance check - this is huge
		checkDistDegSq := utilities.FastDistanceDegSq(latYDeg, lonXDeg, c.latYDeg, c.lonXDeg) // get the square of the distance to check against
		if checkDistDegSq < maxDistDegApproxDegSq {
			// Cell is close enough: do the expensive haversine + wind model.
			distanceToCenterNmi := utilities.HaversineDegreesToMeters(latYDeg, lonXDeg, c.latYDeg, c.lonXDeg) / 1000.0 * 0.539957 // convert to nautical miles
			bearingFromCenter := utilities.CalcBearingNorthZero(latYDeg, lonXDeg, c.latYDeg, c.lonXDeg)
			windSpeed, _ := CalcWindSpeedFrame(maxWindVelocityKts, distanceToCenterNmi, radiusMaxWindNmi, cycloneForwardSpeedKts, bearingFromCenter, cycloneHeadingDeg, gradientWindAdjustmentFactor)
			maxWindSpeedAtCoordinate = math.Max(maxWindSpeedAtCoordinate, windSpeed)
		}
		windField = append(windField, CoordinateValue{
			LatYDeg: c.latYDeg,
			LonXDeg: c.lonXDeg,
			Value: maxWindSpeedAtCoordinate,
		})
	}
	return windField
}
// CalculateEvent computes the wind field for the whole event: for every grid
// cell in ei.Bounds it records the maximum wind speed produced by any track
// point within maxCalculationDistanceNmi of the cell.
func (ei EventInformation) CalculateEvent(pixPerDegLatY int, pixPerDegLonX int, maxCalculationDistanceNmi float64) (event CalculatedEvent) {
	var windField []CoordinateValue
	gridPoints := ei.Bounds.toPoints(pixPerDegLatY, pixPerDegLonX)
	maxDistDegApproxDeg := maxCalculationDistanceNmi / 60 // convert nmi to degrees
	maxDistDegApproxDegSq := maxDistDegApproxDeg * maxDistDegApproxDeg
	// PERF use pointers and basic iterators instead of range and assignment to dodge duffcopy
	for i := 0; i < len(gridPoints); i++ {
		var c = &gridPoints[i]
		maxWindSpeedAtCoordinate := 0.0
		// Take the max wind at this cell over every track point of the event.
		for tpj := 0; tpj < len(ei.Track); tpj++ {
			var tp = &ei.Track[tpj]
			// PERF use a less accurate, simpler max distance check - this is huge
			checkDistDegSq := utilities.FastDistanceDegSq(tp.LatYDeg, tp.LonXDeg, c.latYDeg, c.lonXDeg) // get the square of the distance to check against
			if checkDistDegSq < maxDistDegApproxDegSq {
				distanceToCenterNmi := utilities.HaversineDegreesToMeters(tp.LatYDeg, tp.LonXDeg, c.latYDeg, c.lonXDeg) / 1000.0 * 0.539957 // convert to nautical miles
				bearingFromCenter := utilities.CalcBearingNorthZero(tp.LatYDeg, tp.LonXDeg, c.latYDeg, c.lonXDeg)
				windSpeed, _ := CalcWindSpeed(tp.MaxWindVelocityKts, distanceToCenterNmi, tp.RadiusMaxWindNmi, tp.CycloneForwardSpeedKts, bearingFromCenter, tp.CycloneHeadingDeg, tp.GradientWindAdjustmentFactor)
				maxWindSpeedAtCoordinate = math.Max(maxWindSpeedAtCoordinate, windSpeed)
			}
		}
		windField = append(windField, CoordinateValue{
			LatYDeg: c.latYDeg,
			LonXDeg: c.lonXDeg,
			Value: maxWindSpeedAtCoordinate,
		})
	}
	event = CalculatedEvent{
		Info: ei,
		WindField: windField,
		MaxCalculationDistanceNmi: maxCalculationDistanceNmi,
		PixPerDegreeLatY: pixPerDegLatY,
		PixPerDegreeLonX: pixPerDegLonX,
	}
	return event
}
func (ce CalculatedEvent) TrackToDelimited(header bool) string {
outString := ""
if header {
outString = "ts, lonX, latY, maxWindKts, headingDeg, rMax, source, fSpeedKts\n"
}
for _, row := range ce.Info.Track {
rowString := fmt.Sprintf("%s, %f, %f, %f, %f, %f, %s, %f\n", row.Timestamp.Format(time.RFC3339), row.LonXDeg, row.LatYDeg, row.MaxWindVelocityKts, row.CycloneHeadingDeg, row.RadiusMaxWindNmi, row.Source, row.CycloneForwardSpeedKts)
outString = outString + rowString
}
return outString
} | hurricane/hurricane_event.go | 0.803906 | 0.509764 | hurricane_event.go | starcoder |
package date
import (
"sort"
)
// Range represents an inclusive range between two dates
// (both Start and End are contained, see Contains).
type Range struct {
	Start Date `json:"start"`
	End Date `json:"end"`
}
// IsValid reports whether both bounds of r are valid dates.
func (r Range) IsValid() bool {
	return r.Start.IsValid() && r.End.IsValid()
}
// Empty reports whether Start equals End.
func (r Range) Empty() bool {
	return r.Start.Equal(r.End)
}
// Contains reports whether d is within r (both bounds inclusive).
func (r Range) Contains(d Date) bool {
	return !d.Before(r.Start) && !d.After(r.End)
}
// Encloses returns true if the bounds of the inner range dr do not extend
// outside the bounds of the outer range r.
func (r Range) Encloses(dr Range) bool {
	return r.Contains(dr.Start) && r.Contains(dr.End)
}
// Intersects reports whether r and dr share at least one day. Both
// directions are checked so that one range fully enclosing the other is
// still detected.
func (r Range) Intersects(dr Range) bool {
	return (r.Contains(dr.Start) || r.Contains(dr.End)) ||
		(dr.Contains(r.Start) || dr.Contains(r.End))
}
// RangeSet is a list of date Ranges supporting set-style operations
// (subtraction, imposition, filtering).
type RangeSet []Range
// Sub returns the RangeSet covering a's days minus the days covered by dr.
func (a RangeSet) Sub(dr ...Range) RangeSet {
	busy := a.buildDictExcludeRanges(dr...) // mark a's days, then clear dr's days
	dates := linearizeBusy(busy)            // remaining days, in map (random) order
	sort.Sort(ByAsc(dates))
	list := buildRanges(dates) // coalesce consecutive days back into ranges
	return RangeSet(list)
}
// SubSet returns the RangeSet after subtracting every range in set from a.
func (a RangeSet) SubSet(set RangeSet) RangeSet {
	return a.Sub(set.List()...)
}
// Impose returns RangeSet after dr imposition.
// Range intersections will be merged:
//	[10, 15] impose with [13, 22] got [10, 22]
//	[10, 15] impose with [20, 25] got [10, 15], [20, 25]
func (a RangeSet) Impose(dr ...Range) RangeSet {
	busy := a.buildDictIncludeRanges(dr...) // mark a's days plus dr's days
	dates := linearizeBusy(busy)            // covered days, in map (random) order
	sort.Sort(ByAsc(dates))
	list := buildRanges(dates) // coalesce consecutive days back into ranges
	return RangeSet(list)
}
// ImposeSet returns the RangeSet after imposing every range in set onto a.
func (a RangeSet) ImposeSet(set RangeSet) RangeSet {
	return a.Impose(set.List()...)
}
// TrimEnd shifts every range's end date a day earlier, in place.
func (a RangeSet) TrimEnd() RangeSet {
	return a.ShiftEnd(-1)
}
// ExtendEnd shifts every range's end date a day later, in place.
func (a RangeSet) ExtendEnd() RangeSet {
	return a.ShiftEnd(1)
}
// ShiftEnd moves every range's End date by n days, mutating the set in place.
// n may be negative to shift into the past. The (modified) set is returned
// for chaining.
func (a RangeSet) ShiftEnd(n int) RangeSet {
	for i := range a {
		a[i].End = a[i].End.AddDays(n)
	}
	return a
}
// RangeTest is a predicate over a Range, used by Filter.
type RangeTest func(Range) bool
// RangeNotEmpty is a RangeTest that passes when dr is not empty.
func RangeNotEmpty(dr Range) bool {
	return !dr.Empty()
}
// Filter returns a new RangeSet containing every element of a that passes
// the test; a itself is left unmodified.
func (a RangeSet) Filter(test RangeTest) RangeSet {
	res := RangeSet{}
	for _, dr := range a {
		if test(dr) {
			res = append(res, dr)
		}
	}
	return res
}
// FilterEmpty returns a RangeSet holding only the non-empty Ranges from a.
func (a RangeSet) FilterEmpty() RangeSet {
	return a.Filter(RangeNotEmpty)
}
// List returns the set as a plain []Range slice (no copy is made).
func (a RangeSet) List() []Range {
	return []Range(a)
}
// buildBusyDict expands the set into a day->covered map: every day of every
// range in a (both bounds inclusive) is marked true.
func (a RangeSet) buildBusyDict() map[Date]bool {
	busy := make(map[Date]bool)
	for _, dr := range a {
		for i, days := 0, dr.End.DaysSince(dr.Start); i <= days; i++ {
			busy[dr.Start.AddDays(i)] = true
		}
	}
	return busy
}
// buildDictExcludeRanges builds the busy map for a, then clears (sets false)
// every day covered by the given ranges — the map form of set subtraction.
func (a RangeSet) buildDictExcludeRanges(drs ...Range) map[Date]bool {
	busy := a.buildBusyDict()
	for _, dr := range drs {
		for i, days := 0, dr.End.DaysSince(dr.Start); i <= days; i++ {
			busy[dr.Start.AddDays(i)] = false
		}
	}
	return busy
}
// buildDictIncludeRanges builds the busy map for a, then additionally marks
// every day covered by the given ranges — the map form of set union.
func (a RangeSet) buildDictIncludeRanges(drs ...Range) map[Date]bool {
	busy := a.buildBusyDict()
	for _, dr := range drs {
		for i, days := 0, dr.End.DaysSince(dr.Start); i <= days; i++ {
			busy[dr.Start.AddDays(i)] = true
		}
	}
	return busy
}
// linearizeBusy collects the days marked true in the busy map into a slice.
// Map iteration order is random, so callers must sort the result themselves.
func linearizeBusy(busy map[Date]bool) []Date {
	res := []Date{}
	for d, isBusy := range busy {
		if isBusy {
			res = append(res, d)
		}
	}
	return res
}
// buildRanges coalesces a list of dates into inclusive Ranges, starting a new
// range whenever two neighbours are more than one day apart.
// Precondition: dates must be sorted ascending and free of duplicates
// (the callers sort with ByAsc after linearizeBusy).
func buildRanges(dates []Date) []Range {
	N := len(dates)
	if N == 0 {
		return []Range{}
	}
	ranges := []Range{}
	start := dates[0]
	for i := 1; i < N; i++ {
		if dates[i].DaysSince(dates[i-1]) == 1 {
			// Consecutive day: still inside the current range.
			continue
		}
		// Gap found: close the current range and open a new one.
		ranges = append(ranges, Range{start, dates[i-1]})
		start = dates[i]
	}
	// Close the final range.
	ranges = append(ranges, Range{start, dates[N-1]})
	return ranges
}
package geojson
import "github.com/tidwall/geojson/geometry"
// Rect ...
type Rect struct {
base geometry.Rect
}
// NewRect ...
func NewRect(rect geometry.Rect) *Rect {
return &Rect{base: rect}
}
// ForEach ...
func (g *Rect) ForEach(iter func(geom Object) bool) bool {
return iter(g)
}
// Empty ...
func (g *Rect) Empty() bool {
return g.base.Empty()
}
// Rect ...
func (g *Rect) Rect() geometry.Rect {
return g.base
}
// Base ...
func (g *Rect) Base() geometry.Rect {
return g.base
}
// Center ...
func (g *Rect) Center() geometry.Point {
return g.base.Center()
}
// AppendJSON ...
func (g *Rect) AppendJSON(dst []byte) []byte {
var gPoly Polygon
gPoly.base.Exterior = g.base
return gPoly.AppendJSON(dst)
}
// JSON ...
func (g *Rect) JSON() string {
return string(g.AppendJSON(nil))
}
// String ...
func (g *Rect) String() string {
return string(g.AppendJSON(nil))
}
// Contains ...
func (g *Rect) Contains(obj Object) bool {
return obj.Spatial().WithinRect(g.base)
}
// Within ...
func (g *Rect) Within(obj Object) bool {
return obj.Contains(g)
}
// WithinRect ...
func (g *Rect) WithinRect(rect geometry.Rect) bool {
return rect.ContainsRect(g.base)
}
// WithinPoint ...
func (g *Rect) WithinPoint(point geometry.Point) bool {
return point.ContainsRect(g.base)
}
// WithinLine ...
func (g *Rect) WithinLine(line *geometry.Line) bool {
return line.ContainsRect(g.base)
}
// WithinPoly ...
func (g *Rect) WithinPoly(poly *geometry.Poly) bool {
return poly.ContainsRect(g.base)
}
// Intersects ...
func (g *Rect) Intersects(obj Object) bool {
return obj.Spatial().IntersectsRect(g.base)
}
// IntersectsPoint ...
func (g *Rect) IntersectsPoint(point geometry.Point) bool {
return g.base.IntersectsPoint(point)
}
// IntersectsRect ...
func (g *Rect) IntersectsRect(rect geometry.Rect) bool {
return g.base.IntersectsRect(rect)
}
// IntersectsLine ...
func (g *Rect) IntersectsLine(line *geometry.Line) bool {
return g.base.IntersectsLine(line)
}
// IntersectsPoly ...
func (g *Rect) IntersectsPoly(poly *geometry.Poly) bool {
return g.base.IntersectsPoly(poly)
}
// NumPoints ...
func (g *Rect) NumPoints() int {
return 2
}
// Spatial ...
func (g *Rect) Spatial() Spatial {
return g
}
// Distance ...
func (g *Rect) Distance(obj Object) float64 {
return obj.Spatial().DistanceRect(g.base)
}
// DistancePoint ...
func (g *Rect) DistancePoint(point geometry.Point) float64 {
return geoDistancePoints(g.Center(), point)
}
// DistanceRect ...
func (g *Rect) DistanceRect(rect geometry.Rect) float64 {
return geoDistancePoints(g.Center(), rect.Center())
}
// DistanceLine ...
func (g *Rect) DistanceLine(line *geometry.Line) float64 {
return geoDistancePoints(g.Center(), line.Rect().Center())
}
// DistancePoly ...
func (g *Rect) DistancePoly(poly *geometry.Poly) float64 {
return geoDistancePoints(g.Center(), poly.Rect().Center())
} | vendor/github.com/tidwall/geojson/rect.go | 0.866062 | 0.5888 | rect.go | starcoder |
package main
import "math"
const EARTH_RADIUS float64 = 6370.856 //km 地球半径 平均值,千米
//地球半径
const EARTH_R float64 = 6378245.0
func HaverSin(theta float64) float64 {
v := math.Sin(theta / 2)
return v * v
}
func Distance(lat1, lon1, lat2, lon2 float64) float64 {
//用haversine公式计算球面两点间的距离。
//经纬度转换成弧度
lat1 = ConvertDegreesToRadians(lat1)
lon1 = ConvertDegreesToRadians(lon1)
lat2 = ConvertDegreesToRadians(lat2)
lon2 = ConvertDegreesToRadians(lon2)
//差值
var vLon float64 = math.Abs(lon1 - lon2)
var vLat float64 = math.Abs(lat1 - lat2)
//h is the great circle distance in radians, great circle就是一个球体上的切面,它的圆心即是球心的一个周长最大的圆。
var h float64 = HaverSin(vLat) + math.Cos(lat1)*math.Cos(lat2)*HaverSin(vLon)
var distance float64 = 2 * EARTH_RADIUS * math.Asin(math.Sqrt(h))
return distance
}
/**
* 计算两点之间的角度
*
* @param pntFirst
* @param pntNext
* @return
*/
func GetAngle(lat1, lon1, lat2, lon2 float64) float64 {
var dRotateAngle float64 = math.Atan2(math.Abs(lon1-lon2), math.Abs(lat1-lat2))
if lon2 >= lon1 {
if lat2 >= lat1 {
} else {
dRotateAngle = math.Pi - dRotateAngle
}
} else {
if lat2 >= lat1 {
dRotateAngle = 2*math.Pi - dRotateAngle
} else {
dRotateAngle = math.Pi + dRotateAngle
}
}
dRotateAngle = dRotateAngle * 180 / math.Pi
return dRotateAngle
}
func ConvertDegreesToRadians(degrees float64) float64 {
return degrees * math.Pi / 180.0
}
func ConvertRadiansToDegrees(radian float64) float64 {
return radian * 180.0 / math.Pi
}
const ee float64 = 0.00669342162296594323
const XPI float64 = math.Pi * 3000.0 / 180.0
//世界大地坐标转为百度坐标
func wgs2bd(lat, lon float64) (bdlat, bdlng float64) {
gcjlat, gcjlng := wgs2gcj(lat, lon)
bdlat, bdlng = gcj2bd(gcjlat, gcjlng)
return
}
func gcj2bd(lat, lon float64) (bdlat, bdlng float64) {
var x float64 = lon
var y float64 = lat
var z float64 = math.Sqrt(x*x+y*y) + 0.00002*math.Sin(y*XPI)
var theta float64 = math.Atan2(y, x) + 0.000003*math.Cos(x*XPI)
bdlng = z*math.Cos(theta) + 0.0065
bdlat = z*math.Sin(theta) + 0.006
return
}
func bd2gcj(lat, lon float64) (gcjlat, gcjlng float64) {
var x float64 = lon - 0.0065
var y float64 = lat - 0.006
var z float64 = math.Sqrt(x*x+y*y) - 0.00002*math.Sin(y*XPI)
var theta float64 = math.Atan2(y, x) - 0.000003*math.Cos(x*XPI)
gcjlng = z * math.Cos(theta)
gcjlat = z * math.Sin(theta)
return
}
func wgs2gcj(lat, lon float64) (gcjlat, gcjlng float64) {
var dLat float64 = transformLat(lon-105.0, lat-35.0)
var dLon float64 = transformLon(lon-105.0, lat-35.0)
var radLat float64 = lat / 180.0 * math.Pi
var magic float64 = math.Sin(radLat)
magic = 1 - ee*magic*magic
var sqrtMagic float64 = math.Sqrt(magic)
dLat = (dLat * 180.0) / ((EARTH_R * (1 - ee)) / (magic * sqrtMagic) * math.Pi)
dLon = (dLon * 180.0) / (EARTH_R / sqrtMagic * math.Cos(radLat) * math.Pi)
gcjlat = lat + dLat
gcjlng = lon + dLon
return
}
func transformLat(lat, lon float64) float64 {
var ret = -100.0 + 2.0*lat + 3.0*lon + 0.2*lon*lon + 0.1*lat*lon + 0.2*math.Sqrt(math.Abs(lat))
ret += (20.0*math.Sin(6.0*lat*math.Pi) + 20.0*math.Sin(2.0*lat*math.Pi)) * 2.0 / 3.0
ret += (20.0*math.Sin(lon*math.Pi) + 40.0*math.Sin(lon/3.0*math.Pi)) * 2.0 / 3.0
ret += (160.0*math.Sin(lon/12.0*math.Pi) + 320*math.Sin(lon*math.Pi/30.0)) * 2.0 / 3.0
return ret
}
func transformLon(lat, lon float64) float64 {
var ret = 300.0 + lat + 2.0*lon + 0.1*lat*lat + 0.1*lat*lon + 0.1*math.Sqrt(math.Abs(lat))
ret += (20.0*math.Sin(6.0*lat*math.Pi) + 20.0*math.Sin(2.0*lat*math.Pi)) * 2.0 / 3.0
ret += (20.0*math.Sin(lat*math.Pi) + 40.0*math.Sin(lat/3.0*math.Pi)) * 2.0 / 3.0
ret += (150.0*math.Sin(lat/12.0*math.Pi) + 300.0*math.Sin(lat/30.0*math.Pi)) * 2.0 / 3.0
return ret
} | gps.go | 0.657978 | 0.643525 | gps.go | starcoder |
package types
// taken from https://www.enterpriseready.io/features/audit-log/
/*
When an event is logged it should have details that provide enough information about the event to provide the necessary context of who, what, when and where etc. Specifically, the follow fields are critical to an audit log:
Actor - The username, uuid, API token name of the account taking the action.
Group - The group (aka organization, team, account) that the actor is a member of (needed to show admins the full history of their group).
Where - IP address, device ID, country.
When - The NTP synced server time when the event happened.
Target - the object or underlying resource that is being changed (the noun) as well as the fields that include a key value for the new state of the target.
Action - the way in which the object was changed (the verb).
Action Type - the corresponding C``R``U``D category.
Event Name - Common name for the event that can be used to filter down to similar events.
Description - A human readable description of the action taken, sometimes includes links to other pages within the application.
Optional information
Server server ids or names, server location.
Version version of the code that is sending the events.
Protocols ie http vs https.
Global Actor ID if a customer is using Single Sign On, it might be important to also include a Global UID if it differs from the application specific ID.
*/
type AuditLog struct {
Actor string `auditdb:"index" json:"actor"` // The username, uuid, API token name of the account taking the action.
ActorType string `auditdb:"index" json:"actor_type"`
Group string `auditdb:"index" json:"group"` // The group (aka organization, team, account) that the actor is a member of (needed to show admins the full history of their group).
Where string `auditdb:"index" json:"where"` // IP address, device ID, country.
WhereType string `auditdb:"index" json:"where_type"`
When string `json:"when"` // The NTP synced RFC3339 server time when the event happened.
Target string `auditdb:"index" json:"target"` // The object or underlying resource that is being changed (the noun) as well as the fields that include a key value for the new state of the target.
TargetID string `auditdb:"index" json:"target_id"` // The ID (optional) of the target
Action string `auditdb:"index" json:"action"` // The way in which the object was changed (the verb).
ActionType string `auditdb:"index" json:"action_type"`
Name string `auditdb:"index" json:"name"` // Common name for the event that can be used to filter down to similar events.
Description string `json:"description"` // A human readable description of the action taken, sometimes includes links to other pages within the application.
Metadata map[string]string `json:"metadata"`
TS int64 `json:"-"` // timestamp, used only for sorting
}
func (a AuditLog) Indexes() (map[string]string, map[string]interface{}) {
return getIndexes(a)
} | types/audit-log.go | 0.589126 | 0.466663 | audit-log.go | starcoder |
package opeth
import (
"math/rand"
"io/ioutil"
"strings"
"github.com/Krognol/dgofw"
)
type Opeth struct {
lines []string
g *Generator
}
func NewOpethPlugin() *Opeth {
b, err := ioutil.ReadFile("./opeth_record.txt")
if err != nil {
return nil
}
plugin := &Opeth{lines: strings.Split(string(b), "\n"), g: CreateGenerator(1, 500)}
for _, line := range plugin.lines {
plugin.g.AddSeeds(line)
}
return plugin
}
func (o *Opeth) OnMessage(m *dgofw.DiscordMessage) {
m.Reply(o.g.GenerateText())
}
// Since both maps (the prefix -> suffix and canonical -> representation)
// operate about the same way, we abstract their representation into a notion
// of CountedStrings, where the values of the map contain both the string we
// care about and a count of how often it occurs.
type CountedString struct {
hits int
str string
}
// A CountedStringList is a list of all the CountedStrings for a given prefix,
// and a total number of times that prefix occurs (necessary, with the
// CountedString hits, for probability calculation).
type CountedStringList struct {
slice []*CountedString
total int
}
// Map from a prefix in canonical form to CountedStringLists, where one will
// move canonical prefixes to suffixes, and another to words -> representation.
type CountedStringMap map[string]*CountedStringList
// Generators gives us all we need to build a fresh data model to generate
// from.
type Generator struct {
PrefixLen int
CharLimit int
Data CountedStringMap // suffix map
Reps CountedStringMap // representation map
Beginnings []string // acceptable ways to start a tweet.
}
// CreateGenerator returns a Generator that is fully initialized and ready for
// use.
func CreateGenerator(prefixLen int, charLimit int) *Generator {
markov := make(CountedStringMap)
reps := make(CountedStringMap)
beginnings := []string{}
return &Generator{prefixLen, charLimit, markov, reps, beginnings}
}
// Convenience method, already populating the first "hit" of the CountedString.
func createCountedString(str string) *CountedString {
return &CountedString{1, str}
}
// AddSeeds takes in a string, breaks it into prefixes, and adds it to the
// data model.
func (g *Generator) AddSeeds(input string) {
source := tokenize(input)
first := true
for len(source) > g.PrefixLen {
prefix := strings.Join(source[0:g.PrefixLen], " ")
AddToMap(prefix, source[g.PrefixLen], g.Data)
source = source[1:]
if first {
g.Beginnings = append(g.Beginnings, prefix)
first = false
}
}
}
// Add to map checks if the key/value pair exists in the map. If not, we create
// them, and if so, we either increment the counter on the value or initialize
// it if it didn't exist previously.
func AddToMap(prefix, toAdd string, aMap CountedStringMap) {
if csList, exists := aMap[prefix]; exists {
if countedStr, member := csList.hasCountedString(toAdd); member {
countedStr.hits++
} else {
countedStr = createCountedString(toAdd)
csList.slice = append(csList.slice, countedStr)
}
csList.total++
} else {
countedStr := createCountedString(toAdd)
countedStrSlice := make([]*CountedString, 0)
countedStrSlice = append(countedStrSlice, countedStr)
csList := &CountedStringList{countedStrSlice, 1}
aMap[prefix] = csList
}
}
// tokenize splits the input string into "words" we use as prefixes and
// suffixes. We can't do a naive 'split' by a separator, or even a regex '\W'
// due to corner cases, and the nature of the text we intend to capture: e.g.
// we'd like "forty5" to parse as such, rather than "forty" with "5" being
// interpreted as a "non-word" character. Similarly with hashtags, etc.
func tokenize(input string) []string {
return strings.Split(input, " ")
}
// hasCountedString searches a CountedStringList for one that contains the string, and
// returns the suffix (if applicable) and a boolean describing whether or not
// we found it.
func (l CountedStringList) hasCountedString(lookFor string) (*CountedString, bool) {
slice := l.slice
for i := 0; i < len(slice); i++ {
curr := slice[i]
if curr.str == lookFor {
return curr, true
}
}
return createCountedString(""), false
}
// Generates text from the given generator. It stops when the character limit
// has run out, or it encounters a prefix it has no suffixes for.
func (g *Generator) GenerateText() string {
return g.GenerateFromPrefix(g.randomPrefix())
}
// GenerateFromPrefix builds a chain of words beginning with prefix, drawing
// successors until popNextWord signals termination (character budget spent,
// or no suffix data for the current prefix). Exposed primarily for testing.
func (g *Generator) GenerateFromPrefix(prefix string) string {
	// Note: this can break if the prefix itself is longer than CharLimit.
	words := []string{prefix}
	remaining := g.CharLimit - len(prefix)
	for {
		next, done, nextPrefix, nextRemaining := g.popNextWord(prefix, remaining)
		if done {
			break
		}
		words = append(words, next)
		prefix, remaining = nextPrefix, nextRemaining
	}
	return strings.Join(words, " ")
}
// popNextWord draws the next word following prefix while enforcing the
// remaining character budget. It returns the word, a termination flag, the
// shifted prefix for the next draw, and the budget left after spending the
// word. Termination is signalled when the prefix has no recorded successors
// or the drawn word (plus a joining space) would exceed the limit.
func (g *Generator) popNextWord(prefix string, limit int) (string, bool, string, int) {
	csList, exists := g.Data[prefix]
	if !exists {
		return "", true, "", 0 // no successors recorded for this prefix
	}
	word := csList.DrawProbabilistically()
	cost := len(word) + 1 // +1 for the joining space
	if cost > limit {
		return "", true, "", 0 // character budget exhausted
	}
	// Shift the prefix window: drop its first word, append the new one.
	shifted := append(strings.Split(prefix, " ")[1:], word)
	return word, false, strings.Join(shifted, " "), limit - cost
}
// DrawProbabilistically picks a string from the list with probability
// proportional to its hit count: a random ticket in [1, total] is walked
// down the slice until the owning entry is found.
func (cs CountedStringList) DrawProbabilistically() string {
	ticket := rand.Intn(cs.total) + 1
	for _, entry := range cs.slice {
		if ticket <= entry.hits {
			return entry.str
		}
		ticket -= entry.hits
	}
	return ""
}
// randomPrefix returns a uniformly random sentence-beginning prefix.
func (g *Generator) randomPrefix() string {
	return g.Beginnings[rand.Intn(len(g.Beginnings))]
}
// For testing.
func (s *CountedStringList) GetSuffix(lookFor string) (*CountedString, bool) {
for i := 0; i < len(s.slice); i++ {
if s.slice[i].str == lookFor {
return s.slice[i], true
}
}
return createCountedString(""), false
} | plugins/opeth/opeth.go | 0.690142 | 0.506897 | opeth.go | starcoder |
package types
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/juju/errors"
"github.com/pingcap/tidb/sessionctx/variable"
)
// Range is the interface of the three type of range
// (IntColumnRange, *ColumnRange and *IndexRange). Each Convert2* method is
// only valid on its matching concrete type; the mismatched ones panic.
type Range interface {
	fmt.Stringer
	Convert2IntRange() IntColumnRange
	Convert2ColumnRange() *ColumnRange
	Convert2IndexRange() *IndexRange
}
// IntColumnRange represents a range for a integer column, both low and high are inclusive.
type IntColumnRange struct {
	LowVal int64 // inclusive lower bound; math.MinInt64 renders as -inf
	HighVal int64 // inclusive upper bound; math.MaxInt64 renders as +inf, math.MinInt64 encodes nil
}
// IsPoint reports whether the range covers exactly one value.
func (tr *IntColumnRange) IsPoint() bool {
	return tr.LowVal == tr.HighVal
}
// String renders the range in interval notation, mapping math.MinInt64 to
// -inf and math.MaxInt64 to +inf (a MinInt64 high bound encodes nil).
func (tr IntColumnRange) String() string {
	var left, right string
	if tr.LowVal == math.MinInt64 {
		left = "(-inf"
	} else {
		left = "[" + strconv.FormatInt(tr.LowVal, 10)
	}
	switch tr.HighVal {
	case math.MaxInt64:
		right = "+inf)"
	case math.MinInt64:
		// A MinInt64 high bound is used to represent nil.
		right = "-inf)"
	default:
		right = strconv.FormatInt(tr.HighVal, 10) + "]"
	}
	return left + "," + right
}
// Convert2IntRange implements the Range interface; it returns the receiver.
func (tr IntColumnRange) Convert2IntRange() IntColumnRange {
	return tr
}
// Convert2ColumnRange implements the Range interface; it is not a valid
// conversion for an integer column range and always panics.
func (tr IntColumnRange) Convert2ColumnRange() *ColumnRange {
	panic("you shouldn't call this method.")
}
// Convert2IndexRange implements the Range interface; it is not a valid
// conversion for an integer column range and always panics.
func (tr IntColumnRange) Convert2IndexRange() *IndexRange {
	panic("you shouldn't call this method.")
}
// ColumnRange represents a range for a column.
type ColumnRange struct {
	Low Datum // lower bound value
	High Datum // upper bound value
	LowExcl bool // true when Low itself is excluded from the range
	HighExcl bool // true when High itself is excluded from the range
}
// String renders the range in interval notation; exclusive bounds use
// parentheses and inclusive bounds use brackets.
func (cr *ColumnRange) String() string {
	left := "["
	if cr.LowExcl {
		left = "("
	}
	right := "]"
	if cr.HighExcl {
		right = ")"
	}
	return left + formatDatum(cr.Low) + "," + formatDatum(cr.High) + right
}
// Convert2IntRange implements the Range interface; it is not a valid
// conversion for a column range and always panics.
func (cr *ColumnRange) Convert2IntRange() IntColumnRange {
	panic("you shouldn't call this method.")
}
// Convert2ColumnRange implements the Range interface; it returns the receiver.
func (cr *ColumnRange) Convert2ColumnRange() *ColumnRange {
	return cr
}
// Convert2IndexRange implements the Range interface; it is not a valid
// conversion for a column range and always panics.
func (cr *ColumnRange) Convert2IndexRange() *IndexRange {
	panic("you shouldn't call this method.")
}
// IndexRange represents a range for an index.
type IndexRange struct {
	LowVal []Datum // lower bound, one datum per index column
	HighVal []Datum // upper bound, one datum per index column
	LowExclude bool // Low value is exclusive.
	HighExclude bool // High value is exclusive.
}
// IsPoint reports whether the index range pins down exactly one key: every
// low/high column pair must compare equal (and be comparable without error),
// no sentinel bound may appear, and neither end may be exclusive.
func (ir *IndexRange) IsPoint(sc *variable.StatementContext) bool {
	if len(ir.LowVal) != len(ir.HighVal) {
		return false
	}
	for i, low := range ir.LowVal {
		high := ir.HighVal[i]
		// MinNotNull / MaxValue sentinels can never denote a single point.
		if low.Kind() == KindMinNotNull || high.Kind() == KindMaxValue {
			return false
		}
		cmp, err := low.CompareDatum(sc, high)
		if err != nil || cmp != 0 {
			return false
		}
	}
	return !ir.LowExclude && !ir.HighExclude
}
// String implements the fmt.Stringer interface, rendering both multi-column
// bounds in interval notation; datums within one bound are space-separated.
// (The previous comment incorrectly referred to Convert2IndexRange.)
func (ir *IndexRange) String() string {
	lowStrs := make([]string, 0, len(ir.LowVal))
	for _, d := range ir.LowVal {
		lowStrs = append(lowStrs, formatDatum(d))
	}
	highStrs := make([]string, 0, len(ir.LowVal))
	for _, d := range ir.HighVal {
		highStrs = append(highStrs, formatDatum(d))
	}
	l, r := "[", "]"
	if ir.LowExclude {
		l = "("
	}
	if ir.HighExclude {
		r = ")"
	}
	return l + strings.Join(lowStrs, " ") + "," + strings.Join(highStrs, " ") + r
}
// Convert2IntRange implements the Range interface; it is not a valid
// conversion for an index range and always panics.
func (ir *IndexRange) Convert2IntRange() IntColumnRange {
	panic("you shouldn't call this method.")
}
// Convert2ColumnRange implements the Range interface; it is not a valid
// conversion for an index range and always panics.
func (ir *IndexRange) Convert2ColumnRange() *ColumnRange {
	panic("you shouldn't call this method.")
}
// Convert2IndexRange implements the Range interface; it returns the receiver.
func (ir *IndexRange) Convert2IndexRange() *IndexRange {
	return ir
}
// Align appends low value and high value up to the number of columns with max value, min not null value or null value.
func (ir *IndexRange) Align(numColumns int) {
	for i := len(ir.LowVal); i < numColumns; i++ {
		if ir.LowExclude {
			// Exclusive low bound: pad with +inf so every key sharing the
			// existing prefix is skipped.
			ir.LowVal = append(ir.LowVal, MaxValueDatum())
		} else {
			// Inclusive low bound: pad with the zero (null) datum so the
			// whole prefix group is included.
			ir.LowVal = append(ir.LowVal, Datum{})
		}
	}
	for i := len(ir.HighVal); i < numColumns; i++ {
		if ir.HighExclude {
			// Exclusive high bound: pad with null, mirroring the low side.
			ir.HighVal = append(ir.HighVal, Datum{})
		} else {
			// Inclusive high bound: pad with +inf to keep the prefix group.
			ir.HighVal = append(ir.HighVal, MaxValueDatum())
		}
	}
}
// PrefixEqualLen tells you how long the prefix of the range is a point.
// e.g. If this range is (1 2 3, 1 2 +inf), then the return value is 2.
func (ir *IndexRange) PrefixEqualLen(sc *variable.StatementContext) (int, error) {
	// LowVal and HighVal always have the same length here.
	for i, low := range ir.LowVal {
		cmp, err := low.CompareDatum(sc, ir.HighVal[i])
		if err != nil {
			return 0, errors.Trace(err)
		}
		if cmp != 0 {
			return i, nil
		}
	}
	return len(ir.LowVal), nil
}
func formatDatum(d Datum) string {
if d.Kind() == KindMinNotNull {
return "-inf"
}
if d.Kind() == KindMaxValue {
return "+inf"
}
return fmt.Sprintf("%v", d.GetValue())
} | util/types/range.go | 0.630685 | 0.404802 | range.go | starcoder |
package creational
import (
"bytes"
"encoding/gob"
)
/*
Summary:
Prototype pattern is used to create copies of objects.
Self clone: Actual objects have the responsiblity to make their own clone.
The object to be cloned exposes Clone() methods.
DeepCopy:
DeepCopy() is used in Golang to copy objects with actual data
without referrence copy. Two approaches to deepcopy:
1. DeepCopy using function: Use function to copy referrences as fresh object.
Downside is we need to make many functions for a large nested objects.
2. DeepCopy through searialization: Use serialization. Serialize an object
and deserialize it another object. Saves developer from writing a lot of code.
Example:
The autoscaler controller wants to work on a copy of the object, make changes
to it and then call the API to update the object status. We do not want to
create a new object as it is expensive. DeeCopy is used.
1. Complex objects can require nested copies and many deep copies, it helps
to use this pattern.
2. Objects private variables can only be performed by the same object. So self
clone is also a necessity.
3. Copied object are better than creating objects from scratch. This saves
us from heavy computations which may be required when created from scratch.
*/
// CopyMode selects the deep-copy strategy used by AutoScaler.UpdateStatus.
type CopyMode int
const (
	Serialization CopyMode = iota // copy via a gob encode/decode round trip
	Function // copy via hand-written field copiers (not implemented here)
)
// AutoScaler pairs a desired-state spec with an observed status and records
// which deep-copy strategy UpdateStatus should use.
type AutoScaler struct {
	Spec *AutoScalerSpec // desired scaling configuration
	Status *AutoScalerStatus // last observed state
	Mode CopyMode // deep-copy strategy used by UpdateStatus
}
// AutoScalerSpec holds the desired scaling bounds and target.
type AutoScalerSpec struct {
	Min *int // minimum replica count
	Max *int // maximum replica count
	Target *float32 // target utilization value
}
// AutoScalerStatus holds the observed replica counts.
type AutoScalerStatus struct {
	Current int
	Desired int
	Available int
}
// NewAutoScaler builds an AutoScaler whose spec holds the given bounds and
// target, with an empty status and serialization-based deep copying.
func NewAutoScaler(min, max int, target float32) *AutoScaler {
	spec := &AutoScalerSpec{Min: &min, Max: &max, Target: &target}
	return &AutoScaler{
		Spec:   spec,
		Status: &AutoScalerStatus{},
		Mode:   Serialization,
	}
}
// DeepCopyWithSerialization clones an AutoScaler by gob-encoding it into a
// buffer and decoding into a fresh value, so no pointers are shared with
// the source.
func DeepCopyWithSerialization(a *AutoScaler) AutoScaler {
	buf := bytes.Buffer{}
	encode := gob.NewEncoder(&buf) // buf implements io.Writer's Write()
	encode.Encode(a) // error not handled for simplicity
	var copy AutoScaler
	decoder := gob.NewDecoder(&buf) // buf implements io.Reader's Read()
	// NOTE(review): if encode or decode fails, copy stays zero-valued (nil
	// Spec/Status) and callers that dereference Status will panic — consider
	// surfacing these errors.
	decoder.Decode(&copy)
	return copy
}
func (a *AutoScaler) UpdateStatus() AutoScaler {
var copy AutoScaler
switch a.Mode {
case Serialization:
copy = DeepCopyWithSerialization(a)
// case Function: not implemented as serialization is better
}
copy.Status.Current = 27
copy.Status.Desired = 27
copy.Status.Available = 27
return copy
} | creational/prototype.go | 0.723993 | 0.409693 | prototype.go | starcoder |
package edwards25519
import (
"crypto/cipher"
"encoding/hex"
"errors"
"io"
"go.dedis.ch/kyber/v3"
"go.dedis.ch/kyber/v3/group/internal/marshalling"
)
// marshalPointID is the 8-byte type tag ("ed.point") returned by MarshalID.
var marshalPointID = [8]byte{'e', 'd', '.', 'p', 'o', 'i', 'n', 't'}
// point is an Ed25519 group element held in extended coordinates.
type point struct {
	ge extendedGroupElement // underlying extended-coordinate element
	varTime bool // when true, Mul may use the variable-time routine
	curve *Curve // owning curve, reported by Group()
}
// Group returns the curve (group) this point belongs to.
func (P *point) Group() kyber.Group {
	return P.curve
}
// String returns the lowercase hex encoding of the point's 32-byte form.
func (P *point) String() string {
	var b [32]byte
	P.ge.ToBytes(&b)
	return hex.EncodeToString(b[:])
}
// MarshalSize returns the size in bytes of an encoded point (32).
func (P *point) MarshalSize() int {
	return 32
}
// MarshalBinary encodes the point into its canonical 32-byte representation.
func (P *point) MarshalBinary() ([]byte, error) {
	var b [32]byte
	P.ge.ToBytes(&b)
	return b[:], nil
}
// MarshalID returns the type tag used in encoding/decoding
func (P *point) MarshalID() [8]byte {
	return marshalPointID
}
// UnmarshalBinary decodes a 32-byte encoding into P, rejecting byte strings
// that do not represent a valid curve point.
func (P *point) UnmarshalBinary(b []byte) error {
	if !P.ge.FromBytes(b) {
		return errors.New("invalid Ed25519 curve point")
	}
	return nil
}
// MarshalTo writes the point's binary encoding to w, returning the number
// of bytes written.
func (P *point) MarshalTo(w io.Writer) (int, error) {
	return marshalling.PointMarshalTo(P, w)
}
// UnmarshalFrom reads and decodes a point encoding from r, returning the
// number of bytes read.
func (P *point) UnmarshalFrom(r io.Reader) (int, error) {
	return marshalling.PointUnmarshalFrom(P, r)
}
// Equal reports whether two points on the same curve have identical
// canonical 32-byte encodings.
func (P *point) Equal(P2 kyber.Point) bool {
	var b1, b2 [32]byte
	P.ge.ToBytes(&b1)
	P2.(*point).ge.ToBytes(&b2)
	// Fixed-size byte arrays are comparable directly.
	return b1 == b2
}
// Set point to be equal to P2.
func (P *point) Set(P2 kyber.Point) kyber.Point {
	P.ge = P2.(*point).ge
	return P
}
// Clone returns a new point equal to P.
// NOTE(review): only the group element is copied; the clone's varTime and
// curve fields are left at their zero values — confirm this is intended.
func (P *point) Clone() kyber.Point {
	return &point{ge: P.ge}
}
// Null sets P to the neutral element, which is (0,1) for twisted Edwards curves.
func (P *point) Null() kyber.Point {
	P.ge.Zero()
	return P
}
// Base sets P to the standard base point for this curve.
func (P *point) Base() kyber.Point {
	P.ge = baseext
	return P
}
// EmbedLen returns how many bytes of data can be embedded in a single point.
func (P *point) EmbedLen() int {
	// Reserve the most-significant 8 bits for pseudo-randomness.
	// Reserve the least-significant 8 bits for embedded data length.
	// (Hopefully it's unlikely we'll need >=2048-bit curves soon.)
	return (255 - 8 - 8) / 8
}
// Embed sets P to a curve point carrying up to EmbedLen() bytes of data in
// its encoding, repeatedly drawing random candidates from rand until one
// decodes to a valid point in the prime-order subgroup. With nil data it
// produces a plain random subgroup point.
func (P *point) Embed(data []byte, rand cipher.Stream) kyber.Point {
	// How many bytes to embed?
	dl := P.EmbedLen()
	if dl > len(data) {
		dl = len(data)
	}
	for {
		// Pick a random point, with optional embedded data
		var b [32]byte
		rand.XORKeyStream(b[:], b[:])
		if data != nil {
			b[0] = byte(dl)       // Encode length in low 8 bits
			copy(b[1:1+dl], data) // Copy in data to embed
		}
		if !P.ge.FromBytes(b[:]) { // Try to decode
			continue // invalid point, retry
		}
		// If we're using the full group,
		// we just need any point on the curve, so we're done.
		//		if c.full {
		//			return P,data[dl:]
		//		}
		// We're using the prime-order subgroup,
		// so we need to make sure the point is in that subencoding.
		// If we're not trying to embed data,
		// we can convert our point into one in the subgroup
		// simply by multiplying it by the cofactor.
		if data == nil {
			P.Mul(cofactorScalar, P) // multiply by cofactor
			if P.Equal(nullPoint) {
				continue // unlucky; try again
			}
			return P // success
		}
		// Since we need the point's y-coordinate to hold our data,
		// we must simply check if the point is in the subgroup
		// and retry point generation until it is.
		var Q point
		Q.Mul(primeOrderScalar, P)
		if Q.Equal(nullPoint) {
			return P // success
		}
		// Keep trying...
	}
}
// Pick sets P to a fresh random point drawn from the given randomness source.
func (P *point) Pick(rand cipher.Stream) kyber.Point {
	return P.Embed(nil, rand)
}
// Data extracts embedded data from a point group element; the low byte of
// the encoding carries the embedded data length.
func (P *point) Data() ([]byte, error) {
	var b [32]byte
	P.ge.ToBytes(&b)
	dl := int(b[0]) // extract length byte
	if dl > P.EmbedLen() {
		return nil, errors.New("invalid embedded data length")
	}
	return b[1 : 1+dl], nil
}
// Add sets P to the sum P1 + P2 of two points and returns P.
func (P *point) Add(P1, P2 kyber.Point) kyber.Point {
	E1 := P1.(*point)
	E2 := P2.(*point)
	var t2 cachedGroupElement
	var r completedGroupElement
	E2.ge.ToCached(&t2)
	r.Add(&E1.ge, &t2)
	r.ToExtended(&P.ge)
	return P
}
// Sub sets P to the difference P1 - P2 of two points and returns P.
func (P *point) Sub(P1, P2 kyber.Point) kyber.Point {
	E1 := P1.(*point)
	E2 := P2.(*point)
	var t2 cachedGroupElement
	var r completedGroupElement
	E2.ge.ToCached(&t2)
	r.Sub(&E1.ge, &t2)
	r.ToExtended(&P.ge)
	return P
}
// Neg sets P to the negative of point A and returns P.
// For Edwards curves, the negative of (x,y) is (-x,y).
func (P *point) Neg(A kyber.Point) kyber.Point {
	P.ge.Neg(&A.(*point).ge)
	return P
}
// Mul sets P to the scalar multiple s*A and returns P. A nil A selects the
// standard base point; otherwise the variable-time routine is used only
// when P.varTime is set.
func (P *point) Mul(s kyber.Scalar, A kyber.Point) kyber.Point {
	a := &s.(*scalar).v
	if A == nil {
		geScalarMultBase(&P.ge, a)
	} else {
		if P.varTime {
			geScalarMultVartime(&P.ge, a, &A.(*point).ge)
		} else {
			geScalarMult(&P.ge, a, &A.(*point).ge)
		}
	}
	return P
}
package cryptoapis
import (
"encoding/json"
)
// GetBlockDetailsByBlockHeightRIBSL Litecoin
// GetBlockDetailsByBlockHeightRIBSL Litecoin
// It carries the Litecoin-specific fields of the "get block details by
// block height" response; every field is required by the API.
type GetBlockDetailsByBlockHeightRIBSL struct {
	// Represents a mathematical value of how hard it is to find a valid hash for this block.
	Difficulty string `json:"difficulty"`
	// Represents a random value that can be adjusted to satisfy the Proof of Work.
	Nonce string `json:"nonce"`
	// Represents the total size of the block in Bytes.
	Size int32 `json:"size"`
	// Represents a specific sub-unit of Litecoin. Bits have two-decimal precision.
	Bits string `json:"bits"`
	// Represents a hexadecimal number of all the hashes necessary to produce the current chain. E.g., when converting 0000000000000000000000000000000000000000000086859f7a841475b236fd to a decimal you get 635262017308958427068157 hashes, or 635262 exahashes.
	Chainwork string `json:"chainwork"`
	// Defines the single and final (root) node of a Merkle tree. It is the combined hash of all transactions' hashes that are part of a blockchain block.
	MerkleRoot string `json:"merkleRoot"`
	// Defines the numeric representation of the block size excluding the witness data.
	StrippedSize int32 `json:"strippedSize"`
	// Represents the version of the specific block on the blockchain.
	Version int32 `json:"version"`
	// Is the hexadecimal string representation of the block's version.
	VersionHex string `json:"versionHex"`
	// Represents a measurement to compare the size of different transactions to each other in proportion to the block size limit.
	Weight int32 `json:"weight"`
}
// NewGetBlockDetailsByBlockHeightRIBSL instantiates a new GetBlockDetailsByBlockHeightRIBSL object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewGetBlockDetailsByBlockHeightRIBSL(difficulty string, nonce string, size int32, bits string, chainwork string, merkleRoot string, strippedSize int32, version int32, versionHex string, weight int32) *GetBlockDetailsByBlockHeightRIBSL {
	this := GetBlockDetailsByBlockHeightRIBSL{}
	this.Difficulty = difficulty
	this.Nonce = nonce
	this.Size = size
	this.Bits = bits
	this.Chainwork = chainwork
	this.MerkleRoot = merkleRoot
	this.StrippedSize = strippedSize
	this.Version = version
	this.VersionHex = versionHex
	this.Weight = weight
	return &this
}
// NewGetBlockDetailsByBlockHeightRIBSLWithDefaults instantiates a new GetBlockDetailsByBlockHeightRIBSL object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// (all fields are left at their zero values here).
func NewGetBlockDetailsByBlockHeightRIBSLWithDefaults() *GetBlockDetailsByBlockHeightRIBSL {
	this := GetBlockDetailsByBlockHeightRIBSL{}
	return &this
}
// --- Generated accessors: each GetX returns the zero value on a nil
// receiver, GetXOk additionally reports presence, and SetX assigns. ---
// GetDifficulty returns the Difficulty field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetDifficulty() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Difficulty
}
// GetDifficultyOk returns a tuple with the Difficulty field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetDifficultyOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Difficulty, true
}
// SetDifficulty sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetDifficulty(v string) {
	o.Difficulty = v
}
// GetNonce returns the Nonce field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetNonce() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Nonce
}
// GetNonceOk returns a tuple with the Nonce field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetNonceOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Nonce, true
}
// SetNonce sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetNonce(v string) {
	o.Nonce = v
}
// GetSize returns the Size field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetSize() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.Size
}
// GetSizeOk returns a tuple with the Size field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetSizeOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Size, true
}
// SetSize sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetSize(v int32) {
	o.Size = v
}
// GetBits returns the Bits field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetBits() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Bits
}
// GetBitsOk returns a tuple with the Bits field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetBitsOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Bits, true
}
// SetBits sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetBits(v string) {
	o.Bits = v
}
// GetChainwork returns the Chainwork field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetChainwork() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Chainwork
}
// GetChainworkOk returns a tuple with the Chainwork field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetChainworkOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Chainwork, true
}
// SetChainwork sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetChainwork(v string) {
	o.Chainwork = v
}
// GetMerkleRoot returns the MerkleRoot field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetMerkleRoot() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.MerkleRoot
}
// GetMerkleRootOk returns a tuple with the MerkleRoot field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetMerkleRootOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.MerkleRoot, true
}
// SetMerkleRoot sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetMerkleRoot(v string) {
	o.MerkleRoot = v
}
// GetStrippedSize returns the StrippedSize field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetStrippedSize() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.StrippedSize
}
// GetStrippedSizeOk returns a tuple with the StrippedSize field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetStrippedSizeOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.StrippedSize, true
}
// SetStrippedSize sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetStrippedSize(v int32) {
	o.StrippedSize = v
}
// GetVersion returns the Version field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetVersion() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.Version
}
// GetVersionOk returns a tuple with the Version field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetVersionOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Version, true
}
// SetVersion sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetVersion(v int32) {
	o.Version = v
}
// GetVersionHex returns the VersionHex field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetVersionHex() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.VersionHex
}
// GetVersionHexOk returns a tuple with the VersionHex field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetVersionHexOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.VersionHex, true
}
// SetVersionHex sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetVersionHex(v string) {
	o.VersionHex = v
}
// GetWeight returns the Weight field value
func (o *GetBlockDetailsByBlockHeightRIBSL) GetWeight() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.Weight
}
// GetWeightOk returns a tuple with the Weight field value
// and a boolean to check if the value has been set.
func (o *GetBlockDetailsByBlockHeightRIBSL) GetWeightOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Weight, true
}
// SetWeight sets field value
func (o *GetBlockDetailsByBlockHeightRIBSL) SetWeight(v int32) {
	o.Weight = v
}
// MarshalJSON implements json.Marshaler. Every field of this model is
// required, so all of them are serialized unconditionally (the generated
// `if true` guards have been removed as dead code).
func (o GetBlockDetailsByBlockHeightRIBSL) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"difficulty":   o.Difficulty,
		"nonce":        o.Nonce,
		"size":         o.Size,
		"bits":         o.Bits,
		"chainwork":    o.Chainwork,
		"merkleRoot":   o.MerkleRoot,
		"strippedSize": o.StrippedSize,
		"version":      o.Version,
		"versionHex":   o.VersionHex,
		"weight":       o.Weight,
	}
	return json.Marshal(toSerialize)
}
// NullableGetBlockDetailsByBlockHeightRIBSL wraps the model together with an
// explicit "has been set" flag so a JSON null can be distinguished from an
// absent value.
type NullableGetBlockDetailsByBlockHeightRIBSL struct {
	value *GetBlockDetailsByBlockHeightRIBSL
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableGetBlockDetailsByBlockHeightRIBSL) Get() *GetBlockDetailsByBlockHeightRIBSL {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableGetBlockDetailsByBlockHeightRIBSL) Set(val *GetBlockDetailsByBlockHeightRIBSL) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableGetBlockDetailsByBlockHeightRIBSL) IsSet() bool {
	return v.isSet
}
// Unset clears the value and the set flag.
func (v *NullableGetBlockDetailsByBlockHeightRIBSL) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableGetBlockDetailsByBlockHeightRIBSL wraps val in an already-set wrapper.
func NewNullableGetBlockDetailsByBlockHeightRIBSL(val *GetBlockDetailsByBlockHeightRIBSL) *NullableGetBlockDetailsByBlockHeightRIBSL {
	return &NullableGetBlockDetailsByBlockHeightRIBSL{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value (null when unset or nil).
func (v NullableGetBlockDetailsByBlockHeightRIBSL) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullableGetBlockDetailsByBlockHeightRIBSL) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package eif
import "math"
const (
numberOfTreesDefault = 10
)
func cFactor(n float64) float64 {
return 2.0*(math.Log(n-1)+0.5772156649) - (2.0 * (n - 1.) / (n * 1.0))
}
// Forest is a group of trees
type Forest struct {
	trees []*Node // the isolation trees making up the ensemble
	c float64 // cFactor normalization constant for the sample size
}
// Score returns the anomaly score (between 0.0 and 1.0) of a given point:
// the mean isolation depth across all trees, normalized by the expected BST
// search depth c and mapped through 2^(-mean/c). Higher scores imply more
// anomalousness; note that scores are affected by maximum tree depth.
func (f *Forest) Score(p []float64) float64 {
	total := 0
	for _, tree := range f.trees {
		total += tree.Depth(p)
	}
	mean := float64(total) / float64(len(f.trees))
	return math.Pow(2, -mean/f.c)
}
// forestParams is the set of metadata used to construct a forest.
type forestParams struct {
	numberOfTrees int // how many trees to build
	maxTreeDepth int // depth cap applied to every tree
}
// ForestOpt is an option to adapt forestParams during construction.
type ForestOpt func(*forestParams)
// WithTrees is a construction option, setting the amount of trees to generate
func WithTrees(n int) ForestOpt {
	return func(f *forestParams) {
		f.numberOfTrees = n
	}
}
// WithMaxTreeDepth is a construction option, which determines how deep trees are constructed
// In general, running with smaller values is faster, but loses fidelity in anomaly scores
func WithMaxTreeDepth(n int) ForestOpt {
	return func(f *forestParams) {
		f.maxTreeDepth = n
	}
}
// NewForest creates a new forest, with optional construction parameters.
// By default 10 trees are constructed, and a maximum depth determined by the expected depth
// of an unsuccesful binary tree search with the given data size.
func NewForest(data [][]float64, opts ...ForestOpt) *Forest {
sampleSize := float64(len(data))
params := forestParams{
numberOfTrees: numberOfTreesDefault,
maxTreeDepth: int(math.Ceil(math.Log2(sampleSize))),
}
for _, fn := range opts {
fn(¶ms)
}
trees := make([]*Node, params.numberOfTrees)
for i := range trees {
trees[i] = NewTree(data, params.maxTreeDepth)
}
return &Forest{
trees: trees,
c: cFactor(sampleSize),
}
} | forest.go | 0.841988 | 0.434521 | forest.go | starcoder |
package model
// moveStruct is the concrete Move implementation, holding the flat board
// indexes of the start and end squares on the 5x5 board.
type moveStruct struct {
	from int // flat index of the start square
	to int // flat index of the destination square
}
// From returns the flat index of the start square.
func (m *moveStruct) From() int {
	return m.from
}
// To returns the flat index of the destination square.
func (m *moveStruct) To() int {
	return m.to
}
// FromXY returns the (x, y) coordinates of the start square.
func (m *moveStruct) FromXY() (x int, y int) {
	return IndexToXY(m.from)
}
// ToXY returns the (x, y) coordinates of the destination square.
func (m *moveStruct) ToXY() (x int, y int) {
	return IndexToXY(m.to)
}
// IsValid reports whether this move is legal in the given scene.
func (m *moveStruct) IsValid(scene Scene) bool {
	fromX, fromY := m.FromXY()
	toX, toY := m.ToXY()
	dx := toX - fromX
	dy := toY - fromY
	if dx < 0 {
		dx = -dx
	}
	if dy < 0 {
		dy = -dy
	}
	// First of all, neither square may fall outside the board.
	if !AllInRange(fromX, fromY, toX, toY) {
		return false
	}
	fromType := scene.ChessList()[m.From()].Type()
	toType := scene.ChessList()[m.To()].Type()
	movingSide := scene.MovingSide()
	// The start square must hold a piece of the side whose turn it is.
	if movingSide != fromType {
		return false
	}
	// If the soldier side is moving,
	if movingSide == ChessTypeSoldier {
		// the destination must be empty,
		if toType != ChessTypeEmpty {
			return false
		}
		// and the move must be exactly one square.
		if dx+dy != 1 {
			return false
		}
	} else if movingSide == ChessTypeCannon {
		// The cannon side has two cases.
		if toType == ChessTypeEmpty {
			// A plain move must be exactly one square.
			if dx+dy == 1 {
				return true
			}
		}
		// The capture case:
		if toType == ChessTypeSoldier {
			// the cannon must jump exactly two squares in a straight line
			if dx+dy == 2 && dx*dy == 0 {
				// and the square in between must be empty.
				midX := (fromX + toX) / 2
				midY := (fromY + toY) / 2
				if scene.ChessList()[XyToIndex(midX, midY)].Type() == ChessTypeEmpty {
					return true
				}
			}
		}
		return false
	}
	return true
}
// Move describes one move on the 5x5 board, exposing its endpoints both as
// flat indexes and as coordinates, plus a legality check against a Scene.
type Move interface {
	From() int
	To() int
	FromXY() (x int, y int)
	ToXY() (x int, y int)
	IsValid(scene Scene) bool
}
// NewMove builds a Move from flat start/destination indexes.
func NewMove(from int, to int) Move {
	return &moveStruct{from, to}
}
// NewMoveByXY builds a Move from start/destination board coordinates.
func NewMoveByXY(fromX int, fromY int, toX int, toY int) Move {
	return NewMove(XyToIndex(fromX, fromY), XyToIndex(toX, toY))
}
// IndexToXY converts a flat index in [0, 25) into 5x5 board coordinates.
func IndexToXY(index int) (x int, y int) {
	return index % 5, index / 5
}
// XyToIndex converts 5x5 board coordinates into a flat index in [0, 25).
func XyToIndex(x int, y int) int {
	return y*5 + x
}
// AllInRange reports whether every given value lies within the board range
// 0..4 (true also for an empty argument list).
func AllInRange(values ...int) bool {
	for _, v := range values {
		if v >= 0 && v < 5 {
			continue
		}
		return false
	}
	return true
}
func getAllAdjacentIndexes(index int) []int {
var result []int
x, y := IndexToXY(index)
checkAdjacent := func(x1, y1 int) {
if AllInRange(x1, y1) {
result = append(result, XyToIndex(x1, y1))
}
}
checkAdjacent(x-1, y)
checkAdjacent(x+1, y)
checkAdjacent(x, y-1)
checkAdjacent(x, y+1)
return result
} | Move.go | 0.503662 | 0.407923 | Move.go | starcoder |
package indicators
import (
"container/list"
"errors"
"github.com/thetruetrade/gotrade"
)
// A Simple Moving Average Indicator (Sma), no storage, for use in other indicators
type SmaWithoutStorage struct {
*baseIndicatorWithFloatBounds
// private variables
periodTotal float64
periodHistory *list.List
periodCounter int
timePeriod int
}
// NewSmaWithoutStorage creates a Simple Moving Average Indicator (Sma) without storage
func NewSmaWithoutStorage(timePeriod int, valueAvailableAction ValueAvailableActionFloat) (indicator *SmaWithoutStorage, err error) {
	// an indicator without storage MUST have a value available action
	if valueAvailableAction == nil {
		return nil, ErrValueAvailableActionIsNil
	}
	// the minimum timeperiod for this indicator is 2
	if timePeriod < 2 {
		return nil, errors.New("timePeriod is less than the minimum (2)")
	}
	// check the maximum timeperiod
	// NOTE(review): the "(100000)" literal below assumes
	// MaximumLookbackPeriod == 100000 — confirm they stay in sync.
	if timePeriod > MaximumLookbackPeriod {
		return nil, errors.New("timePeriod is greater than the maximum (100000)")
	}
	lookback := timePeriod - 1
	// periodCounter starts negative so that it reaches zero exactly when
	// the first full averaging window has been observed.
	ind := SmaWithoutStorage{
		baseIndicatorWithFloatBounds: newBaseIndicatorWithFloatBounds(lookback, valueAvailableAction),
		periodCounter: timePeriod * -1,
		periodHistory: list.New(),
		timePeriod: timePeriod,
	}
	return &ind, nil
}
// A Simple Moving Average Indicator (Sma)
type Sma struct {
*SmaWithoutStorage
selectData gotrade.DOHLCVDataSelectionFunc
// public variables
Data []float64
}
// NewSma creates a Simple Moving Average Indicator (Sma) for online usage
func NewSma(timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Sma, err error) {
	if selectData == nil {
		return nil, ErrDOHLCVDataSelectFuncIsNil
	}
	ind := Sma{
		selectData: selectData,
	}
	// The storage-less core appends each computed average to ind.Data via
	// this callback.
	ind.SmaWithoutStorage, err = NewSmaWithoutStorage(
		timePeriod,
		func(dataItem float64, streamBarIndex int) {
			ind.Data = append(ind.Data, dataItem)
		})
	return &ind, err
}
// NewDefaultSma creates a Simple Moving Average Indicator (Sma) for online
// usage with default parameters:
// - timePeriod: 10
// - data selection: close price
func NewDefaultSma() (indicator *Sma, err error) {
	const defaultTimePeriod = 10
	return NewSma(defaultTimePeriod, gotrade.UseClosePrice)
}
// NewSmaWithSrcLen creates a Simple Moving Average Indicator (Sma) for
// offline usage, pre-allocating result storage sized to the usable portion
// of the source data.
func NewSmaWithSrcLen(sourceLength uint, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Sma, err error) {
	ind, err := NewSma(timePeriod, selectData)
	if err != nil {
		return ind, err
	}
	// Only initialise the storage if there is enough source data to require
	// it. Compare additively to avoid unsigned underflow: the original
	// `sourceLength - lookback > 1` wraps around (and allocates a huge
	// slice) whenever sourceLength is smaller than the lookback period.
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
	return ind, err
}
// NewDefaultSmaWithSrcLen creates a Simple Moving Average Indicator (Sma)
// for offline usage with default parameters, pre-allocating result storage.
func NewDefaultSmaWithSrcLen(sourceLength uint) (indicator *Sma, err error) {
	ind, err := NewDefaultSma()
	if err != nil {
		return ind, err
	}
	// Only initialise the storage if there is enough source data to require
	// it; the additive comparison avoids unsigned underflow when
	// sourceLength is smaller than the lookback period.
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
	return ind, err
}
// NewSmaForStream creates a Simple Moving Average Indicator (Sma) for online usage with a source data stream
func NewSmaForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Sma, err error) {
ind, err := NewSma(timePeriod, selectData)
priceStream.AddTickSubscription(ind)
return ind, err
}
// NewDefaultSmaForStream creates a Simple Moving Average Indicator (Sma) for online usage with a source data stream
func NewDefaultSmaForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Sma, err error) {
ind, err := NewDefaultSma()
priceStream.AddTickSubscription(ind)
return ind, err
}
// NewSmaForStreamWithSrcLen creates a Simple Moving Average Indicator (Sma)
// for offline usage with a source data stream, pre-allocating result storage
// for sourceLength bars. The indicator is subscribed to priceStream ticks
// only when construction succeeds.
func NewSmaForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Sma, err error) {
	ind, err := NewSmaWithSrcLen(sourceLength, timePeriod, selectData)
	if err != nil {
		// do not subscribe a partially constructed indicator to the stream
		return ind, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// NewDefaultSmaForStreamWithSrcLen creates a Simple Moving Average Indicator
// (Sma) for offline usage with a source data stream and default parameters,
// pre-allocating result storage for sourceLength bars. The indicator is
// subscribed to priceStream ticks only when construction succeeds.
func NewDefaultSmaForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Sma, err error) {
	ind, err := NewDefaultSmaWithSrcLen(sourceLength)
	if err != nil {
		// do not subscribe a partially constructed indicator to the stream
		return ind, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// ReceiveDOHLCVTick consumes a source data DOHLCV price tick: the configured
// selection function extracts the relevant price field, which is then
// forwarded to ReceiveTick for the moving-average computation.
func (ind *Sma) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {
	ind.ReceiveTick(ind.selectData(tickData), streamBarIndex)
}
func (ind *SmaWithoutStorage) ReceiveTick(tickData float64, streamBarIndex int) {
ind.periodCounter += 1
ind.periodHistory.PushBack(tickData)
if ind.periodCounter > 0 {
var valueToRemove = ind.periodHistory.Front()
ind.periodTotal -= valueToRemove.Value.(float64)
}
if ind.periodHistory.Len() > ind.timePeriod {
var first = ind.periodHistory.Front()
ind.periodHistory.Remove(first)
}
ind.periodTotal += tickData
var result float64 = ind.periodTotal / float64(ind.timePeriod)
if ind.periodCounter >= 0 {
ind.UpdateIndicatorWithNewValue(result, streamBarIndex)
}
} | indicators/sma.go | 0.696165 | 0.413773 | sma.go | starcoder |
package trie
import (
"bytes"
"fmt"
"github.com/lunfardo314/verkle/kzg"
"go.dedis.ch/kyber/v3"
"golang.org/x/crypto/blake2b"
"golang.org/x/xerrors"
)
// State represents kv store plus trie
type State struct {
	ts *kzg.TrustedSetup // trusted setup used for all commitment arithmetic
	store KVStore // whole backing store; the partitions below are prefixed views into it
	values KVStore // raw key -> value data (prefix "v")
	trie KVStore // serialized trie nodes (prefix "t")
	root KVStore // marshaled root commitment, stored under the nil key (prefix "r")
	rootCommitmentCache kyber.Point // in-memory root commitment, persisted by FlushCaches
	valueCache map[string][]byte // pending value writes, persisted by FlushCaches
	nodeCache map[string]*Node // pending node writes, persisted by FlushCaches
}
// Key prefixes carving the single backing store into logical partitions.
const (
	prefixValues = "v" // raw key/value data
	prefixTrie = "t" // serialized trie nodes
	prefixRootCommitment = "r" // marshaled root commitment
)
// NewState creates an empty State backed by an in-memory KV store.
// The trie is seeded with an empty root node at the nil key, and the binary
// representation of the trusted setup is stored as the value at the nil key
// so that Check can later verify that state and setup agree.
func NewState(ts *kzg.TrustedSetup) *State {
	store := NewSimpleKVStore()
	ret := &State{
		ts: ts,
		store: store,
		values: store.Partition(prefixValues),
		trie: store.Partition(prefixTrie),
		root: store.Partition(prefixRootCommitment),
		rootCommitmentCache: ts.Suite.G1().Point().Null(),
		nodeCache: make(map[string]*Node),
		valueCache: make(map[string][]byte),
	}
	// initially trie has null commitments at nil key
	ret.trie.Set(nil, (&Node{}).Bytes())
	data := ts.Bytes()
	ret.StoreValue(nil, data)
	// commit to the blake2b hash of the trusted setup bytes as the terminal
	// value of the nil key, and fold it into the root commitment cache
	hts := blake2b.Sum256(data)
	commitTrustedSetup := ts.Suite.G1().Scalar().SetBytes(hts[:])
	ret.updateKey(nil, 0, &ret.rootCommitmentCache, commitTrustedSetup)
	ret.FlushCaches()
	assert(ret.Check(ts), "consistency check failed")
	return ret
}
// NewNode creates an empty node, caches it under the given key and returns
// it. It fails when a node for that key already exists (cached or stored).
func (st *State) NewNode(key []byte) (*Node, error) {
	if _, exists := st.GetNode(key); exists {
		return nil, xerrors.Errorf("node with the key '%s' already exists", string(key))
	}
	ret := &Node{}
	st.nodeCache[string(key)] = ret
	return ret, nil
}
// GetValue returns the value stored under key, consulting the write cache
// before the persisted partition. The second result reports existence.
func (st *State) GetValue(key []byte) ([]byte, bool) {
	if cached, hit := st.valueCache[string(key)]; hit {
		return cached, true
	}
	stored, found := st.values.Get(key)
	if !found {
		return nil, false
	}
	return stored, true
}
// mustGetNode loads and deserializes the node stored under key, asserting
// (panicking) when the node is missing or its binary form is malformed.
func (st *State) mustGetNode(key []byte) *Node {
	raw, found := st.trie.Get(key)
	assert(found, fmt.Sprintf("can't get node for key '%s'", string(key)))
	ret, err := st.NodeFromBytes(raw)
	assert(err == nil, err)
	return ret
}
// GetNode returns the node stored under key, consulting the node cache
// before the persisted trie partition. Nodes loaded from the store are
// deserialized and memoized in the cache. The second result reports existence.
func (st *State) GetNode(key []byte) (*Node, bool) {
	k := string(key)
	if cached, hit := st.nodeCache[k]; hit {
		return cached, true
	}
	raw, found := st.trie.Get(key)
	if !found {
		return nil, false
	}
	parsed, err := st.NodeFromBytes(raw)
	assert(err == nil, err)
	st.nodeCache[k] = parsed
	return parsed, true
}
// StoreValue records the key/value pair in the write cache; it is persisted
// on the next FlushCaches.
func (st *State) StoreValue(key, value []byte) {
	st.valueCache[string(key)] = value
}
// StoreNode records the node in the node cache under key; it is persisted
// on the next FlushCaches.
func (st *State) StoreNode(key []byte, node *Node) {
	st.nodeCache[string(key)] = node
}
// FlushCaches writes every pending value and serialized node into its
// partition, persists the cached root commitment under the nil key and
// resets the in-memory caches.
func (st *State) FlushCaches() {
	for key, value := range st.valueCache {
		st.values.Set([]byte(key), value)
	}
	for key, node := range st.nodeCache {
		st.trie.Set([]byte(key), node.Bytes())
	}
	rootBin, err := st.rootCommitmentCache.MarshalBinary()
	assert(err == nil, err)
	st.root.Set(nil, rootBin)
	st.valueCache = map[string][]byte{}
	st.nodeCache = map[string]*Node{}
}
// RootCommitment returns the persisted root commitment. An optional target
// point can be supplied to be unmarshaled into; otherwise a fresh point is
// allocated from the suite.
func (st *State) RootCommitment(ret ...kyber.Point) kyber.Point {
	var target kyber.Point
	if len(ret) > 0 {
		target = ret[0]
	} else {
		target = st.ts.Suite.G1().Point()
	}
	rootBin, ok := st.root.Get(nil)
	assert(ok, "inconsistency")
	err := target.UnmarshalBinary(rootBin)
	assert(err == nil, err)
	return target
}
// UpdateStr is a convenience wrapper around Update taking string arguments;
// intended for testing.
func (st *State) UpdateStr(key, value string) {
	st.Update([]byte(key), []byte(value))
}
// Update stores value under key and propagates the corresponding commitment
// change along the trie path, starting from the cached root commitment.
func (st *State) Update(key, value []byte) {
	st.StoreValue(key, value)
	valueCommitment := st.ts.Suite.G1().Scalar()
	scalarFromBytes(valueCommitment, value)
	st.updateKey(key, 0, &st.rootCommitmentCache, valueCommitment)
}
// updateKey inserts/updates the commitment to valueCommitment along path,
// starting the descent at pathPosition. updateCommitment points at the
// parent's commitment to the subtree rooted at path[:pathPosition] and is
// adjusted in place to reflect the change.
//
// NOTE(review): the recursion consumes the common prefix of the remaining
// path and the current node's pathFragment. Three cases are handled below:
// no node exists yet, the fragment is fully matched (continue or terminate),
// and a partial match that forces a fork of the fragment.
func (st *State) updateKey(path []byte, pathPosition int, updateCommitment *kyber.Point, valueCommitment kyber.Scalar) {
	assert(pathPosition <= len(path), "pathPosition <= len(path)")
	if len(path) == 0 {
		path = []byte{}
	}
	key := path[:pathPosition]
	node, ok := st.GetNode(key)
	if !ok {
		// node for the path[:pathPosition] does not exist
		// create a new one, put rest of the path into the fragment
		// Commit to terminal value
		var err error
		node, err = st.NewNode(key)
		assert(err == nil, err)
		node.pathFragment = path[pathPosition:]
		st.updateTerminalValue(node, updateCommitment, valueCommitment)
		return
	}
	// node for the path[:pathPosition] exists
	prefix := commonPrefix(node.pathFragment, path[pathPosition:])
	assert(len(prefix) <= len(node.pathFragment), "len(prefix)<= len(node.pathFragment)")
	// the following parameters define how it goes:
	// - len(path)
	// - pathPosition
	// - len(node.pathFragment)
	// - len(prefix)
	nextPathPosition := pathPosition + len(prefix)
	assert(nextPathPosition <= len(path), "nextPathPosition <= len(path)")
	if len(prefix) == len(node.pathFragment) {
		// pathFragment is part of the path. No need for a fork, continue the path
		if nextPathPosition == len(path) {
			// reached the terminal value on this node
			st.updateTerminalValue(node, updateCommitment, valueCommitment)
		} else {
			assert(nextPathPosition < len(path), "nextPathPosition < len(path)")
			// didn't reach the end of the path
			// choose direction and continue down the path of the child
			childIndex := path[nextPathPosition]
			var oldCommitment kyber.Point
			if node.children[childIndex] != nil {
				// keep a copy of the child's old commitment for the delta below
				oldCommitment = st.ts.Suite.G1().Point()
				oldCommitment.Set(node.children[childIndex])
			}
			// recursively update the rest of the path
			st.updateKey(path, nextPathPosition+1, &node.children[childIndex], valueCommitment)
			// fold the child's old->new commitment delta into this node's commitment
			st.updateCommitment(updateCommitment, childIndex, oldCommitment, node.children[childIndex])
		}
		return
	}
	assert(len(prefix) < len(node.pathFragment), "len(prefix) < len(node.pathFragment)")
	// need for the fork of the pathFragment
	// continued branch is part of the fragment
	keyContinue := make([]byte, pathPosition+len(prefix)+1)
	copy(keyContinue, path)
	keyContinue[len(keyContinue)-1] = node.pathFragment[len(prefix)]
	// nodeContinue continues old path
	nodeContinue, err := st.NewNode(keyContinue)
	assert(err == nil, err)
	nodeContinue.pathFragment = node.pathFragment[len(prefix)+1:]
	nodeContinue.children = node.children
	nodeContinue.terminalValue = node.terminalValue
	// adjust the old node. It will hold 2 commitments to the forked nodes
	childIndexContinue := keyContinue[len(keyContinue)-1]
	node.pathFragment = prefix
	node.children = [256]kyber.Point{}
	node.terminalValue = nil
	// previous commitment must exist
	assert(*updateCommitment != nil, "*updateCommitment != nil")
	// the continued branch inherits the node's previous commitment
	node.children[childIndexContinue] = (*updateCommitment).Clone()
	if pathPosition+len(prefix) == len(path) {
		// no need for the new node
		node.terminalValue = valueCommitment
	} else {
		// create the new node
		keyFork := path[:pathPosition+len(prefix)+1]
		assert(len(keyContinue) == len(keyFork), "len(keyContinue)==len(keyFork)")
		nodeFork, err := st.NewNode(keyFork)
		assert(err == nil, err)
		nodeFork.pathFragment = path[len(keyFork):]
		nodeFork.terminalValue = valueCommitment
		childForkIndex := keyFork[len(keyFork)-1]
		node.children[childForkIndex] = nodeFork.Commit(st.ts)
	}
	// after a fork this node's commitment is recomputed from scratch
	*updateCommitment = node.Commit(st.ts)
}
// updateTerminalValue updates terminal value of the node
// Returns delta for the upstream commitments
// The delta (new - old terminal scalar) is multiplied by the Lagrange basis
// point at index 256 -- the slot reserved for the terminal value, one past
// the 256 child slots -- and added to *updateCommitment in place.
// A nil valueCommitment removes the terminal value.
func (st *State) updateTerminalValue(n *Node, updateCommitment *kyber.Point, valueCommitment kyber.Scalar) {
	delta := st.ts.Suite.G1().Scalar()
	if n.terminalValue != nil {
		// already has terminal value
		if valueCommitment == nil {
			// value removed: delta = -old
			delta.Neg(n.terminalValue)
		} else {
			// value replaced: delta = new - old
			delta.Sub(valueCommitment, n.terminalValue)
		}
	} else {
		if valueCommitment == nil {
			// nothing changes: no old and no new value
			delta.Zero()
		} else {
			// fresh value: delta = new
			delta.Set(valueCommitment)
		}
	}
	n.terminalValue = valueCommitment
	deltaP := st.ts.Suite.G1().Point().Mul(delta, st.ts.LagrangeBasis[256])
	if *updateCommitment == nil {
		*updateCommitment = deltaP
	} else {
		(*updateCommitment).Add(*updateCommitment, deltaP)
	}
}
// updateCommitment folds the change of a child's commitment (oldC -> newC)
// into *updateCommitment: the scalar delta derived from the two points is
// multiplied by the Lagrange basis point of childIndex and added in place.
// NOTE(review): scalarFromPoint is defined elsewhere in this package;
// presumably it maps a nil point to the zero scalar -- TODO confirm.
func (st *State) updateCommitment(updateCommitment *kyber.Point, childIndex byte, oldC, newC kyber.Point) {
	deltaScalar := scalarFromPoint(st.ts.Suite.G1().Scalar(), oldC)
	newScalar := scalarFromPoint(st.ts.Suite.G1().Scalar(), newC)
	deltaScalar.Sub(newScalar, deltaScalar)
	deltaP := st.ts.Suite.G1().Point()
	deltaP.Mul(deltaScalar, st.ts.LagrangeBasis[childIndex])
	if *updateCommitment == nil {
		*updateCommitment = deltaP
	} else {
		(*updateCommitment).Add(*updateCommitment, deltaP)
	}
}
// Check checks consistency with the provided trusted setup.
// The trie always has to contain the binary data of the trusted setup at the
// nil key; both the supplied setup and the state's own setup must match it,
// and the proof for the nil key must verify against the supplied setup.
func (st *State) Check(ts *kzg.TrustedSetup) bool {
	stored, ok := st.GetValue(nil)
	if !ok {
		return false
	}
	if !bytes.Equal(ts.Bytes(), stored) || !bytes.Equal(st.ts.Bytes(), stored) {
		return false
	}
	rootProof, ok := st.Prove(nil)
	if !ok {
		return false
	}
	return VerifyProof(ts, rootProof) == nil
}
// StringTrie renders the root commitment followed by every stored trie node,
// one per key, for debugging.
func (st *State) StringTrie() string {
	out := fmt.Sprintf("root commitment: %s\n", st.RootCommitment())
	for _, key := range st.trie.Keys() {
		node := st.mustGetNode([]byte(key))
		out += fmt.Sprintf("'%s':\n%s\n", key, node.String())
	}
	return out
}
// StatsKVStore aggregates simple size statistics over a KVStore.
type StatsKVStore struct {
	NumKeys int // total number of keys in the store
	AvgKeyLen float64 // average key length in bytes
	KeyLen []int // histogram: KeyLen[l] = number of keys of length l
	AvgValueSize float64 // average value length in bytes
}
// GetStatsKVStore walks every key of kvs and aggregates key/value size
// statistics. For an empty store the averages are left at 0 instead of the
// NaN the previous division by zero produced.
func GetStatsKVStore(kvs KVStore) *StatsKVStore {
	ret := &StatsKVStore{}
	keyLenCounts := make(map[int]int) // key length -> number of keys
	maxLen := 0
	sumKeyLen := 0
	sumValueLen := 0
	for _, k := range kvs.Keys() {
		ret.NumKeys++
		sumKeyLen += len(k)
		keyLenCounts[len(k)]++
		if len(k) > maxLen {
			maxLen = len(k)
		}
		value, ok := kvs.Get([]byte(k))
		if !ok {
			panic("key not found")
		}
		sumValueLen += len(value)
	}
	// densify the histogram into a slice indexed by key length
	ret.KeyLen = make([]int, maxLen+1)
	for l, n := range keyLenCounts {
		ret.KeyLen[l] = n
	}
	if ret.NumKeys > 0 {
		ret.AvgKeyLen = float64(sumKeyLen) / float64(ret.NumKeys)
		ret.AvgValueSize = float64(sumValueLen) / float64(ret.NumKeys)
	}
	return ret
}
// StatsTrie aggregates branching statistics over the trie nodes.
type StatsTrie struct {
	NumNodes int // total number of trie nodes
	AvgNumChildren float64 // average children (terminal value counts as one) per node
	NumChildren [258]int // histogram: NumChildren[n] = nodes with n children; at most 256 children + 1 terminal
	OnlyTerminal int // nodes carrying only a terminal value and no children
}
func GetStatsTrie(st *State) *StatsTrie {
ret := &StatsTrie{}
sumChildren := 0
for _, k := range st.trie.Keys() {
ret.NumNodes++
node, ok := st.GetNode([]byte(k))
if !ok {
panic("can't get node")
}
numCh := 0
for _, ch := range node.children {
if ch != nil {
numCh++
}
}
if node.terminalValue != nil {
if numCh == 0 {
ret.OnlyTerminal++
}
numCh++
}
ret.NumChildren[numCh]++
sumChildren += numCh
}
ret.AvgNumChildren = float64(sumChildren) / float64(ret.NumNodes)
return ret
} | trie/trie.go | 0.598312 | 0.437463 | trie.go | starcoder |
package wavefront_plugin
import (
"fmt"
"sort"
"strings"
"github.com/hashicorp/terraform/helper/schema"
"github.com/spaceapegames/go-wavefront"
)
// resourceDashboard declares the Terraform resource schema for a Wavefront
// dashboard. The schema is assembled bottom-up: source -> chart_setting ->
// chart -> row -> section, plus parameter_details, all nested under the
// top-level dashboard resource returned at the end.
func resourceDashboard() *schema.Resource {
	// source: one named query displayed on a chart.
	source := &schema.Schema{
		Type: schema.TypeList,
		Required: true,
		Description: "A collection of Sources for a Chart",
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"name": {
					Type: schema.TypeString,
					Required: true,
					Description: "Name of the Source",
				},
				"query": {
					Type: schema.TypeString,
					Required: true,
					Description: "Query for the Source",
				},
				"disabled": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Whether to disabled the source from being displayed",
				},
				"scatter_plot_source": {
					Type: schema.TypeString,
					Optional: true,
					Default: "Y",
				},
				"query_builder_enabled": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Whether the query builder should be enabled",
				},
				"source_description": {
					Type: schema.TypeString,
					Optional: true,
					Description: "Description of the source",
				},
			},
		},
	}
	// chartSetting: per-chart display options; fields map 1:1 onto the
	// Wavefront ChartSetting API object (see buildTerraformChartSettings).
	chartSetting := &schema.Schema{
		Type: schema.TypeList,
		MaxItems: 1,
		Required: true,
		Description: "Chart settings. Defaults to line charts",
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"auto_column_tags": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "deprecated",
				},
				"column_tags": {
					Type: schema.TypeString,
					Optional: true,
					Description: "deprecated",
				},
				"custom_tags": {
					Type: schema.TypeList,
					Optional: true,
					Description: "For the tabular view, a list of point tags to display when using the custom tag display mode",
					Elem: &schema.Schema{Type: schema.TypeString},
				},
				"expected_data_spacing": {
					Type: schema.TypeInt,
					Optional: true,
					Description: "Threshold (in seconds) for time delta between consecutive points in a series above which a dotted line will replace a solid line in line plots. Default: 60s",
				},
				"fixed_legend_display_stats": {
					Type: schema.TypeList,
					Optional: true,
					Description: "For a chart with a fixed legend, a list of statistics to display in the legend",
					Elem: &schema.Schema{Type: schema.TypeString},
				},
				"fixed_legend_enabled": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Whether to enable a fixed tabular legend adjacent to the chart",
				},
				"fixed_legend_filter_field": {
					Type: schema.TypeString,
					Optional: true,
					Description: "Statistic to use for determining whether a series is displayed on the fixed legend = ['CURRENT', 'MEAN', 'MEDIAN', 'SUM', 'MIN', 'MAX', 'COUNT']",
				},
				"fixed_legend_filter_limit": {
					Type: schema.TypeInt,
					Optional: true,
					Description: "Number of series to include in the fixed legend",
				},
				"fixed_legend_filter_sort": {
					Type: schema.TypeString,
					Optional: true,
					Description: "Whether to display Top- or Bottom-ranked series in the fixed legend = ['TOP', 'BOTTOM']",
				},
				"fixed_legend_hide_label": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "deprecated",
				},
				"fixed_legend_position": {
					Type: schema.TypeString,
					Optional: true,
					Description: "Where the fixed legend should be displayed with respect to the chart = ['RIGHT', 'TOP', 'LEFT', 'BOTTOM']",
				},
				"fixed_legend_use_raw_stats": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "If true, the legend uses non-summarized stats instead of summarized",
				},
				"group_by_source": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "For the tabular view, whether to group multi metrics into a single row by a common source. If false, each metric for each source is displayed in its own row. If true, multiple metrics for the same host will be displayed as different columns in the same row",
				},
				"invert_dynamic_legend_hover_control": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Whether to disable the display of the floating legend (but reenable it when the ctrl-key is pressed)",
				},
				"line_type": {
					Type: schema.TypeString,
					Optional: true,
					Description: "Plot interpolation type. linear is default = ['linear', 'step-before', 'step-after', 'basis', 'cardinal', 'monotone']",
				},
				"max": {
					Type: schema.TypeFloat,
					Optional: true,
					Description: "Max value of Y-axis. Set to null or leave blank for auto",
				},
				"min": {
					Type: schema.TypeFloat,
					Optional: true,
					Description: "Min value of Y-axis. Set to null or leave blank for auto",
				},
				"num_tags": {
					Type: schema.TypeInt,
					Optional: true,
					Description: "For the tabular view, how many point tags to display",
				},
				"plain_markdown_content": {
					Type: schema.TypeString,
					Optional: true,
					Description: "The Markdown content for a Markdown display, in plain text. Use this field instead of markdownContent",
				},
				"show_hosts": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "For the tabular view, whether to display sources. Default: true",
				},
				"show_labels": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "For the tabular view, whether to display labels. Default: true",
				},
				"show_raw_values": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "For the tabular view, whether to display raw values. Default: false",
				},
				"sort_values_descending": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "For the tabular view, whether to display display values in descending order. Default: false",
				},
				"sparkline_decimal_precision": {
					Type: schema.TypeInt,
					Optional: true,
					Description: "For the single stat view, the decimal precision of the displayed number ",
				},
				"sparkline_display_color": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, the color of the displayed text (when not dynamically determined). Values should be in rgba(, , , format ",
				},
				"sparkline_display_font_size": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, the font size of the displayed text, in percent",
				},
				"sparkline_display_horizontal_position": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, the horizontal position of the displayed text = ['MIDDLE', 'LEFT', 'RIGHT']",
				},
				"sparkline_display_postfix": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, a string to append to the displayed text",
				},
				"sparkline_display_prefix": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, a string to add before the displayed text",
				},
				"sparkline_display_value_type": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, whether to display the name of the query or the value of query = ['VALUE', 'LABEL']",
				},
				"sparkline_display_vertical_position": {
					Type: schema.TypeString,
					Optional: true,
					Description: "deprecated",
				},
				"sparkline_fill_color": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, the color of the background fill. Values should be in rgba(, , , format",
				},
				"sparkline_line_color": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, the color of the line. Values should be in rgba(, , , format",
				},
				"sparkline_size": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, a misleadingly named property. This determines whether the sparkline of the statistic is displayed in the chart BACKGROUND, BOTTOM, or NONE = ['BACKGROUND', 'BOTTOM', 'NONE']",
				},
				"sparkline_value_color_map_apply_to": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the single stat view, whether to apply dynamic color settings to the displayed TEXT or BACKGROUND = ['TEXT', 'BACKGROUND']",
				},
				"sparkline_value_color_map_colors": {
					Type: schema.TypeList,
					Optional: true,
					Description: "For the single stat view, a list of colors that differing query values map to. Must contain one more element than sparklineValueColorMapValuesV2. Values should be in rgba(, , , format",
					Elem: &schema.Schema{Type: schema.TypeString},
				},
				"sparkline_value_color_map_values": {
					Type: schema.TypeList,
					Optional: true,
					Description: "deprecated",
					Elem: &schema.Schema{Type: schema.TypeInt},
				},
				"sparkline_value_color_map_values_v2": {
					Type: schema.TypeList,
					Optional: true,
					Description: "deprecated",
					Elem: &schema.Schema{Type: schema.TypeFloat},
				},
				"sparkline_value_text_map_text": {
					Type: schema.TypeList,
					Optional: true,
					Description: "For the single stat view, a list of display text values that different query values map to. Must contain one more element than sparklineValueTextMapThresholds",
					Elem: &schema.Schema{Type: schema.TypeString},
				},
				"sparkline_value_text_map_thresholds": {
					Type: schema.TypeList,
					Optional: true,
					Description: "For the single stat view, a list of threshold boundaries for mapping different query values to display text. Must contain one less element than sparklineValueTextMapText",
					Elem: &schema.Schema{Type: schema.TypeFloat},
				},
				"stack_type": {
					Type: schema.TypeString,
					Optional: true,
					Description: "Type of stacked chart (applicable only if chart type is stacked). zero (default) means stacked from y=0. expand means Normalized from 0 to 1. wiggle means Minimize weighted changes. silhouette means to Center the Stream = ['zero', 'expand', 'wiggle', 'silhouette']",
				},
				"tag_mode": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the tabular view, which mode to use to determine which point tags to display = ['all', 'top', 'custom']",
				},
				"time_based_coloring": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Fox x-y scatterplots, whether to color more recent points as darker than older points. Default: false",
				},
				"type": {
					Type: schema.TypeString,
					Required: true,
					Description: "Chart Type. 'line' refers to the Line Plot, 'scatter' to the Point Plot, 'stacked-area' to the Stacked Area plot, 'table' to the Tabular View, 'scatterploy-xy' to Scatter Plot, 'markdown-widget' to the Markdown display, and 'sparkline' to the Single Stat view = ['line', 'scatterplot', 'stacked-area', 'table', 'scatterplot-xy', 'markdown-widget', 'sparkline']",
				},
				"windowing": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For the tabular view, whether to use the full time window for the query or the last X minutes = ['full', 'last']",
				},
				"window_size": {
					Type: schema.TypeInt,
					Optional: true,
					Description: "Width, in minutes, of the time window to use for last windowing ",
				},
				"xmax": {
					Type: schema.TypeFloat,
					Optional: true,
					Description: "For x-y scatterplots, max value for X-axis. Set null for auto",
				},
				"xmin": {
					Type: schema.TypeFloat,
					Optional: true,
					Description: "For x-y scatterplots, min value for X-axis. Set null for auto",
				},
				"y0_scale_si_by_1024": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Default: false. Whether to scale numerical magnitude labels for left Y-axis by 1024 in the IEC/Binary manner (instead of by 1000 like SI) ,",
				},
				"y0_unit_autoscaling": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Default: false. Whether to automatically adjust magnitude labels and units for the left Y-axis to favor smaller magnitudes and larger units",
				},
				"y1max": {
					Type: schema.TypeFloat,
					Optional: true,
					Description: "For plots with multiple Y-axes, max value for right-side Y-axis. Set null for auto",
				},
				"y1min": {
					Type: schema.TypeFloat,
					Optional: true,
					Description: "For plots with multiple Y-axes, min value for right-side Y-axis. Set null for auto",
				},
				"y1_scale_si_by_1024": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Default: false. Whether to scale numerical magnitude labels for right Y-axis by 1024 in the IEC/Binary manner (instead of by 1000 like SI)",
				},
				"y1_unit_autoscaling": {
					Type: schema.TypeBool,
					Optional: true,
					Description: "Default: false. Whether to automatically adjust magnitude labels and units for the right Y-axis to favor smaller magnitudes and larger units",
				},
				"y1_units": {
					Type: schema.TypeString,
					Optional: true,
					Description: "For plots with multiple Y-axes, units for right-side Y-axis ",
				},
				"ymax": {
					Type: schema.TypeFloat,
					Optional: true,
					Description: "For x-y scatterplots, max value for Y-axis. Set null for auto ",
				},
				"ymin": {
					Type: schema.TypeFloat,
					Optional: true,
					Description: "For x-y scatterplots, min value for Y-axis. Set null for auto",
				},
			},
		},
	}
	// chart: a named visualization combining sources and one chart_setting.
	chart := &schema.Schema{
		Type: schema.TypeList,
		Required: true,
		Description: "A collection of chart",
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"name": {
					Type: schema.TypeString,
					Required: true,
					Description: "Name of the Chart",
				},
				"description": {
					Type: schema.TypeString,
					Optional: true,
					Description: "Description of the chart",
				},
				"units": {
					Type: schema.TypeString,
					Required: true,
					Description: "Units of measurements for the chart",
				},
				"summarization": {
					Type: schema.TypeString,
					Required: true,
					Description: "Summarization strategy for the chart. MEAN is default = ['MEAN', 'MEDIAN', 'MIN', 'MAX', 'SUM', 'COUNT', 'LAST', 'FIRST']",
				},
				"source": source,
				"chart_setting": chartSetting,
			},
		},
	}
	// row: a horizontal band of charts inside a section.
	row := &schema.Schema{
		Type: schema.TypeList,
		Required: true,
		Description: "Rows containing chart. Rows belong in Sections",
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"chart": chart,
			},
		},
	}
	// section: a named group of rows on the dashboard.
	section := &schema.Schema{
		Type: schema.TypeList,
		Required: true,
		Description: "Sections of a Dashboard",
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"name": {
					Type: schema.TypeString,
					Required: true,
					Description: "Name of the Sections",
				},
				"row": row,
			},
		},
	}
	// parameterDetail: a dashboard variable (simple, list or dynamic).
	parameterDetail := &schema.Schema{
		Type: schema.TypeList,
		Optional: true,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"name": {
					Type: schema.TypeString,
					Description: "",
					Required: true,
				},
				"label": {
					Type: schema.TypeString,
					Required: true,
				},
				"default_value": {
					Type: schema.TypeString,
					Required: true,
				},
				"hide_from_view": {
					Type: schema.TypeBool,
					Required: true,
				},
				"parameter_type": {
					Type: schema.TypeString,
					Required: true,
				},
				"values_to_readable_strings": {
					Type: schema.TypeMap,
					Required: true,
					Description: "Map of [string]string. At least one of the keys must match the value of default_value.",
				},
				"query_value": {
					Type: schema.TypeString,
					Optional: true,
				},
				"tag_key": {
					Type: schema.TypeString,
					Optional: true,
				},
				"dynamic_field_type": {
					Type: schema.TypeString,
					Optional: true,
				},
			},
		},
	}
	// Top-level dashboard resource tying all the nested schemas together.
	return &schema.Resource{
		Create: resourceDashboardCreate,
		Read: resourceDashboardRead,
		Update: resourceDashboardUpdate,
		Delete: resourceDashboardDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			"name": {
				Type: schema.TypeString,
				Required: true,
			},
			"description": {
				Type: schema.TypeString,
				Required: true,
			},
			"url": {
				Type: schema.TypeString,
				Required: true,
			},
			"section": section,
			"parameter_details": parameterDetail,
			"display_section_table_of_contents": {
				Type: schema.TypeBool,
				Optional: true,
			},
			"display_query_parameters": {
				Type: schema.TypeBool,
				Optional: true,
			},
			"event_filter_type": {
				Type: schema.TypeString,
				Optional: true,
			},
			"tags": {
				Type: schema.TypeSet,
				Required: true,
				Elem: &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}
// buildTerraformParameterDetail converts a Wavefront ParameterDetail into
// the flattened map representation used by the Terraform state.
func buildTerraformParameterDetail(wavefrontParamDetail wavefront.ParameterDetail, name string) map[string]interface{} {
	return map[string]interface{}{
		"name":                       name,
		"label":                      wavefrontParamDetail.Label,
		"parameter_type":             wavefrontParamDetail.ParameterType,
		"hide_from_view":             wavefrontParamDetail.HideFromView,
		"default_value":              wavefrontParamDetail.DefaultValue,
		"values_to_readable_strings": wavefrontParamDetail.ValuesToReadableStrings,
		"query_value":                wavefrontParamDetail.QueryValue,
		"tag_key":                    wavefrontParamDetail.TagKey,
		"dynamic_field_type":         wavefrontParamDetail.DynamicFieldType,
	}
}
// buildTerraformSection converts a Wavefront Section (and its rows) into
// the flattened map representation used by the Terraform state.
func buildTerraformSection(wavefrontSection wavefront.Section) map[string]interface{} {
	rows := make([]map[string]interface{}, 0, len(wavefrontSection.Rows))
	for _, r := range wavefrontSection.Rows {
		rows = append(rows, buildTerraformRow(r))
	}
	return map[string]interface{}{
		"name": wavefrontSection.Name,
		"row":  rows,
	}
}
// buildTerraformRow converts a Wavefront Row (and its charts) into the
// flattened map representation used by the Terraform state.
func buildTerraformRow(wavefrontRow wavefront.Row) map[string]interface{} {
	row := map[string]interface{}{}
	charts := []map[string]interface{}{}
	// the loop iterates over charts; the previous code shadowed the
	// wavefrontRow parameter with the chart loop variable, which was misleading
	for _, wavefrontChart := range wavefrontRow.Charts {
		charts = append(charts, buildTerraformChart(wavefrontChart))
	}
	row["chart"] = charts
	return row
}
// buildTerraformChart converts a Wavefront Chart (including its sources and
// settings) into the flattened map representation used by the Terraform state.
func buildTerraformChart(wavefrontChart wavefront.Chart) map[string]interface{} {
	sources := make([]map[string]interface{}, 0, len(wavefrontChart.Sources))
	for _, s := range wavefrontChart.Sources {
		sources = append(sources, buildTerraformSource(s))
	}
	return map[string]interface{}{
		"name":          wavefrontChart.Name,
		"description":   wavefrontChart.Description,
		"units":         wavefrontChart.Units,
		"source":        sources,
		"summarization": wavefrontChart.Summarization,
		"chart_setting": []interface{}{buildTerraformChartSettings(wavefrontChart.ChartSettings)},
	}
}
// buildTerraformChartSettings flattens a Wavefront ChartSetting into the
// Terraform state map; each key mirrors a field of the chart_setting schema
// declared in resourceDashboard.
func buildTerraformChartSettings(wavefrontChartSettings wavefront.ChartSetting) map[string]interface{} {
	chartSettings := map[string]interface{}{}
	chartSettings["auto_column_tags"] = wavefrontChartSettings.AutoColumnTags
	chartSettings["column_tags"] = wavefrontChartSettings.ColumnTags
	chartSettings["custom_tags"] = wavefrontChartSettings.CustomTags
	chartSettings["expected_data_spacing"] = wavefrontChartSettings.ExpectedDataSpacing
	chartSettings["fixed_legend_display_stats"] = wavefrontChartSettings.FixedLegendDisplayStats
	chartSettings["fixed_legend_enabled"] = wavefrontChartSettings.FixedLegendEnabled
	chartSettings["fixed_legend_filter_field"] = wavefrontChartSettings.FixedLegendFilterField
	chartSettings["fixed_legend_filter_limit"] = wavefrontChartSettings.FixedLegendFilterLimit
	chartSettings["fixed_legend_filter_sort"] = wavefrontChartSettings.FixedLegendFilterSort
	chartSettings["fixed_legend_hide_label"] = wavefrontChartSettings.FixedLegendHideLabel
	chartSettings["fixed_legend_position"] = wavefrontChartSettings.FixedLegendPosition
	chartSettings["fixed_legend_use_raw_stats"] = wavefrontChartSettings.FixedLegendUseRawStats
	chartSettings["group_by_source"] = wavefrontChartSettings.GroupBySource
	chartSettings["invert_dynamic_legend_hover_control"] = wavefrontChartSettings.InvertDynamicLegendHoverControl
	chartSettings["line_type"] = wavefrontChartSettings.LineType
	chartSettings["max"] = wavefrontChartSettings.Max
	chartSettings["min"] = wavefrontChartSettings.Min
	chartSettings["num_tags"] = wavefrontChartSettings.NumTags
	chartSettings["plain_markdown_content"] = wavefrontChartSettings.PlainMarkdownContent
	chartSettings["show_hosts"] = wavefrontChartSettings.ShowHosts
	chartSettings["show_labels"] = wavefrontChartSettings.ShowLabels
	chartSettings["show_raw_values"] = wavefrontChartSettings.ShowRawValues
	chartSettings["sort_values_descending"] = wavefrontChartSettings.SortValuesDescending
	chartSettings["sparkline_decimal_precision"] = wavefrontChartSettings.SparklineDecimalPrecision
	chartSettings["sparkline_display_color"] = wavefrontChartSettings.SparklineDisplayColor
	chartSettings["sparkline_display_font_size"] = wavefrontChartSettings.SparklineDisplayFontSize
	chartSettings["sparkline_display_horizontal_position"] = wavefrontChartSettings.SparklineDisplayHorizontalPosition
	chartSettings["sparkline_display_postfix"] = wavefrontChartSettings.SparklineDisplayPostfix
	chartSettings["sparkline_display_prefix"] = wavefrontChartSettings.SparklineDisplayPrefix
	chartSettings["sparkline_display_value_type"] = wavefrontChartSettings.SparklineDisplayValueType
	chartSettings["sparkline_display_vertical_position"] = wavefrontChartSettings.SparklineDisplayVerticalPosition
	chartSettings["sparkline_fill_color"] = wavefrontChartSettings.SparklineFillColor
	chartSettings["sparkline_line_color"] = wavefrontChartSettings.SparklineLineColor
	chartSettings["sparkline_size"] = wavefrontChartSettings.SparklineSize
	chartSettings["sparkline_value_color_map_apply_to"] = wavefrontChartSettings.SparklineValueColorMapApplyTo
	chartSettings["sparkline_value_color_map_colors"] = wavefrontChartSettings.SparklineValueColorMapColors
	chartSettings["sparkline_value_color_map_values"] = wavefrontChartSettings.SparklineValueColorMapValues
	chartSettings["sparkline_value_color_map_values_v2"] = wavefrontChartSettings.SparklineValueColorMapValuesV2
	chartSettings["sparkline_value_text_map_text"] = wavefrontChartSettings.SparklineValueTextMapText
	chartSettings["sparkline_value_text_map_thresholds"] = wavefrontChartSettings.SparklineValueTextMapThresholds
	chartSettings["stack_type"] = wavefrontChartSettings.StackType
	chartSettings["tag_mode"] = wavefrontChartSettings.TagMode
	chartSettings["time_based_coloring"] = wavefrontChartSettings.TimeBasedColoring
	chartSettings["type"] = wavefrontChartSettings.Type
	chartSettings["windowing"] = wavefrontChartSettings.Windowing
	chartSettings["window_size"] = wavefrontChartSettings.WindowSize
	chartSettings["xmax"] = wavefrontChartSettings.Xmax
	chartSettings["xmin"] = wavefrontChartSettings.Xmin
	chartSettings["y0_scale_si_by_1024"] = wavefrontChartSettings.Y0ScaleSIBy1024
	chartSettings["y1_scale_si_by_1024"] = wavefrontChartSettings.Y1ScaleSIBy1024
	chartSettings["y0_unit_autoscaling"] = wavefrontChartSettings.Y0UnitAutoscaling
	chartSettings["y1_unit_autoscaling"] = wavefrontChartSettings.Y1UnitAutoscaling
	chartSettings["y1_units"] = wavefrontChartSettings.Y1Units
	chartSettings["y1max"] = wavefrontChartSettings.Y1Max
	chartSettings["y1min"] = wavefrontChartSettings.Y1Min
	return chartSettings
}
// buildTerraformSource flattens a wavefront.Source into the map shape used
// by the Terraform "source" schema block.
func buildTerraformSource(wavefrontSource wavefront.Source) map[string]interface{} {
	return map[string]interface{}{
		"name":                  wavefrontSource.Name,
		"query":                 wavefrontSource.Query,
		"disabled":              wavefrontSource.Disabled,
		"scatter_plot_source":   wavefrontSource.ScatterPlotSource,
		"query_builder_enabled": wavefrontSource.QuerybuilderEnabled,
		"source_description":    wavefrontSource.SourceDescription,
	}
}
// buildSections converts the Terraform "section" list into Wavefront
// Section values, recursing into each section's rows.
func buildSections(terraformSections *[]interface{}) *[]wavefront.Section {
	sections := make([]wavefront.Section, len(*terraformSections))
	for idx, raw := range *terraformSections {
		section := raw.(map[string]interface{})
		rows := section["row"].([]interface{})
		sections[idx] = wavefront.Section{
			Name: section["name"].(string),
			Rows: *buildRows(&rows),
		}
	}
	return &sections
}
// buildRows converts the Terraform "row" list into Wavefront Row values,
// recursing into each row's charts.
func buildRows(terraformRows *[]interface{}) *[]wavefront.Row {
	rows := make([]wavefront.Row, len(*terraformRows))
	for idx, raw := range *terraformRows {
		row := raw.(map[string]interface{})
		charts := row["chart"].([]interface{})
		rows[idx] = wavefront.Row{
			Charts: *buildCharts(&charts),
		}
	}
	return &rows
}
// buildCharts converts the Terraform "chart" list into Wavefront Chart
// values, including each chart's sources and settings.
func buildCharts(terraformCharts *[]interface{}) *[]wavefront.Chart {
	charts := make([]wavefront.Chart, len(*terraformCharts))
	for idx, raw := range *terraformCharts {
		chart := raw.(map[string]interface{})
		sources := chart["source"].([]interface{})
		settings := chart["chart_setting"].([]interface{})
		charts[idx] = wavefront.Chart{
			Name:          chart["name"].(string),
			Sources:       *buildSources(&sources),
			Description:   chart["description"].(string),
			Units:         chart["units"].(string),
			Summarization: chart["summarization"].(string),
			ChartSettings: *buildChartSettings(&settings),
		}
	}
	return &charts
}
// buildChartSettings converts the first "chart_setting" map from Terraform
// state into a wavefront.ChartSetting. Keys that are absent or nil leave the
// corresponding field at its zero value. Returns an empty ChartSetting when
// the list is empty instead of panicking on the [0] index (the previous
// implementation indexed without a length check).
func buildChartSettings(terraformChartSettings *[]interface{}) *wavefront.ChartSetting {
	wavefrontChartSettings := &wavefront.ChartSetting{}
	if len(*terraformChartSettings) == 0 {
		return wavefrontChartSettings
	}
	t, ok := ((*terraformChartSettings)[0]).(map[string]interface{})
	if !ok {
		return wavefrontChartSettings
	}
	// Typed setters replace the ~55 repeated "if t[key] != nil { assert }"
	// stanzas. Each copies a value only when the key is present and non-nil,
	// exactly mirroring the original per-field checks.
	setBool := func(key string, dst *bool) {
		if v := t[key]; v != nil {
			*dst = v.(bool)
		}
	}
	setString := func(key string, dst *string) {
		if v := t[key]; v != nil {
			*dst = v.(string)
		}
	}
	setInt := func(key string, dst *int) {
		if v := t[key]; v != nil {
			*dst = v.(int)
		}
	}
	// Terraform hands floats over as float64; the API client uses float32.
	setFloat32 := func(key string, dst *float32) {
		if v := t[key]; v != nil {
			*dst = float32(v.(float64))
		}
	}
	setStringSlice := func(key string, dst *[]string) {
		if v := t[key]; v != nil {
			for _, e := range v.([]interface{}) {
				*dst = append(*dst, e.(string))
			}
		}
	}
	setIntSlice := func(key string, dst *[]int) {
		if v := t[key]; v != nil {
			for _, e := range v.([]interface{}) {
				*dst = append(*dst, e.(int))
			}
		}
	}
	setFloat32Slice := func(key string, dst *[]float32) {
		if v := t[key]; v != nil {
			for _, e := range v.([]interface{}) {
				*dst = append(*dst, float32(e.(float64)))
			}
		}
	}
	setBool("auto_column_tags", &wavefrontChartSettings.AutoColumnTags)
	setString("column_tags", &wavefrontChartSettings.ColumnTags)
	setStringSlice("custom_tags", &wavefrontChartSettings.CustomTags)
	setInt("expected_data_spacing", &wavefrontChartSettings.ExpectedDataSpacing)
	setStringSlice("fixed_legend_display_stats", &wavefrontChartSettings.FixedLegendDisplayStats)
	setBool("fixed_legend_enabled", &wavefrontChartSettings.FixedLegendEnabled)
	setString("fixed_legend_filter_field", &wavefrontChartSettings.FixedLegendFilterField)
	setInt("fixed_legend_filter_limit", &wavefrontChartSettings.FixedLegendFilterLimit)
	setString("fixed_legend_filter_sort", &wavefrontChartSettings.FixedLegendFilterSort)
	setBool("fixed_legend_hide_label", &wavefrontChartSettings.FixedLegendHideLabel)
	setString("fixed_legend_position", &wavefrontChartSettings.FixedLegendPosition)
	setBool("fixed_legend_use_raw_stats", &wavefrontChartSettings.FixedLegendUseRawStats)
	setBool("group_by_source", &wavefrontChartSettings.GroupBySource)
	setBool("invert_dynamic_legend_hover_control", &wavefrontChartSettings.InvertDynamicLegendHoverControl)
	setString("line_type", &wavefrontChartSettings.LineType)
	setFloat32("max", &wavefrontChartSettings.Max)
	setFloat32("min", &wavefrontChartSettings.Min)
	setInt("num_tags", &wavefrontChartSettings.NumTags)
	setString("plain_markdown_content", &wavefrontChartSettings.PlainMarkdownContent)
	setBool("show_hosts", &wavefrontChartSettings.ShowHosts)
	setBool("show_labels", &wavefrontChartSettings.ShowLabels)
	setBool("show_raw_values", &wavefrontChartSettings.ShowRawValues)
	setBool("sort_values_descending", &wavefrontChartSettings.SortValuesDescending)
	setInt("sparkline_decimal_precision", &wavefrontChartSettings.SparklineDecimalPrecision)
	setString("sparkline_display_color", &wavefrontChartSettings.SparklineDisplayColor)
	setString("sparkline_display_font_size", &wavefrontChartSettings.SparklineDisplayFontSize)
	setString("sparkline_display_horizontal_position", &wavefrontChartSettings.SparklineDisplayHorizontalPosition)
	setString("sparkline_display_postfix", &wavefrontChartSettings.SparklineDisplayPostfix)
	setString("sparkline_display_prefix", &wavefrontChartSettings.SparklineDisplayPrefix)
	setString("sparkline_display_value_type", &wavefrontChartSettings.SparklineDisplayValueType)
	setString("sparkline_display_vertical_position", &wavefrontChartSettings.SparklineDisplayVerticalPosition)
	setString("sparkline_fill_color", &wavefrontChartSettings.SparklineFillColor)
	setString("sparkline_line_color", &wavefrontChartSettings.SparklineLineColor)
	setString("sparkline_size", &wavefrontChartSettings.SparklineSize)
	setString("sparkline_value_color_map_apply_to", &wavefrontChartSettings.SparklineValueColorMapApplyTo)
	setStringSlice("sparkline_value_color_map_colors", &wavefrontChartSettings.SparklineValueColorMapColors)
	setIntSlice("sparkline_value_color_map_values", &wavefrontChartSettings.SparklineValueColorMapValues)
	setStringSlice("sparkline_value_text_map_text", &wavefrontChartSettings.SparklineValueTextMapText)
	setFloat32Slice("sparkline_value_text_map_thresholds", &wavefrontChartSettings.SparklineValueTextMapThresholds)
	setFloat32Slice("sparkline_value_color_map_values_v2", &wavefrontChartSettings.SparklineValueColorMapValuesV2)
	setString("stack_type", &wavefrontChartSettings.StackType)
	setString("tag_mode", &wavefrontChartSettings.TagMode)
	setBool("time_based_coloring", &wavefrontChartSettings.TimeBasedColoring)
	setString("type", &wavefrontChartSettings.Type)
	setString("windowing", &wavefrontChartSettings.Windowing)
	setInt("window_size", &wavefrontChartSettings.WindowSize)
	setFloat32("xmax", &wavefrontChartSettings.Xmax)
	setFloat32("xmin", &wavefrontChartSettings.Xmin)
	setBool("y0_scale_si_by_1024", &wavefrontChartSettings.Y0ScaleSIBy1024)
	setBool("y0_unit_autoscaling", &wavefrontChartSettings.Y0UnitAutoscaling)
	setFloat32("y1max", &wavefrontChartSettings.Y1Max)
	setFloat32("y1min", &wavefrontChartSettings.Y1Min)
	setBool("y1_scale_si_by_1024", &wavefrontChartSettings.Y1ScaleSIBy1024)
	setBool("y1_unit_autoscaling", &wavefrontChartSettings.Y1UnitAutoscaling)
	setString("y1_units", &wavefrontChartSettings.Y1Units)
	setFloat32("ymax", &wavefrontChartSettings.Ymax)
	setFloat32("ymin", &wavefrontChartSettings.Ymin)
	return wavefrontChartSettings
}
// buildSources converts the Terraform "source" list into Wavefront Source
// values. Name and query are required; the remaining keys are optional and
// only copied when present and non-nil.
func buildSources(terraformSources *[]interface{}) *[]wavefront.Source {
	sources := make([]wavefront.Source, len(*terraformSources))
	for idx, raw := range *terraformSources {
		src := raw.(map[string]interface{})
		entry := wavefront.Source{
			Name:  src["name"].(string),
			Query: src["query"].(string),
		}
		if v := src["disabled"]; v != nil {
			entry.Disabled = v.(bool)
		}
		if v := src["scatter_plot_source"]; v != nil {
			entry.ScatterPlotSource = v.(string)
		}
		if v := src["query_builder_enabled"]; v != nil {
			entry.QuerybuilderEnabled = v.(bool)
		}
		if v := src["source_description"]; v != nil {
			entry.SourceDescription = v.(string)
		}
		sources[idx] = entry
	}
	return &sources
}
// buildParameterDetails converts the Terraform "parameter_details" list into
// a map of Wavefront ParameterDetail values keyed by parameter name.
func buildParameterDetails(terraformParams *[]interface{}) *map[string]wavefront.ParameterDetail {
	details := map[string]wavefront.ParameterDetail{}
	for _, raw := range *terraformParams {
		param := raw.(map[string]interface{})
		// values_to_readable_strings arrives as map[string]interface{};
		// narrow it to map[string]string for the API client.
		readable := map[string]string{}
		for key, val := range param["values_to_readable_strings"].(map[string]interface{}) {
			readable[key] = val.(string)
		}
		detail := wavefront.ParameterDetail{
			Label:                   param["label"].(string),
			DefaultValue:            param["default_value"].(string),
			HideFromView:            param["hide_from_view"].(bool),
			ParameterType:           param["parameter_type"].(string),
			ValuesToReadableStrings: readable,
		}
		if v := param["query_value"]; v != nil {
			detail.QueryValue = v.(string)
		}
		if v := param["tag_key"]; v != nil {
			detail.TagKey = v.(string)
		}
		if v := param["dynamic_field_type"]; v != nil {
			detail.DynamicFieldType = v.(string)
		}
		details[param["name"].(string)] = detail
	}
	return &details
}
// buildDashboard assembles a wavefront.Dashboard from the Terraform resource
// data. The dashboard URL doubles as its Wavefront ID.
func buildDashboard(d *schema.ResourceData) (*wavefront.Dashboard, error) {
	var tags []string
	for _, tag := range d.Get("tags").(*schema.Set).List() {
		tags = append(tags, tag.(string))
	}

	// Optional toggles fall back to their defaults when unset in the config.
	eventFilterType := "BYCHART"
	if v, ok := d.GetOk("event_filter_type"); ok {
		eventFilterType = v.(string)
	}
	displayTOC := false
	if v, ok := d.GetOk("display_section_table_of_contents"); ok {
		displayTOC = v.(bool)
	}
	displayQP := false
	if v, ok := d.GetOk("display_query_parameters"); ok {
		displayQP = v.(bool)
	}

	sections := d.Get("section").([]interface{})
	params := d.Get("parameter_details").([]interface{})
	return &wavefront.Dashboard{
		Name:                          d.Get("name").(string),
		ID:                            d.Get("url").(string),
		Tags:                          tags,
		Description:                   d.Get("description").(string),
		Url:                           d.Get("url").(string),
		Sections:                      *buildSections(&sections),
		ParameterDetails:              *buildParameterDetails(&params),
		EventFilterType:               eventFilterType,
		DisplaySectionTableOfContents: displayTOC,
		DisplayQueryParameters:        displayQP,
	}, nil
}
// resourceDashboardCreate builds a dashboard from the resource data, creates
// it on Wavefront, stores its ID, and re-reads it to populate state.
func resourceDashboardCreate(d *schema.ResourceData, m interface{}) error {
	dashboards := m.(*wavefrontClient).client.Dashboards()
	dashboard, err := buildDashboard(d)
	if err != nil {
		return fmt.Errorf("failed to parse dashboard, %s", err)
	}
	if err := dashboards.Create(dashboard); err != nil {
		return fmt.Errorf("failed to create dashboard, %s", err)
	}
	d.SetId(dashboard.ID)
	return resourceDashboardRead(d, m)
}
// Params sorts dashboard parameter maps by their "name" entry so that state
// output is deterministic. It implements sort.Interface.
type Params []map[string]interface{}

func (p Params) Len() int      { return len(p) }
func (p Params) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

// Less reports whether the parameter at i sorts strictly before the one at
// j. The previous implementation used sort.StringsAreSorted, which is a
// "less than or equal" test; sort.Interface requires a strict ordering, and
// a reflexive Less yields implementation-defined results for equal keys.
func (p Params) Less(i, j int) bool {
	return p[i]["name"].(string) < p[j]["name"].(string)
}
// resourceDashboardRead refreshes Terraform state from the Wavefront API.
// A 404 means the dashboard was deleted out of band: the ID is cleared so
// Terraform schedules re-creation, and no further fields are read.
func resourceDashboardRead(d *schema.ResourceData, m interface{}) error {
	dashboards := m.(*wavefrontClient).client.Dashboards()
	dash := wavefront.Dashboard{
		ID: d.Id(),
	}

	// Search for a dashboard with our ID. We should receive one (exact
	// match) or an error.
	err := dashboards.Get(&dash)
	if err != nil {
		if strings.Contains(err.Error(), "404") {
			// Previously the code fell through here and repopulated
			// state from the zero-valued dash; return immediately
			// instead so the resource is correctly marked as gone.
			d.SetId("")
			return nil
		}
		return fmt.Errorf("error finding Wavefront Dashboard %s. %s", d.Id(), err)
	}

	// Use the Wavefront url as the Terraform ID
	d.SetId(dash.ID)
	d.Set("name", dash.Name)
	d.Set("description", dash.Description)
	d.Set("url", dash.Url)
	d.Set("display_section_table_of_contents", dash.DisplaySectionTableOfContents)
	d.Set("display_query_parameters", dash.DisplayQueryParameters)

	sections := []map[string]interface{}{}
	for _, wavefrontSection := range dash.Sections {
		sections = append(sections, buildTerraformSection(wavefrontSection))
	}
	d.Set("section", sections)

	// Parameter details come back as a map; sort the flattened list so the
	// state output is stable across runs.
	parameterDetails := []map[string]interface{}{}
	for k, v := range dash.ParameterDetails {
		parameterDetails = append(parameterDetails, buildTerraformParameterDetail(v, k))
	}
	sort.Sort(Params(parameterDetails))
	d.Set("parameter_details", parameterDetails)
	d.Set("tags", dash.Tags)
	return nil
}
// resourceDashboardUpdate pushes the current resource data to Wavefront and
// then re-reads the dashboard to refresh state.
func resourceDashboardUpdate(d *schema.ResourceData, m interface{}) error {
	dashboards := m.(*wavefrontClient).client.Dashboards()
	dashboard, err := buildDashboard(d)
	if err != nil {
		return fmt.Errorf("failed to parse dashboard, %s", err)
	}
	// Update the dashboard on Wavefront
	if err := dashboards.Update(dashboard); err != nil {
		return fmt.Errorf("error Updating Dashboard %s. %s", d.Get("name"), err)
	}
	return resourceDashboardRead(d, m)
}
func resourceDashboardDelete(d *schema.ResourceData, m interface{}) error {
dashboards := m.(*wavefrontClient).client.Dashboards()
dash := wavefront.Dashboard{
ID: d.Id(),
}
err := dashboards.Get(&dash)
if err != nil {
return fmt.Errorf("error finding Wavefront Dashboard %s. %s", d.Id(), err)
}
// Delete the Dashboard
err = dashboards.Delete(&dash)
if err != nil {
return fmt.Errorf("failed to delete Dashboard %s. %s", d.Id(), err)
}
d.SetId("")
return nil
} | wavefront/resource_dashboard.go | 0.569254 | 0.450722 | resource_dashboard.go | starcoder |
Offers functions to work with Hyper-Complex numbers modulo P.
*/
package hypercomplex
import "math/big"
import "bytes"
import "fmt"
import "io"
import "errors"
/*
MultiComp represents a Hyper-Complex number.

The number is represented as an array of integers (*big.Int) whose length is
a power of two. A MultiComp of length one represents an ordinary number.
Otherwise it consists of two equal-sized halves: the first half is the real
part and the second half is the imaginary part, each half itself being a
MultiComp of the next-smaller size.
*/
type MultiComp []*big.Int
// BitLen returns the sum of the bit lengths of all coefficients, a rough
// measure of the number's overall size.
func (m MultiComp) BitLen() int {
	total := 0
	for _, coef := range m {
		total += coef.BitLen()
	}
	return total
}
// String renders the number as a bracketed, comma-separated list of
// upper-case hexadecimal coefficients, e.g. "[FF,1]".
func (m MultiComp) String() string {
	var sb bytes.Buffer
	sb.WriteByte('[')
	for i, coef := range m {
		if i > 0 {
			sb.WriteByte(',')
		}
		fmt.Fprintf(&sb, "%X", coef)
	}
	sb.WriteByte(']')
	return sb.String()
}
// Copy returns a new MultiComp slice holding the same coefficients. The
// *big.Int values themselves are shared, not cloned.
func (m MultiComp) Copy() MultiComp {
	out := make(MultiComp, len(m))
	copy(out, m)
	return out
}
// Modulus wraps the modulus P over which all MultiComp arithmetic in this
// package is performed.
type Modulus struct{
	Mod *big.Int
}
/*
Deterministic creates a MultiComp over the modulus group of the given length
'size', with coefficients drawn from the reader 'source'. Each coefficient
lands in the range [1, Mod-1]. The argument 'size' must be a power of two.
This function chooses coefficients deterministically from the stream and is
meant to be used with SHAKE-128, SHAKE-256 or similar XOF hash functions.
However, the regular "crypto/rand" rand.Reader can be used as well.
*/
func (m Modulus) Deterministic(source io.Reader, size int) (MultiComp, error) {
	if (size & (size - 1)) != 0 {
		return nil, errors.New("Must be power of two")
	}
	result := make(MultiComp, size)
	one := big.NewInt(1)
	// Each coefficient is reduced modulo Mod-1 and then shifted up by one,
	// so zero never occurs.
	upper := new(big.Int).Sub(m.Mod, one)
	buf := make([]byte, len(upper.Bytes()))
	for i := range result {
		if _, err := io.ReadFull(source, buf); err != nil {
			return nil, err
		}
		coef := new(big.Int).SetBytes(buf)
		coef.Mod(coef, upper)
		coef.Add(coef, one)
		result[i] = coef
	}
	return result, nil
}
// Add returns the coefficient-wise sum a+b reduced modulo Mod.
func (m Modulus) Add(a, b MultiComp) MultiComp {
	sum := make(MultiComp, len(a))
	for i := range sum {
		s := new(big.Int).Add(a[i], b[i])
		sum[i] = s.Mod(s, m.Mod)
	}
	return sum
}
// Sub returns the coefficient-wise difference a-b reduced modulo Mod (the
// result of big.Int.Mod is always non-negative).
func (m Modulus) Sub(a, b MultiComp) MultiComp {
	diff := make(MultiComp, len(a))
	for i := range diff {
		d := new(big.Int).Sub(a[i], b[i])
		diff[i] = d.Mod(d, m.Mod)
	}
	return diff
}
// Multiply returns the hyper-complex product of a and b modulo Mod. Both
// inputs must have the same power-of-two length. Length-one operands are
// multiplied as plain integers; longer operands are split into real and
// imaginary halves and combined recursively with the complex product rule.
func (m Modulus) Multiply(a,b MultiComp) MultiComp {
	// assert: len(a)==len(b)
	L := len(a)/2
	if L==0 {
		// Base case: ordinary modular multiplication.
		r := new(big.Int).Mul(a[0],b[0])
		r.Mod(r,m.Mod)
		return MultiComp{r}
	}
	ar := a[:L]
	ai := a[L:]
	br := b[:L]
	bi := b[L:]
	/*
	Complex product rule:
	cr = ar*br - ai*bi
	ci = ar*bi + ai*br
	*/
	cr := m.Sub( m.Multiply(ar,br), m.Multiply(ai,bi) )
	ci := m.Add( m.Multiply(ar,bi), m.Multiply(ai,br) )
	return append(cr,ci...)
}
// Exp raises g to the power encoded big-endian in exp, using left-to-right
// square-and-multiply over every bit of every byte (MSB first). The
// accumulator starts at the multiplicative identity (1,0,...,0).
func (m Modulus) Exp(g MultiComp, exp []byte) MultiComp {
	v := make(MultiComp,len(g))
	for i := range v{ v[i] = big.NewInt(0) }
	v[0].SetUint64(1)
	for _,k := range exp {
		for j := 0; j<8; j++ {
			// Square for every bit, multiply when the current MSB is set.
			v = m.Multiply(v,v)
			if (k&0x80)==0x80 {
				v = m.Multiply(v,g)
			}
			k <<= 1
		}
	}
	return v
}
// Neg returns the coefficient-wise additive inverse of a modulo Mod.
func (m Modulus) Neg(a MultiComp) MultiComp {
	out := make(MultiComp, len(a))
	for i, coef := range a {
		neg := new(big.Int).Neg(coef)
		out[i] = neg.Mod(neg, m.Mod)
	}
	return out
}
// isZero reports whether every coefficient of a is zero.
func isZero(a MultiComp) bool {
	for _, coef := range a {
		if coef.Sign() != 0 {
			return false
		}
	}
	return true
}
// zeroes returns a MultiComp of length i with every coefficient set to a
// fresh zero-valued big.Int.
func zeroes(i int) MultiComp {
	out := make(MultiComp, i)
	for j := range out {
		out[j] = new(big.Int)
	}
	return out
}
// Counterpart returns the conjugate: for a given 'a = (r,i)' it returns
// '(r, -i mod P)'. Length-one values are their own counterpart.
func (m Modulus) Counterpart(a MultiComp) MultiComp {
	half := len(a) / 2
	if half == 0 {
		return a
	}
	re, im := a[:half], a[half:]
	return append(re.Copy(), m.Neg(im)...)
}
// Inverse computes the multiplicative inverse of a modulo Mod. Length-one
// values use big.Int.ModInverse directly; purely real values recurse on the
// real half; otherwise the conjugate trick below reduces the problem to
// inverting a purely real product.
func (m Modulus) Inverse(a MultiComp) MultiComp {
	// Split into real and imaginary halves (L==0 means a plain number).
	L := len(a)/2
	if L==0 {
		r := new(big.Int).ModInverse(a[0],m.Mod)
		return MultiComp{r}
	}
	ar := a[:L]
	ai := a[L:]
	if isZero(ai) {
		// Purely real: invert the real part, keep the zero imaginary part.
		return append(m.Inverse(ar),ai...)
	}
	/*
	Lemma: imaginary(a * counterpart(a)) = 0
	Inv(a) = b*Inv(a*b)
	Inv(a) = counterpart(a) * Inv(a*counterpart(a))
	*/
	cp := m.Counterpart(a)
	prod := m.Multiply(a,cp)
	prod = append(m.Inverse(prod[:L]),prod[L:]...)
	prod = m.Multiply(prod,cp)
	return prod
}
package paunch
import (
"math"
)
// physicsPoint is a simple 2D vector used internally for positions,
// accelerations, friction and force magnitudes.
type physicsPoint struct {
	x, y float64
}
// force is a named constant force that can be toggled; only active forces
// contribute to acceleration during Calculate.
type force struct {
	magnitude physicsPoint
	active bool
}
// Physics is an object meant to make the Movement of multiple related Movers,
// such as a Renderable and a Collision, easier. It also allows for easy
// management of multiple forces of Movement at once.
type Physics struct {
	Movers []Mover // every Mover displaced together by this object
	accel physicsPoint // current per-axis acceleration
	maxAccel physicsPoint // per-axis acceleration cap (see usingMaxAccel)
	minAccel physicsPoint // per-axis acceleration floor (see usingMinAccel)
	friction physicsPoint // per-axis pull of acceleration toward zero
	usingMaxAccel map[Axis]bool // whether the cap is active per axis
	usingMinAccel map[Axis]bool // whether the floor is active per axis
	forces map[string]force // named constant forces, applied when active
}
// NewPhysics creates a new Physics object with empty force and
// acceleration-limit tables.
func NewPhysics() *Physics {
	return &Physics{
		usingMaxAccel: make(map[Axis]bool),
		usingMinAccel: make(map[Axis]bool),
		forces:        make(map[string]force),
	}
}
// AddForce registers a named constant force, which is taken into account
// every time the Calculate method is called. New forces start disabled.
func (p *Physics) AddForce(name string, forceX, forceY float64) {
	p.forces[name] = force{
		magnitude: physicsPoint{forceX, forceY},
		active:    false,
	}
}
// EnableForce marks the named force as active for future calls to
// Calculate. Unknown names are ignored.
func (p *Physics) EnableForce(name string) {
	if f, ok := p.forces[name]; ok {
		f.active = true
		p.forces[name] = f
	}
}
// DisableForce marks the named force as inactive for future calls to
// Calculate. Unknown names are ignored.
func (p *Physics) DisableForce(name string) {
	if f, ok := p.forces[name]; ok {
		f.active = false
		p.forces[name] = f
	}
}
// DeleteForce removes a constant force from the Physics object. Deleting a
// name that was never added is a no-op.
func (p *Physics) DeleteForce(name string) {
	delete(p.forces, name)
}
// Move displaces every attached Mover by (x, y).
func (p *Physics) Move(x, y float64) {
	for _, mover := range p.Movers {
		mover.Move(x, y)
	}
}
// SetPosition places the Physics object at (x, y), measured relative to the
// position of its first Mover; every Mover shifts by the same delta. Does
// nothing when there are no Movers.
func (p *Physics) SetPosition(x, y float64) {
	if len(p.Movers) == 0 {
		return
	}
	curX, curY := p.Movers[0].Position()
	dx, dy := x-curX, y-curY
	for _, mover := range p.Movers {
		mover.Move(dx, dy)
	}
}
// Position reports the position of the first Mover, or (0, 0) when the
// object has no Movers.
func (p *Physics) Position() (x, y float64) {
	if len(p.Movers) == 0 {
		return 0, 0
	}
	return p.Movers[0].Position()
}
// Acceleration returns the current X and Y acceleration of the Physics
// object.
func (p *Physics) Acceleration() (float64, float64) {
	return p.accel.x, p.accel.y
}
// Accelerate adds a one-shot force that takes effect the next time the
// Calculate method is called.
func (p *Physics) Accelerate(forceX, forceY float64) {
	p.accel.x, p.accel.y = p.accel.x+forceX, p.accel.y+forceY
}
// SetAcceleration overwrites the acceleration on the given axis.
func (p *Physics) SetAcceleration(force float64, axis Axis) {
	if axis == X {
		p.accel.x = force
	} else if axis == Y {
		p.accel.y = force
	}
}
// SetMaxAcceleration caps acceleration on the given axis: during Calculate,
// values that would exceed the cap are clamped down to it.
func (p *Physics) SetMaxAcceleration(force float64, axis Axis) {
	if axis == X {
		p.maxAccel.x = force
	} else if axis == Y {
		p.maxAccel.y = force
	}
	p.usingMaxAccel[axis] = true
}
// SetMinAcceleration sets an acceleration floor on the given axis: during
// Calculate, values that would fall below it are clamped up to it.
func (p *Physics) SetMinAcceleration(force float64, axis Axis) {
	if axis == X {
		p.minAccel.x = force
	} else if axis == Y {
		p.minAccel.y = force
	}
	p.usingMinAccel[axis] = true
}
// SetFriction sets the per-axis friction, a force that pulls acceleration
// toward zero on every Calculate, e.g. to simulate an object slowing down
// while rubbing against a surface.
func (p *Physics) SetFriction(forceX, forceY float64) {
	p.friction = physicsPoint{x: forceX, y: forceY}
}
// Calculate advances the Physics object one step: it (1) adds every active
// constant force to the acceleration, (2) clamps the acceleration to any
// configured min/max limits, (3) moves every Mover by the resulting
// acceleration, and (4) applies friction, pulling each acceleration
// component toward zero (snapping to exactly zero when friction exceeds
// the remaining magnitude). The stage order is load-bearing: limits are
// applied before movement, friction after.
func (physics *Physics) Calculate() {
	// Stage 1: accumulate active constant forces.
	for _, val := range physics.forces {
		if val.active {
			physics.accel.x += val.magnitude.x
			physics.accel.y += val.magnitude.y
		}
	}
	// Stage 2: clamp to the configured per-axis limits.
	if physics.accel.x > physics.maxAccel.x && physics.usingMaxAccel[X] {
		physics.accel.x = physics.maxAccel.x
	} else if physics.accel.x < physics.minAccel.x && physics.usingMinAccel[X] {
		physics.accel.x = physics.minAccel.x
	}
	if physics.accel.y > physics.maxAccel.y && physics.usingMaxAccel[Y] {
		physics.accel.y = physics.maxAccel.y
	} else if physics.accel.y < physics.minAccel.y && physics.usingMinAccel[Y] {
		physics.accel.y = physics.minAccel.y
	}
	// Stage 3: displace every Mover by the clamped acceleration.
	for i := range physics.Movers {
		physics.Movers[i].Move(physics.accel.x, physics.accel.y)
	}
	// Stage 4: friction drags each component toward zero.
	if math.Abs(physics.accel.x) >= math.Abs(physics.friction.x) {
		if physics.accel.x > 0 {
			physics.accel.x -= physics.friction.x
		} else {
			physics.accel.x += physics.friction.x
		}
	} else {
		physics.accel.x = 0
	}
	if math.Abs(physics.accel.y) >= math.Abs(physics.friction.y) {
		if physics.accel.y > 0 {
			physics.accel.y -= physics.friction.y
		} else {
			physics.accel.y += physics.friction.y
		}
	} else {
		physics.accel.y = 0
	}
}
package ecdsa
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"fmt"
"math/big"
)
// p256Order is the group order n of the secp256r1 (P-256) curve.
// NOTE: this is specific to the secp256r1/P256 curve,
// and not taken from the domain params for the key itself
// (which would be a more generic approach for all EC).
var p256Order = elliptic.P256().Params().N

// p256HalfOrder is half the curve order, used as the low-s threshold.
// A bit shift of 1 to the right (Rsh) is equivalent
// to division by 2, only faster.
var p256HalfOrder = new(big.Int).Rsh(p256Order, 1)
// IsSNormalized returns true for the integer sigS if sigS falls in the
// lower half of the curve order (the "low-s" form).
func IsSNormalized(sigS *big.Int) bool {
	// Cmp <= 0 means sigS <= p256HalfOrder.
	return sigS.Cmp(p256HalfOrder) <= 0
}
// NormalizeS returns sigS unchanged when it is already in the lower half of
// the curve order, and order-sigS (the low-s form) otherwise.
func NormalizeS(sigS *big.Int) *big.Int {
	if !IsSNormalized(sigS) {
		return new(big.Int).Sub(p256Order, sigS)
	}
	return sigS
}
// signatureRaw will serialize signature to R || S.
// R, S are padded to 32 bytes respectively.
// code roughly copied from secp256k1_nocgo.go
func signatureRaw(r *big.Int, s *big.Int) []byte {
rBytes := r.Bytes()
sBytes := s.Bytes()
sigBytes := make([]byte, 64)
// 0 pad the byte arrays from the left if they aren't big enough.
copy(sigBytes[32-len(rBytes):32], rBytes)
copy(sigBytes[64-len(sBytes):64], sBytes)
return sigBytes
}
// GenPrivKey generates a new private key on the given curve using operating
// system randomness.
func GenPrivKey(curve elliptic.Curve) (PrivKey, error) {
	key, err := ecdsa.GenerateKey(curve, rand.Reader)
	if err != nil {
		return PrivKey{}, err
	}
	return PrivKey{PrivateKey: *key}, nil
}
// PrivKey wraps an ECDSA private key so that SDK-specific signing and
// serialization methods can be attached to it.
type PrivKey struct {
	ecdsa.PrivateKey
}
// PubKey returns the ECDSA public key associated with this private key.
// The second field of the returned PubKey is left nil.
func (sk *PrivKey) PubKey() PubKey {
	return PubKey{sk.PublicKey, nil}
}
// Bytes serializes the private scalar D big-endian into a fixed-width byte
// slice sized to the curve's field. Returns nil for a nil receiver.
func (sk *PrivKey) Bytes() []byte {
	if sk == nil {
		return nil
	}
	// Round the field size up to whole bytes.
	size := (sk.Curve.Params().BitSize + 7) / 8
	out := make([]byte, size)
	sk.D.FillBytes(out)
	return out
}
// Sign hashes msg with SHA-256 and signs the digest using ECDSA,
// implementing the SDK PrivKey interface.
//
// NOTE: the ecdsa.Sign function (not the method!) is called directly
// because the raw s value of the signature is needed for low-s
// normalization. See issue:
// https://github.com/cosmos/cosmos-sdk/issues/9723
// The result is raw-encoded as two fixed-width 32-byte values
// concatenated (R || S), reusing the code copied from secp256k1_nocgo.go.
func (sk *PrivKey) Sign(msg []byte) ([]byte, error) {
	digest := sha256.Sum256(msg)
	r, s, err := ecdsa.Sign(rand.Reader, &sk.PrivateKey, digest[:])
	if err != nil {
		return nil, err
	}
	return signatureRaw(r, NormalizeS(s)), nil
}
// String returns a redacted representation of the private key based on the
// given curve name; the secret scalar is never printed.
func (sk *PrivKey) String(name string) string {
	return name + "{-}"
}
// MarshalTo implements the proto.Marshaler interface by writing the
// fixed-width key bytes into dAtA and reporting the key's encoded length.
func (sk *PrivKey) MarshalTo(dAtA []byte) (int, error) {
	encoded := sk.Bytes()
	copy(dAtA, encoded)
	return len(encoded), nil
}
// Unmarshal implements proto.Marshaler interface.
func (sk *PrivKey) Unmarshal(bz []byte, curve elliptic.Curve, expectedSize int) error {
if len(bz) != expectedSize {
return fmt.Errorf("wrong ECDSA SK bytes, expecting %d bytes", expectedSize)
}
sk.Curve = curve
sk.D = new(big.Int).SetBytes(bz)
sk.X, sk.Y = curve.ScalarBaseMult(bz)
return nil
} | crypto/keys/internal/ecdsa/privkey.go | 0.853913 | 0.434521 | privkey.go | starcoder |
package stool
// ViewExplainer answers ancestry and variable-usage queries about the view
// graph produced by its ViewIndexer.
type ViewExplainer struct {
	ViewIndexer ViewIndexer
}
// VariableCollection accumulates variable names with a count of how many
// views in a traversal referenced each of them.
type VariableCollection struct {
	Variables map[string]int
}
// ParentCollection accumulates the nodes visited during an upward (parent)
// traversal of the view graph, keyed by view name.
type ParentCollection struct {
	Parents map[string]ViewNode
}
// ChildrenCollection accumulates the nodes visited during a downward
// (children) traversal of the view graph, keyed by view name.
type ChildrenCollection struct {
	Children map[string]ViewNode
}
// CollectParentsFrom walks the view graph upward from viewName and returns
// the set of names of every (transitive) ancestor view. The starting view
// itself is excluded from the result.
func (ve *ViewExplainer) CollectParentsFrom(viewName string) map[string]bool {
	nodes := ve.ViewIndexer.IndexViews(ve.ViewIndexer.RootDir)
	collected := ve.makeParentCollection()
	ve.collectParents(viewName, nodes, collected)
	names := make(map[string]bool, len(collected.Parents))
	for _, node := range collected.Parents {
		names[node.Name] = true
	}
	delete(names, viewName)
	return names
}
// makeParentCollection returns an empty, ready-to-use ParentCollection.
func (ve *ViewExplainer) makeParentCollection() *ParentCollection {
	collection := &ParentCollection{}
	collection.Parents = make(map[string]ViewNode)
	return collection
}
// CollectChildrenFrom walks the view graph downward from viewName and
// returns the set of names of every (transitive) descendant view. The
// starting view itself is excluded from the result.
func (ve *ViewExplainer) CollectChildrenFrom(viewName string) map[string]bool {
	nodes := ve.ViewIndexer.IndexViews(ve.ViewIndexer.RootDir)
	collected := ve.getChildrenCollection()
	ve.collectChildren(viewName, nodes, collected)
	names := make(map[string]bool, len(collected.Children))
	for _, node := range collected.Children {
		names[node.Name] = true
	}
	delete(names, viewName)
	return names
}
// getChildrenCollection returns an empty, ready-to-use ChildrenCollection.
func (ve *ViewExplainer) getChildrenCollection() *ChildrenCollection {
	collection := &ChildrenCollection{}
	collection.Children = make(map[string]ViewNode)
	return collection
}
// collectParents records viewName's node in the collection and recurses into
// each declared parent. Unknown names yield a zero-valued ViewNode.
// NOTE(review): a cycle in the parent links would recurse forever — confirm
// the indexer guarantees an acyclic graph.
func (ve *ViewExplainer) collectParents(viewName string, nodes map[string]ViewNode, collection *ParentCollection) {
	node := nodes[viewName]
	collection.Parents[viewName] = node
	for _, parentName := range node.Parents {
		ve.collectParents(parentName, nodes, collection)
	}
}
// collectChildren records viewName's node in the collection and recurses
// into each declared child. Unknown names yield a zero-valued ViewNode.
// NOTE(review): a cycle in the child links would recurse forever — confirm
// the indexer guarantees an acyclic graph.
func (ve *ViewExplainer) collectChildren(viewName string, nodes map[string]ViewNode, collection *ChildrenCollection) {
	node := nodes[viewName]
	collection.Children[viewName] = node
	for _, childName := range node.Children {
		ve.collectChildren(childName, nodes, collection)
	}
}
// CollectVariablesFromParents counts variable occurrences in viewName and
// all of its (transitive) parent views.
func (ve *ViewExplainer) CollectVariablesFromParents(viewName string) map[string]int {
	nodes := ve.ViewIndexer.IndexViews(ve.ViewIndexer.RootDir)
	collection := ve.makeVariableCollection()
	ve.collectTreeVariables(viewName, nodes, collection)
	return collection.Variables
}
// CollectVariablesFromChildren counts variable occurrences in viewName and
// all of its (transitive) child views.
func (ve *ViewExplainer) CollectVariablesFromChildren(viewName string) map[string]int {
	nodes := ve.ViewIndexer.IndexViews(ve.ViewIndexer.RootDir)
	collection := ve.makeVariableCollection()
	ve.collectTreeVariablesDesc(viewName, nodes, collection)
	return collection.Variables
}
// makeVariableCollection returns an empty, ready-to-use VariableCollection.
func (ve *ViewExplainer) makeVariableCollection() *VariableCollection {
	collection := &VariableCollection{}
	collection.Variables = make(map[string]int)
	return collection
}
// collectTreeVariables tallies the variables of viewName's node into the
// collection, then recurses into each parent.
func (ve *ViewExplainer) collectTreeVariables(viewName string, nodes map[string]ViewNode, collection *VariableCollection) {
	node := nodes[viewName]
	for variable := range node.Variables {
		collection.Variables[variable]++
	}
	// Ranging over an empty slice is a no-op, so no length guard is needed.
	for _, parentName := range node.Parents {
		ve.collectTreeVariables(parentName, nodes, collection)
	}
}
func (this *ViewExplainer) collectTreeVariablesDesc(viewName string, nodes map[string]ViewNode, collection *VariableCollection) {
node, _ := nodes[viewName]
for variable := range node.Variables {
collection.Variables[variable]++
}
if len(node.Children) > 0 {
for _, child := range node.Children {
this.collectTreeVariablesDesc(child, nodes, collection)
}
}
} | stool/variable_collector.go | 0.593374 | 0.453746 | variable_collector.go | starcoder |
package config
/**
 * Cmppolicylabel is the configuration for a Citrix ADC compression policy
 * label resource. Fields below the "Read only Parameter" marker are
 * populated by the appliance and never sent on writes.
 */
type Cmppolicylabel struct {
	/**
	* Name of the HTTP compression policy label. Must begin with a letter, number, or the underscore character (_). Additional characters allowed, after the first character, are the hyphen (-), period (.) pound sign (#), space ( ), at sign (@), equals (=), and colon (:). The name must be unique within the list of policy labels for compression policies. Can be renamed after the policy label is created.
	The following requirement applies only to the Citrix ADC CLI:
	If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my cmp policylabel" or 'my cmp policylabel').
	*/
	Labelname string `json:"labelname,omitempty"`
	/**
	* Type of packets (request packets or response) against which to match the policies bound to this policy label.
	*/
	Type string `json:"type,omitempty"`
	/**
	* New name for the compression policy label. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
	The following requirement applies only to the Citrix ADC CLI:
	If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my cmp policylabel" or 'my cmp policylabel').
	*/
	Newname string `json:"newname,omitempty"`
	//------- Read only Parameter ---------;
	Numpol string `json:"numpol,omitempty"`
	Hits string `json:"hits,omitempty"`
	Priority string `json:"priority,omitempty"`
	Gotopriorityexpression string `json:"gotopriorityexpression,omitempty"`
	Labeltype string `json:"labeltype,omitempty"`
	Invokelabelname string `json:"invoke_labelname,omitempty"`
	Flowtype string `json:"flowtype,omitempty"`
	Description string `json:"description,omitempty"`
}
package chipmunk
import (
"fmt"
goz "github.com/20tab/gozmo"
"github.com/vova616/chipmunk"
"github.com/vova616/chipmunk/vect"
)
// space is the process-wide chipmunk simulation space, created lazily by
// checkSpace on first use.
var space *chipmunk.Space

// Gravity is the gravity vector applied when the space is first created.
// Changing it after the space exists has no effect on the running simulation.
var Gravity goz.Vector2 = goz.Vector2{0, -9.8}

// checkSpace lazily initializes the global simulation space with the
// configured Gravity.
// NOTE(review): not goroutine-safe — assumes single-threaded engine updates;
// confirm components are only started from the main loop.
func checkSpace() {
	if space == nil {
		space = chipmunk.NewSpace()
		space.Gravity = vect.Vect{vect.Float(Gravity[0]), vect.Float(Gravity[1])}
	}
}
// Rigid body.
// RigidBody is a dynamic physics-body component: the chipmunk simulation
// drives the body, and the owning GameObject's transform follows it.
type RigidBody struct {
	body *chipmunk.Body // underlying chipmunk body, created in Start
	weight float32      // mass passed to chipmunk.NewBody
	initialized bool    // true once the body has been seeded from the GameObject transform
}

// Start lazily creates the global space, then creates the chipmunk body with
// the configured weight (and a moment of 1) and registers it with the space.
func (rbody *RigidBody) Start(gameObject *goz.GameObject) {
	checkSpace()
	rbody.body = chipmunk.NewBody(vect.Float(rbody.weight), vect.Float(1))
	space.AddBody(rbody.body)
}
// Update seeds the body from the GameObject's position/rotation on the first
// call, then on every call copies the simulated position and rotation back
// onto the GameObject.
func (rbody *RigidBody) Update(gameObject *goz.GameObject) {
	if !rbody.initialized {
		pos := gameObject.Position
		rbody.body.SetPosition(vect.Vect{vect.Float(pos[0]), vect.Float(pos[1])})
		rbody.body.SetAngle(vect.Float(gameObject.Rotation))
		rbody.initialized = true
	}
	pos := rbody.body.Position()
	gameObject.SetPosition(float32(pos.X), float32(pos.Y))
	gameObject.Rotation = float32(rbody.body.Angle())
}
// GetType returns the component type identifier used by
// GameObject.GetComponentByType lookups.
func (rbody *RigidBody) GetType() string {
	return "RigidBody"
}
// SetAttr sets a writable attribute of the rigid body.
// Supported attributes mirror GetAttr: "velocityX" and "velocityY"; each
// replaces one velocity component while preserving the other.
// Unknown attributes are silently ignored and nil is returned, matching the
// original behavior so existing callers are unaffected.
func (rbody *RigidBody) SetAttr(attr string, value interface{}) error {
	switch attr {
	case "velocityX":
		x, _ := goz.CastFloat32(value)
		oldV := rbody.body.Velocity()
		rbody.body.SetVelocity(x, float32(oldV.Y))
	case "velocityY":
		// Symmetric with "velocityX" (GetAttr already exposes both components).
		y, _ := goz.CastFloat32(value)
		oldV := rbody.body.Velocity()
		rbody.body.SetVelocity(float32(oldV.X), y)
	}
	return nil
}
// GetAttr exposes the body's velocity components as "velocityX" and
// "velocityY"; any other attribute name yields an error.
func (rbody *RigidBody) GetAttr(attr string) (interface{}, error) {
	switch attr {
	case "velocityX":
		return float32(rbody.body.Velocity().X), nil
	case "velocityY":
		return float32(rbody.body.Velocity().Y), nil
	}
	return nil, fmt.Errorf("%v attribute of %T not found", attr, rbody)
}
// NewRigidBody creates a RigidBody component with the given weight (mass).
func NewRigidBody(weight float32) goz.Component {
	body := RigidBody{weight: weight}
	return &body
}

// initRigidBody is the factory registered with the engine; it currently
// ignores args and uses a default weight of 1.
func initRigidBody(args []interface{}) goz.Component {
	return NewRigidBody(1)
}
// Static body.
// StaticBody is an immovable physics-body component (floors, walls, etc.).
type StaticBody struct {
	body *chipmunk.Body // underlying static chipmunk body, created in Start
	initialized bool    // true once the body has been seeded from the GameObject transform
}

// Start creates the underlying static chipmunk body and registers it with
// the global space.
func (sbody *StaticBody) Start(gameObject *goz.GameObject) {
	checkSpace()
	sbody.body = chipmunk.NewBodyStatic()
	space.AddBody(sbody.body)
}

// Update seeds the body from the GameObject transform on the first call,
// then mirrors the body's position/rotation back onto the GameObject.
func (sbody *StaticBody) Update(gameObject *goz.GameObject) {
	if !sbody.initialized {
		pos := gameObject.Position
		sbody.body.SetPosition(vect.Vect{vect.Float(pos[0]), vect.Float(pos[1])})
		sbody.body.SetAngle(vect.Float(gameObject.Rotation))
		sbody.initialized = true
	}
	pos := sbody.body.Position()
	gameObject.SetPosition(float32(pos.X), float32(pos.Y))
	gameObject.Rotation = float32(sbody.body.Angle())
}

// GetType returns the component type identifier used by
// GameObject.GetComponentByType lookups.
func (sbody *StaticBody) GetType() string {
	return "StaticBody"
}

// NewStaticBody creates a StaticBody component.
func NewStaticBody() goz.Component {
	body := StaticBody{}
	return &body
}

// initStaticBody is the factory registered with the engine.
func initStaticBody(args []interface{}) goz.Component {
	return NewStaticBody()
}
// Circle shape.
// ShapeCircle attaches a circular collision shape to the GameObject's
// physics body (RigidBody or StaticBody).
// NOTE(review): unlike the body components, ShapeCircle defines no GetType —
// confirm whether GetComponentByType lookups for shapes are ever needed.
type ShapeCircle struct {
	shape *chipmunk.CircleShape
	initialized bool
}

// Start only validates that a physic body component is present; the shape
// itself is attached to the body on the first Update.
func (circle *ShapeCircle) Start(gameObject *goz.GameObject) {
	component := gameObject.GetComponentByType("RigidBody")
	if component != nil {
		return
	}
	component = gameObject.GetComponentByType("StaticBody")
	if component != nil {
		return
	}
	fmt.Println("ShapeCircle requires a physic body")
}

// Update attaches the shape to the owning body exactly once. For rigid
// bodies the moment of inertia is recomputed from the shape and the body's
// mass before the shape is added to the body and the space.
func (circle *ShapeCircle) Update(gameObject *goz.GameObject) {
	if circle.initialized {
		return
	}
	circle.initialized = true
	component := gameObject.GetComponentByType("RigidBody")
	if component != nil {
		rbody := component.(*RigidBody)
		moment := circle.shape.Moment(float32(rbody.body.Mass()))
		rbody.body.SetMoment(moment)
		rbody.body.AddShape(circle.shape.Shape)
		space.AddShape(circle.shape.Shape)
		return
	}
	component = gameObject.GetComponentByType("StaticBody")
	if component != nil {
		sbody := component.(*StaticBody)
		sbody.body.AddShape(circle.shape.Shape)
		space.AddShape(circle.shape.Shape)
		return
	}
}

// SetAttr updates the circle's "radius" and refreshes the shape's cached
// collision data; other attributes are silently ignored.
func (circle *ShapeCircle) SetAttr(attr string, value interface{}) error {
	switch attr {
	case "radius":
		radius, _ := goz.CastFloat32(value)
		circle.shape.Radius = vect.Float(radius)
		circle.shape.Shape.Update()
	}
	return nil
}

// GetAttr exposes the circle's "radius"; other names yield an error.
func (circle *ShapeCircle) GetAttr(attr string) (interface{}, error) {
	switch attr {
	case "radius":
		return float32(circle.shape.Radius), nil
	}
	return nil, fmt.Errorf("%v attribute of %T not found", attr, circle)
}

// NewShapeCircle creates a circle shape of radius 0 centered on the body.
func NewShapeCircle() goz.Component {
	circle := ShapeCircle{}
	circle.shape = chipmunk.NewCircle(vect.Vector_Zero, 0).ShapeClass.(*chipmunk.CircleShape)
	return &circle
}

// TODO: pass the radius as argument.
// initShapeCircle is the factory registered with the engine.
func initShapeCircle(args []interface{}) goz.Component {
	return NewShapeCircle()
}
// Box shape.
// ShapeBox attaches a rectangular collision shape to the GameObject's
// physics body (RigidBody or StaticBody).
// NOTE(review): like ShapeCircle, this component defines no GetType —
// confirm whether GetComponentByType lookups for shapes are ever needed.
type ShapeBox struct {
	shape *chipmunk.BoxShape
	initialized bool
}

// Start only validates that a physic body component is present; the shape
// itself is attached to the body on the first Update.
func (box *ShapeBox) Start(gameObject *goz.GameObject) {
	component := gameObject.GetComponentByType("RigidBody")
	if component != nil {
		return
	}
	component = gameObject.GetComponentByType("StaticBody")
	if component != nil {
		return
	}
	fmt.Println("ShapeBox requires a physic body")
}

// Update attaches the shape to the owning body exactly once. For rigid
// bodies the moment of inertia is recomputed from the shape and the body's
// mass before the shape is added to the body and the space.
func (box *ShapeBox) Update(gameObject *goz.GameObject) {
	if box.initialized {
		return
	}
	box.initialized = true
	component := gameObject.GetComponentByType("RigidBody")
	if component != nil {
		rbody := component.(*RigidBody)
		moment := box.shape.Moment(float32(rbody.body.Mass()))
		rbody.body.SetMoment(moment)
		rbody.body.AddShape(box.shape.Shape)
		space.AddShape(box.shape.Shape)
		return
	}
	component = gameObject.GetComponentByType("StaticBody")
	if component != nil {
		sbody := component.(*StaticBody)
		sbody.body.AddShape(box.shape.Shape)
		space.AddShape(box.shape.Shape)
		return
	}
}

// SetAttr updates the box's "width" or "height" and rebuilds the underlying
// polygon; other attributes are silently ignored.
func (box *ShapeBox) SetAttr(attr string, value interface{}) error {
	switch attr {
	case "width":
		w, _ := goz.CastFloat32(value)
		box.shape.Width = vect.Float(w)
		box.shape.UpdatePoly()
	case "height":
		h, _ := goz.CastFloat32(value)
		box.shape.Height = vect.Float(h)
		box.shape.UpdatePoly()
	}
	return nil
}

// GetAttr exposes the box's "width" and "height"; other names yield an error.
func (box *ShapeBox) GetAttr(attr string) (interface{}, error) {
	switch attr {
	case "width":
		return float32(box.shape.Width), nil
	case "height":
		return float32(box.shape.Height), nil
	}
	return nil, fmt.Errorf("%v attribute of %T not found", attr, box)
}

// NewShapeBox creates a zero-sized box shape centered on the body.
func NewShapeBox() goz.Component {
	box := ShapeBox{}
	box.shape = chipmunk.NewBox(vect.Vector_Zero, 0, 0).ShapeClass.(*chipmunk.BoxShape)
	return &box
}

// TODO: pass width and height.
// initShapeBox is the factory registered with the engine.
func initShapeBox(args []interface{}) goz.Component {
	return NewShapeBox()
}
// updateWorld is called at each world update.
// It advances the physics simulation by deltaTime; it is a no-op until the
// first physics component has created the space.
func updateWorld(scene *goz.Scene, deltaTime float32) {
	if space == nil {
		return
	}
	space.Step(vect.Float(deltaTime))
}

// init registers the physics components and the per-frame world updater
// with the engine.
func init() {
	goz.RegisterComponent("RigidBody", initRigidBody)
	goz.RegisterComponent("StaticBody", initStaticBody)
	goz.RegisterComponent("ShapeCircle", initShapeCircle)
	goz.RegisterComponent("ShapeBox", initShapeBox)
	goz.RegisterUpdater(updateWorld)
}
package merkle
import (
"bytes"
"encoding/base64"
"encoding/hex"
"fmt"
log "github.com/golang/glog"
)
// RootHashMismatchError indicates an unexpected root hash value.
type RootHashMismatchError struct {
	ExpectedHash []byte
	ActualHash   []byte
}

// Error implements the error interface. The hashes are rendered in hex so
// the message is human-readable; the previous %v formatting dumped the raw
// byte slices as decimal lists.
func (r RootHashMismatchError) Error() string {
	return fmt.Sprintf("root hash mismatch got: %x expected: %x", r.ActualHash, r.ExpectedHash)
}
// CompactMerkleTree is a compact Merkle tree representation.
// Uses log(n) nodes to represent the current on-disk tree.
type CompactMerkleTree struct {
	hasher TreeHasher // hash strategy for leaves and internal nodes
	root []byte       // current root hash, recomputed after every mutation
	// the list of "dangling" left-hand nodes, NOTE: index 0 is the leaf, not the root.
	nodes [][]byte
	size int64 // number of leaves ever added to the tree
}
// isPerfectTree reports whether x is a power of two, i.e. whether a tree
// with x leaves is perfectly balanced.
func isPerfectTree(x int64) bool {
	if x == 0 {
		return false
	}
	return x&(x-1) == 0
}
// bitLen returns the number of bits required to represent x
// (0 for non-positive values).
func bitLen(x int64) int {
	n := 0
	for ; x > 0; x >>= 1 {
		n++
	}
	return n
}
// GetNodeFunc is a function prototype which can look up particular nodes within a non-compact Merkle tree.
// Used by the CompactMerkleTree to populate itself with correct state when starting up with a non-empty tree.
type GetNodeFunc func(depth int, index int64) ([]byte, error)

// NewCompactMerkleTreeWithState creates a new CompactMerkleTree for the passed in |size|.
// This can fail if the nodes required to recreate the tree state cannot be fetched or the calculated
// root hash after population does not match the value we expect.
// |f| will be called a number of times with the co-ordinates of internal MerkleTree nodes whose hash values are
// required to initialize the internal state of the CompactMerkleTree. |expectedRoot| is the known-good tree root
// of the tree at |size|, and is used to verify the correct initial state of the CompactMerkleTree after initialisation.
func NewCompactMerkleTreeWithState(hasher TreeHasher, size int64, f GetNodeFunc, expectedRoot []byte) (*CompactMerkleTree, error) {
	sizeBits := bitLen(size)
	r := CompactMerkleTree{
		hasher: hasher,
		nodes: make([][]byte, sizeBits),
		root: hasher.HashEmpty(),
		size: size,
	}
	if isPerfectTree(size) {
		log.V(1).Info("Is perfect tree.")
		// A perfect tree has a single dangling node: its root.
		r.root = append(make([]byte, 0, len(expectedRoot)), expectedRoot...)
		r.nodes[sizeBits-1] = r.root
	} else {
		// Pull in the nodes we need to repopulate our compact tree and verify the root
		// Walk the bits of size from least to most significant; each set bit
		// corresponds to a dangling node on the tree's right edge at that depth.
		for depth := 0; depth < sizeBits; depth++ {
			if size&1 == 1 {
				index := size - 1
				log.V(1).Infof("fetching d: %d i: %d, leaving size %d", depth, index, size)
				h, err := f(depth, index)
				if err != nil {
					log.Warningf("Failed to fetch node depth %d index %d: %s", depth, index, err)
					return nil, err
				}
				r.nodes[depth] = h
			}
			size >>= 1
		}
		// Derive the root from the dangling nodes using a no-op setter.
		r.recalculateRoot(func(depth int, index int64, hash []byte) error {
			return nil
		})
	}
	if !bytes.Equal(r.root, expectedRoot) {
		log.Warningf("Corrupt state, expected root %s, got %s", hex.EncodeToString(expectedRoot[:]), hex.EncodeToString(r.root[:]))
		return nil, RootHashMismatchError{ActualHash: r.root, ExpectedHash: expectedRoot}
	}
	log.V(1).Infof("Resuming at size %d, with root: %s", r.size, base64.StdEncoding.EncodeToString(r.root[:]))
	return &r, nil
}
// NewCompactMerkleTree creates a new CompactMerkleTree with size zero. This always succeeds.
func NewCompactMerkleTree(hasher TreeHasher) *CompactMerkleTree {
	return &CompactMerkleTree{
		hasher: hasher,
		root:   hasher.HashEmpty(),
		nodes:  [][]byte{},
		size:   0,
	}
}
// CurrentRoot returns the current root hash.
func (c CompactMerkleTree) CurrentRoot() []byte {
	return c.root
}

// DumpNodes logs the internal state of the CompactMerkleTree, and is used for debugging.
// One line is logged per tree level; levels with no dangling node print "-".
func (c CompactMerkleTree) DumpNodes() {
	log.Infof("Tree Nodes @ %d", c.size)
	mask := int64(1)
	numBits := bitLen(c.size)
	for bit := 0; bit < numBits; bit++ {
		if c.size&mask != 0 {
			log.Infof("%d: %s", bit, base64.StdEncoding.EncodeToString(c.nodes[bit][:]))
		} else {
			log.Infof("%d: -", bit)
		}
		mask <<= 1
	}
}
// setNodeFunc is a callback used to persist a recomputed node hash at the
// given (depth, index) coordinate of the full Merkle tree.
type setNodeFunc func(depth int, index int64, hash []byte) error

// recalculateRoot recomputes c.root by hashing the dangling left-hand nodes
// together, from the lowest set bit of c.size upwards. Each newly formed
// internal node is reported through f.
func (c *CompactMerkleTree) recalculateRoot(f setNodeFunc) error {
	if c.size == 0 {
		return nil
	}
	index := c.size
	var newRoot []byte
	first := true
	mask := int64(1)
	numBits := bitLen(c.size)
	for bit := 0; bit < numBits; bit++ {
		index >>= 1
		if c.size&mask != 0 {
			if first {
				// The lowest dangling node seeds the running hash.
				newRoot = c.nodes[bit]
				first = false
			} else {
				newRoot = c.hasher.HashChildren(c.nodes[bit], newRoot)
				if err := f(bit+1, index, newRoot); err != nil {
					return err
				}
			}
		}
		mask <<= 1
	}
	c.root = newRoot
	return nil
}
// AddLeaf calculates the leafhash of |data| and appends it to the tree.
// |f| is a callback which will be called multiple times with the full MerkleTree coordinates of nodes whose hash should be updated.
// It returns the sequence number assigned to the new leaf and its leaf hash.
func (c *CompactMerkleTree) AddLeaf(data []byte, f setNodeFunc) (int64, []byte, error) {
	h := c.hasher.HashLeaf(data)
	seq, err := c.AddLeafHash(h, f)
	if err != nil {
		return 0, nil, err
	}
	return seq, h, err
}
// AddLeafHash adds the specified |leafHash| to the tree.
// |f| is a callback which will be called multiple times with the full MerkleTree coordinates of nodes whose hash should be updated.
// NOTE(review): the deferred recalculateRoot call discards any error
// returned by |f| — confirm callers can tolerate that.
func (c *CompactMerkleTree) AddLeafHash(leafHash []byte, f setNodeFunc) (int64, error) {
	// The size increment and root recalculation run after the body, whichever
	// return path is taken.
	defer func() {
		c.size++
		// TODO(al): do this lazily
		c.recalculateRoot(f)
	}()
	assignedSeq := c.size
	index := assignedSeq
	if err := f(0, index, leafHash); err != nil {
		return 0, err
	}
	if c.size == 0 {
		// new tree
		c.nodes = append(c.nodes, leafHash)
		return assignedSeq, nil
	}
	// Initialize our running hash value to the leaf hash
	hash := leafHash
	bit := 0
	// Iterate over the bits in our tree size
	for t := c.size; t > 0; t >>= 1 {
		index >>= 1
		if t&1 == 0 {
			// Just store the running hash here; we're done.
			c.nodes[bit] = hash
			// Don't re-write the leaf hash node (we've done it above already)
			if bit > 0 {
				// Store the leaf hash node
				if err := f(bit, index, hash); err != nil {
					return 0, err
				}
			}
			return assignedSeq, nil
		}
		// The bit is set so we have a node at that position in the nodes list so hash it with our running hash:
		hash = c.hasher.HashChildren(c.nodes[bit], hash)
		// Store the resulting parent hash.
		if err := f(bit+1, index, hash); err != nil {
			return 0, err
		}
		// Now, clear this position in the nodes list as the hash it formerly contained will be propagated upwards.
		c.nodes[bit] = nil
		// Figure out if we're done:
		if bit+1 >= len(c.nodes) {
			// If we're extending the node list then add a new entry with our
			// running hash, and we're done.
			c.nodes = append(c.nodes, hash)
			return assignedSeq, nil
		} else if t&0x02 == 0 {
			// If the node above us is unused at this tree size, then store our
			// running hash there, and we're done.
			c.nodes[bit+1] = hash
			return assignedSeq, nil
		}
		// Otherwise, go around again.
		bit++
	}
	// We should never get here, because that'd mean we had a running hash which
	// we've not stored somewhere.
	return 0, fmt.Errorf("AddLeaf failed running hash not cleared: h: %v seq: %d", leafHash, assignedSeq)
}
// Size returns the current size of the tree, that is, the number of leaves ever added to the tree.
func (c CompactMerkleTree) Size() int64 {
	return c.size
}

// Hashes returns a copy of the set of node hashes that comprise the compact representation of the tree.
// For a perfect tree (power-of-two leaf count) it returns nil, since the
// root alone fully describes the tree.
func (c CompactMerkleTree) Hashes() [][]byte {
	if isPerfectTree(c.size) {
		return nil
	}
	n := make([][]byte, len(c.nodes))
	copy(n, c.nodes)
	return n
}
// Depth returns the number of levels in the tree.
func (c CompactMerkleTree) Depth() int {
if c.size == 0 {
return 0
}
return bitLen(c.size - 1)
} | merkle/compact_merkle_tree.go | 0.735452 | 0.541469 | compact_merkle_tree.go | starcoder |
package geom
import "math"
// geom0 is the base representation shared by all geometries: a layout, the
// number of float64 values per coordinate (stride), the packed coordinate
// data, and a spatial reference ID.
type geom0 struct {
	layout Layout
	stride int
	flatCoords []float64
	srid int
}

// geom1 is a geometry made of a flat list of coordinates.
type geom1 struct {
	geom0
}

// geom2 adds one level of structure: ends[i] marks where sub-structure i
// stops in flatCoords (e.g. the rings of a polygon).
type geom2 struct {
	geom1
	ends []int
}

// geom3 adds a second level of structure (e.g. a multi-polygon).
type geom3 struct {
	geom1
	endss [][]int
}

// CoordConvert maps an (x, y) pair to a converted (x, y) pair; see Convert.
type CoordConvert func(x, y float64) (float64, float64)
// Bounds returns the bounds of g.
func (g *geom0) Bounds() *Bounds {
	return NewBounds(g.layout).extendFlatCoords(g.flatCoords, 0, len(g.flatCoords), g.stride)
}

// Coords returns all the coordinates in g, i.e. a single coordinate.
func (g *geom0) Coords() Coord {
	return inflate0(g.flatCoords, 0, len(g.flatCoords), g.stride)
}

// Empty returns true if g contains no coordinates.
func (g *geom0) Empty() bool {
	return len(g.flatCoords) == 0
}

// Ends returns the end indexes of sub-structures of g, i.e. an empty slice.
func (g *geom0) Ends() []int {
	return nil
}

// Endss returns the end indexes of sub-sub-structures of g, i.e. an empty
// slice.
func (g *geom0) Endss() [][]int {
	return nil
}

// FlatCoords returns the flat coordinates of g.
func (g *geom0) FlatCoords() []float64 {
	return g.flatCoords
}

// Layout returns g's layout.
func (g *geom0) Layout() Layout {
	return g.layout
}

// NumCoords returns the number of coordinates in g, i.e. 1.
func (g *geom0) NumCoords() int {
	return 1
}

// Reserve reserves space in g for n coordinates.
// It only ever grows capacity; existing coordinates are preserved.
func (g *geom0) Reserve(n int) {
	if cap(g.flatCoords) < n*g.stride {
		fcs := make([]float64, len(g.flatCoords), n*g.stride)
		copy(fcs, g.flatCoords)
		g.flatCoords = fcs
	}
}

// SRID returns g's SRID.
func (g *geom0) SRID() int {
	return g.srid
}

// setCoords replaces g's single coordinate with coords0, validating its
// length against g's stride.
func (g *geom0) setCoords(coords0 []float64) error {
	var err error
	g.flatCoords, err = deflate0(nil, coords0, g.stride)
	return err
}

// Stride returns g's stride.
func (g *geom0) Stride() int {
	return g.stride
}
// verify checks g's internal consistency: the stride must match the layout,
// and the flat coordinate slice must hold exactly one coordinate (or be
// empty for a zero-stride layout).
func (g *geom0) verify() error {
	if g.stride != g.layout.Stride() {
		return errStrideLayoutMismatch
	}
	if g.stride == 0 {
		if len(g.flatCoords) != 0 {
			return errNonEmptyFlatCoords
		}
		return nil
	}
	if len(g.flatCoords) != g.stride {
		return errLengthStrideMismatch
	}
	return nil
}
// coords convert
// Convert applies coordConvert to every coordinate of g in place.
// NOTE(review): only the first two values of each coordinate (presumably
// X and Y) are converted; extra dimensions (Z/M) are left untouched, and a
// stride < 2 would panic on the i+1 access — confirm callers' layouts.
func (g *geom0) Convert(coordConvert CoordConvert) {
	for i := 0; i < len(g.flatCoords); i += g.stride {
		g.flatCoords[i], g.flatCoords[i+1] = coordConvert(g.flatCoords[i], g.flatCoords[i+1])
	}
}
// Coord returns the ith coord of g.
// The returned Coord aliases g's backing array; mutating it mutates g.
func (g *geom1) Coord(i int) Coord {
	return g.flatCoords[i*g.stride : (i+1)*g.stride]
}

// Coords unpacks and returns all of g's coordinates.
func (g *geom1) Coords() []Coord {
	return inflate1(g.flatCoords, 0, len(g.flatCoords), g.stride)
}

// NumCoords returns the number of coordinates in g.
func (g *geom1) NumCoords() int {
	return len(g.flatCoords) / g.stride
}

// Reverse reverses the order of g's coordinates.
func (g *geom1) Reverse() {
	reverse1(g.flatCoords, 0, len(g.flatCoords), g.stride)
}

// setCoords replaces g's coordinates with coords1, validating each
// coordinate's length against g's stride.
func (g *geom1) setCoords(coords1 []Coord) error {
	var err error
	g.flatCoords, err = deflate1(nil, coords1, g.stride)
	return err
}

// verify checks g's internal consistency: the stride must match the layout
// and the flat coordinate slice must contain a whole number of coordinates.
func (g *geom1) verify() error {
	if g.stride != g.layout.Stride() {
		return errStrideLayoutMismatch
	}
	if g.stride == 0 {
		if len(g.flatCoords) != 0 {
			return errNonEmptyFlatCoords
		}
	} else {
		if len(g.flatCoords)%g.stride != 0 {
			return errLengthStrideMismatch
		}
	}
	return nil
}
// Coords returns all of g's coordinates.
func (g *geom2) Coords() [][]Coord {
	return inflate2(g.flatCoords, 0, g.ends, g.stride)
}

// Ends returns the end indexes of all sub-structures in g.
func (g *geom2) Ends() []int {
	return g.ends
}

// Reverse reverses the order of coordinates for each sub-structure in g.
func (g *geom2) Reverse() {
	reverse2(g.flatCoords, 0, g.ends, g.stride)
}

// setCoords replaces g's coordinates with coords2, rebuilding both the flat
// coordinate slice and the end indexes.
func (g *geom2) setCoords(coords2 [][]Coord) error {
	var err error
	g.flatCoords, g.ends, err = deflate2(nil, nil, coords2, g.stride)
	return err
}

// verify checks g's internal consistency: stride vs layout, coordinate
// alignment, and that the end indexes are stride-aligned, non-decreasing,
// and terminate exactly at the end of the flat coordinate slice.
func (g *geom2) verify() error {
	if g.stride != g.layout.Stride() {
		return errStrideLayoutMismatch
	}
	if g.stride == 0 {
		if len(g.flatCoords) != 0 {
			return errNonEmptyFlatCoords
		}
		if len(g.ends) != 0 {
			return errNonEmptyEnds
		}
		return nil
	}
	if len(g.flatCoords)%g.stride != 0 {
		return errLengthStrideMismatch
	}
	offset := 0
	for _, end := range g.ends {
		if end%g.stride != 0 {
			return errMisalignedEnd
		}
		if end < offset {
			return errOutOfOrderEnd
		}
		offset = end
	}
	if offset != len(g.flatCoords) {
		return errIncorrectEnd
	}
	return nil
}
// Coords returns all the coordinates in g.
func (g *geom3) Coords() [][][]Coord {
	return inflate3(g.flatCoords, 0, g.endss, g.stride)
}

// Endss returns a list of all the sub-sub-structures in g.
func (g *geom3) Endss() [][]int {
	return g.endss
}

// Reverse reverses the order of coordinates for each sub-sub-structure in g.
func (g *geom3) Reverse() {
	reverse3(g.flatCoords, 0, g.endss, g.stride)
}

// setCoords replaces g's coordinates with coords3, rebuilding the flat
// coordinate slice and the nested end indexes.
func (g *geom3) setCoords(coords3 [][][]Coord) error {
	var err error
	g.flatCoords, g.endss, err = deflate3(nil, nil, coords3, g.stride)
	return err
}

// verify checks g's internal consistency: stride vs layout, coordinate
// alignment, and that all nested end indexes are stride-aligned,
// non-decreasing, and terminate exactly at the end of the flat coordinates.
func (g *geom3) verify() error {
	if g.stride != g.layout.Stride() {
		return errStrideLayoutMismatch
	}
	if g.stride == 0 {
		if len(g.flatCoords) != 0 {
			return errNonEmptyFlatCoords
		}
		if len(g.endss) != 0 {
			return errNonEmptyEndss
		}
		return nil
	}
	if len(g.flatCoords)%g.stride != 0 {
		return errLengthStrideMismatch
	}
	offset := 0
	for _, ends := range g.endss {
		for _, end := range ends {
			if end%g.stride != 0 {
				return errMisalignedEnd
			}
			if end < offset {
				return errOutOfOrderEnd
			}
			offset = end
		}
	}
	if offset != len(g.flatCoords) {
		return errIncorrectEnd
	}
	return nil
}
// doubleArea1 computes twice the signed area of the linear ring stored in
// flatCoords[offset:end] via the shoelace formula.
func doubleArea1(flatCoords []float64, offset, end, stride int) float64 {
	total := 0.0
	for i := offset + stride; i < end; i += stride {
		x0, y0 := flatCoords[i-stride], flatCoords[i+1-stride]
		x1, y1 := flatCoords[i], flatCoords[i+1]
		total += (y1 - y0) * (x1 + x0)
	}
	return total
}

// doubleArea2 computes twice the signed area of a polygon: the first ring
// is the outer boundary, later rings are holes and are subtracted.
func doubleArea2(flatCoords []float64, offset int, ends []int, stride int) float64 {
	var total float64
	for i, end := range ends {
		ring := doubleArea1(flatCoords, offset, end, stride)
		if i == 0 {
			total = ring
		} else {
			total -= ring
		}
		offset = end
	}
	return total
}

// doubleArea3 computes twice the signed area of a multi-polygon by summing
// the areas of its polygons. Panics if any ends group is empty.
func doubleArea3(flatCoords []float64, offset int, endss [][]int, stride int) float64 {
	total := 0.0
	for _, ends := range endss {
		total += doubleArea2(flatCoords, offset, ends, stride)
		offset = ends[len(ends)-1]
	}
	return total
}
// deflate0 appends the single coordinate c to flatCoords, validating that
// its dimensionality matches stride.
func deflate0(flatCoords []float64, c Coord, stride int) ([]float64, error) {
	if len(c) != stride {
		return nil, ErrStrideMismatch{Got: len(c), Want: stride}
	}
	return append(flatCoords, c...), nil
}

// deflate1 packs a list of coordinates onto flatCoords.
func deflate1(flatCoords []float64, coords1 []Coord, stride int) ([]float64, error) {
	var err error
	for _, c := range coords1 {
		if flatCoords, err = deflate0(flatCoords, c, stride); err != nil {
			return nil, err
		}
	}
	return flatCoords, nil
}

// deflate2 packs a list of coordinate lists onto flatCoords, recording the
// flat end index of each list in ends.
func deflate2(
	flatCoords []float64, ends []int, coords2 [][]Coord, stride int,
) ([]float64, []int, error) {
	var err error
	for _, ring := range coords2 {
		if flatCoords, err = deflate1(flatCoords, ring, stride); err != nil {
			return nil, nil, err
		}
		ends = append(ends, len(flatCoords))
	}
	return flatCoords, ends, nil
}

// deflate3 packs a doubly-nested coordinate structure onto flatCoords,
// recording one ends slice per top-level element in endss.
func deflate3(
	flatCoords []float64, endss [][]int, coords3 [][][]Coord, stride int,
) ([]float64, [][]int, error) {
	for _, polygon := range coords3 {
		var (
			ends []int
			err  error
		)
		if flatCoords, ends, err = deflate2(flatCoords, ends, polygon, stride); err != nil {
			return nil, nil, err
		}
		endss = append(endss, ends)
	}
	return flatCoords, endss, nil
}
// inflate0 copies the single coordinate in flatCoords[offset:end] out into
// a fresh Coord; panics if the span is not exactly one stride wide.
func inflate0(flatCoords []float64, offset, end, stride int) Coord {
	if offset+stride != end {
		panic("geom: stride mismatch")
	}
	out := make(Coord, stride)
	copy(out, flatCoords[offset:end])
	return out
}

// inflate1 unpacks flatCoords[offset:end] into a list of coordinates.
func inflate1(flatCoords []float64, offset, end, stride int) []Coord {
	n := (end - offset) / stride
	out := make([]Coord, n)
	for i := 0; i < n; i++ {
		out[i] = inflate0(flatCoords, offset, offset+stride, stride)
		offset += stride
	}
	return out
}

// inflate2 unpacks one coordinate list per end index in ends.
func inflate2(flatCoords []float64, offset int, ends []int, stride int) [][]Coord {
	out := make([][]Coord, len(ends))
	for i, end := range ends {
		out[i] = inflate1(flatCoords, offset, end, stride)
		offset = end
	}
	return out
}

// inflate3 unpacks one nested coordinate structure per ends slice in endss.
func inflate3(flatCoords []float64, offset int, endss [][]int, stride int) [][][]Coord {
	out := make([][][]Coord, len(endss))
	for i, ends := range endss {
		out[i] = inflate2(flatCoords, offset, ends, stride)
		if n := len(ends); n > 0 {
			offset = ends[n-1]
		}
	}
	return out
}
// length1 returns the 2D polyline length of flatCoords[offset:end], summing
// the Euclidean distance between consecutive coordinates (extra dimensions
// beyond X and Y are ignored).
func length1(flatCoords []float64, offset, end, stride int) float64 {
	total := 0.0
	for i := offset + stride; i < end; i += stride {
		dx := flatCoords[i] - flatCoords[i-stride]
		dy := flatCoords[i+1] - flatCoords[i+1-stride]
		total += math.Sqrt(dx*dx + dy*dy)
	}
	return total
}

// length2 returns the total length of the sub-structures delimited by ends.
func length2(flatCoords []float64, offset int, ends []int, stride int) float64 {
	total := 0.0
	for _, end := range ends {
		total += length1(flatCoords, offset, end, stride)
		offset = end
	}
	return total
}

// length3 returns the total length across all nested structures in endss.
// Panics if any ends group is empty.
func length3(flatCoords []float64, offset int, endss [][]int, stride int) float64 {
	total := 0.0
	for _, ends := range endss {
		total += length2(flatCoords, offset, ends, stride)
		offset = ends[len(ends)-1]
	}
	return total
}
// reverse1 reverses, in place, the order of the coordinates stored in
// flatCoords[offset:end], keeping each stride-sized coordinate intact.
func reverse1(flatCoords []float64, offset, end, stride int) {
	lo, hi := offset, end-stride
	for lo < hi {
		for k := 0; k < stride; k++ {
			flatCoords[lo+k], flatCoords[hi+k] = flatCoords[hi+k], flatCoords[lo+k]
		}
		lo += stride
		hi -= stride
	}
}

// reverse2 reverses each sub-structure delimited by ends, in place.
func reverse2(flatCoords []float64, offset int, ends []int, stride int) {
	for _, end := range ends {
		reverse1(flatCoords, offset, end, stride)
		offset = end
	}
}

// reverse3 reverses each nested structure in endss, in place, skipping
// empty ends groups.
func reverse3(flatCoords []float64, offset int, endss [][]int, stride int) {
	for _, ends := range endss {
		if len(ends) == 0 {
			continue
		}
		reverse2(flatCoords, offset, ends, stride)
		offset = ends[len(ends)-1]
	}
}
package config
/**
* Configuration for SNMP mib resource.
 */
type Snmpmib struct {
	/**
	* Name of the administrator for this Citrix ADC. Along with the name, you can include information on how to contact this person, such as a phone number or an email address. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
	The following requirement applies only to the Citrix ADC CLI:
	If the information includes one or more spaces, enclose it in double or single quotation marks (for example, "my contact" or 'my contact').
	*/
	Contact string `json:"contact,omitempty"`
	/**
	* Name for this Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a name that helps identify the Citrix ADC appliance.
	The following requirement applies only to the Citrix ADC CLI:
	If the name includes one or more spaces, enclose it in double or single quotation marks (for example, "my name" or 'my name').
	*/
	Name string `json:"name,omitempty"`
	/**
	* Physical location of the Citrix ADC. For example, you can specify building name, lab number, and rack number. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
	The following requirement applies only to the Citrix ADC CLI:
	If the location includes one or more spaces, enclose it in double or single quotation marks (for example, "my location" or 'my location').
	*/
	Location string `json:"location,omitempty"`
	/**
	* Custom identification number for the Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a custom identification that helps identify the Citrix ADC appliance.
	The following requirement applies only to the Citrix ADC CLI:
	If the ID includes one or more spaces, enclose it in double or single quotation marks (for example, "my ID" or 'my ID').
	*/
	Customid string `json:"customid,omitempty"`
	/**
	* ID of the cluster node for which we are setting the mib. This is a mandatory argument to set snmp mib on CLIP.
	*/
	Ownernode int `json:"ownernode,omitempty"`

	//------- Read only Parameter ---------;
	// The fields below are populated by the appliance and never sent in requests.

	Sysdesc string `json:"sysdesc,omitempty"`
	Sysuptime string `json:"sysuptime,omitempty"`
	Sysservices string `json:"sysservices,omitempty"`
	Sysoid string `json:"sysoid,omitempty"`
}
// Package audio interacts with audio operation.
package audio
import (
"context"
"fmt"
"math"
"regexp"
"strconv"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/testexec"
"chromiumos/tast/testing"
)
// TestRawData is used to specify parameters of the audio test data, which should be raw, signed, and little-endian.
type TestRawData struct {
	// Path specifies the file path of audio data.
	Path string
	// BitsPerSample specifies bits per data sample.
	BitsPerSample int
	// Channels specifies the channel count of audio data.
	Channels int
	// Rate specifies the sampling rate.
	Rate int
	// Frequencies specifies the frequency of each channel, whose length should be equal to Channels.
	// This is only used in the sine tone generation of sox.
	Frequencies []int
	// Volume specifies the volume scale of sox, e.g. 0.5 to scale volume by half. -1.0 to invert.
	// This is only used in the sine tone generation of sox.
	Volume float32
	// Duration specifies the duration of audio data in seconds.
	Duration int
}
// ConvertRawToWav converts the audio raw file to wav file.
// The raw input is assumed to contain 16-bit signed samples ("-b 16",
// "-e signed") at the given sample rate and channel count.
func ConvertRawToWav(ctx context.Context, rawFileName, wavFileName string, rate, channels int) error {
	err := testexec.CommandContext(
		ctx, "sox", "-b", "16",
		"-r", strconv.Itoa(rate),
		"-c", strconv.Itoa(channels),
		"-e", "signed",
		"-t", "raw",
		rawFileName, wavFileName).Run(testexec.DumpLogOnError)
	if err != nil {
		return errors.Wrap(err, "sox failed")
	}
	return nil
}
// TrimFileFrom removes all samples before startTime from the file.
// The trimmed result is written to newFileName; the input file is left
// untouched.
func TrimFileFrom(ctx context.Context, oldFileName, newFileName string, startTime time.Duration) error {
	err := testexec.CommandContext(
		ctx, "sox", oldFileName, newFileName, "trim",
		strconv.FormatFloat(startTime.Seconds(), 'f', -1, 64)).Run(testexec.DumpLogOnError)
	if err != nil {
		return errors.Wrap(err, "sox failed")
	}
	return nil
}
// rmsPkDBRE extracts the left and right channel "RMS Pk dB" values from the
// output of `sox <file> -n stats`. Compiled once at package scope instead
// of on every call.
var rmsPkDBRE = regexp.MustCompile(`RMS Pk dB +\S+ +(\S+) +(\S+)`)

// CheckRecordingQuality checks the recording file to see whether internal mic works normally.
// A qualified file must meet these requirements:
// 1. The RMS must be smaller than the threshold. If not, it may be the static noise inside.
// 2. The recorded samples can not be all zeros. It is impossible for a normal internal mic.
func CheckRecordingQuality(ctx context.Context, fileName string) error {
	const threshold = -10.0 // dB
	out, err := testexec.CommandContext(ctx, "sox", fileName, "-n", "stats").CombinedOutput(testexec.DumpLogOnError)
	if err != nil {
		return errors.Wrap(err, "sox failed")
	}
	rms := rmsPkDBRE.FindStringSubmatch(string(out))
	if rms == nil {
		testing.ContextLog(ctx, "sox stats: ", string(out))
		return errors.New("could not find RMS info from the sox result")
	}
	rmsLeft, err := strconv.ParseFloat(rms[1], 32)
	if err != nil {
		return errors.Wrap(err, "atof failed")
	}
	rmsRight, err := strconv.ParseFloat(rms[2], 32)
	if err != nil {
		return errors.Wrap(err, "atof failed")
	}
	testing.ContextLogf(ctx, "Left channel RMS: %f dB", rmsLeft)
	testing.ContextLogf(ctx, "Right channel RMS: %f dB", rmsRight)
	if rmsLeft > threshold || rmsRight > threshold {
		return errors.Errorf("the RMS (%f, %f) is too large", rmsLeft, rmsRight)
	}
	// If all samples are zeros, the rms is -inf.
	if math.IsInf(rmsLeft, -1) || math.IsInf(rmsRight, -1) {
		return errors.New("the samples are all zeros")
	}
	return nil
}
// rmsAmplitudeRE extracts the "RMS amplitude" value from the output of
// `sox ... -n stat`. Compiled once at package scope instead of on every call.
var rmsAmplitudeRE = regexp.MustCompile(`RMS\s+amplitude:\s+(\S+)`)

// GetRmsAmplitude gets signal RMS of testData by sox.
func GetRmsAmplitude(ctx context.Context, testData TestRawData) (float64, error) {
	cmd := testexec.CommandContext(
		ctx, "sox",
		"-b", strconv.Itoa(testData.BitsPerSample),
		"-c", strconv.Itoa(testData.Channels),
		"-r", strconv.Itoa(testData.Rate),
		"-e", "signed",
		"-t", "raw",
		testData.Path, "-n", "stat")
	// sox's "stat" effect writes its report to stderr; stdout is unused.
	_, bstderr, err := cmd.SeparatedOutput()
	if err != nil {
		return 0.0, errors.Wrap(err, "sox failed")
	}
	stderr := string(bstderr)
	match := rmsAmplitudeRE.FindStringSubmatch(stderr)
	if match == nil {
		testing.ContextLog(ctx, "sox stat: ", stderr)
		return 0.0, errors.New("could not find RMS info from the sox result")
	}
	rms, err := strconv.ParseFloat(match[1], 64)
	if err != nil {
		return 0.0, errors.Wrap(err, "atof failed")
	}
	return rms, nil
}
// GenerateTestRawData generates sine raw data by sox with specified parameters in testData, and stores in testData.Path.
func GenerateTestRawData(ctx context.Context, testData TestRawData) error {
if len(testData.Frequencies) != testData.Channels {
return errors.Errorf("unexpected length of frequencies: got %d; want %d", len(testData.Frequencies), testData.Channels)
}
args := []string{
"-n",
"-b", strconv.Itoa(testData.BitsPerSample),
"-c", strconv.Itoa(testData.Channels),
"-r", strconv.Itoa(testData.Rate),
"-e", "signed",
"-t", "raw",
testData.Path,
"synth", strconv.Itoa(testData.Duration),
}
for _, f := range testData.Frequencies {
args = append(args, "sine", strconv.Itoa(f))
}
args = append(args, "vol", fmt.Sprintf("%f", testData.Volume))
if err := testexec.CommandContext(ctx, "sox", args...).Run(testexec.DumpLogOnError); err != nil {
return errors.Wrap(err, "sox failed")
}
return nil
} | src/chromiumos/tast/local/audio/util.go | 0.72662 | 0.442335 | util.go | starcoder |
package copycat
import (
"reflect"
)
// DeepCopy recursively copies data from src to dst.
func DeepCopy(dst interface{}, src interface{}) error {
args := deepCopyArgs{
d: reflect.ValueOf(dst),
s: reflect.ValueOf(src),
visited: &map[visitedAddr]reflect.Value{},
}
return deepCopy(&args)
}
// deepCopy performs one copy step from args.s into args.d, dispatching on the
// destination's kind and recursing through the type-specific handlers for
// composite values. It is the shared engine behind DeepCopy.
//
// NOTE(review): resolve(), canCopy() and recordVisited() are defined elsewhere
// in this package; presumably resolve normalizes the value pair (e.g. follows
// pointers) and canCopy rejects unusable values — confirm against their
// definitions.
func deepCopy(args *deepCopyArgs) error {
	args.resolve()
	d := args.d
	s := args.s
	// Silently skip pairs that cannot be copied.
	if !canCopy(d, s) {
		return nil
	}
	// Cycle/aliasing handling: if this addressable source value (keyed by
	// address and type) was copied before, reuse the previously produced copy
	// instead of recursing forever.
	if s.CanAddr() {
		addr := visitedAddr{
			a: s.UnsafeAddr(),
			t: s.Type(),
		}
		if value, ok := (*args.visited)[addr]; ok {
			d.Set(value)
			return nil
		}
		args.recordVisited(addr)
	}
	switch k := d.Kind(); k {
	case reflect.String:
		d.SetString(s.String())
	case reflect.Bool:
		d.SetBool(s.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		d.SetInt(s.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		d.SetUint(s.Uint())
	case reflect.Float32, reflect.Float64:
		d.SetFloat(s.Float())
	case reflect.Complex64, reflect.Complex128:
		d.SetComplex(s.Complex())
	case reflect.Struct:
		return structHandler(args)
	case reflect.Map:
		return mapHandler(args)
	case reflect.Array:
		return arrayHandler(args)
	case reflect.Slice:
		return sliceHandler(args)
	case reflect.Chan, reflect.Func, reflect.Uintptr, reflect.UnsafePointer, reflect.Interface:
		// These kinds are deliberately left untouched rather than copied.
		return nil
	}
	return nil
}
// structHandler deep-copies a struct field by field; for every field of the
// destination type, the source field of the same name is copied recursively.
func structHandler(args *deepCopyArgs) error {
	dst, src := args.d, args.s
	t := dst.Type()
	for i := 0; i < t.NumField(); i++ {
		child := args.next()
		child.d = dst.Field(i)
		child.s = src.FieldByName(t.Field(i).Name)
		if err := deepCopy(child); err != nil {
			return err
		}
	}
	return nil
}
// mapHandler replaces the destination with a freshly made map and deep-copies
// every entry of the source map into it. Keys are reused as-is (only the
// values are copied recursively).
func mapHandler(args *deepCopyArgs) error {
	dst, src := args.d, args.s
	mapType := dst.Type()
	dst.Set(reflect.MakeMap(mapType))
	iter := src.MapRange()
	for iter.Next() {
		child := args.next()
		// Copy into a fresh addressable value, then insert it under the key.
		child.d = reflect.New(mapType.Elem()).Elem()
		child.s = iter.Value()
		if err := deepCopy(child); err != nil {
			return err
		}
		dst.SetMapIndex(iter.Key(), child.d)
	}
	return nil
}
// sliceHandler allocates a new slice on the destination with the same length
// and capacity as the source, then deep-copies every element. Byte slices take
// a fast path that block-copies the data instead of recursing per element.
func sliceHandler(args *deepCopyArgs) error {
	dst, src := args.d, args.s
	n := src.Len() // renamed from "len", which shadowed the builtin
	dst.Set(reflect.MakeSlice(dst.Type(), n, src.Cap()))
	if dst.Type().Elem().Kind() == reflect.Uint8 && src.Type().Elem().Kind() == reflect.Uint8 {
		// BUG FIX: the previous code called dst.SetBytes(src.Bytes()), which
		// replaced the freshly allocated slice header with the source's — the
		// destination then shared the source's backing array and was not a
		// deep copy. reflect.Copy copies the bytes into the new backing array.
		reflect.Copy(dst, src)
		return nil
	}
	for i := 0; i < n; i++ {
		child := args.next()
		child.d = dst.Index(i)
		child.s = src.Index(i)
		if err := deepCopy(child); err != nil {
			return err
		}
	}
	return nil
}
func arrayHandler(args *deepCopyArgs) error {
d := args.d
s := args.s
len := d.Len()
if len > s.Len() {
len = s.Len()
}
for i := 0; i < len; i++ {
nextArgs := args.next()
nextArgs.d = d.Index(i)
nextArgs.s = s.Index(i)
if err := deepCopy(nextArgs); err != nil {
return err
}
}
return nil
} | deepcopy.go | 0.525856 | 0.404507 | deepcopy.go | starcoder |
package points
import (
"fmt"
"math"
"github.com/mukhinaks/fops/generic"
)
// Location is a 2D point. Based on the Euclidean math applied to it below,
// X and Y are treated as planar coordinates (despite the Lat/Lng naming used
// in the helper functions).
type Location struct {
	X float64
	Y float64
}
// EuclidianDistance returns the Euclidean distance between two points of the
// same concrete type. It supports Location, BaseLocation, and
// CityBrandLocation; any other type is reported to stdout and yields -1.
func EuclidianDistance(location1 generic.Point, location2 generic.Point) float64 {
	switch loc := location1.(type) {
	case Location:
		other := location2.(Location)
		return distanceToPoint(loc.X, loc.Y, other.X, other.Y)
	case BaseLocation:
		other := location2.(BaseLocation)
		return distanceToPoint(loc.X, loc.Y, other.X, other.Y)
	case CityBrandLocation:
		other := location2.(CityBrandLocation)
		return distanceToPoint(loc.X, loc.Y, other.X, other.Y)
	default:
		fmt.Println("Unexpected location type", loc)
		return -1
	}
}
// EuclidianDistanceToLineSegment returns the distance from newLocation to the
// line segment spanned by startLocation and endLocation (all three must share
// a concrete type). It supports Location, BaseLocation, and CityBrandLocation;
// any other type is reported to stdout and yields -1.
func EuclidianDistanceToLineSegment(startLocation generic.Point, endLocation generic.Point, newLocation generic.Point) float64 {
	switch start := startLocation.(type) {
	case Location:
		end := endLocation.(Location)
		point := newLocation.(Location)
		return distanceToLine(start.X, start.Y, end.X, end.Y, point.X, point.Y)
	case BaseLocation:
		end := endLocation.(BaseLocation)
		point := newLocation.(BaseLocation)
		return distanceToLine(start.X, start.Y, end.X, end.Y, point.X, point.Y)
	case CityBrandLocation:
		end := endLocation.(CityBrandLocation)
		point := newLocation.(CityBrandLocation)
		return distanceToLine(start.X, start.Y, end.X, end.Y, point.X, point.Y)
	default:
		fmt.Println("Unexpected location type", start)
		return -1
	}
}
// WalkingTime returns an estimated walking time between two points, derived
// from the Euclidean distance divided by 66.7 (presumably distance units per
// time unit — confirm against callers). It supports the same point types as
// EuclidianDistance; any other type is reported to stdout and yields -1.
func WalkingTime(location1 generic.Point, location2 generic.Point) int {
	// Delegate to EuclidianDistance instead of repeating its type switch for
	// every supported location type.
	d := EuclidianDistance(location1, location2)
	if d < 0 {
		// EuclidianDistance signals an unsupported point type with -1 (and has
		// already logged it); propagate the sentinel unchanged rather than
		// scaling it.
		return -1
	}
	return int(d / 66.7)
}
// distanceToPoint returns the Euclidean distance between (loc1Lat, loc1Lng)
// and (loc2Lat, loc2Lng), treating the coordinates as planar.
func distanceToPoint(loc1Lat float64, loc1Lng float64, loc2Lat float64, loc2Lng float64) float64 {
	// Plain multiplication instead of math.Pow(x, 2): same result without the
	// general-purpose power function's overhead.
	dLat := loc1Lat - loc2Lat
	dLng := loc1Lng - loc2Lng
	return math.Sqrt(dLat*dLat + dLng*dLng)
}
// convertToRadians converts an angle in degrees to radians.
func convertToRadians(angle float64) float64 {
	// BUG FIX: the previous formula (angle * 180 / math.Pi) converted radians
	// to degrees — the inverse of what the name promises and of what
	// HaversineDistance's trigonometry requires. Degrees-to-radians is
	// angle * pi / 180.
	return angle * math.Pi / 180
}
// HaversineDistance returns the great-circle distance in meters between two
// latitude/longitude coordinates using the haversine formula.
// NOTE(review): the inputs are converted via convertToRadians, so they are
// expected in degrees — confirm against callers.
func HaversineDistance(loc1Lat float64, loc1Lng float64, loc2Lat float64, loc2Lng float64) float64 {
	const earthRadius = 6371 * 1000.0 // Earth radius in meters
	phi1 := convertToRadians(loc1Lat)
	phi2 := convertToRadians(loc2Lat)
	deltaPhi := convertToRadians(loc2Lat - loc1Lat)
	deltaLambda := convertToRadians(loc2Lng - loc1Lng)
	// Haversine term and the corresponding central angle.
	a := math.Pow(math.Sin(deltaPhi/2), 2) + math.Cos(phi1)*math.Cos(phi2)*math.Pow(math.Sin(deltaLambda/2), 2)
	c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
	return earthRadius * c
}
// scalarDot returns the dot product of the 2D vectors (loc1Lat, loc1Lng) and
// (loc2Lat, loc2Lng).
func scalarDot(loc1Lat float64, loc1Lng float64, loc2Lat float64, loc2Lng float64) float64 {
	return loc1Lat*loc2Lat + loc1Lng*loc2Lng
}
func distanceToLine(loc1Lat float64, loc1Lng float64, loc2Lat float64, loc2Lng float64, newLat float64, newLng float64) float64 {
vector1Lat := loc1Lat - loc2Lat
vector1Lng := loc1Lng - loc2Lng
vector2Lat := loc1Lat - newLat
vector2Lng := loc1Lng - newLng
scalarProduct := scalarDot(vector1Lat, vector1Lng, vector2Lat, vector2Lng)
if scalarProduct <= 0 {
return distanceToPoint(newLat, newLng, loc1Lat, loc1Lng)
}
length := scalarDot(vector1Lat, vector1Lng, vector1Lat, vector1Lng)
if length <= scalarProduct {
return distanceToPoint(newLat, newLng, loc2Lat, loc2Lng)
}
b := scalarProduct / length
locLat := loc1Lat + b*vector1Lat
locLng := loc1Lng + b*vector1Lng
return distanceToPoint(newLat, newLng, locLat, locLng)
} | points/distanceFunctions.go | 0.783285 | 0.485905 | distanceFunctions.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.