code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package sqlcon
// HelpMessage returns a string explaining how to set up SQL using environment variables.
func HelpMessage() string {
return `- DATABASE_URL: A URL to a persistent backend. Various backends are supported:
- Changes are lost on process death (ephemeral storage):
- Memory: If DATABASE_URL is "memory", data will be written to memory and is lost when you restart this instance.
Example: DATABASE_URL=memory
- Changes are kept after process death (persistent storage):
- SQL Databases: Officially, PostgreSQL and MySQL are supported. This project works best with PostgreSQL.
- PostgreSQL: If DATABASE_URL is a DSN starting with postgres://, PostgreSQL will be used as storage backend.
Example: DATABASE_URL=postgres://user:password@host:123/database
Additionally, the following query/DSN parameters are supported:
* sslmode (string): Whether or not to use SSL (default is require)
* disable - No SSL
* require - Always SSL (skip verification)
* verify-ca - Always SSL (verify that the certificate presented by the
server was signed by a trusted CA)
* verify-full - Always SSL (verify that the certification presented by
the server was signed by a trusted CA and the server host name
matches the one in the certificate)
* fallback_application_name (string): An application_name to fall back to if one isn't provided.
* connect_timeout (number): Maximum wait for connection, in seconds. Zero or
not specified means wait indefinitely.
* sslcert (string): Cert file location. The file must contain PEM encoded data.
* sslkey (string): Key file location. The file must contain PEM encoded data.
* sslrootcert (string): The location of the root certificate file. The file
must contain PEM encoded data.
Example: DATABASE_URL=postgres://user:password@host:123/database?sslmode=verify-full
- MySQL: If DATABASE_URL is a DSN starting with mysql:// MySQL will be used as storage backend.
Be aware that the ?parseTime=true parameter is mandatory, or timestamps will not work.
Example: DATABASE_URL=mysql://user:password@tcp(host:123)/database?parseTime=true
Additionally, the following query/DSN parameters are supported:
* collation (string): Sets the collation used for client-server interaction on connection. In contrast to charset,
collation does not issue additional queries. If the specified collation is unavailable on the target server,
the connection will fail.
* loc (string): Sets the location for time.Time values. Note that this sets the location for time.Time values
but does not change MySQL's time_zone setting. For that set the time_zone DSN parameter. Please keep in mind,
that param values must be url.QueryEscape'ed. Alternatively you can manually replace the / with %2F.
For example US/Pacific would be loc=US%2FPacific.
* maxAllowedPacket (number): Max packet size allowed in bytes. The default value is 4 MiB and should be
adjusted to match the server settings. maxAllowedPacket=0 can be used to automatically fetch the max_allowed_packet variable from server on every connection.
* readTimeout (duration): I/O read timeout. The value must be a decimal number with a unit suffix
("ms", "s", "m", "h"), such as "30s", "0.5m" or "1m30s".
* timeout (duration): Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix
("ms", "s", "m", "h"), such as "30s", "0.5m" or "1m30s".
* tls (bool / string): tls=true enables TLS / SSL encrypted connection to the server. Use skip-verify if
you want to use a self-signed or invalid certificate (server side).
* writeTimeout (duration): I/O write timeout. The value must be a decimal number with a unit suffix
("ms", "s", "m", "h"), such as "30s", "0.5m" or "1m30s".
Example: DATABASE_URL=mysql://user:password@tcp(host:123)/database?parseTime=true&writeTimeout=123s
The following settings can be configured using URL query parameters (postgres://.../database?max_conns=1):
* max_conns (number): Sets the maximum number of open connections to the database. Defaults to the number of CPU cores times 2.
* max_idle_conns (number): Sets the maximum number of connections in the idle. Defaults to the number of CPU cores.
* max_conn_lifetime (duratino): Sets the maximum amount of time ("ms", "s", "m", "h") a connection may be reused.
Defaults to 0s (disabled).`
} | sqlcon/message.go | 0.761804 | 0.42173 | message.go | starcoder |
package schema
// SettingsSchemaJSON is the content of the file "settings.schema.json":
// a JSON Schema (draft-07) document describing the configuration settings
// available to users and organizations.
//
// NOTE(review): this constant appears to be generated string data
// ("_stringdata") — edit the source settings.schema.json rather than this
// literal; confirm against the project's generation step.
const SettingsSchemaJSON = `{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "settings.schema.json#",
"title": "Settings",
"description": "Configuration settings for users and organizations on Sourcegraph.",
"type": "object",
"properties": {
"search.savedQueries": {
"description": "Saved search queries",
"type": "array",
"items": {
"type": "object",
"properties": {
"key": {
"type": "string",
"description": "Unique key for this query in this file"
},
"description": {
"type": "string",
"description": "Description of this saved query"
},
"query": {
"type": "string",
"description": "Query string"
},
"showOnHomepage": {
"type": "boolean",
"description": "Show this saved query on the homepage"
},
"notify": {
"type": "boolean",
"description": "Notify the owner of this configuration file when new results are available"
},
"notifySlack": {
"type": "boolean",
"description": "Notify Slack via the organization's Slack webhook URL when new results are available"
}
},
"additionalProperties": false,
"required": ["key", "description", "query"]
}
},
"search.scopes": {
"description": "Predefined search scopes",
"type": "array",
"items": {
"$ref": "#/definitions/SearchScope"
}
},
"search.repositoryGroups": {
"description": "Named groups of repositories that can be referenced in a search query using the repogroup: operator.",
"type": "object",
"additionalProperties": {
"type": "array",
"items": { "type": "string" }
}
},
"search.contextLines": {
"description": "The default number of lines to show as context below and above search results. Default is 1.",
"type": "integer",
"minimum": 0,
"default": 1
},
"notifications.slack": {
"$ref": "#/definitions/SlackNotificationsConfig"
},
"motd": {
"description": "An array (often with just one element) of messages to display at the top of all pages, including for unauthenticated users. Users may dismiss a message (and any message with the same string value will remain dismissed for the user).\n\nMarkdown formatting is supported.\n\nUsually this setting is used in global and organization settings. If set in user settings, the message will only be displayed to that user. (This is useful for testing the correctness of the message's Markdown formatting.)\n\nMOTD stands for \"message of the day\" (which is the conventional Unix name for this type of message).",
"type": "array",
"items": { "type": "string" }
},
"extensions": {
"description": "The Sourcegraph extensions to use. Enable an extension by adding a property ` + "`" + `\"my/extension\": true` + "`" + ` (where ` + "`" + `my/extension` + "`" + ` is the extension ID). Override a previously enabled extension and disable it by setting its value to ` + "`" + `false` + "`" + `.",
"type": "object",
"propertyNames": {
"type": "string",
"description": "A valid extension ID.",
"pattern": "^([^/]+/)?[^/]+/[^/]+$"
},
"additionalProperties": {
"type": "boolean",
"description": "` + "`" + `true` + "`" + ` to enable the extension, ` + "`" + `false` + "`" + ` to disable the extension (if it was previously enabled)"
}
}
},
"definitions": {
"SearchScope": {
"type": "object",
"additionalProperties": false,
"required": ["name", "value"],
"properties": {
"id": {
"type": "string",
"description": "A unique identifier for the search scope.\n\nIf set, a scoped search page is available at https://[sourcegraph-hostname]/search/scope/ID, where ID is this value."
},
"name": {
"type": "string",
"description": "The human-readable name for this search scope"
},
"value": {
"type": "string",
"description": "The query string of this search scope"
},
"description": {
"type": "string",
"description": "A description for this search scope"
}
}
},
"SlackNotificationsConfig": {
"type": "object",
"description": "Configuration for sending notifications to Slack.",
"additionalProperties": false,
"required": ["webhookURL"],
"properties": {
"webhookURL": {
"type": "string",
"description": "The Slack webhook URL used to post notification messages to a Slack channel. To obtain this URL, go to: https://YOUR-WORKSPACE-NAME.slack.com/apps/new/A0F7XDUAZ-incoming-webhooks",
"format": "uri"
}
}
}
}
}
`
// Package ring gives a simple implementation of a ring/circular buffer.
package ring
import (
"fmt"
"io"
"runtime"
"sync"
)
// Buffer is a simple implementation of a ring/circular buffer.
// See https://en.wikipedia.org/wiki/Circular_buffer for more details.
// The embedded Mutex guards buf, read and write.
type Buffer struct {
	sync.Mutex
	buf []byte
	// Picture [read, write] as a sliding window. These values grow without
	// bounds, read not being allowed to exceed write, but are handled
	// modularly during |buf| i/o considerations.
	read  int
	write int
}
// NewBuffer returns a ring buffer backed by `size` bytes of storage.
// It panics when size is not positive.
func NewBuffer(size int) *Buffer {
	if size > 0 {
		return &Buffer{buf: make([]byte, size)}
	}
	panic(fmt.Sprintf("size was %d; must be positive", size))
}
// Read copies unread bytes into p, advancing the read cursor.
// It returns io.EOF once the reader has caught up with the writer
// (no unread bytes remain). At most len(p) bytes are read.
func (b *Buffer) Read(p []byte) (int, error) {
	b.Lock()
	defer b.Unlock()
	// Never read past what has been written.
	maxBytes := min(len(p), b.write-b.read)
	b.copyToBuffer(p[:maxBytes], b.read)
	b.read += maxBytes
	if maxBytes == 0 {
		return 0, io.EOF
	}
	// Dropped the redundant `else` after the return above (Go idiom).
	return maxBytes, nil
}
// Write writes to the buffer, circularly overwriting the oldest data if p
// exceeds the size of the buffer. It never blocks and always reports len(p)
// bytes written with a nil error.
func (b *Buffer) Write(p []byte) (int, error) {
	total := len(p)
	for {
		if len(p) == 0 {
			break
		}
		// Before we overwrite data, preempt the current goroutine to allow all
		// other current ones - which might be waiting to read - execute first so
		// as to minimize data loss.
		runtime.Gosched()
		b.Lock()
		// We don't want b.write to get more than len(b.buf) ahead of b.read; we
		// write as much as possible taking that into account.
		maxBytes := min(len(p), len(b.buf)-(b.write-b.read))
		// If b.write and b.read are maximally far apart (buffer full), we can
		// overwrite len(p) or len(b.buf) many bytes, discarding the oldest data
		// by advancing the read cursor past it.
		if maxBytes == 0 {
			maxBytes = min(len(p), len(b.buf))
			b.read += maxBytes
		}
		b.copyFromBuffer(p[:maxBytes], b.write)
		b.write += maxBytes
		p = p[maxBytes:]
		b.Unlock()
	}
	return total, nil
}
// Bytes returns a copy of the unread bytes currently in the buffer.
//
// Fixed: the receiver must be a pointer. The previous value receiver copied
// the embedded sync.Mutex (flagged by `go vet` copylocks), so Lock/Unlock
// acted on a throwaway copy and provided no synchronization.
func (b *Buffer) Bytes() []byte {
	b.Lock()
	defer b.Unlock()
	p := make([]byte, b.write-b.read)
	b.copyToBuffer(p, b.read)
	return p
}
// copyToBuffer fills p with len(p) bytes read from the ring starting at
// absolute offset start (reduced modulo the buffer size), wrapping around
// the end of the backing array when necessary. len(p) must not exceed the
// buffer size.
func (b *Buffer) copyToBuffer(p []byte, start int) {
	size := len(b.buf)
	want := len(p)
	if want > size {
		panic("copyToBuffer: expects len(p) <= size of Buffer")
	}
	pos := start % size
	head := size - pos // bytes available before wrapping
	if want <= head {
		copy(p, b.buf[pos:pos+want])
		return
	}
	copy(p[:head], b.buf[pos:])
	copy(p[head:], b.buf[:want-head])
}
// copyFromBuffer writes the bytes of p into the ring starting at absolute
// offset start (reduced modulo the buffer size), wrapping around the end
// of the backing array when necessary. len(p) must not exceed the buffer
// size.
func (b *Buffer) copyFromBuffer(p []byte, start int) {
	size := len(b.buf)
	want := len(p)
	if want > size {
		panic("copyFromBuffer: expects len(p) <= size of Buffer")
	}
	pos := start % size
	head := size - pos // room available before wrapping
	if want <= head {
		copy(b.buf[pos:pos+want], p)
		return
	}
	copy(b.buf[pos:], p[:head])
	copy(b.buf[:want-head], p[head:])
}
// min returns the smaller of n and m.
func min(n, m int) int {
	if n <= m {
		return n // removed a redundant int(n) conversion; n is already int
	}
	return m
}
package main
//Using math and sort
import (
"fmt"
"math"
"sort"
)
// Operation is the binary operation folded over the data by Accumulate.
type Operation func(float64, float64) float64

// Accumulate left-folds f over data, starting from initValue, and returns
// the final accumulator.
func Accumulate(data []float64, initValue float64, f Operation) float64 {
	acc := initValue
	for _, x := range data {
		acc = f(acc, x)
	}
	return acc
}
// ArithmeticAverage returns the arithmetic mean of data, computed as the
// Accumulate-sum divided by the element count.
func ArithmeticAverage(data []float64) float64 {
	sum := Accumulate(data, 0, func(acc, x float64) float64 { return acc + x })
	return sum / float64(len(data))
}
// GeometricMean returns the geometric mean of data: the n-th root of the
// product of the elements, computed via Accumulate.
func GeometricMean(data []float64) float64 {
	product := Accumulate(data, 1, func(acc, x float64) float64 { return acc * x })
	return math.Pow(product, 1.0/float64(len(data)))
}
// HarmonicMean returns the harmonic mean of data: n divided by the sum of
// reciprocals, computed via Accumulate.
func HarmonicMean(data []float64) float64 {
	recipSum := Accumulate(data, 0, func(acc, x float64) float64 { return acc + 1.0/x })
	return float64(len(data)) / recipSum
}
// Reduce sums (x - mean)^exponent over every x in data via Accumulate;
// it is the shared kernel behind Variance, Skewness and Kurtosis.
func Reduce(data []float64, mean float64, exponent float64) float64 {
	addMoment := func(acc, x float64) float64 {
		return acc + math.Pow(x-mean, exponent)
	}
	return Accumulate(data, 0, addMoment)
}
// Variance returns the unbiased (n-1 denominator) sample variance of data
// around the supplied mean.
func Variance(data []float64, mean float64) float64 {
	n := float64(len(data))
	return Reduce(data, mean, 2) / (n - 1)
}
// Skewness returns the sample skewness of data:
// n * sum((x-mean)^3) / ((n-1)(n-2) * variance^1.5).
// The supplied mean and variance are trusted, not recomputed.
func Skewness(data []float64, mean float64, variance float64) float64 {
	n := float64(len(data))
	return n * Reduce(data, mean, 3) / math.Pow(variance, 1.5) / (n - 1) / (n - 2)
}
// Kurtosis returns the sample excess kurtosis of data:
// n(n+1) * sum((x-mean)^4) / ((n-1)(n-2)(n-3) * variance^2)
//   - 3(n-1)^2 / ((n-2)(n-3)).
// The supplied mean and variance are trusted, not recomputed.
func Kurtosis(data []float64, mean float64, variance float64) float64 {
	n := float64(len(data))
	nm1 := n - 1
	nm2nm3 := (n - 2) * (n - 3)
	return n*(n+1)*Reduce(data, mean, 4)/nm1/nm2nm3/math.Pow(variance, 2) - 3.0*nm1*nm1/nm2nm3
}
// Median returns the median of data; for an even count it averages the two
// middle elements. It panics on empty input (index out of range).
//
// Fixed: the input is now copied before sorting. The previous version wrote
// `sorted := data`, which merely aliases the backing array, so
// sort.Float64s silently reordered the caller's slice.
func Median(data []float64) float64 {
	sorted := append([]float64(nil), data...)
	sort.Float64s(sorted)
	size := len(sorted)
	if size%2 == 0 {
		return (sorted[size/2-1] + sorted[size/2]) / 2.0
	}
	return sorted[size/2]
}
// Code to demonstrate
func main() {
epilson := 1.0e-05
vec := []float64{1.2, 2.3, 3.4, 4.5, 5.6, 6.7, 7.8, 8.9, 9.0}
arithmeticMean := ArithmeticAverage(vec)
fmt.Println(math.Abs(arithmeticMean-5.48889) < epilson)
fmt.Println(math.Abs(GeometricMean(vec)-4.636355) < epilson)
fmt.Println(math.Abs(HarmonicMean(vec)-3.652661) < epilson)
fmt.Println(math.Abs(Median(vec)-5.599999) < epilson)
variance := Variance(vec, arithmeticMean)
fmt.Println(math.Abs(variance-8.086111) < epilson)
fmt.Println(math.Abs(Skewness(vec, arithmeticMean, variance)-(-0.169877)) < epilson)
fmt.Println(math.Abs(Kurtosis(vec, arithmeticMean, variance)-(-1.37685)) < epilson)
} | math/moments/golang/moments.go | 0.818918 | 0.53048 | moments.go | starcoder |
package series
import (
"fmt"
"math"
"strconv"
)
// floatElement holds a single float64 value of a Series, with an explicit
// flag marking the value as missing (NA).
type floatElement struct {
	e   float64
	nan bool // true when the element is missing, regardless of e's value
}
// Set assigns value to the element, coercing from string, int, float64,
// bool, or another Element. The string "NaN", unparseable strings, and
// unsupported types mark the element as missing instead.
//
// Rewritten as an idiomatic bound type switch (switch v := value.(type)),
// removing the repeated type assertions, the redundant float64(float64)
// conversion, and the dead trailing bare return.
func (e *floatElement) Set(value interface{}) {
	e.nan = false
	switch v := value.(type) {
	case string:
		if v == "NaN" {
			e.nan = true
			return
		}
		f, err := strconv.ParseFloat(v, 64)
		if err != nil {
			e.nan = true
			return
		}
		e.e = f
	case int:
		e.e = float64(v)
	case float64:
		e.e = v
	case bool:
		if v {
			e.e = 1
		} else {
			e.e = 0
		}
	case Element:
		e.e = v.Float()
	default:
		e.nan = true
	}
}
// Copy returns a new Element carrying the same value; a missing element
// copies as missing with a zeroed payload.
func (e floatElement) Copy() Element {
	c := &floatElement{}
	if e.IsNA() {
		c.nan = true
	} else {
		c.e = e.e
	}
	return c
}
// IsNA reports whether the element is missing: either the explicit NA flag
// is set or the stored value itself is NaN.
// Collapsed the `if cond { return true }; return false` pattern into a
// single boolean return (Go idiom).
func (e floatElement) IsNA() bool {
	return e.nan || math.IsNaN(e.e)
}
// Type returns the Series type of this element, which is always Float.
func (e floatElement) Type() Type {
	return Float
}
// Val returns the element's value boxed as an ElementValue, or nil when
// the element is missing. Removed a redundant float64(e.e) conversion
// (e.e is already a float64).
func (e floatElement) Val() ElementValue {
	if e.IsNA() {
		return nil
	}
	return e.e
}
// String renders the element with the %#g verb, or "NaN" when missing.
func (e floatElement) String() string {
	if !e.IsNA() {
		return fmt.Sprintf("%#g", e.e)
	}
	return "NaN"
}
// Int converts the element to int by truncation. It fails for missing
// values and for infinities, which have no integer representation.
//
// Removed an unreachable branch: after IsNA() returns false, e.e cannot be
// NaN (IsNA covers math.IsNaN(e.e)), so the second NaN check was dead code.
// The two IsInf calls are folded into math.IsInf(f, 0).
func (e floatElement) Int() (int, error) {
	if e.IsNA() {
		return 0, fmt.Errorf("can't convert NaN to int")
	}
	f := e.e
	if math.IsInf(f, 0) {
		return 0, fmt.Errorf("can't convert Inf to int")
	}
	return int(f), nil
}
// Float returns the element's value, or NaN when the element is missing.
// Removed a redundant float64(e.e) conversion (e.e is already a float64).
func (e floatElement) Float() float64 {
	if e.IsNA() {
		return math.NaN()
	}
	return e.e
}
// Bool converts the element to bool: 1 maps to true and 0 to false; any
// other value, including a missing element, is an error.
func (e floatElement) Bool() (bool, error) {
	if e.IsNA() {
		return false, fmt.Errorf("can't convert NaN to bool")
	}
	if e.e == 1 {
		return true, nil
	}
	if e.e == 0 {
		return false, nil
	}
	return false, fmt.Errorf("can't convert Float \"%v\" to bool", e.e)
}
func (e floatElement) Eq(elem Element) bool {
f := elem.Float()
if e.IsNA() || math.IsNaN(f) {
return false
}
return e.e == f
}
func (e floatElement) Neq(elem Element) bool {
f := elem.Float()
if e.IsNA() || math.IsNaN(f) {
return false
}
return e.e != f
}
func (e floatElement) Less(elem Element) bool {
f := elem.Float()
if e.IsNA() || math.IsNaN(f) {
return false
}
return e.e < f
}
func (e floatElement) LessEq(elem Element) bool {
f := elem.Float()
if e.IsNA() || math.IsNaN(f) {
return false
}
return e.e <= f
}
func (e floatElement) Greater(elem Element) bool {
f := elem.Float()
if e.IsNA() || math.IsNaN(f) {
return false
}
return e.e > f
}
func (e floatElement) GreaterEq(elem Element) bool {
f := elem.Float()
if e.IsNA() || math.IsNaN(f) {
return false
}
return e.e >= f
} | series/type-float.go | 0.69451 | 0.533823 | type-float.go | starcoder |
package sqlchain
import (
"errors"
"github.com/SQLess/SQLess/crypto/hash"
"github.com/SQLess/SQLess/proto"
)
// Answer is responded by node to confirm other nodes that the node stores data correctly.
type Answer struct {
	// PreviousBlockID is the block id that the question belongs to.
	PreviousBlockID BlockID
	// NodeID is the node id that provides this answer.
	NodeID proto.NodeID
	// Answer is the node's unique answer for the question,
	// computed as hash(record || nodeID) in GenerateAnswer.
	Answer hash.Hash
}
// NewAnswer generates an answer for storage proof, binding the answer hash
// to the previous block and the answering node.
func NewAnswer(previousBlockID BlockID, nodeID proto.NodeID, answer hash.Hash) *Answer {
	a := &Answer{}
	a.PreviousBlockID = previousBlockID
	a.NodeID = nodeID
	a.Answer = answer
	return a
}
// getNextPuzzle generates a new puzzle asking other nodes to fetch a specific
// record in the database. The index of the next SQL record (puzzle) is the
// sum of the FNV hashes of every answer plus the hash of the previous block
// id, reduced modulo the record count.
func getNextPuzzle(answers []Answer, previousBlock StorageProofBlock) (int32, error) {
	// NOTE(review): the record count is hard-coded to 10 — presumably a
	// placeholder until the real table size is wired in; confirm before use.
	var totalRecordsInSQLChain int32 = 10
	var sum int32
	// Every node must have answered before a new puzzle can be derived.
	if !CheckValid(answers) {
		return -1, errors.New("some nodes have not submitted its answer")
	}
	for _, answer := range answers {
		// check if answer is valid (must be a full-length hash)
		if len(answer.Answer) != hash.HashSize {
			return -1, errors.New("invalid answer format")
		}
		sum += int32(hash.FNVHash32uint(answer.Answer[:]))
	}
	// check if block is valid
	if len(previousBlock.ID) <= 0 {
		return -1, errors.New("invalid block format")
	}
	sum += int32(hash.FNVHash32uint([]byte(previousBlock.ID)))
	// NOTE(review): sum can overflow negative, and Go's % keeps the sign, so
	// nextPuzzle may be negative — verify that callers tolerate this.
	nextPuzzle := sum % totalRecordsInSQLChain
	return nextPuzzle, nil
}
// getNextVerifier returns the index of the next verifier among
// currentBlock.Nodes, determined by the FNV hash of the previous block id
// reduced modulo the node count.
func getNextVerifier(previousBlock, currentBlock StorageProofBlock) (int32, error) {
	// check if blocks are valid: a previous block id and at least one node
	// in the current block are required.
	if len(previousBlock.ID) <= 0 {
		return -1, errors.New("invalid previous block")
	}
	if len(currentBlock.Nodes) <= 0 {
		return -1, errors.New("invalid current block")
	}
	verifier := int32(hash.FNVHash32uint([]byte(previousBlock.ID))) % int32(len(currentBlock.Nodes))
	return verifier, nil
}
// selectRecord returns the nth record in the table from the database.
// NOTE(review): this is a stub — it ignores n and always returns a fixed
// string; replace with a real database lookup before production use.
func selectRecord(n int32) string {
	return "hello world"
}
// CheckValid returns whether answers is valid.
// The intended checks, per the original design, are:
//  1. len(answers) == len(nodes) - 1
//  2. answers[i].nodeID's answer is the same as the hash of verifier.
//
// NOTE(review): the implementation is a stub that only checks the slice is
// non-empty; the checks documented above are not actually performed yet.
func CheckValid(answers []Answer) bool {
	return len(answers) > 0
}
// GenerateAnswer will select specified record for proving.
// In order to generate a unique answer which is different with other nodes' answer,
// we hash(record + nodeID) as the answer.
func GenerateAnswer(answers []Answer, previousBlock StorageProofBlock, node proto.Node) (*Answer, error) {
sqlIndex, err := getNextPuzzle(answers, previousBlock)
if err != nil {
return nil, err
}
record := []byte(selectRecord(sqlIndex))
// check if node is valid
if len(node.ID) <= 0 {
return nil, errors.New("invalid node format")
}
answer := append(record, []byte(node.ID)...)
answerHash := hash.HashH(answer)
return NewAnswer(previousBlock.ID, node.ID, answerHash), nil
} | sqlchain/storageproof.go | 0.652795 | 0.42662 | storageproof.go | starcoder |
package input
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/util/config"
"gopkg.in/yaml.v3"
)
//------------------------------------------------------------------------------
// sanitiseWithBatch sanitises a component config by round-tripping it
// through YAML into a generic map and attaching the sanitised batch policy
// under the "batching" key.
func sanitiseWithBatch(
	componentConfig interface{},
	batchConfig batch.PolicyConfig,
) (map[string]interface{}, error) {
	batchSanit, err := batch.SanitisePolicyConfig(batchConfig)
	if err != nil {
		return nil, err
	}
	rawBytes, err := yaml.Marshal(componentConfig)
	if err != nil {
		return nil, err
	}
	sanitised := map[string]interface{}{}
	if err := yaml.Unmarshal(rawBytes, &sanitised); err != nil {
		return nil, err
	}
	sanitised["batching"] = batchSanit
	return sanitised, nil
}
//------------------------------------------------------------------------------
// header is the fixed Markdown preamble prepended to the document built by
// Descriptions, including an example input config in fenced YAML.
var header = "This document was generated with `benthos --list-inputs`" + `
An input is a source of data piped through an array of optional
[processors](/docs/components/processors/about). Only one input is configured at the root of a
Benthos config. However, the root input can be a [broker](/docs/components/inputs/broker) which
combines multiple inputs.
An input config section looks like this:
` + "```yaml" + `
input:
redis_streams:
url: tcp://localhost:6379
streams:
- benthos_stream
body_key: body
consumer_group: benthos_group
# Optional list of processing steps
processors:
- jmespath:
query: '{ message: @, meta: { link_count: length(links) } }'
` + "```" + ``
// Descriptions returns a formatted string of descriptions for each type.
// The output is a Markdown document: a fixed title and header, a numbered
// table of contents, then a section per input with a sample config and its
// description. Deprecated inputs are skipped throughout.
func Descriptions() string {
	// Order our input types alphabetically
	names := []string{}
	for name := range Constructors {
		names = append(names, name)
	}
	sort.Strings(names)
	buf := bytes.Buffer{}
	buf.WriteString("Inputs\n")
	buf.WriteString(strings.Repeat("=", 6))
	buf.WriteString("\n\n")
	buf.WriteString(header)
	buf.WriteString("\n\n")
	buf.WriteString("### Contents\n\n")
	// i numbers only the non-deprecated entries in the table of contents.
	i := 0
	for _, name := range names {
		if Constructors[name].Status == docs.StatusDeprecated {
			continue
		}
		i++
		buf.WriteString(fmt.Sprintf("%v. [`%v`](#%v)\n", i, name, name))
	}
	buf.WriteString("\n")
	// Append each description
	for i, name := range names {
		def := Constructors[name]
		if def.Status == docs.StatusDeprecated {
			continue
		}
		// Render a sample config for this input with processors stripped;
		// a sanitise/marshal error simply omits the YAML sample.
		var confBytes []byte
		conf := NewConfig()
		conf.Type = name
		conf.Processors = nil
		if confSanit, err := conf.Sanitised(true); err == nil {
			confBytes, _ = config.MarshalYAML(confSanit)
		}
		buf.WriteString("## ")
		buf.WriteString("`" + name + "`")
		buf.WriteString("\n")
		if confBytes != nil {
			buf.WriteString("\n``` yaml\n")
			buf.Write(confBytes)
			buf.WriteString("```\n")
		}
		buf.WriteString(def.Description)
		buf.WriteString("\n")
		// NOTE(review): this separator check indexes the full names list
		// (including deprecated entries), so a trailing deprecated input
		// also suppresses the final separator — confirm this is intended.
		if i != (len(names) - 1) {
			buf.WriteString("\n---\n")
		}
	}
	return buf.String()
}
//------------------------------------------------------------------------------
package rowconv
import (
"context"
"errors"
"fmt"
"io"
"github.com/liquidata-inc/dolt/go/libraries/utils/set"
"github.com/liquidata-inc/dolt/go/libraries/doltcore/doltdb"
"github.com/liquidata-inc/dolt/go/libraries/doltcore/schema"
"github.com/liquidata-inc/dolt/go/store/hash"
"github.com/liquidata-inc/dolt/go/store/types"
)
// TagKindPair is a simple tuple that holds a tag and a NomsKind of a column.
type TagKindPair struct {
	// Tag is the tag of a column.
	Tag uint64
	// Kind is the NomsKind of a column.
	Kind types.NomsKind
}
// NameKindPair is a simple tuple that holds the name of a column and its NomsKind.
type NameKindPair struct {
	// Name is the name of the column.
	Name string
	// Kind is the NomsKind of the column.
	Kind types.NomsKind
}
// SuperSchema is an immutable schema generated by a SuperSchemaGen which defines methods for getting the schema
// and mapping another schema onto the super schema.
type SuperSchema struct {
	// sch is the generated superset schema; nil until generated.
	sch schema.Schema
	// namedCols maps each source (tag, kind) pair to its column name in sch.
	namedCols map[TagKindPair]string
}
// GetSchema gets the underlying schema.Schema object. It panics when the
// super schema has not been generated yet.
func (ss SuperSchema) GetSchema() schema.Schema {
	if ss.sch != nil {
		return ss.sch
	}
	panic("Bug: super schema not generated.")
}
// RowConvForSchema creates a RowConverter for transforming rows with the given schema to this super schema.
// This is done by mapping the column tag and type to the super schema column representing that tag and type.
func (ss SuperSchema) RowConvForSchema(sch schema.Schema) (*RowConverter, error) {
	// Map each source column name to the super-schema column name that was
	// assigned to its (tag, kind) pair during generation.
	inNameToOutName := make(map[string]string)
	allCols := sch.GetAllCols()
	err := allCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
		tkp := TagKindPair{Tag: tag, Kind: col.Kind}
		outName, ok := ss.namedCols[tkp]
		if !ok {
			// This (tag, kind) was never added to the generator, so no
			// super-schema column exists for it.
			return false, errors.New("failed to map columns")
		}
		inNameToOutName[col.Name] = outName
		return false, nil
	})
	if err != nil {
		return nil, err
	}
	fm, err := NewFieldMappingFromNameMap(sch, ss.sch, inNameToOutName)
	if err != nil {
		return nil, err
	}
	return NewRowConverter(fm)
}
// SuperSchemaGen is a utility class used to generate the superset of several schemas.
type SuperSchemaGen struct {
	// tagKindToDestTag maps a source (tag, kind) pair to the tag it will
	// carry in the super schema (remapped when the original tag collides).
	tagKindToDestTag map[TagKindPair]uint64
	// usedTags records every tag already claimed in the super schema.
	usedTags map[uint64]struct{}
	// names collects every column name seen for a given (tag, kind) pair.
	names map[TagKindPair]*set.StrSet
}
// NewSuperSchemaGen creates a SuperSchemaGen with all tracking maps
// initialized and empty.
func NewSuperSchemaGen() *SuperSchemaGen {
	ssg := &SuperSchemaGen{}
	ssg.tagKindToDestTag = map[TagKindPair]uint64{}
	ssg.usedTags = map[uint64]struct{}{}
	ssg.names = map[TagKindPair]*set.StrSet{}
	return ssg
}
// AddSchema will add a schema which will be incorporated into the superset
// of schemas. Columns are identified across schemas by their (tag, kind)
// pair: a new pair keeps its original tag when free, otherwise it falls
// back to schema.ReservedTagMin and scans upward for the first unused tag.
// A pair seen before only records the additional column name.
//
// Simplified the error plumbing: the Iter error is returned directly
// instead of the redundant `if err != nil { return err }; return nil`.
func (ssg *SuperSchemaGen) AddSchema(sch schema.Schema) error {
	return sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
		tagKind := TagKindPair{Tag: tag, Kind: col.Kind}
		if _, exists := ssg.tagKindToDestTag[tagKind]; exists {
			// Already tracked: just remember this schema's name for the column.
			ssg.names[tagKind].Add(col.Name)
			return false, nil
		}
		destTag := tag
		for {
			if _, collides := ssg.usedTags[destTag]; !collides {
				ssg.tagKindToDestTag[tagKind] = destTag
				ssg.usedTags[destTag] = struct{}{}
				ssg.names[tagKind] = set.NewStrSet([]string{col.Name})
				return false, nil
			}
			// On the first collision jump to the reserved range, then scan up.
			if destTag == tag {
				destTag = schema.ReservedTagMin
			} else {
				destTag++
			}
		}
	})
}
// nameCols chooses a unique column name for every tracked (tag, kind) pair.
// A pair that was always seen under a single name keeps it; a pair seen
// under multiple names gets a synthetic "<tag>_<kind>" name. If several
// pairs still land on the same name, each is suffixed with its kind and tag
// to restore uniqueness.
func (ssg *SuperSchemaGen) nameCols() map[TagKindPair]string {
	colNames := make(map[string][]TagKindPair)
	for tagKind, names := range ssg.names {
		name := fmt.Sprintf("%d_%s", tagKind.Tag, tagKind.Kind.String())
		if names.Size() == 1 {
			name = names.AsSlice()[0]
		}
		colNames[name] = append(colNames[name], tagKind)
	}
	results := make(map[TagKindPair]string)
	for name, tagKinds := range colNames {
		if len(tagKinds) == 1 {
			results[tagKinds[0]] = name
			continue
		}
		for _, tagKind := range tagKinds {
			// The inner `name` deliberately shadows the loop variable while
			// using its value as the prefix of the disambiguated name.
			name := fmt.Sprintf("%s_%s_%d", name, tagKind.Kind.String(), tagKind.Tag)
			results[tagKind] = name
		}
	}
	return results
}
// GenerateSuperSchema takes all the accumulated schemas and generates a schema which is the superset of all of
// those schemas. Optional additional columns are appended using unused tags
// from the reserved range; a name collision between an additional column and
// an existing one is an error.
func (ssg *SuperSchemaGen) GenerateSuperSchema(additionalCols ...NameKindPair) (SuperSchema, error) {
	namedCols := ssg.nameCols()
	colColl, _ := schema.NewColCollection()
	// Build a column for every tracked (tag, kind) pair under its chosen name.
	for tagKind, colName := range namedCols {
		destTag, ok := ssg.tagKindToDestTag[tagKind]
		if !ok {
			panic("mismatch between namedCols and tagKindToDestTag")
		}
		col := schema.NewColumn(colName, destTag, tagKind.Kind, false)
		var err error
		colColl, err = colColl.Append(col)
		if err != nil {
			return SuperSchema{}, err
		}
	}
	if len(additionalCols) > 0 {
		nextReserved := schema.ReservedTagMin
		for _, nameKindPair := range additionalCols {
			if _, ok := colColl.GetByName(nameKindPair.Name); ok {
				return SuperSchema{}, errors.New("Additional column name collision: " + nameKindPair.Name)
			}
			// Advance to the next tag not already claimed by a mapped column.
			for {
				if _, ok := ssg.usedTags[nextReserved]; !ok {
					break
				}
				nextReserved++
			}
			var err error
			ssg.usedTags[nextReserved] = struct{}{}
			colColl, err = colColl.Append(schema.NewColumn(nameKindPair.Name, nextReserved, nameKindPair.Kind, false))
			if err != nil {
				return SuperSchema{}, err
			}
		}
	}
	sch := schema.UnkeyedSchemaFromCols(colColl)
	return SuperSchema{sch: sch, namedCols: namedCols}, nil
}
// AddHistoryOfCommits walks the commits produced by cmItr and adds every
// distinct schema version of the table tblName to the schemas being
// supersetted. Schema versions are deduplicated by their schema-ref hash;
// commits where the table does not exist are skipped, and iteration ends
// when the iterator reports io.EOF.
func (ssg *SuperSchemaGen) AddHistoryOfCommits(ctx context.Context, tblName string, ddb *doltdb.DoltDB, cmItr doltdb.CommitItr) error {
	addedSchemas := make(map[hash.Hash]bool)
	for {
		_, cm, err := cmItr.Next(ctx)
		if err != nil {
			// io.EOF signals a fully consumed iterator, not a failure.
			if err == io.EOF {
				return nil
			}
			return err
		}
		root, err := cm.GetRootValue()
		if err != nil {
			return err
		}
		tbl, ok, err := root.GetTable(ctx, tblName)
		if err != nil {
			return err
		}
		if ok {
			schRef, err := tbl.GetSchemaRef()
			if err != nil {
				return err
			}
			// Only materialize and add a schema the first time its ref hash
			// is seen.
			h := schRef.TargetHash()
			if !addedSchemas[h] {
				addedSchemas[h] = true
				sch, err := tbl.GetSchema(ctx)
				if err != nil {
					return err
				}
				err = ssg.AddSchema(sch)
				if err != nil {
					return err
				}
			}
		}
	}
}
// AddHistoryOfTable will traverse all commit graphs which have local branches associated with them and add all
// passed versions of a table's schema to the schemas being supersetted
func (ssg *SuperSchemaGen) AddHistoryOfTable(ctx context.Context, tblName string, ddb *doltdb.DoltDB) error {
cmItr, err := doltdb.CommitItrForAllBranches(ctx, ddb)
if err != nil {
return err
}
err = ssg.AddHistoryOfCommits(ctx, tblName, ddb, cmItr)
if err != nil {
return err
}
return nil
} | go/libraries/doltcore/rowconv/super_schema.go | 0.605916 | 0.427038 | super_schema.go | starcoder |
package extime
import (
"fmt"
"math"
"strings"
"time"
)
// Microsecond time.Time 转为 微秒
func Microsecond(t time.Time) int64 {
return t.UnixNano() / int64(time.Microsecond)
}
// Millisecond time.Time 转为 毫秒
func Millisecond(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond)
}
// NowUS time.Now() 转为 微秒
func NowUS() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}
// NowMS time.Now() 转为 毫秒
func NowMS() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}
// Time 毫秒转time.Time
func Time(msec int64) time.Time {
return time.Unix(msec/1000, (msec%1000)*int64(time.Millisecond))
}
// Sleep pauses the current goroutine for at least the second d.
// A negative or zero duration causes Sleep to return immediately.
func Sleep(t int64) {
time.Sleep(time.Duration(t) * time.Second)
}
// MSleep pauses the current goroutine for at least the millisecond d.
// A negative or zero duration causes Sleep to return immediately.
func MSleep(t int64) {
time.Sleep(time.Duration(t) * time.Millisecond)
}
// USleep pauses the current goroutine for at least the microsecond d.
// A negative or zero duration causes Sleep to return immediately.
func USleep(t int64) {
time.Sleep(time.Duration(t) * time.Microsecond)
}
// Valid reports whether year/month/day form a real calendar date,
// accounting for the actual length of the month (including leap Februaries).
func Valid(year, month, day int) bool {
	if year < 1 || year > math.MaxInt32 {
		return false
	}
	if month < 1 || month > 12 {
		return false
	}
	if day < 1 || day > 31 {
		return false
	}
	return day <= MonthDays(year, time.Month(month))
}
// Days time.Duration转化为天数
func Days(d time.Duration) float64 {
day := d / (24 * time.Hour)
nsec := d % (24 * time.Hour)
return float64(day) + float64(nsec)/(24*60*60*1e9)
}
// IsLeapYear reports whether year is a leap year in the Gregorian calendar:
// divisible by 400, or divisible by 4 but not by 100.
func IsLeapYear(year int) bool {
	if year%400 == 0 {
		return true
	}
	if year%100 == 0 {
		return false
	}
	return year%4 == 0
}
// YearDays returns the total number of days in the given year
// (366 for leap years, otherwise 365).
func YearDays(year int) int {
	days := 365
	if IsLeapYear(year) {
		days++
	}
	return days
}
// MonthDays returns the number of days in the given month of the given
// year, adjusting February for leap years. It panics for a month outside
// January..December.
func MonthDays(year int, month time.Month) int {
	lengths := [...]int{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
	if month < time.January || month > time.December {
		panic(fmt.Errorf("invalid month %v", month))
	}
	n := lengths[month-1]
	if month == time.February && IsLeapYear(year) {
		n = 29
	}
	return n
}
// MonthDays2 returns the number of days in the month containing t.
func MonthDays2(t time.Time) int { return MonthDays(t.Year(), t.Month()) }
// replacer translates PHP date()-style format characters into Go's
// reference-time layout fragments; it backs Date/Now below.
var replacer = strings.NewReplacer([]string{
	// year
	"Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003
	"y", "06", // A two digit representation of a year Examples: 99 or 03
	// month
	"m", "01", // Numeric representation of a month, with leading zeros 01 through 12
	"n", "1", // Numeric representation of a month, without leading zeros 1 through 12
	"M", "Jan", // A short textual representation of a month, three letters Jan through Dec
	"F", "January", // A full textual representation of a month, such as January or March January through December
	// day
	"d", "02", // Day of the month, 2 digits with leading zeros 01 to 31
	"j", "2", // Day of the month without leading zeros 1 to 31
	// week
	"D", "Mon", // A textual representation of a day, three letters Mon through Sun
	"l", "Monday", // A full textual representation of the day of the week Sunday through Saturday
	// time
	"g", "3", // 12-hour format of an hour without leading zeros 1 through 12
	"G", "15", // 24-hour format of an hour without leading zeros 0 through 23
	"h", "03", // 12-hour format of an hour with leading zeros 01 through 12
	"H", "15", // 24-hour format of an hour with leading zeros 00 through 23
	"a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm
	"A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM
	"i", "04", // Minutes with leading zeros 00 to 59
	"s", "05", // Seconds, with leading zeros 00 through 59
	// time zone
	"T", "MST",
	"P", "-07:00",
	"O", "-0700",
	// RFC 2822
	"r", time.RFC1123Z,
}...)
// Date formats t in the style of PHP's date() function;
// layout uses PHP format characters, e.g. "Y-m-d H:i:s".
func Date(t time.Time, layout string) string {
	// Rewrite PHP format characters into Go reference-time tokens first.
	layout = replacer.Replace(layout)
	return t.Format(layout)
}

// Now formats the current time in the style of PHP's date() function;
// layout uses PHP format characters, e.g. "Y-m-d H:i:s".
func Now(layout string) string { return Date(time.Now(), layout) }
// Parse parses value by trying a list of common layouts in order and
// returns the first successful result.
// An empty value yields the zero time. If no layout matches, Parse
// panics with the error from the last layout tried.
func Parse(value string) time.Time {
	if value == "" {
		return time.Time{}
	}
	// Candidate layouts: dash- and slash-separated date/time forms with
	// optional numeric zone and zone name, followed by the standard
	// library's predefined reference layouts.
	layouts := []string{
		"2006-01-02 15:04:05 -0700 MST",
		"2006-01-02 15:04:05 -0700",
		"2006-01-02 15:04:05",
		"2006/01/02 15:04:05 -0700 MST",
		"2006/01/02 15:04:05 -0700",
		"2006/01/02 15:04:05",
		"2006-01-02 -0700 MST",
		"2006-01-02 -0700",
		"2006-01-02",
		"2006/01/02 -0700 MST",
		"2006/01/02 -0700",
		"2006/01/02",
		"2006-01-02 15:04:05 -0700 -0700",
		"2006/01/02 15:04:05 -0700 -0700",
		"2006-01-02 -0700 -0700",
		"2006/01/02 -0700 -0700",
		time.ANSIC,
		time.UnixDate,
		time.RubyDate,
		time.RFC822,
		time.RFC822Z,
		time.RFC850,
		time.RFC1123,
		time.RFC1123Z,
		time.RFC3339,
		time.RFC3339Nano,
		time.Kitchen,
		time.Stamp,
		time.StampMilli,
		time.StampMicro,
		time.StampNano,
	}
	var t time.Time
	var err error
	for _, layout := range layouts {
		t, err = time.Parse(layout, value)
		if err == nil {
			return t
		}
	}
	// No layout matched; err holds the failure from the last layout tried.
	panic(err)
}
// ParseLocation parses value with Parse after appending the local time
// zone's numeric offset (±hhmm) and abbreviation, so that zone-less
// inputs such as "2006-01-02 15:04:05" are interpreted in local time.
// An empty value yields the zero time.
func ParseLocation(value string) time.Time {
	if value == "" {
		return time.Time{}
	}
	zoneName, offset := time.Now().Zone()
	// Encode the offset (seconds east of UTC) as ±hhmm. Go's integer
	// division and remainder carry the sign through for west-of-UTC
	// zones, and the minute term preserves half-hour offsets (+0530).
	zoneValue := offset/3600*100 + offset%3600/60
	// %+05d always emits an explicit sign padded to four digits. The
	// previous fmt.Sprintf(" -%04d", zoneValue) produced a malformed
	// double sign (e.g. " --500") for negative offsets, which no layout
	// in Parse could match.
	value += fmt.Sprintf(" %+05d", zoneValue)
	if zoneName != "" {
		value += " " + zoneName
	}
	return Parse(value)
}
// StartOfDay 获取日期中当天的开始时间.
func StartOfDay(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
}
// EndOfDay 获取日期中当天的结束时间.
func EndOfDay(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), 23, 59, 59, int(time.Second-time.Nanosecond), t.Location())
}
// StartOfMonth 获取日期中当月的开始时间.
func StartOfMonth(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
}
// EndOfMonth 获取日期中当月的结束时间.
func EndOfMonth(t time.Time) time.Time {
return StartOfMonth(t).AddDate(0, 1, 0).Add(-time.Nanosecond)
}
// StartOfYear 获取日期中当年的开始时间.
func StartOfYear(t time.Time) time.Time {
return time.Date(t.Year(), 1, 1, 0, 0, 0, 0, t.Location())
}
// EndOfYear 获取日期中当年的结束时间.
func EndOfYear(t time.Time) time.Time {
return StartOfYear(t).AddDate(1, 0, 0).Add(-time.Nanosecond)
}
// StartOfWeek 获取日期中当周的开始时间;
// weekStartDay 周几作为周的第一天,本库默认周一.
func StartOfWeek(date time.Time, weekStartDay ...time.Weekday) time.Time {
weekStart := time.Monday
if len(weekStartDay) > 0 {
weekStart = weekStartDay[0]
}
// 当前是周几
weekday := int(date.Weekday())
if weekStart != time.Sunday {
weekStartDayInt := int(weekStart)
if weekday < weekStartDayInt {
weekday += 7 - weekStartDayInt
} else {
weekday -= weekStartDayInt
}
}
return time.Date(date.Year(), date.Month(), date.Day()-weekday, 0, 0, 0, 0, date.Location())
}
// EndOfWeek 获取日期中当周的结束时间;
// weekStartDay 周几作为周的第一天,本库默认周一.
func EndOfWeek(date time.Time, weekStartDay ...time.Weekday) time.Time {
return StartOfWeek(date, weekStartDay...).AddDate(0, 0, 7).Add(-time.Nanosecond)
} | extime/extime.go | 0.607547 | 0.427158 | extime.go | starcoder |
package dbs
import (
"math"
"math/cmplx"
)
// Span of contour - line or arc.
// A and Z are the endpoints; Bulge == 0 means a straight line segment.
// Bulge appears to follow the DXF bulge convention (tangent of a quarter
// of the arc's included angle) — confirm against callers.
type Span struct {
	A     Point   // start point
	Bulge float64 // arc bulge factor; 0 for a straight line
	Z     Point   // end point
}
// square returns the product of x with itself.
func square(x float64) (sq float64) {
	sq = x * x
	return
}
// Vector returns the chord vector of the span (end minus start).
func (span *Span) Vector() Point {
	return span.Z.Sub(&span.A)
}

// Area calculates the area term for a Span: the shoelace (cross-product)
// contribution of the chord, adjusted by a circular-segment correction
// when the span is an arc (Bulge != 0).
func (span *Span) Area() float64 {
	// Shoelace term for the chord A -> Z.
	s := (span.Z.X*span.A.Y - span.Z.Y*span.A.X) / 2
	if span.Bulge != 0 {
		BuBu := square(span.Bulge)
		// Segment correction expressed through the bulge and the squared
		// chord length (Vector().Abs2()).
		s -= (math.Atan(span.Bulge)*square(1+BuBu) - (1-BuBu)*span.Bulge) /
			BuBu / 8 * span.Vector().Abs2()
	}
	return s
}

// Perimeter calculates the length of a Span: the chord length for a
// line, scaled by an arc-length factor when Bulge != 0.
func (span *Span) Perimeter() float64 {
	p := span.Vector().Abs()
	if span.Bulge != 0 {
		// Arc length / chord length ratio as a function of the bulge b:
		// atan(b)/b * (1 + b^2).
		p *= (math.Atan(span.Bulge) / span.Bulge) *
			(1 + square(span.Bulge))
	}
	return p
}

// linear is an internal helper mapping a local complex coordinate onto
// the chord: pos = -1 yields A, pos = +1 yields Z; imaginary parts of
// pos displace perpendicular to the chord (complex multiplication by i
// rotates the chord vector by 90 degrees).
func (span *Span) linear(pos complex128) Point {
	return C2Point(((span.Z.C()-span.A.C())*pos + (span.Z.C() + span.A.C())) / 2)
}
// Zenith is the middle point of the arc (local coordinate -i*Bulge).
func (span *Span) Zenith() Point {
	return span.linear(complex(0, -span.Bulge))
}

// Nadir is a point opposite to a middle point of arc (not on arc itself).
// Note: divides by Bulge, so it is undefined for straight lines (Bulge == 0).
func (span *Span) Nadir() Point {
	return span.linear(complex(0, 1/span.Bulge))
}

// Center returns the center of the arc's circle.
// Note: divides by Bulge, so it is undefined for straight lines (Bulge == 0).
func (span *Span) Center() Point {
	return span.linear(complex(0, (1/span.Bulge-span.Bulge)/2))
}

// Radius returns the arc radius: |1/b + b| / 4 times the chord length,
// for bulge b.
func (span *Span) Radius() float64 {
	return math.Abs(1/span.Bulge+span.Bulge) / 4 * span.Vector().Abs()
}

// At finds the Point on the Arc at position pos:
//	-1: Start of Arc
//	 0: Middle of Arc
//	+1: End of Arc
// Implemented as a ratio of complex linear terms (a Möbius-type map)
// evaluated in the local chord frame.
func (span *Span) At(pos float64) Point {
	return span.linear(
		complex(pos, -span.Bulge) /
			complex(1, -pos*span.Bulge))
}
// AtUniform is like At, returns the position of a Point on the Arc, but
// with a slightly more uniform spacing in pos (especially for arcs with
// a large Bulge). The rational reparameterization below compensates for
// At's non-uniform speed.
func (span *Span) AtUniform(pos float64) Point {
	q := (math.Sqrt(9+8*square(span.Bulge)) + 1) / 4
	return span.At(pos / (q - (q-1)*square(pos)))
}

// PositionOf finds the position for a Point on the Arc (reverse of At),
// computed from the ratio of distances to the two endpoints:
// -1 at A, +1 at Z.
func (span *Span) PositionOf(point *Point) float64 {
	a := point.Sub(&span.A).Abs()
	z := point.Sub(&span.Z).Abs()
	return (a - z) / (a + z)
}

// tgq - helper (static) function: tan(arg(sqrt(vector))), i.e. the
// tangent of half the argument angle of vector. The two algebraically
// equivalent half-angle forms are chosen per half-plane to avoid
// cancellation.
func (span *Span) tgq(vector complex128) float64 {
	if real(vector) < 0 {
		return (cmplx.Abs(vector) - real(vector)) / imag(vector)
	}
	return imag(vector) / (cmplx.Abs(vector) + real(vector))
}

// BulgeOf finds the bulge for an arc from A to Z that passes through point.
func (span *Span) BulgeOf(point *Point) float64 {
	return span.tgq(
		cmplx.Conj(point.Sub(&span.A).C()) *
			span.Z.Sub(point).C())
}

// LeftBulge finds the Bulge for the sub-arc from the start to position pos.
func (span *Span) LeftBulge(pos float64) float64 {
	return span.tgq(
		complex(1, span.Bulge) *
			complex(1, pos*span.Bulge))
}

// RightBulge finds the Bulge for the sub-arc from position pos to the end.
// By symmetry this is LeftBulge evaluated at -pos.
func (span *Span) RightBulge(pos float64) float64 {
	return span.LeftBulge(-pos)
}
package iotago
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"strings"
)
// OutputType defines the type of outputs.
type OutputType = byte

const (
	// The iota byte values below double as the on-wire type discriminators
	// consumed by OutputSelector.

	// OutputSigLockedSingleOutput denotes a type of output which is locked by a signature and deposits onto a single address.
	OutputSigLockedSingleOutput OutputType = iota
	// OutputSigLockedDustAllowanceOutput is like OutputSigLockedSingleOutput but it is used to increase the allowance/amount of dust outputs on a given address.
	OutputSigLockedDustAllowanceOutput
	// OutputTreasuryOutput denotes the type of the TreasuryOutput.
	OutputTreasuryOutput

	// OutputSigLockedDustAllowanceOutputMinDeposit defines the minimum deposit amount of a SigLockedDustAllowanceOutput.
	OutputSigLockedDustAllowanceOutputMinDeposit uint64 = 1_000_000
)

var (
	// ErrDepositAmountMustBeGreaterThanZero returned if the deposit amount of an output is less or equal zero.
	ErrDepositAmountMustBeGreaterThanZero = errors.New("deposit amount must be greater than zero")
)

// Outputs is a slice of Output.
type Outputs []Output

// Output defines the deposit of funds.
type Output interface {
	Serializable

	// Deposit returns the amount this Output deposits.
	Deposit() (uint64, error)
	// Target returns the target of the deposit.
	// If the type of output does not have/support a target, nil is returned.
	Target() (Serializable, error)
	// Type returns the type of the output.
	Type() OutputType
}
// OutputSelector implements SerializableSelectorFunc for output types:
// it returns an empty Serializable of the concrete output type matching
// outputType, or an error wrapping ErrUnknownOutputType for unknown values.
func OutputSelector(outputType uint32) (Serializable, error) {
	var seri Serializable
	switch byte(outputType) {
	case OutputSigLockedSingleOutput:
		seri = &SigLockedSingleOutput{}
	case OutputSigLockedDustAllowanceOutput:
		seri = &SigLockedDustAllowanceOutput{}
	case OutputTreasuryOutput:
		seri = &TreasuryOutput{}
	default:
		return nil, fmt.Errorf("%w: type %d", ErrUnknownOutputType, outputType)
	}
	return seri, nil
}
// OutputIDHex is the hex representation of an output ID
// (transaction ID followed by a little-endian uint16 output index).
type OutputIDHex string

// MustSplitParts returns the transaction ID and output index parts of the hex output ID.
// It panics if the hex output ID is invalid.
func (oih OutputIDHex) MustSplitParts() (*TransactionID, uint16) {
	txID, outputIndex, err := oih.SplitParts()
	if err != nil {
		panic(err)
	}
	return txID, outputIndex
}
// SplitParts returns the transaction ID and output index parts of the hex output ID.
// It returns an error if the string is not valid hex or decodes to fewer than
// TransactionIDLength+UInt16ByteSize bytes.
func (oih OutputIDHex) SplitParts() (*TransactionID, uint16, error) {
	outputIDBytes, err := hex.DecodeString(string(oih))
	if err != nil {
		return nil, 0, err
	}
	// Guard against truncated input: the previous implementation sliced
	// blindly and panicked on short IDs instead of returning an error.
	if minLen := TransactionIDLength + UInt16ByteSize; len(outputIDBytes) < minLen {
		return nil, 0, fmt.Errorf("invalid output ID length %d, need at least %d bytes", len(outputIDBytes), minLen)
	}
	var txID TransactionID
	copy(txID[:], outputIDBytes[:TransactionIDLength])
	outputIndex := binary.LittleEndian.Uint16(outputIDBytes[TransactionIDLength : TransactionIDLength+UInt16ByteSize])
	return &txID, outputIndex, nil
}
// MustAsUTXOInput converts the hex output ID to a UTXOInput.
// It panics if the hex output ID is invalid.
func (oih OutputIDHex) MustAsUTXOInput() *UTXOInput {
	utxoInput, err := oih.AsUTXOInput()
	if err != nil {
		panic(err)
	}
	return utxoInput
}

// AsUTXOInput converts the hex output ID to a UTXOInput by splitting it
// into its transaction ID and output index parts.
func (oih OutputIDHex) AsUTXOInput() (*UTXOInput, error) {
	var utxoInput UTXOInput
	txID, outputIndex, err := oih.SplitParts()
	if err != nil {
		return nil, err
	}
	copy(utxoInput.TransactionID[:], txID[:])
	utxoInput.TransactionOutputIndex = outputIndex
	return &utxoInput, nil
}
// OutputsValidatorFunc which given the index of an output and the output itself, runs validations and returns an error if any should fail.
type OutputsValidatorFunc func(index int, output Output) error

// OutputsAddrUniqueValidator returns a validator which checks that all addresses are unique per OutputType.
// The returned closure is stateful: it accumulates seen addresses across
// calls in the captured set, and is not safe for concurrent use.
func OutputsAddrUniqueValidator() OutputsValidatorFunc {
	set := map[OutputType]map[string]int{}
	return func(index int, dep Output) error {
		var b strings.Builder

		target, err := dep.Target()
		if err != nil {
			return fmt.Errorf("unable to get target of output: %w", err)
		}

		// Outputs without a target (e.g. no address) are exempt from the
		// uniqueness check.
		if target == nil {
			return nil
		}

		// can't be reduced to one b.Write()
		switch addr := target.(type) {
		case *Ed25519Address:
			if _, err := b.Write(addr[:]); err != nil {
				return fmt.Errorf("%w: unable to serialize Ed25519 address in addr unique validator", err)
			}
		}

		// Key the set by the serialized address bytes, per output type.
		k := b.String()

		m, ok := set[dep.Type()]
		if !ok {
			m = make(map[string]int)
			set[dep.Type()] = m
		}

		if j, has := m[k]; has {
			return fmt.Errorf("%w: output %d and %d share the same address", ErrOutputAddrNotUnique, j, index)
		}
		m[k] = index
		return nil
	}
}
// OutputsDepositAmountValidator returns a validator which checks that:
//	1. every output deposits more than zero
//	2. every output deposits less than the total supply
//	3. the sum of deposits does not exceed the total supply
//	4. SigLockedDustAllowanceOutput deposits at least OutputSigLockedDustAllowanceOutputMinDeposit.
// If -1 is passed to the validator func, then the sum is not aggregated over multiple calls.
// The returned closure is stateful (it captures the running sum) and is
// not safe for concurrent use.
func OutputsDepositAmountValidator() OutputsValidatorFunc {
	var sum uint64
	return func(index int, dep Output) error {
		deposit, err := dep.Deposit()
		if err != nil {
			return fmt.Errorf("unable to get deposit of output: %w", err)
		}
		if deposit == 0 {
			return fmt.Errorf("%w: output %d", ErrDepositAmountMustBeGreaterThanZero, index)
		}
		if _, isAllowanceOutput := dep.(*SigLockedDustAllowanceOutput); isAllowanceOutput {
			if deposit < OutputSigLockedDustAllowanceOutputMinDeposit {
				return fmt.Errorf("%w: output %d", ErrOutputDustAllowanceLessThanMinDeposit, index)
			}
		}
		if deposit > TokenSupply {
			return fmt.Errorf("%w: output %d", ErrOutputDepositsMoreThanTotalSupply, index)
		}
		if sum+deposit > TokenSupply {
			return fmt.Errorf("%w: output %d", ErrOutputsSumExceedsTotalSupply, index)
		}
		// index == -1 is the documented sentinel that disables aggregation,
		// allowing the shared package-level validator to be reused.
		if index != -1 {
			sum += deposit
		}
		return nil
	}
}

// outputAmountValidator is a shared instance; it is supposed to be
// called with -1 as the index in order to be used over multiple calls.
var outputAmountValidator = OutputsDepositAmountValidator()
// ValidateOutputs validates the outputs by running them against the given
// OutputsValidatorFunc. It returns the first validation error encountered,
// or a wrapped ErrUnknownOutputType if an element does not implement Output.
func ValidateOutputs(outputs Serializables, funcs ...OutputsValidatorFunc) error {
	for i, output := range outputs {
		// Assert once and reuse the result; the previous code repeated the
		// type assertion for every validator invocation.
		dep, isOutput := output.(Output)
		if !isOutput {
			return fmt.Errorf("%w: can only validate outputs but got %T instead", ErrUnknownOutputType, output)
		}
		for _, f := range funcs {
			if err := f(i, dep); err != nil {
				return err
			}
		}
	}
	return nil
}
// jsonOutputSelector selects the JSON output implementation for the given
// type byte, mirroring OutputSelector for the JSON representations.
func jsonOutputSelector(ty int) (JSONSerializable, error) {
	var obj JSONSerializable
	switch byte(ty) {
	case OutputSigLockedSingleOutput:
		obj = &jsonSigLockedSingleOutput{}
	case OutputSigLockedDustAllowanceOutput:
		obj = &jsonSigLockedDustAllowanceOutput{}
	case OutputTreasuryOutput:
		obj = &jsonTreasuryOutput{}
	default:
		return nil, fmt.Errorf("unable to decode output type from JSON: %w", ErrUnknownOutputType)
	}
	return obj, nil
}
package colorx
import (
"image/color"
"math"
"github.com/somebadcode/go-colorx/v2/internal/mathx"
)
// HSVA is an implementation of the HSV (Hue, Saturation and Value) color model. HSV is also known as HSB (Hue,
// Saturation, Brightness).
type HSVA struct {
	H float64 // Hue ∈ [0, 360)
	S float64 // Saturation ∈ [0, 1]
	V float64 // Value/Brightness ∈ [0, 1]
	A float64 // Alpha ∈ [0, 1]
}

// HSVAModel can convert the color to the HSVA color model defined in this package.
var HSVAModel = color.ModelFunc(hsvaModel)

// hsvaModel converts an arbitrary color.Color to HSVA.
// Colors that are already HSVA are returned unchanged.
func hsvaModel(c color.Color) color.Color {
	if _, ok := c.(HSVA); ok {
		return c
	}

	// RGBA() yields 16-bit channels; shift down to 8 bits for conversion.
	// NOTE(review): RGBA() returns alpha-premultiplied channels and no
	// un-premultiplication is performed here — confirm this is intended
	// for translucent colors.
	r, g, b, a := c.RGBA()
	h, s, v, ha := RGBAToHSVA(uint8(r>>8), uint8(g>>8), uint8(b>>8), uint8(a>>8))

	return HSVA{
		H: h,
		S: s,
		V: v,
		A: ha,
	}
}
// RGBAToHSVA converts 8-bit RGBA channels to hue (degrees, [0, 360)),
// saturation, value and alpha (each in [0, 1]).
func RGBAToHSVA(r, g, b, a uint8) (float64, float64, float64, float64) {
	// Normalize all channels to [0, 1].
	red := float64(r) / math.MaxUint8
	green := float64(g) / math.MaxUint8
	blue := float64(b) / math.MaxUint8
	alpha := float64(a) / math.MaxUint8

	// Dominant and least dominant channels and their spread.
	cMax := math.Max(red, math.Max(green, blue))
	cMin := math.Min(red, math.Min(green, blue))
	delta := cMax - cMin

	// Value is the magnitude of the dominant channel.
	value := cMax

	// Saturation is the relative spread; zero for black (cMax == 0).
	var saturation float64
	if cMax != 0.0 {
		saturation = delta / cMax
	}

	// Hue depends on which channel dominates; the +360 bias keeps the
	// intermediate non-negative before the final wrap into [0, 360).
	var hue float64
	switch {
	case cMax == cMin: // delta == 0: achromatic
		hue = 0.0
	case cMax == red:
		hue = math.FMA(60.0, math.Mod((green-blue)/delta, 6), 360.0)
	case cMax == green:
		hue = math.FMA(60.0, (blue-red)/delta+2, 360.0)
	case cMax == blue:
		hue = math.FMA(60.0, (red-green)/delta+4, 360.0)
	}
	hue = math.Mod(hue, 360.0)

	return hue, saturation, value, alpha
}
// RGBA returns the alpha-premultiplied red, green, blue and alpha values for the color.
// It implements color.Color using the standard HSV -> RGB sextant algorithm
// and delegates the final 8-to-16-bit expansion to color.RGBA.RGBA().
func (hsva HSVA) RGBA() (r, g, b, a uint32) {
	var rgba color.RGBA
	// Scale alpha back to 8 bits (truncating, not rounding).
	rgba.A = uint8(hsva.A * math.MaxUint8)

	// Zero saturation means a gray: every channel equals the value.
	if mathx.Equal(hsva.S, 0.0) {
		rgba.R = uint8(hsva.V * math.MaxUint8)
		rgba.G = uint8(hsva.V * math.MaxUint8)
		rgba.B = uint8(hsva.V * math.MaxUint8)
		return rgba.RGBA()
	}

	// Normalize the hue angle into [0, 360).
	angle := math.Mod(hsva.H+360.0, 360.0)

	// sextant will be the sextant (60° slice) of the dominant color;
	// frac is the fractional position within that sextant.
	sextant, frac := math.Modf(angle / 60.0)

	// Standard HSV intermediates: p is the minimum channel, q and t are
	// the descending/ascending interpolants within the sextant.
	p := hsva.V * (1.0 - hsva.S)
	q := hsva.V * (1.0 - (hsva.S * frac))
	t := hsva.V * (1.0 - (hsva.S * (1.0 - frac)))

	switch sextant {
	case 0:
		rgba.R = uint8(math.Floor(hsva.V * math.MaxUint8))
		rgba.G = uint8(math.Floor(t * math.MaxUint8))
		rgba.B = uint8(math.Floor(p * math.MaxUint8))
	case 1:
		rgba.R = uint8(math.Floor(q * math.MaxUint8))
		rgba.G = uint8(math.Floor(hsva.V * math.MaxUint8))
		rgba.B = uint8(math.Floor(p * math.MaxUint8))
	case 2:
		rgba.R = uint8(math.Floor(p * math.MaxUint8))
		rgba.G = uint8(math.Floor(hsva.V * math.MaxUint8))
		rgba.B = uint8(math.Floor(t * math.MaxUint8))
	case 3:
		rgba.R = uint8(math.Floor(p * math.MaxUint8))
		rgba.G = uint8(math.Floor(q * math.MaxUint8))
		rgba.B = uint8(math.Floor(hsva.V * math.MaxUint8))
	case 4:
		rgba.R = uint8(math.Floor(t * math.MaxUint8))
		rgba.G = uint8(math.Floor(p * math.MaxUint8))
		rgba.B = uint8(math.Floor(hsva.V * math.MaxUint8))
	default: // case 5
		rgba.R = uint8(math.Floor(hsva.V * math.MaxUint8))
		rgba.G = uint8(math.Floor(p * math.MaxUint8))
		rgba.B = uint8(math.Floor(q * math.MaxUint8))
	}
	return rgba.RGBA()
}
// +build ingore

// NOTE(review): the build tag above is presumably a typo of "ignore";
// either tag is never defined, so this documentation-stub file (the
// declarations below have no bodies and reference io.Writer without
// importing io) is excluded from normal builds — confirm before renaming.

// Package log implements a simple logging package. It defines a type, Logger, with
// methods for formatting output. It also has a predefined 'standard' Logger
// accessible through helper functions Print[f|ln], Fatal[f|ln], and Panic[f|ln],
// which are easier to use than creating a Logger manually. That logger writes to
// standard error and prints the date and time of each logged message. The Fatal
// functions call os.Exit(1) after writing the log message. The Panic functions
// call panic after writing the log message.
package log

// These flags define which text to prefix to each log entry generated by the
// Logger.
const (
	// Bits or'ed together to control what's printed. There is no control over the
	// order they appear (the order listed here) or the format they present (as
	// described in the comments). A colon appears after these items:
	//	2009/01/23 01:23:23.123123 /a/b/c/d.go:23: message
	Ldate = 1 << iota // the date: 2009/01/23
	Ltime // the time: 01:23:23
	Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
	Llongfile // full file name and line number: /a/b/c/d.go:23
	Lshortfile // final file name element and line number: d.go:23. overrides Llongfile
	LstdFlags = Ldate | Ltime // initial values for the standard logger
)

// Fatal is equivalent to Print() followed by a call to os.Exit(1).
func Fatal(v ...interface{})

// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
func Fatalf(format string, v ...interface{})

// Fatalln is equivalent to Println() followed by a call to os.Exit(1).
func Fatalln(v ...interface{})

// Flags returns the output flags for the standard logger.
func Flags() int

// Panic is equivalent to Print() followed by a call to panic().
func Panic(v ...interface{})

// Panicf is equivalent to Printf() followed by a call to panic().
func Panicf(format string, v ...interface{})

// Panicln is equivalent to Println() followed by a call to panic().
func Panicln(v ...interface{})

// Prefix returns the output prefix for the standard logger.
func Prefix() string

// Print calls Output to print to the standard logger. Arguments are handled in the
// manner of fmt.Print.
func Print(v ...interface{})

// Printf calls Output to print to the standard logger. Arguments are handled in
// the manner of fmt.Printf.
func Printf(format string, v ...interface{})

// Println calls Output to print to the standard logger. Arguments are handled in
// the manner of fmt.Println.
func Println(v ...interface{})

// SetFlags sets the output flags for the standard logger.
func SetFlags(flag int)

// SetOutput sets the output destination for the standard logger.
func SetOutput(w io.Writer)

// SetPrefix sets the output prefix for the standard logger.
func SetPrefix(prefix string)

// A Logger represents an active logging object that generates lines of output to
// an io.Writer. Each logging operation makes a single call to the Writer's Write
// method. A Logger can be used simultaneously from multiple goroutines; it
// guarantees to serialize access to the Writer.
type Logger struct {
	// contains filtered or unexported fields
}

// New creates a new Logger. The out variable sets the destination to which log
// data will be written. The prefix appears at the beginning of each generated log
// line. The flag argument defines the logging properties.
func New(out io.Writer, prefix string, flag int) *Logger

// Fatal is equivalent to l.Print() followed by a call to os.Exit(1).
func (l *Logger) Fatal(v ...interface{})

// Fatalf is equivalent to l.Printf() followed by a call to os.Exit(1).
func (l *Logger) Fatalf(format string, v ...interface{})

// Fatalln is equivalent to l.Println() followed by a call to os.Exit(1).
func (l *Logger) Fatalln(v ...interface{})

// Flags returns the output flags for the logger.
func (l *Logger) Flags() int

// Output writes the output for a logging event. The string s contains the text to
// print after the prefix specified by the flags of the Logger. A newline is
// appended if the last character of s is not already a newline. Calldepth is used
// to recover the PC and is provided for generality, although at the moment on all
// pre-defined paths it will be 2.
func (l *Logger) Output(calldepth int, s string) error

// Panic is equivalent to l.Print() followed by a call to panic().
func (l *Logger) Panic(v ...interface{})

// Panicf is equivalent to l.Printf() followed by a call to panic().
func (l *Logger) Panicf(format string, v ...interface{})

// Panicln is equivalent to l.Println() followed by a call to panic().
func (l *Logger) Panicln(v ...interface{})

// Prefix returns the output prefix for the logger.
func (l *Logger) Prefix() string

// Print calls l.Output to print to the logger. Arguments are handled in the manner
// of fmt.Print.
func (l *Logger) Print(v ...interface{})

// Printf calls l.Output to print to the logger. Arguments are handled in the
// manner of fmt.Printf.
func (l *Logger) Printf(format string, v ...interface{})

// Println calls l.Output to print to the logger. Arguments are handled in the
// manner of fmt.Println.
func (l *Logger) Println(v ...interface{})

// SetFlags sets the output flags for the logger.
func (l *Logger) SetFlags(flag int)

// SetPrefix sets the output prefix for the logger.
func (l *Logger) SetPrefix(prefix string)
package gslb
/**
* Configuration for GSLB site resource.
 */
// Gslbsite models the NITRO "gslbsite" resource; the json tags carry the
// NITRO field names, so this type appears machine-generated from the
// Citrix ADC API schema — confirm before hand-editing field names/tags.
type Gslbsite struct {
	/**
	* Name for the GSLB site. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the virtual server is created.
	CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my gslbsite" or 'my gslbsite').
	*/
	Sitename string `json:"sitename,omitempty"`
	/**
	* Type of site to create. If the type is not specified, the appliance automatically detects and sets the type on the basis of the IP address being assigned to the site. If the specified site IP address is owned by the appliance (for example, a MIP address or SNIP address), the site is a local site. Otherwise, it is a remote site.
	 */
	Sitetype string `json:"sitetype,omitempty"`
	/**
	* IP address for the GSLB site. The GSLB site uses this IP address to communicate with other GSLB sites. For a local site, use any IP address that is owned by the appliance (for example, a SNIP or MIP address, or the IP address of the ADNS service).
	 */
	Siteipaddress string `json:"siteipaddress,omitempty"`
	/**
	* Public IP address for the local site. Required only if the appliance is deployed in a private address space and the site has a public IP address hosted on an external firewall or a NAT device.
	 */
	Publicip string `json:"publicip,omitempty"`
	/**
	* Exchange metrics with other sites. Metrics are exchanged by using Metric Exchange Protocol (MEP). The appliances in the GSLB setup exchange health information once every second.
	If you disable metrics exchange, you can use only static load balancing methods (such as round robin, static proximity, or the hash-based methods), and if you disable metrics exchange when a dynamic load balancing method (such as least connection) is in operation, the appliance falls back to round robin. Also, if you disable metrics exchange, you must use a monitor to determine the state of GSLB services. Otherwise, the service is marked as DOWN.
	*/
	Metricexchange string `json:"metricexchange,omitempty"`
	/**
	* Exchange, with other GSLB sites, network metrics such as round-trip time (RTT), learned from communications with various local DNS (LDNS) servers used by clients. RTT information is used in the dynamic RTT load balancing method, and is exchanged every 5 seconds.
	 */
	Nwmetricexchange string `json:"nwmetricexchange,omitempty"`
	/**
	* Exchange persistent session entries with other GSLB sites every five seconds.
	 */
	Sessionexchange string `json:"sessionexchange,omitempty"`
	/**
	* Specify the conditions under which the GSLB service must be monitored by a monitor, if one is bound. Available settings function as follows:
	* ALWAYS - Monitor the GSLB service at all times.
	* MEPDOWN - Monitor the GSLB service only when the exchange of metrics through the Metrics Exchange Protocol (MEP) is disabled.
	MEPDOWN_SVCDOWN - Monitor the service in either of the following situations:
	* The exchange of metrics through MEP is disabled.
	* The exchange of metrics through MEP is enabled but the status of the service, learned through metrics exchange, is DOWN.
	*/
	Triggermonitor string `json:"triggermonitor,omitempty"`
	/**
	* Parent site of the GSLB site, in a parent-child topology.
	 */
	Parentsite string `json:"parentsite,omitempty"`
	/**
	* Cluster IP address. Specify this parameter to connect to the remote cluster site for GSLB auto-sync. Note: The cluster IP address is defined when creating the cluster.
	 */
	Clip string `json:"clip,omitempty"`
	/**
	* IP address to be used to globally access the remote cluster when it is deployed behind a NAT. It can be same as the normal cluster IP address.
	 */
	Publicclip string `json:"publicclip,omitempty"`
	/**
	* The naptr replacement suffix configured here will be used to construct the naptr replacement field in NAPTR record.
	 */
	Naptrreplacementsuffix string `json:"naptrreplacementsuffix,omitempty"`
	/**
	* The list of backup gslb sites configured in preferred order. Need to be parent gsb sites.
	 */
	Backupparentlist []string `json:"backupparentlist,omitempty"`

	//------- Read only Parameter ---------;

	Status string `json:"status,omitempty"`
	Persistencemepstatus string `json:"persistencemepstatus,omitempty"`
	Version string `json:"version,omitempty"`
	Curbackupparentip string `json:"curbackupparentip,omitempty"`
	Sitestate string `json:"sitestate,omitempty"`
}
package cephalodistance
import (
"math"
"github.com/paulidealiste/Cephalopod/cephalobjects"
"github.com/paulidealiste/Cephalopod/cephaloutils"
)
// pair holds the 2D coordinates of the two data points whose distance
// is being computed: (x1, y1) and (x2, y2).
type pair struct {
	x1, y1, x2, y2 float64
}
// CalculateDistanceMatrix fills the Distance property of the supplied
// DataStore with the full pairwise distance matrix of its Basic points,
// computed with the requested metric. It also records each point-pair
// UID key ("<uid_i> <uid_j>") in the Grep fold map for row/column lookup.
func CalculateDistanceMatrix(input *cephalobjects.DataStore, metric cephalobjects.DistanceMetric) {
	var dmc cephalobjects.DataMatrix
	n := len(input.Basic)
	dmc.Matrix = make([][]float64, n)
	dmc.Variables = make([]string, n)
	dmc.Grep = make(map[string]cephalobjects.GrepFold)
	// The previous implementation also incremented a write-only
	// `cummulative` counter; it was never read, so it has been removed.
	for i, dp := range input.Basic {
		dmc.Matrix[i] = make([]float64, n)
		dmc.Variables[i] = dp.UID
		for j, dpi := range input.Basic {
			p := pair{x1: input.Basic[i].X, y1: input.Basic[i].Y, x2: input.Basic[j].X, y2: input.Basic[j].Y}
			dmc.Matrix[i][j] = p.distance(metric, input)
			dmc.Grep[dp.UID+" "+dpi.UID] = cephalobjects.GrepFold{Row: i, Col: j}
		}
	}
	input.Distance = dmc
}
// distance computes the distance between the pair's two points under the
// given metric. The Mahalanobis case additionally needs the whole
// DataStore to derive the covariance of the data set.
func (p pair) distance(metric cephalobjects.DistanceMetric, input *cephalobjects.DataStore) float64 {
	var distC float64
	switch metric {
	case cephalobjects.Euclidean:
		// sqrt of the sum of squared coordinate differences
		distC = math.Sqrt((p.x2-p.x1)*(p.x2-p.x1) + (p.y2-p.y1)*(p.y2-p.y1))
	case cephalobjects.SquaredEuclidean:
		distC = (p.x2-p.x1)*(p.x2-p.x1) + (p.y2-p.y1)*(p.y2-p.y1)
	case cephalobjects.Manhattan:
		// sum of absolute coordinate differences (L1)
		distC = math.Abs(p.x2-p.x1) + math.Abs(p.y2-p.y1)
	case cephalobjects.Maximum:
		// largest absolute coordinate difference (Chebyshev / L-infinity)
		distC = math.Max(math.Abs(p.x2-p.x1), math.Abs(p.y2-p.y1))
	case cephalobjects.Mahalanobis:
		distC = mahalanobis(p, input)
	}
	// Note: an unrecognized metric yields 0 (distC's zero value).
	return distC
}
// mahalanobis computes a Mahalanobis distance using the data set's
// inverse covariance matrix.
//
// NOTE(review): descriptors and the inverse covariance matrix are
// recomputed on every call, i.e. once per pair inside an O(n^2) loop —
// consider hoisting them to the caller.
// NOTE(review): only the pair's first point (p.x1, p.y1) is used, so
// this measures the distance of point 1 from the data mean rather than
// a distance between the two points — confirm this is intended.
func mahalanobis(p pair, input *cephalobjects.DataStore) float64 {
	desc := cephaloutils.CalculateDescriptors(input.Basic)
	invcovmat := cephaloutils.InverseMatrix(cephaloutils.CovarianceMatrix(desc))
	// Residual of point 1 from the data mean.
	p1res := []float64{(p.x1 - desc.MeanX), (p.y1 - desc.MeanY)}
	var p1ma cephalobjects.DataMatrix
	p1ma.Matrix = [][]float64{
		cephaloutils.DotProduct(invcovmat, p1res),
	}
	// sqrt(residual^T * inv(cov) * residual)
	mahalanobis := math.Sqrt(cephaloutils.DotProduct(p1ma, p1res)[0])
	return mahalanobis
}
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// UnifiedRoleAssignmentScheduleRequest models the Microsoft Graph
// unifiedRoleAssignmentScheduleRequest entity. This type appears to be
// machine-generated (Kiota/Microsoft Graph SDK) — avoid hand-editing.
type UnifiedRoleAssignmentScheduleRequest struct {
	Request
	// Represents the type of the operation on the role assignment. The possible values are: AdminAssign: For administrators to assign roles to users or groups.AdminRemove: For administrators to remove users or groups from roles. AdminUpdate: For administrators to change existing role assignments.AdminExtend: For administrators to extend expiring assignments.AdminRenew: For administrators to renew expired assignments.SelfActivate: For users to activate their assignments.SelfDeactivate: For users to deactivate their active assignments.SelfExtend: For users to request to extend their expiring assignments.SelfRenew: For users to request to renew their expired assignments.
	action *string
	// If the request is from an eligible administrator to activate a role, this parameter will show the related eligible assignment for that activation.
	activatedUsing UnifiedRoleEligibilityScheduleable
	// Read-only property with details of the app specific scope when the assignment scope is app specific. Containment entity.
	appScope AppScopeable
	// Identifier of the app-specific scope when the assignment scope is app-specific. The scope of an assignment determines the set of resources for which the principal has been granted access. App scopes are scopes that are defined and understood by this application only. Use / for tenant-wide app scopes. Use directoryScopeId to limit the scope to particular directory objects, for example, administrative units.
	appScopeId *string
	// Property referencing the directory object that is the scope of the assignment. Provided so that callers can get the directory object using $expand at the same time as getting the role assignment. Read-only.
	directoryScope DirectoryObjectable
	// Identifier of the directory object representing the scope of the assignment. The scope of an assignment determines the set of resources for which the principal has been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. Use / for tenant-wide scope. Use appScopeId to limit the scope to an application only.
	directoryScopeId *string
	// A boolean that determines whether the call is a validation or an actual call. Only set this property if you want to check whether an activation is subject to additional rules like MFA before actually submitting the request.
	isValidationOnly *bool
	// A message provided by users and administrators when create the request about why it is needed.
	justification *string
	// Property referencing the principal that is getting a role assignment through the request. Provided so that callers can get the principal using $expand at the same time as getting the role assignment. Read-only.
	principal DirectoryObjectable
	// Identifier of the principal to which the assignment is being granted to.
	principalId *string
	// Property indicating the roleDefinition the assignment is for. Provided so that callers can get the role definition using $expand at the same time as getting the role assignment. roleDefinition.Id will be auto expanded.
	roleDefinition UnifiedRoleDefinitionable
	// Identifier of the unifiedRoleDefinition the assignment is for. Read only.
	roleDefinitionId *string
	// The schedule object of the role assignment request.
	scheduleInfo RequestScheduleable
	// Property indicating the schedule for an eligible role assignment.
	targetSchedule UnifiedRoleAssignmentScheduleable
	// Identifier of the schedule object attached to the assignment.
	targetScheduleId *string
	// The ticketInfo object attached to the role assignment request which includes details of the ticket number and ticket system.
	ticketInfo TicketInfoable
}
// NewUnifiedRoleAssignmentScheduleRequest instantiates a new unifiedRoleAssignmentScheduleRequest and sets the default values.
func NewUnifiedRoleAssignmentScheduleRequest()(*UnifiedRoleAssignmentScheduleRequest) {
    // Embed a freshly initialised base Request so its defaults are applied.
    return &UnifiedRoleAssignmentScheduleRequest{
        Request: *NewRequest(),
    }
}
// CreateUnifiedRoleAssignmentScheduleRequestFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// The parse node is accepted for interface compatibility; no discriminator is inspected
// because this type has a single concrete implementation.
func CreateUnifiedRoleAssignmentScheduleRequestFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    _ = parseNode
    return NewUnifiedRoleAssignmentScheduleRequest(), nil
}
// GetAction gets the action property value: the type of operation performed on the role assignment.
func (m *UnifiedRoleAssignmentScheduleRequest) GetAction()(*string) {
    if m == nil {
        return nil
    }
    return m.action
}
// GetActivatedUsing gets the activatedUsing property value: the related eligible assignment when an eligible administrator activates a role.
func (m *UnifiedRoleAssignmentScheduleRequest) GetActivatedUsing()(UnifiedRoleEligibilityScheduleable) {
    if m == nil {
        return nil
    }
    return m.activatedUsing
}
// GetAppScope gets the appScope property value: details of the app-specific scope when the assignment scope is app-specific. Read-only.
func (m *UnifiedRoleAssignmentScheduleRequest) GetAppScope()(AppScopeable) {
    if m == nil {
        return nil
    }
    return m.appScope
}
// GetAppScopeId gets the appScopeId property value: identifier of the app-specific scope; use / for tenant-wide app scopes.
func (m *UnifiedRoleAssignmentScheduleRequest) GetAppScopeId()(*string) {
    if m == nil {
        return nil
    }
    return m.appScopeId
}
// GetDirectoryScope gets the directoryScope property value: the directory object that is the scope of the assignment. Read-only.
func (m *UnifiedRoleAssignmentScheduleRequest) GetDirectoryScope()(DirectoryObjectable) {
    if m == nil {
        return nil
    }
    return m.directoryScope
}
// GetDirectoryScopeId gets the directoryScopeId property value: identifier of the directory object representing the assignment scope; use / for tenant-wide scope.
func (m *UnifiedRoleAssignmentScheduleRequest) GetDirectoryScopeId()(*string) {
    if m == nil {
        return nil
    }
    return m.directoryScopeId
}
// GetFieldDeserializers returns the deserialization information for the current model:
// a map from JSON field name to a closure that reads the value from a parse node
// and stores it on the receiver. Base Request fields are inherited and extended here.
func (m *UnifiedRoleAssignmentScheduleRequest) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := m.Request.GetFieldDeserializers()
    res["action"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err == nil && val != nil {
            m.SetAction(val)
        }
        return err
    }
    res["activatedUsing"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateUnifiedRoleEligibilityScheduleFromDiscriminatorValue)
        if err == nil && val != nil {
            m.SetActivatedUsing(val.(UnifiedRoleEligibilityScheduleable))
        }
        return err
    }
    res["appScope"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateAppScopeFromDiscriminatorValue)
        if err == nil && val != nil {
            m.SetAppScope(val.(AppScopeable))
        }
        return err
    }
    res["appScopeId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err == nil && val != nil {
            m.SetAppScopeId(val)
        }
        return err
    }
    res["directoryScope"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateDirectoryObjectFromDiscriminatorValue)
        if err == nil && val != nil {
            m.SetDirectoryScope(val.(DirectoryObjectable))
        }
        return err
    }
    res["directoryScopeId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err == nil && val != nil {
            m.SetDirectoryScopeId(val)
        }
        return err
    }
    res["isValidationOnly"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err == nil && val != nil {
            m.SetIsValidationOnly(val)
        }
        return err
    }
    res["justification"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err == nil && val != nil {
            m.SetJustification(val)
        }
        return err
    }
    res["principal"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateDirectoryObjectFromDiscriminatorValue)
        if err == nil && val != nil {
            m.SetPrincipal(val.(DirectoryObjectable))
        }
        return err
    }
    res["principalId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err == nil && val != nil {
            m.SetPrincipalId(val)
        }
        return err
    }
    res["roleDefinition"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateUnifiedRoleDefinitionFromDiscriminatorValue)
        if err == nil && val != nil {
            m.SetRoleDefinition(val.(UnifiedRoleDefinitionable))
        }
        return err
    }
    res["roleDefinitionId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err == nil && val != nil {
            m.SetRoleDefinitionId(val)
        }
        return err
    }
    res["scheduleInfo"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateRequestScheduleFromDiscriminatorValue)
        if err == nil && val != nil {
            m.SetScheduleInfo(val.(RequestScheduleable))
        }
        return err
    }
    res["targetSchedule"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateUnifiedRoleAssignmentScheduleFromDiscriminatorValue)
        if err == nil && val != nil {
            m.SetTargetSchedule(val.(UnifiedRoleAssignmentScheduleable))
        }
        return err
    }
    res["targetScheduleId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err == nil && val != nil {
            m.SetTargetScheduleId(val)
        }
        return err
    }
    res["ticketInfo"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateTicketInfoFromDiscriminatorValue)
        if err == nil && val != nil {
            m.SetTicketInfo(val.(TicketInfoable))
        }
        return err
    }
    return res
}
// GetIsValidationOnly gets the isValidationOnly property value: whether the call is a validation-only (dry-run) call.
func (m *UnifiedRoleAssignmentScheduleRequest) GetIsValidationOnly()(*bool) {
    if m == nil {
        return nil
    }
    return m.isValidationOnly
}
// GetJustification gets the justification property value: the message explaining why the request is needed.
func (m *UnifiedRoleAssignmentScheduleRequest) GetJustification()(*string) {
    if m == nil {
        return nil
    }
    return m.justification
}
// GetPrincipal gets the principal property value: the principal receiving the role assignment. Read-only.
func (m *UnifiedRoleAssignmentScheduleRequest) GetPrincipal()(DirectoryObjectable) {
    if m == nil {
        return nil
    }
    return m.principal
}
// GetPrincipalId gets the principalId property value: identifier of the principal the assignment is granted to.
func (m *UnifiedRoleAssignmentScheduleRequest) GetPrincipalId()(*string) {
    if m == nil {
        return nil
    }
    return m.principalId
}
// GetRoleDefinition gets the roleDefinition property value: the roleDefinition the assignment is for.
func (m *UnifiedRoleAssignmentScheduleRequest) GetRoleDefinition()(UnifiedRoleDefinitionable) {
    if m == nil {
        return nil
    }
    return m.roleDefinition
}
// GetRoleDefinitionId gets the roleDefinitionId property value: identifier of the unifiedRoleDefinition. Read only.
func (m *UnifiedRoleAssignmentScheduleRequest) GetRoleDefinitionId()(*string) {
    if m == nil {
        return nil
    }
    return m.roleDefinitionId
}
// GetScheduleInfo gets the scheduleInfo property value: the schedule object of the role assignment request.
func (m *UnifiedRoleAssignmentScheduleRequest) GetScheduleInfo()(RequestScheduleable) {
    if m == nil {
        return nil
    }
    return m.scheduleInfo
}
// GetTargetSchedule gets the targetSchedule property value: the schedule for an eligible role assignment.
func (m *UnifiedRoleAssignmentScheduleRequest) GetTargetSchedule()(UnifiedRoleAssignmentScheduleable) {
    if m == nil {
        return nil
    }
    return m.targetSchedule
}
// GetTargetScheduleId gets the targetScheduleId property value: identifier of the schedule object attached to the assignment.
func (m *UnifiedRoleAssignmentScheduleRequest) GetTargetScheduleId()(*string) {
    if m == nil {
        return nil
    }
    return m.targetScheduleId
}
// GetTicketInfo gets the ticketInfo property value: ticket number and ticket system details attached to the request.
func (m *UnifiedRoleAssignmentScheduleRequest) GetTicketInfo()(TicketInfoable) {
    if m == nil {
        return nil
    }
    return m.ticketInfo
}
// Serialize writes the current object to the given serialization writer:
// first the embedded base Request properties, then each property of this type.
// The first write error aborts serialization and is returned to the caller.
func (m *UnifiedRoleAssignmentScheduleRequest) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    if err := m.Request.Serialize(writer); err != nil {
        return err
    }
    if err := writer.WriteStringValue("action", m.GetAction()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("activatedUsing", m.GetActivatedUsing()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("appScope", m.GetAppScope()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("appScopeId", m.GetAppScopeId()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("directoryScope", m.GetDirectoryScope()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("directoryScopeId", m.GetDirectoryScopeId()); err != nil {
        return err
    }
    if err := writer.WriteBoolValue("isValidationOnly", m.GetIsValidationOnly()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("justification", m.GetJustification()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("principal", m.GetPrincipal()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("principalId", m.GetPrincipalId()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("roleDefinition", m.GetRoleDefinition()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("roleDefinitionId", m.GetRoleDefinitionId()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("scheduleInfo", m.GetScheduleInfo()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("targetSchedule", m.GetTargetSchedule()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("targetScheduleId", m.GetTargetScheduleId()); err != nil {
        return err
    }
    if err := writer.WriteObjectValue("ticketInfo", m.GetTicketInfo()); err != nil {
        return err
    }
    return nil
}
// SetAction sets the action property value. Represents the type of the operation on the role assignment. The possible values are: AdminAssign: For administrators to assign roles to users or groups.AdminRemove: For administrators to remove users or groups from roles. AdminUpdate: For administrators to change existing role assignments.AdminExtend: For administrators to extend expiring assignments.AdminRenew: For administrators to renew expired assignments.SelfActivate: For users to activate their assignments.SelfDeactivate: For users to deactivate their active assignments.SelfExtend: For users to request to extend their expiring assignments.SelfRenew: For users to request to renew their expired assignments.
func (m *UnifiedRoleAssignmentScheduleRequest) SetAction(value *string)() {
if m != nil {
m.action = value
}
}
// SetActivatedUsing sets the activatedUsing property value. If the request is from an eligible administrator to activate a role, this parameter will show the related eligible assignment for that activation.
func (m *UnifiedRoleAssignmentScheduleRequest) SetActivatedUsing(value UnifiedRoleEligibilityScheduleable)() {
if m != nil {
m.activatedUsing = value
}
}
// SetAppScope sets the appScope property value. Read-only property with details of the app specific scope when the assignment scope is app specific. Containment entity.
func (m *UnifiedRoleAssignmentScheduleRequest) SetAppScope(value AppScopeable)() {
if m != nil {
m.appScope = value
}
}
// SetAppScopeId sets the appScopeId property value. Identifier of the app-specific scope when the assignment scope is app-specific. The scope of an assignment determines the set of resources for which the principal has been granted access. App scopes are scopes that are defined and understood by this application only. Use / for tenant-wide app scopes. Use directoryScopeId to limit the scope to particular directory objects, for example, administrative units.
func (m *UnifiedRoleAssignmentScheduleRequest) SetAppScopeId(value *string)() {
if m != nil {
m.appScopeId = value
}
}
// SetDirectoryScope sets the directoryScope property value. Property referencing the directory object that is the scope of the assignment. Provided so that callers can get the directory object using $expand at the same time as getting the role assignment. Read-only.
func (m *UnifiedRoleAssignmentScheduleRequest) SetDirectoryScope(value DirectoryObjectable)() {
if m != nil {
m.directoryScope = value
}
}
// SetDirectoryScopeId sets the directoryScopeId property value. Identifier of the directory object representing the scope of the assignment. The scope of an assignment determines the set of resources for which the principal has been granted access. Directory scopes are shared scopes stored in the directory that are understood by multiple applications. Use / for tenant-wide scope. Use appScopeId to limit the scope to an application only.
func (m *UnifiedRoleAssignmentScheduleRequest) SetDirectoryScopeId(value *string)() {
if m != nil {
m.directoryScopeId = value
}
}
// SetIsValidationOnly sets the isValidationOnly property value. A boolean that determines whether the call is a validation or an actual call. Only set this property if you want to check whether an activation is subject to additional rules like MFA before actually submitting the request.
func (m *UnifiedRoleAssignmentScheduleRequest) SetIsValidationOnly(value *bool)() {
if m != nil {
m.isValidationOnly = value
}
}
// SetJustification sets the justification property value. A message provided by users and administrators when create the request about why it is needed.
func (m *UnifiedRoleAssignmentScheduleRequest) SetJustification(value *string)() {
if m != nil {
m.justification = value
}
}
// SetPrincipal sets the principal property value. Property referencing the principal that is getting a role assignment through the request. Provided so that callers can get the principal using $expand at the same time as getting the role assignment. Read-only.
func (m *UnifiedRoleAssignmentScheduleRequest) SetPrincipal(value DirectoryObjectable)() {
if m != nil {
m.principal = value
}
}
// SetPrincipalId sets the principalId property value. Identifier of the principal to which the assignment is being granted to.
func (m *UnifiedRoleAssignmentScheduleRequest) SetPrincipalId(value *string)() {
if m != nil {
m.principalId = value
}
}
// SetRoleDefinition sets the roleDefinition property value. Property indicating the roleDefinition the assignment is for. Provided so that callers can get the role definition using $expand at the same time as getting the role assignment. roleDefinition.Id will be auto expanded.
func (m *UnifiedRoleAssignmentScheduleRequest) SetRoleDefinition(value UnifiedRoleDefinitionable)() {
if m != nil {
m.roleDefinition = value
}
}
// SetRoleDefinitionId sets the roleDefinitionId property value. Identifier of the unifiedRoleDefinition the assignment is for. Read only.
func (m *UnifiedRoleAssignmentScheduleRequest) SetRoleDefinitionId(value *string)() {
if m != nil {
m.roleDefinitionId = value
}
}
// SetScheduleInfo sets the scheduleInfo property value. The schedule object of the role assignment request.
func (m *UnifiedRoleAssignmentScheduleRequest) SetScheduleInfo(value RequestScheduleable)() {
if m != nil {
m.scheduleInfo = value
}
}
// SetTargetSchedule sets the targetSchedule property value. Property indicating the schedule for an eligible role assignment.
func (m *UnifiedRoleAssignmentScheduleRequest) SetTargetSchedule(value UnifiedRoleAssignmentScheduleable)() {
if m != nil {
m.targetSchedule = value
}
}
// SetTargetScheduleId sets the targetScheduleId property value. Identifier of the schedule object attached to the assignment.
func (m *UnifiedRoleAssignmentScheduleRequest) SetTargetScheduleId(value *string)() {
if m != nil {
m.targetScheduleId = value
}
}
// SetTicketInfo sets the ticketInfo property value. The ticketInfo object attached to the role assignment request which includes details of the ticket number and ticket system.
func (m *UnifiedRoleAssignmentScheduleRequest) SetTicketInfo(value TicketInfoable)() {
if m != nil {
m.ticketInfo = value
}
} | models/unified_role_assignment_schedule_request.go | 0.749179 | 0.444565 | unified_role_assignment_schedule_request.go | starcoder |
package packed
// Efficient sequential read/write of packed integers.
// BulkOperationPacked12 specialises the generic packed bulk operation for
// values stored with 12 bits each (12-bit values fit in the 0..4095 range).
type BulkOperationPacked12 struct {
	*BulkOperationPacked
}
// newBulkOperationPacked12 builds a BulkOperation specialised for 12 bits per value.
func newBulkOperationPacked12() BulkOperation {
	op := &BulkOperationPacked12{newBulkOperationPacked(12)}
	return op
}
// decodeLongToInt unpacks 12-bit values from 64-bit blocks into int32 values.
// Each iteration consumes 3 int64 blocks (192 bits) and emits 16 values,
// reading from the most significant bits of each block downward; values that
// straddle a block boundary are stitched together from both neighbours.
func (op *BulkOperationPacked12) decodeLongToInt(blocks []int64, values []int32, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		block0 := blocks[blocksOffset]
		blocksOffset++
		// five complete values in block0 (bits 63..4); 4095 == 0xFFF masks 12 bits
		values[valuesOffset] = int32(int64(uint64(block0) >> 52))
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0)>>40) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0)>>28) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0)>>16) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block0)>>4) & 4095)
		valuesOffset++
		block1 := blocks[blocksOffset]
		blocksOffset++
		// value split across blocks: low 4 bits of block0, high 8 bits of block1
		values[valuesOffset] = int32(((block0 & 15) << 8) | (int64(uint64(block1) >> 56)))
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1)>>44) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1)>>32) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1)>>20) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block1)>>8) & 4095)
		valuesOffset++
		block2 := blocks[blocksOffset]
		blocksOffset++
		// value split across blocks: low 8 bits of block1, high 4 bits of block2
		values[valuesOffset] = int32(((block1 & 255) << 4) | (int64(uint64(block2) >> 60)))
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2)>>48) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2)>>36) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2)>>24) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(int64(uint64(block2)>>12) & 4095)
		valuesOffset++
		values[valuesOffset] = int32(block2 & 4095)
		valuesOffset++
	}
}
// DecodeByteToInt unpacks 12-bit values from a byte stream into int32 values.
// Each iteration consumes 3 bytes (24 bits) and emits 2 values.
func (op *BulkOperationPacked12) DecodeByteToInt(blocks []byte, values []int32, iterations int) {
	bi, vi := 0, 0
	for i := 0; i < iterations; i++ {
		b0 := int64(blocks[bi])
		b1 := int64(blocks[bi+1])
		b2 := int64(blocks[bi+2])
		bi += 3
		// first value: all 8 bits of b0 plus the high nibble of b1
		values[vi] = int32((b0 << 4) | (b1 >> 4))
		// second value: low nibble of b1 plus all 8 bits of b2
		values[vi+1] = int32(((b1 & 15) << 8) | b2)
		vi += 2
	}
}
// DecodeLongToLong unpacks 12-bit values from 64-bit blocks into int64 values.
// Same bit layout as decodeLongToInt: 3 blocks (192 bits) per iteration yield
// 16 values, with two values straddling the block boundaries.
func (op *BulkOperationPacked12) DecodeLongToLong(blocks []int64, values []int64, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		block0 := blocks[blocksOffset]
		blocksOffset++
		// five complete values in block0 (bits 63..4); 4095 == 0xFFF masks 12 bits
		values[valuesOffset] = int64(uint64(block0) >> 52)
		valuesOffset++
		values[valuesOffset] = int64(uint64(block0)>>40) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block0)>>28) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block0)>>16) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block0)>>4) & 4095
		valuesOffset++
		block1 := blocks[blocksOffset]
		blocksOffset++
		// value split across blocks: low 4 bits of block0, high 8 bits of block1
		values[valuesOffset] = ((block0 & 15) << 8) | (int64(uint64(block1) >> 56))
		valuesOffset++
		values[valuesOffset] = int64(uint64(block1)>>44) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block1)>>32) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block1)>>20) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block1)>>8) & 4095
		valuesOffset++
		block2 := blocks[blocksOffset]
		blocksOffset++
		// value split across blocks: low 8 bits of block1, high 4 bits of block2
		values[valuesOffset] = ((block1 & 255) << 4) | (int64(uint64(block2) >> 60))
		valuesOffset++
		values[valuesOffset] = int64(uint64(block2)>>48) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block2)>>36) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block2)>>24) & 4095
		valuesOffset++
		values[valuesOffset] = int64(uint64(block2)>>12) & 4095
		valuesOffset++
		values[valuesOffset] = block2 & 4095
		valuesOffset++
	}
}
func (op *BulkOperationPacked12) decodeByteToLong(blocks []byte, values []int64, iterations int) {
blocksOffset, valuesOffset := 0, 0
for i := 0; i < iterations; i++ {
byte0 := blocks[blocksOffset]
blocksOffset++
byte1 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte0) << 4) | int64(uint8(byte1)>>4))
valuesOffset++
byte2 := blocks[blocksOffset]
blocksOffset++
values[valuesOffset] = int64((int64(byte1&15) << 8) | int64(byte2))
valuesOffset++
}
} | core/util/packed/bulkOperation12.go | 0.617397 | 0.671124 | bulkOperation12.go | starcoder |
package gfx
import (
"fmt"
"unsafe"
"github.com/go-gl/gl/v4.1-core/gl"
)
// Mesh is a collection of vertices, that
// define the shape of a 3D object.
// It owns the OpenGL buffer objects created by createVAO and keeps the
// CPU-side vertex/index slices only as a record of what was uploaded.
type Mesh struct {
	// VAO is the vertex array object that stores the vertex data
	VAO uint32
	// VBO is the vertex buffer object used as a source for vertex array data
	VBO uint32
	// EBO is the element buffer object used to store the indices for the vertex data
	EBO uint32
	// Vertices are the actual points - used only for the initialization
	Vertices []float32
	// Indices are the actual indices for the vertices - used only for the initialization
	Indices []uint32
	// Textures are the textures that the Mesh references in the fragment shader
	Textures []*Texture
}
// NewMesh uploads the given vertex and index data to the GPU and wraps the
// resulting buffer objects together with the referenced textures.
// Must be called with a current OpenGL context.
func NewMesh(vertices []float32, indices []uint32, textures []*Texture) *Mesh {
	vao, vbo, ebo := createVAO(vertices, indices)
	return &Mesh{
		VAO:      vao,
		VBO:      vbo,
		EBO:      ebo,
		Vertices: vertices,
		Indices:  indices,
		Textures: textures,
	}
}
// Draw binds all referenced textures and vertex array objects
// and draws triangles.
// Texture i is bound to texture unit GL_TEXTURE0+i and exposed to the shader
// through a uniform named "texture<i>"; everything is unbound again afterwards.
func (m *Mesh) Draw(program *Program) {
	for i, tex := range m.Textures {
		tex.Bind(uint32(gl.TEXTURE0 + i))
		// the fragment shader is expected to declare sampler uniforms texture0, texture1, ...
		location := program.GetUniformLocation(fmt.Sprintf("texture%d", i))
		gl.Uniform1i(location, int32(i))
	}
	gl.BindVertexArray(m.VAO)
	// indexed draw; the EBO bound inside the VAO supplies the indices
	gl.DrawElements(gl.TRIANGLES, int32(len(m.Indices)), gl.UNSIGNED_INT, unsafe.Pointer(nil))
	gl.BindVertexArray(0)
	for _, tex := range m.Textures {
		tex.Unbind()
	}
}
// createVAO creates and configures a vertex array object for the given
// interleaved vertex data (3 floats + 3 floats + 2 floats per vertex, 32 bytes)
// and index data, returning the VAO, VBO and EBO handles.
func createVAO(vertices []float32, indices []uint32) (uint32, uint32, uint32) {
	var VAO, VBO, EBO uint32
	gl.GenVertexArrays(1, &VAO)
	gl.GenBuffers(1, &VBO)
	gl.GenBuffers(1, &EBO)

	gl.BindVertexArray(VAO)

	// vertices
	gl.BindBuffer(gl.ARRAY_BUFFER, VBO)
	gl.BufferData(gl.ARRAY_BUFFER, len(vertices)*4, gl.Ptr(vertices), gl.STATIC_DRAW)

	// indices
	gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, EBO)
	gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(indices)*4, gl.Ptr(indices), gl.STATIC_DRAW)

	// stride = sum of attributes (3+3+2 floats, 4 bytes each)
	var stride int32 = 3*4 + 3*4 + 2*4
	var offset int

	// attribute 0: position (3 floats)
	gl.VertexAttribPointer(0, 3, gl.FLOAT, false, stride, gl.PtrOffset(offset))
	gl.EnableVertexAttribArray(0)
	offset += 3 * 4

	// attribute 1: second 3-float attribute — presumably normals; the original
	// comment said "position" again, which looks like a copy-paste slip. TODO confirm
	gl.VertexAttribPointer(1, 3, gl.FLOAT, false, stride, gl.PtrOffset(offset))
	gl.EnableVertexAttribArray(1)
	offset += 3 * 4

	// attribute 2: texture coordinates (2 floats)
	gl.VertexAttribPointer(2, 2, gl.FLOAT, false, stride, gl.PtrOffset(offset))
	gl.EnableVertexAttribArray(2)
	offset += 2 * 4

	gl.BindVertexArray(0)

	return VAO, VBO, EBO
}
package indicators
import (
"github.com/jaybutera/gotrade"
)
// A Linear Regression Intercept Indicator (LinRegInt).
// For each bar it records the intercept of the linear regression computed
// over the configured time period.
type LinRegInt struct {
	*LinRegWithoutStorage
	// selectData picks which component of each DOHLCV bar feeds the regression
	selectData gotrade.DOHLCVDataSelectionFunc

	// public variables
	// Data holds the computed intercept for each processed bar
	Data []float64
}
// NewLinRegInt creates a Linear Regression Intercept Indicator (LinRegInt) for online usage.
// Returns ErrDOHLCVDataSelectFuncIsNil when no data-selection function is supplied.
func NewLinRegInt(timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *LinRegInt, err error) {
	if selectData == nil {
		return nil, ErrDOHLCVDataSelectFuncIsNil
	}

	ind := LinRegInt{selectData: selectData}

	// the intercept of each regression window is the indicator value itself
	ind.LinRegWithoutStorage, err = NewLinRegWithoutStorage(timePeriod,
		func(dataItem float64, slope float64, intercept float64, streamBarIndex int) {
			ind.UpdateMinMax(intercept, intercept)
			ind.Data = append(ind.Data, intercept)
		})

	return &ind, err
}
// NewDefaultLinRegInt creates a Linear Regression Intercept Indicator (LinRegInt) for online usage with default parameters
// - timePeriod: 14
func NewDefaultLinRegInt() (indicator *LinRegInt, err error) {
	const defaultTimePeriod = 14
	return NewLinRegInt(defaultTimePeriod, gotrade.UseClosePrice)
}
// NewLinRegIntWithSrcLen creates a Linear Regression Intercept Indicator (LinRegInt) for offline usage.
// sourceLength is the number of source bars; it is used to pre-size the result storage.
func NewLinRegIntWithSrcLen(sourceLength uint, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *LinRegInt, err error) {
	ind, err := NewLinRegInt(timePeriod, selectData)
	if err != nil {
		// ind may be nil on error; bail out before dereferencing it
		return nil, err
	}
	// only initialise the storage if there is enough source data to require it.
	// Comparing before subtracting avoids unsigned underflow (and a huge
	// allocation) when sourceLength <= lookback.
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
	return ind, nil
}
// NewDefaultLinRegIntWithSrcLen creates a Linear Regression Intercept Indicator (LinRegInt) for offline usage with default parameters.
// sourceLength is the number of source bars; it is used to pre-size the result storage.
func NewDefaultLinRegIntWithSrcLen(sourceLength uint) (indicator *LinRegInt, err error) {
	ind, err := NewDefaultLinRegInt()
	if err != nil {
		// ind may be nil on error; bail out before dereferencing it
		return nil, err
	}
	// only initialise the storage if there is enough source data to require it.
	// Comparing before subtracting avoids unsigned underflow (and a huge
	// allocation) when sourceLength <= lookback.
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
	return ind, nil
}
// NewLinRegIntForStream creates a Linear Regression Intercept Indicator (LinRegInt) for online usage with a source data stream.
func NewLinRegIntForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *LinRegInt, err error) {
	ind, err := NewLinRegInt(timePeriod, selectData)
	if err != nil {
		// do not subscribe a nil indicator to the stream
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// NewDefaultLinRegIntForStream creates a Linear Regression Intercept Indicator (LinRegInt) for online usage with a source data stream.
func NewDefaultLinRegIntForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *LinRegInt, err error) {
	ind, err := NewDefaultLinRegInt()
	if err != nil {
		// do not subscribe a nil indicator to the stream
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// NewLinRegIntForStreamWithSrcLen creates a Linear Regression Intercept Indicator (LinRegInt) for offline usage with a source data stream.
func NewLinRegIntForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *LinRegInt, err error) {
	ind, err := NewLinRegIntWithSrcLen(sourceLength, timePeriod, selectData)
	if err != nil {
		// do not subscribe a nil indicator to the stream
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// NewDefaultLinRegIntForStreamWithSrcLen creates a Linear Regression Intercept Indicator (LinRegInt) for offline usage with a source data stream.
func NewDefaultLinRegIntForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *LinRegInt, err error) {
	ind, err := NewDefaultLinRegIntWithSrcLen(sourceLength)
	if err != nil {
		// do not subscribe a nil indicator to the stream
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// ReceiveDOHLCVTick consumes a source data DOHLCV price tick
func (ind *LinRegInt) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {
var selectedData = ind.selectData(tickData)
ind.ReceiveTick(selectedData, streamBarIndex)
} | indicators/linregint.go | 0.704567 | 0.611237 | linregint.go | starcoder |
package src
/** Computes the determinant of a matrix
* Matrix must be square.
*/
func det(m *Matrix) (float64, string) {
if (m.N != m.M) {
return 0.0, "ERROR: Matrix must be square";
}
reduced := copyMatrix(m);
det := findDet(reduced, 1);
return det, "";
}
/** Method:
* det of identity matrix == 1
* Scaling a row by x scales the determinant by x
* Swapping a row scales the determinant by -1
* Adding a scaled row to another row doesn't change the determinant
This function is a modified version of rref.go -- it reduces m while keeping track of what row operations have been performed.
*/
func findDet(m *Matrix, currDet float64) float64 {
for i := 0; i < len(m.rows); i++ {
factor := 0.0;
m.rows[i], factor = invertRow(m.rows[i]);
row := m.rows[i];
currDet *= 1.0/factor;
pivotIndex := getPivotIndex(row);
if pivotIndex == -1 {
continue;
}
for j:= 0; j < len(m.rows); j++ {
if (i == j || m.rows[j][pivotIndex] == 0) {
continue;
}
scaleFactor := m.rows[j][pivotIndex] * -1.0;
newRow := addRows(scaleRow(m.rows[i], scaleFactor), m.rows[j]);
m.rows[j] = newRow;
}
}
currDet = swapRowsDet(m, currDet);
// Multiply entries along the diagonal
for i := 0; i < len(m.rows); i++ {
currDet *= m.rows[i][i];
}
return currDet;
}
func swapRowsDet(m *Matrix, currDet float64) float64 {
for i := 0; i < len(m.rows); i++ {
pivotIndex := getPivotIndex(m.rows[i]);
for j := i; j < len(m.rows); j++ {
if (i == j) {
continue;
}
otherPivotIndex := getPivotIndex(m.rows[j]);
if (otherPivotIndex == -1) {
continue;
}
if ((pivotIndex > otherPivotIndex) || (pivotIndex == -1)) {
swap(m, i, j);
i = 0;
currDet *= -1;
}
}
}
return currDet;
} | src/det.go | 0.869382 | 0.562717 | det.go | starcoder |
package tado
import (
"fmt"
"net/http"
"time"
)
// DayReport contains the daily report info
type DayReport struct {
ZoneType string `json:"zoneType"`
Interval struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
} `json:"interval"`
HoursInDay int `json:"hoursInDay"`
MeasuredData struct {
MeasuringDeviceConnected struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
DataIntervals []struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
Value bool `json:"value"`
} `json:"dataIntervals"`
} `json:"measuringDeviceConnected"`
InsideTemperature struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
Min struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"min"`
Max struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"max"`
DataPoints []struct {
Timestamp time.Time `json:"timestamp"`
Value struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"value"`
} `json:"dataPoints"`
} `json:"insideTemperature"`
Humidity struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
PercentageUnit string `json:"percentageUnit"`
Min float64 `json:"min"`
Max float64 `json:"max"`
DataPoints []struct {
Timestamp time.Time `json:"timestamp"`
Value float64 `json:"value"`
} `json:"dataPoints"`
} `json:"humidity"`
} `json:"measuredData"`
Stripes struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
DataIntervals []struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
Value struct {
StripeType string `json:"stripeType"`
Setting struct {
Type string `json:"type"`
Power string `json:"power"`
Temperature struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"temperature"`
} `json:"setting"`
} `json:"value"`
} `json:"dataIntervals"`
} `json:"stripes"`
Settings struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
DataIntervals []struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
Value struct {
Type string `json:"type"`
Power string `json:"power"`
Temperature struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"temperature"`
} `json:"value"`
} `json:"dataIntervals"`
} `json:"settings"`
CallForHeat struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
DataIntervals []struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
Value string `json:"value"`
} `json:"dataIntervals"`
} `json:"callForHeat"`
Weather struct {
Condition struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
DataIntervals []struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
Value struct {
State string `json:"state"`
Temperature struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"temperature"`
} `json:"value"`
} `json:"dataIntervals"`
} `json:"condition"`
Sunny struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
DataIntervals []struct {
From time.Time `json:"from"`
To time.Time `json:"to"`
Value bool `json:"value"`
} `json:"dataIntervals"`
} `json:"sunny"`
Slots struct {
TimeSeriesType string `json:"timeSeriesType"`
ValueType string `json:"valueType"`
Slots struct {
Zero400 struct {
State string `json:"state"`
Temperature struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"temperature"`
} `json:"04:00"`
Zero800 struct {
State string `json:"state"`
Temperature struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"temperature"`
} `json:"08:00"`
One200 struct {
State string `json:"state"`
Temperature struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"temperature"`
} `json:"12:00"`
One600 struct {
State string `json:"state"`
Temperature struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"temperature"`
} `json:"16:00"`
Two000 struct {
State string `json:"state"`
Temperature struct {
Celsius float64 `json:"celsius"`
Fahrenheit float64 `json:"fahrenheit"`
} `json:"temperature"`
} `json:"20:00"`
} `json:"slots"`
} `json:"slots"`
} `json:"weather"`
}
// GetDayReportInput is the input for GetDayReport
type GetDayReportInput struct {
HomeID int
ZoneID int
Date time.Time
}
func (gdri *GetDayReportInput) method() string {
return http.MethodGet
}
func (gdri *GetDayReportInput) path() string {
return fmt.Sprintf("/v2/homes/%d/zones/%d/dayReport?date=%s", gdri.HomeID, gdri.ZoneID, gdri.Date.Format("2006-01-02"))
}
func (gdri *GetDayReportInput) body() interface{} {
return nil
}
// GetDayReportOutput is the output for GetDayReport
type GetDayReportOutput struct {
DayReport
} | models_dayreport.go | 0.690142 | 0.416915 | models_dayreport.go | starcoder |
package filter
import (
"errors"
"image"
"math"
"github.com/jangler/imp/util"
)
var joinHelp = `join <file> <edge> [<align>]
Adjoin another image to an edge of the working image. Possible values for 'edge'
are top, bottom, left, and right. The 'align' argument is used to control which
edge of the resulting image the adjoined images are flush with. Possible values
for 'align' are top, bottom, left, right, and center. The default align is top
when 'edge' is left or right, or left when 'edge' is top or bottom.`
func joinFunc(img *image.RGBA, args []string) (*image.RGBA, []string) {
if len(args) < 2 {
util.Die(errors.New(joinHelp))
}
joinImg := util.ReadImage(args[0])
edge := args[1]
if edge != "top" && edge != "bottom" && edge != "left" && edge != "right" {
util.Die(errors.New(joinHelp))
}
var gravity string
if edge == "top" || edge == "bottom" {
gravity = "left"
} else {
gravity = "top"
}
if len(args) >= 3 {
if args[2] != "top" && args[2] != "bottom" && args[2] != "left" &&
args[2] != "right" && args[2] != "center" {
args = args[2:]
} else {
gravity = args[2]
args = args[3:]
}
} else {
args = args[2:]
}
b1, b2 := img.Bounds(), joinImg.Bounds()
var width, height int
if edge == "top" || edge == "bottom" {
width = int(math.Max(float64(b1.Dx()), float64(b2.Dx())))
height = b1.Dy() + b2.Dy()
} else {
width = b1.Dx() + b2.Dx()
height = int(math.Max(float64(b1.Dy()), float64(b2.Dy())))
}
newImg := image.NewRGBA(image.Rect(0, 0, width, height))
b3 := newImg.Bounds()
var xOffset1, yOffset1, xOffset2, yOffset2 int
switch edge {
case "top", "bottom":
switch gravity {
case "left":
xOffset1 = 0
xOffset2 = 0
case "right":
xOffset1 = width - b1.Dx()
xOffset2 = width - b2.Dx()
default:
xOffset1 = (width - b1.Dx()) / 2
xOffset2 = (width - b2.Dx()) / 2
}
case "left", "right":
switch gravity {
case "top":
yOffset1 = 0
yOffset2 = 0
case "bottom":
yOffset1 = height - b1.Dy()
yOffset2 = height - b2.Dy()
default:
yOffset1 = (height - b1.Dy()) / 2
yOffset2 = (height - b2.Dy()) / 2
}
}
switch edge {
case "top":
yOffset1, yOffset2 = b2.Dy(), 0
case "bottom":
yOffset1, yOffset2 = 0, b1.Dy()
case "left":
xOffset1, xOffset2 = b2.Dx(), 0
case "right":
xOffset1, xOffset2 = 0, b1.Dx()
}
for y := 0; y < b1.Dy(); y++ {
for x := 0; x < b1.Dx(); x++ {
newImg.Set(x+xOffset1+b3.Min.X, y+yOffset1+b3.Min.Y,
img.At(x+b1.Min.X, y+b1.Min.Y))
}
}
for y := 0; y < b2.Dy(); y++ {
for x := 0; x < b2.Dx(); x++ {
newImg.Set(x+xOffset2+b3.Min.X, y+yOffset2+b3.Min.Y,
joinImg.At(x+b2.Min.X, y+b2.Min.Y))
}
}
return newImg, args
}
func init() {
addFilter(&Filter{"join", joinHelp, joinFunc})
} | filter/join.go | 0.608943 | 0.494934 | join.go | starcoder |
package main
import (
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"time"
lib "devstats"
yaml "gopkg.in/yaml.v2"
)
// gaps contain list of metrics to fill gaps
type gaps struct {
Metrics []metricGap `yaml:"metrics"`
}
// metricGap conain list of series names and periods to fill gaps
// Series formula allows writing a lot of series name in a shorter way
// Say we have series in this form prefix_{x}_{y}_{z}_suffix
// and {x} can be a,b,c,d, {y} can be 1,2,3, z can be yes,no
// Instead of listing all combinations prefix_a_1_yes_suffix, ..., prefix_d_3_no_suffix
// Which is 4 * 3 * 2 = 24 items, You can write series formula:
// "=prefix;suffix;_;a,b,c,d;1,2,3;yes,no"
// format is "=prefix;suffix;join;list1item1,list1item2,...;list2item1,list2item2,...;..."
// Values can be set the same way as Series, it is the array of series properties to clear
// If not specified, ["value"] is assumed - it is used for multi-value series
type metricGap struct {
Name string `yaml:"name"`
Series []string `yaml:"series"`
Periods string `yaml:"periods"`
Aggregate string `yaml:"aggregate"`
Skip string `yaml:"skip"`
Desc bool `yaml:"desc"`
Values []string `yaml:"values"`
}
// metrics contain list of metrics to evaluate
type metrics struct {
Metrics []metric `yaml:"metrics"`
}
// metric contain each metric data
type metric struct {
Name string `yaml:"name"`
Periods string `yaml:"periods"`
SeriesNameOrFunc string `yaml:"series_name_or_func"`
MetricSQL string `yaml:"sql"`
AddPeriodToName bool `yaml:"add_period_to_name"`
Histogram bool `yaml:"histogram"`
Aggregate string `yaml:"aggregate"`
Skip string `yaml:"skip"`
Desc string `yaml:"desc"`
MultiValue bool `yaml:"multi_value"`
EscapeValueName bool `yaml:"escape_value_name"`
AnnotationsRanges bool `yaml:"annotations_ranges"`
}
// Add _period to all array items
func addPeriodSuffix(seriesArr []string, period string) (result []string) {
for _, series := range seriesArr {
result = append(result, series+"_"+period)
}
return
}
// Return cartesian product of all arrays starting with prefix, joined by "join" ending with suffix
func joinedCartesian(mat [][]string, prefix, join, suffix string) (result []string) {
// rows - number of arrays to join, rowsm1 (last index of array to join)
rows := len(mat)
rowsm1 := rows - 1
// lens[i] - i-th row length - 1 (last i-th row column index)
// curr[i] - current position in i-th row, we're processing N x M x ... positions
// All possible combinations = Cartesian
var (
lens []int
curr []int
)
for _, row := range mat {
lens = append(lens, len(row)-1)
curr = append(curr, 0)
}
// While not for all i curr[i] == lens[i]
for {
// Create one of output combinations
str := prefix
for i := 0; i < rows; i++ {
str += mat[i][curr[i]]
if i < rowsm1 {
str += join
}
}
str += suffix
result = append(result, str)
// Stop if for all i curr[i] == lens[i]
// Which means we processed all possible combinations
stop := true
for i := 0; i < rows; i++ {
if curr[i] < lens[i] {
stop = false
break
}
}
if stop {
break
}
// increase curr[i] for some i
for i := 0; i < rows; i++ {
// We can move to next permutation at this i
if curr[i] < lens[i] {
curr[i]++
break
} else {
// We have to go to another row and zero all lower positions
for j := 0; j <= i; j++ {
curr[j] = 0
}
}
}
}
// Retunrs "result" containing all possible permutations
return
}
// Parse formula in format "=prefix;suffix;join;list1item1,list1item2,...;list2item1,list2item2,...;..."
func createSeriesFromFormula(def string) (result []string) {
ary := strings.Split(def[1:], ";")
if len(ary) < 4 {
lib.Fatalf(
"series formula must have at least 4 paramaters: "+
"prefix, suffix, join, list, %v",
def,
)
}
// prefix, join value (how to connect strings from different arrays), suffix
prefix, suffix, join := ary[0], ary[1], ary[2]
// Create "matrix" of strings (not a real matrix because rows can have different counts)
var matrix [][]string
for _, list := range ary[3:] {
vals := strings.Split(list, ",")
matrix = append(matrix, vals)
}
// Create cartesian result with all possible combinations
result = joinedCartesian(matrix, prefix, join, suffix)
return
}
// fills series gaps
// Reads config from YAML (which series, for which periods)
func fillGapsInSeries(ctx *lib.Ctx, from, to time.Time) {
lib.Printf("Fill gaps in series\n")
var gaps gaps
// Local or cron mode?
cmdPrefix := ""
dataPrefix := lib.DataDir
if ctx.Local {
cmdPrefix = "./"
dataPrefix = "./"
}
data, err := lib.ReadFile(ctx, dataPrefix+ctx.GapsYaml)
if err != nil {
lib.FatalOnError(err)
return
}
lib.FatalOnError(yaml.Unmarshal(data, &gaps))
// Iterate metrics and periods
bSize := 1000
for _, metric := range gaps.Metrics {
extraParams := []string{}
if metric.Desc {
extraParams = append(extraParams, "desc")
}
// Parse multi values
values := []string{}
for _, value := range metric.Values {
if value[0:1] == "=" {
valuesArr := createSeriesFromFormula(value)
values = append(values, valuesArr...)
} else {
values = append(values, value)
}
}
if len(values) == 0 {
values = append(values, "value")
}
extraParams = append(extraParams, "values:"+strings.Join(values, ";"))
// Parse series
series := []string{}
for _, ser := range metric.Series {
if ser[0:1] == "=" {
formulaSeries := createSeriesFromFormula(ser)
series = append(series, formulaSeries...)
} else {
series = append(series, ser)
}
}
nSeries := len(series)
nBuckets := nSeries / bSize
if nSeries%bSize > 0 {
nBuckets++
}
periods := strings.Split(metric.Periods, ",")
aggregate := metric.Aggregate
if aggregate == "" {
aggregate = "1"
}
aggregateArr := strings.Split(aggregate, ",")
skips := strings.Split(metric.Skip, ",")
skipMap := make(map[string]struct{})
for _, skip := range skips {
skipMap[skip] = struct{}{}
}
for _, aggrStr := range aggregateArr {
_, err := strconv.Atoi(aggrStr)
lib.FatalOnError(err)
aggrSuffix := aggrStr
if aggrSuffix == "1" {
aggrSuffix = ""
}
for _, period := range periods {
periodAggr := period + aggrSuffix
_, found := skipMap[periodAggr]
if found {
lib.Printf("Skipped filling gaps on period %s\n", periodAggr)
continue
}
if !ctx.ResetIDB && !lib.ComputePeriodAtThisDate(ctx, period, to) {
lib.Printf("Skipping filling gaps for period \"%s\" for date %v\n", periodAggr, to)
continue
}
for i := 0; i < nBuckets; i++ {
bFrom := i * bSize
bTo := bFrom + bSize
if bTo > nSeries {
bTo = nSeries
}
lib.Printf("Filling metric gaps %v, descriptions %v, period: %s, %d series (%d - %d)...\n", metric.Name, metric.Desc, periodAggr, nSeries, bFrom, bTo)
_, err := lib.ExecCommand(
ctx,
[]string{
cmdPrefix + "z2influx",
strings.Join(addPeriodSuffix(series[bFrom:bTo], periodAggr), ","),
lib.ToYMDHDate(from),
lib.ToYMDHDate(to),
periodAggr,
strings.Join(extraParams, ","),
},
nil,
)
lib.FatalOnError(err)
}
}
}
}
}
func sync(ctx *lib.Ctx, args []string) {
// Strip function to be used by MapString
stripFunc := func(x string) string { return strings.TrimSpace(x) }
// Orgs & Repos
sOrg := ""
if len(args) > 0 {
sOrg = args[0]
}
sRepo := ""
if len(args) > 1 {
sRepo = args[1]
}
org := lib.StringsMapToArray(stripFunc, strings.Split(sOrg, ","))
repo := lib.StringsMapToArray(stripFunc, strings.Split(sRepo, ","))
lib.Printf("gha2db_sync.go: Running on: %s/%s\n", strings.Join(org, "+"), strings.Join(repo, "+"))
// Local or cron mode?
cmdPrefix := ""
dataPrefix := lib.DataDir
if ctx.Local {
cmdPrefix = "./"
dataPrefix = "./"
}
// Connect to Postgres DB
con := lib.PgConn(ctx)
defer func() { lib.FatalOnError(con.Close()) }()
// Connect to InfluxDB
ic := lib.IDBConn(ctx)
defer func() { lib.FatalOnError(ic.Close()) }()
// Get max event date from Postgres database
var maxDtPtr *time.Time
maxDtPg := ctx.DefaultStartDate
if !ctx.ForceStartDate {
lib.FatalOnError(lib.QueryRowSQL(con, ctx, "select max(created_at) from gha_events").Scan(&maxDtPtr))
if maxDtPtr != nil {
maxDtPg = *maxDtPtr
}
}
// Get max series date from Influx database
maxDtIDB := ctx.DefaultStartDate
if !ctx.ForceStartDate {
res := lib.QueryIDB(ic, ctx, "select last(value) from "+ctx.LastSeries)
series := res[0].Series
if len(series) > 0 {
maxDtIDB = lib.TimeParseIDB(series[0].Values[0][0].(string))
}
}
// Create date range
// Just to get into next GHA hour
from := maxDtPg.Add(5 * time.Minute)
to := time.Now()
fromDate := lib.ToYMDDate(from)
fromHour := strconv.Itoa(from.Hour())
toDate := lib.ToYMDDate(to)
toHour := strconv.Itoa(to.Hour())
// Get new GHAs
if !ctx.SkipPDB {
// Clear old DB logs
lib.ClearDBLogs()
// gha2db
lib.Printf("GHA range: %s %s - %s %s\n", fromDate, fromHour, toDate, toHour)
_, err := lib.ExecCommand(
ctx,
[]string{
cmdPrefix + "gha2db",
fromDate,
fromHour,
toDate,
toHour,
strings.Join(org, ","),
strings.Join(repo, ","),
},
nil,
)
lib.FatalOnError(err)
// Only run commits analysis for current DB here
// We have updated repos to the newest state as 1st step in "devstats" call
// We have also fetched all data from current GHA hour using "gha2db"
// Now let's update new commits files (from newest hour)
lib.Printf("Update git commits\n")
_, err = lib.ExecCommand(
ctx,
[]string{
cmdPrefix + "get_repos",
},
map[string]string{
"GHA2DB_PROCESS_COMMITS": "1",
"GHA2DB_PROJECTS_COMMITS": ctx.Project,
},
)
lib.FatalOnError(err)
// GitHub API calls to get open issues state
// It updates milestone and/or label(s) when different sice last comment state
lib.Printf("Update data from GitHub API\n")
// Recompute views and DB summaries
_, err = lib.ExecCommand(
ctx,
[]string{
cmdPrefix + "ghapi2db",
},
nil,
)
lib.FatalOnError(err)
// Eventual postprocess SQL's from 'structure' call
lib.Printf("Update structure\n")
// Recompute views and DB summaries
_, err = lib.ExecCommand(
ctx,
[]string{
cmdPrefix + "structure",
},
map[string]string{
"GHA2DB_SKIPTABLE": "1",
"GHA2DB_MGETC": "y",
},
)
lib.FatalOnError(err)
}
// DB2Influx
if !ctx.SkipIDB {
metricsDir := dataPrefix + "metrics"
if ctx.Project != "" {
metricsDir += "/" + ctx.Project
}
// Regenerate points from this date
if ctx.ResetIDB {
from = ctx.DefaultStartDate
} else {
from = maxDtIDB
}
lib.Printf("Influx range: %s - %s\n", lib.ToYMDHDate(from), lib.ToYMDHDate(to))
// InfluxDB tags (repo groups template variable currently)
if ctx.ResetIDB || time.Now().Hour() == 0 {
_, err := lib.ExecCommand(ctx, []string{cmdPrefix + "idb_tags"}, nil)
lib.FatalOnError(err)
} else {
lib.Printf("Skipping `idb_tags` recalculation, it is only computed once per day\n")
}
// Annotations
if ctx.Project != "" && (ctx.ResetIDB || time.Now().Hour() == 0) {
_, err := lib.ExecCommand(
ctx,
[]string{
cmdPrefix + "annotations",
},
nil,
)
lib.FatalOnError(err)
} else {
lib.Printf("Skipping `annotations` recalculation, it is only computed once per day\n")
}
// Get Quick Ranges from IDB (it is filled by annotations command)
quickRanges := lib.GetTagValues(ic, ctx, "quick_ranges_suffix")
lib.Printf("Quick ranges: %+v\n", quickRanges)
// Fill gaps in series
fillGapsInSeries(ctx, from, to)
// Read metrics configuration
data, err := lib.ReadFile(ctx, dataPrefix+ctx.MetricsYaml)
if err != nil {
lib.FatalOnError(err)
return
}
var allMetrics metrics
lib.FatalOnError(yaml.Unmarshal(data, &allMetrics))
// Keep all histograms here
var hists [][]string
// Iterate all metrics
for _, metric := range allMetrics.Metrics {
extraParams := []string{}
if metric.Histogram {
extraParams = append(extraParams, "hist")
}
if metric.MultiValue {
extraParams = append(extraParams, "multivalue")
}
if metric.EscapeValueName {
extraParams = append(extraParams, "escape_value_name")
}
if metric.Desc != "" {
extraParams = append(extraParams, "desc:"+metric.Desc)
}
periods := strings.Split(metric.Periods, ",")
aggregate := metric.Aggregate
if aggregate == "" {
aggregate = "1"
}
if metric.AnnotationsRanges {
extraParams = append(extraParams, "annotations_ranges")
periods = quickRanges
aggregate = "1"
}
aggregateArr := strings.Split(aggregate, ",")
skips := strings.Split(metric.Skip, ",")
skipMap := make(map[string]struct{})
for _, skip := range skips {
skipMap[skip] = struct{}{}
}
if !ctx.ResetIDB && !ctx.ResetRanges {
extraParams = append(extraParams, "skip_past")
}
for _, aggrStr := range aggregateArr {
_, err := strconv.Atoi(aggrStr)
lib.FatalOnError(err)
aggrSuffix := aggrStr
if aggrSuffix == "1" {
aggrSuffix = ""
}
for _, period := range periods {
periodAggr := period + aggrSuffix
_, found := skipMap[periodAggr]
if found {
lib.Printf("Skipped period %s\n", periodAggr)
continue
}
if !ctx.ResetIDB && !lib.ComputePeriodAtThisDate(ctx, period, to) {
lib.Printf("Skipping recalculating period \"%s%s\" for date to %v\n", period, aggrSuffix, to)
continue
}
seriesNameOrFunc := metric.SeriesNameOrFunc
if metric.AddPeriodToName {
seriesNameOrFunc += "_" + periodAggr
}
// Histogram metrics usualy take long time, but executes single query, so there is no way to
// Implement multi threading inside "db2influx" call fro them
// So we're creating array of such metrics to be executed at the end - each in a separate go routine
if metric.Histogram {
lib.Printf("Scheduled histogram metric %v, period %v, desc: '%v', aggregate: '%v' ...\n", metric.Name, period, metric.Desc, aggrSuffix)
hists = append(
hists,
[]string{
cmdPrefix + "db2influx",
seriesNameOrFunc,
fmt.Sprintf("%s/%s.sql", metricsDir, metric.MetricSQL),
lib.ToYMDHDate(from),
lib.ToYMDHDate(to),
periodAggr,
strings.Join(extraParams, ","),
},
)
} else {
lib.Printf("Calculate metric %v, period %v, desc: '%v', aggregate: '%v' ...\n", metric.Name, period, metric.Desc, aggrSuffix)
_, err = lib.ExecCommand(
ctx,
[]string{
cmdPrefix + "db2influx",
seriesNameOrFunc,
fmt.Sprintf("%s/%s.sql", metricsDir, metric.MetricSQL),
lib.ToYMDHDate(from),
lib.ToYMDHDate(to),
periodAggr,
strings.Join(extraParams, ","),
},
nil,
)
lib.FatalOnError(err)
}
}
}
}
// Process histograms (possibly MT)
// Get number of CPUs available
thrN := lib.GetThreadsNum(ctx)
if thrN > 1 {
lib.Printf("Now processing %d histograms using MT%d version\n", len(hists), thrN)
ch := make(chan bool)
nThreads := 0
for _, hist := range hists {
go calcHistogram(ch, ctx, hist)
nThreads++
if nThreads == thrN {
<-ch
nThreads--
}
}
lib.Printf("Final threads join\n")
for nThreads > 0 {
<-ch
nThreads--
}
} else {
lib.Printf("Now processing %d histograms using ST version\n", len(hists))
for _, hist := range hists {
calcHistogram(nil, ctx, hist)
}
}
}
lib.Printf("Sync success\n")
}
// calcHistogram - calculate single histogram by calling "db2influx" program with parameters from "hist"
func calcHistogram(ch chan bool, ctx *lib.Ctx, hist []string) {
if len(hist) != 7 {
lib.Fatalf("calcHistogram, expected 7 strings, got: %d: %v", len(hist), hist)
}
envMap := make(map[string]string)
rSrc := rand.NewSource(time.Now().UnixNano())
rnd := rand.New(rSrc)
if rnd.Intn(15) == 1 {
envMap["GHA2DB_IDB_DROP_SERIES"] = "1"
}
lib.Printf(
"Calculate histogram %s,%s,%s,%s,%s,%s ...\n",
hist[1],
hist[2],
hist[3],
hist[4],
hist[5],
hist[6],
)
// Execute "db2influx"
_, err := lib.ExecCommand(
ctx,
[]string{
hist[0],
hist[1],
hist[2],
hist[3],
hist[4],
hist[5],
hist[6],
},
envMap,
)
lib.FatalOnError(err)
// Synchronize go routine
if ch != nil {
ch <- true
}
}
// Return per project args (if no args given) or get args from command line (if given)
// When no args given and no project set (via GHA2DB_PROJECT) it panics
func getSyncArgs(ctx *lib.Ctx, osArgs []string) []string {
// User commandline override
if len(osArgs) > 1 {
return osArgs[1:]
}
// No user commandline, get args specific to project GHA2DB_PROJECT
if ctx.Project == "" {
lib.Fatalf(
"you have to set project via GHA2DB_PROJECT environment variable if you provide no commandline arguments",
)
}
// Local or cron mode?
dataPrefix := lib.DataDir
if ctx.Local {
dataPrefix = "./"
}
// Read defined projects
data, err := lib.ReadFile(ctx, dataPrefix+ctx.ProjectsYaml)
if err != nil {
lib.FatalOnError(err)
return []string{}
}
var projects lib.AllProjects
lib.FatalOnError(yaml.Unmarshal(data, &projects))
proj, ok := projects.Projects[ctx.Project]
if ok {
if proj.StartDate != nil && !ctx.ForceStartDate {
ctx.DefaultStartDate = *proj.StartDate
}
return proj.CommandLine
}
// No user commandline and project not found
lib.Fatalf(
"project '%s' is not defined in '%s'",
ctx.Project,
ctx.ProjectsYaml,
)
return []string{}
}
func main() {
dtStart := time.Now()
// Environment context parse
var ctx lib.Ctx
ctx.Init()
sync(&ctx, getSyncArgs(&ctx, os.Args))
dtEnd := time.Now()
lib.Printf("Time: %v\n", dtEnd.Sub(dtStart))
} | cmd/gha2db_sync/gha2db_sync.go | 0.664758 | 0.439567 | gha2db_sync.go | starcoder |
package types
import (
"sort"
"github.com/attic-labs/noms/go/hash"
)
type Set struct {
seq orderedSequence
h *hash.Hash
}
func newSet(seq orderedSequence) Set {
return Set{seq, &hash.Hash{}}
}
func NewSet(v ...Value) Set {
data := buildSetData(v)
ch := newEmptySetSequenceChunker(nil, nil)
for _, v := range data {
ch.Append(v)
}
return newSet(ch.Done().(orderedSequence))
}
func NewStreamingSet(vrw ValueReadWriter, vals <-chan Value) <-chan Set {
outChan := make(chan Set)
go func() {
gb := NewGraphBuilder(vrw, SetKind, false)
for v := range vals {
gb.SetInsert(nil, v)
}
outChan <- gb.Build().(Set)
}()
return outChan
}
// Computes the diff from |last| to |s| using "best" algorithm, which balances returning results early vs completing quickly.
func (s Set) Diff(last Set, changes chan<- ValueChanged, closeChan <-chan struct{}) {
if s.Equals(last) {
return
}
orderedSequenceDiffBest(last.seq, s.seq, changes, closeChan)
}
// Like Diff() but uses a left-to-right streaming approach, optimised for returning results early, but not completing quickly.
func (s Set) DiffLeftRight(last Set, changes chan<- ValueChanged, closeChan <-chan struct{}) {
if s.Equals(last) {
return
}
orderedSequenceDiffLeftRight(last.seq, s.seq, changes, closeChan)
}
// Collection interface
func (s Set) Len() uint64 {
return s.seq.numLeaves()
}
func (s Set) Empty() bool {
return s.Len() == 0
}
func (s Set) sequence() sequence {
return s.seq
}
func (s Set) hashPointer() *hash.Hash {
return s.h
}
// Value interface
func (s Set) Equals(other Value) bool {
return s.Hash() == other.Hash()
}
func (s Set) Less(other Value) bool {
return valueLess(s, other)
}
func (s Set) Hash() hash.Hash {
if s.h.IsEmpty() {
*s.h = getHash(s)
}
return *s.h
}
func (s Set) WalkValues(cb ValueCallback) {
s.IterAll(func(v Value) {
cb(v)
})
}
func (s Set) WalkRefs(cb RefCallback) {
s.seq.WalkRefs(cb)
}
func (s Set) Type() *Type {
return s.seq.Type()
}
func (s Set) First() Value {
cur := newCursorAt(s.seq, emptyKey, false, false)
if !cur.valid() {
return nil
}
return cur.current().(Value)
}
func (s Set) Insert(values ...Value) Set {
if len(values) == 0 {
return s
}
head, tail := values[0], values[1:]
var res Set
if cur, found := s.getCursorAtValue(head); !found {
res = s.splice(cur, 0, head)
} else {
res = s
}
return res.Insert(tail...)
}
func (s Set) Remove(values ...Value) Set {
if len(values) == 0 {
return s
}
head, tail := values[0], values[1:]
var res Set
if cur, found := s.getCursorAtValue(head); found {
res = s.splice(cur, 1)
} else {
res = s
}
return res.Remove(tail...)
}
func (s Set) splice(cur *sequenceCursor, deleteCount uint64, vs ...Value) Set {
ch := newSequenceChunker(cur, s.seq.valueReader(), nil, makeSetLeafChunkFn(s.seq.valueReader()), newOrderedMetaSequenceChunkFn(SetKind, s.seq.valueReader()), hashValueBytes)
for deleteCount > 0 {
ch.Skip()
deleteCount--
}
for _, v := range vs {
ch.Append(v)
}
ns := newSet(ch.Done().(orderedSequence))
return ns
}
func (s Set) getCursorAtValue(v Value) (cur *sequenceCursor, found bool) {
cur = newCursorAtValue(s.seq, v, true, false)
found = cur.idx < cur.seq.seqLen() && cur.current().(Value).Equals(v)
return
}
func (s Set) Has(v Value) bool {
cur := newCursorAtValue(s.seq, v, false, false)
return cur.valid() && cur.current().(Value).Equals(v)
}
type setIterCallback func(v Value) bool
func (s Set) Iter(cb setIterCallback) {
cur := newCursorAt(s.seq, emptyKey, false, false)
cur.iter(func(v interface{}) bool {
return cb(v.(Value))
})
}
type setIterAllCallback func(v Value)
func (s Set) IterAll(cb setIterAllCallback) {
cur := newCursorAt(s.seq, emptyKey, false, false)
cur.iter(func(v interface{}) bool {
cb(v.(Value))
return false
})
}
func (s Set) Iterator() SetIterator {
return &setIterator{s: s, cursor: nil}
}
func (s Set) elemType() *Type {
return s.Type().Desc.(CompoundDesc).ElemTypes[0]
}
func buildSetData(values ValueSlice) ValueSlice {
if len(values) == 0 {
return ValueSlice{}
}
uniqueSorted := make(ValueSlice, 0, len(values))
sort.Stable(values)
last := values[0]
for i := 1; i < len(values); i++ {
v := values[i]
if !v.Equals(last) {
uniqueSorted = append(uniqueSorted, last)
}
last = v
}
return append(uniqueSorted, last)
}
func makeSetLeafChunkFn(vr ValueReader) makeChunkFn {
return func(items []sequenceItem) (Collection, orderedKey, uint64) {
setData := make([]Value, len(items), len(items))
for i, v := range items {
setData[i] = v.(Value)
}
set := newSet(newSetLeafSequence(vr, setData...))
var key orderedKey
if len(setData) > 0 {
key = newOrderedKey(setData[len(setData)-1])
}
return set, key, uint64(len(items))
}
}
func newEmptySetSequenceChunker(vr ValueReader, vw ValueWriter) *sequenceChunker {
return newEmptySequenceChunker(vr, vw, makeSetLeafChunkFn(vr), newOrderedMetaSequenceChunkFn(SetKind, vr), hashValueBytes)
} | go/types/set.go | 0.75101 | 0.419707 | set.go | starcoder |
package mockreader
import (
"bytes"
"fmt"
"io"
"io/fs"
"strconv"
"sync"
"github.com/chronos-tachyon/assert"
"github.com/chronos-tachyon/bufferpool"
)
// Expectation describes a single expected call on a mock reader: the
// operation kind, the inputs the caller is expected to pass, and the
// outputs the mock should produce when the call arrives.  Instances are
// built via the Expect* constructor functions; Matches compares the
// input side of two Expectations.
type Expectation struct {
	op        Op          // which operation this expectation is for (Mark/Stat/Read/ReadAt/Seek/Close)
	input0    []byte      // Read/ReadAt buffer; Matches compares only its length, not its contents
	input1    int64       // ReadAt/Seek offset argument
	input2    int         // Seek whence argument
	input3    string      // Mark label string
	output0   int64       // Read/ReadAt byte count, or Seek's resulting offset
	output1   fs.FileInfo // Stat's FileInfo result
	output2   error       // error result for every op that returns one
	hasOutput bool        // true once the output fields have been populated (set by all Expect* constructors)
}
// ExpectMark builds an Expectation for a Mark call carrying the label str.
func ExpectMark(str string) Expectation {
	var x Expectation
	x.op = MarkOp
	x.input3 = str
	x.hasOutput = true
	return x
}
// ExpectStat builds an Expectation for a Stat call that will yield the
// given FileInfo and error.
func ExpectStat(fi fs.FileInfo, err error) Expectation {
	var x Expectation
	x.op = StatOp
	x.output1 = fi
	x.output2 = err
	x.hasOutput = true
	return x
}
// ExpectRead builds an Expectation for a Read call with buffer p that
// will report n bytes read and the given error.  It panics (via assert)
// unless 0 <= n <= len(p).
func ExpectRead(p []byte, n int, err error) Expectation {
	assert.Assertf(n >= 0, "%d >= 0", n)
	assert.Assertf(n <= len(p), "%d <= %d", n, len(p))
	var x Expectation
	x.op = ReadOp
	x.input0 = p
	x.output0 = int64(n)
	x.output2 = err
	x.hasOutput = true
	return x
}
func ExpectReadAt(p []byte, offset int64, n int, err error) Expectation {
assert.Assertf(n >= 0, "%d >= 0", n)
assert.Assertf(n <= len(p), "%d <= %d", n, len(p))
return Expectation{
op: ReadAtOp,
input0: p,
input1: offset,
output0: int64(n),
output2: err,
hasOutput: true,
}
}
func ExpectSeek(offset int64, whence int, newOffset int64, err error) Expectation {
return Expectation{
op: SeekOp,
input1: offset,
input2: whence,
output0: newOffset,
output2: err,
hasOutput: true,
}
}
func ExpectClose(err error) Expectation {
return Expectation{
op: CloseOp,
output2: err,
hasOutput: true,
}
}
// Matches reports whether y describes the same call as x: identical
// operation and scalar inputs, and a byte buffer of the same length.
// Output fields are deliberately ignored; only the call shape is compared.
func (x Expectation) Matches(y Expectation) bool {
	if x.op != y.op {
		return false
	}
	if x.input1 != y.input1 || x.input2 != y.input2 || x.input3 != y.input3 {
		return false
	}
	return len(x.input0) == len(y.input0)
}
// GoStringTo writes a Go-syntax-like rendering of the expectation to buf,
// of the form "Expectation{<op> | <inputs> | <outputs>}".
func (x Expectation) GoStringTo(buf *bytes.Buffer) {
	// tail encodes which outputs apply to this op:
	// 0 = none, 1 = error only, 2 = (int64, error), 3 = (FileInfo, error).
	var tail int
	buf.WriteString("Expectation{")
	buf.WriteString(x.op.GoString())
	buf.WriteString(" | ")
	switch x.op {
	case MarkOp:
		buf.WriteString(strconv.Quote(x.input3))
		tail = 0
	case StatOp:
		// Stat takes no inputs.
		buf.WriteString("∅")
		tail = 3
	case ReadOp:
		formatBytesTo(buf, x.input0)
		tail = 2
	case ReadAtOp:
		formatBytesTo(buf, x.input0)
		buf.WriteString(", ")
		buf.WriteString(formatInt64(x.input1))
		tail = 2
	case SeekOp:
		buf.WriteString(formatInt64(x.input1))
		buf.WriteString(", ")
		buf.WriteString(formatInt(x.input2))
		tail = 2
	case CloseOp:
		// Close takes no inputs.
		buf.WriteString("∅")
		tail = 1
	}
	if x.hasOutput {
		buf.WriteString(" | ")
		switch tail {
		case 0:
			buf.WriteString("∅")
		case 1:
			buf.WriteString(formatAny(x.output2))
		case 2:
			buf.WriteString(formatInt64(x.output0))
			buf.WriteString(", ")
			buf.WriteString(formatAny(x.output2))
		case 3:
			buf.WriteString(formatAny(x.output1))
			buf.WriteString(", ")
			buf.WriteString(formatAny(x.output2))
		}
	}
	buf.WriteString("}")
}
// StringTo writes a human-readable call-style rendering of the expectation
// to buf, e.g. `Read([4 bytes]) => (4, <nil>)`.
func (x Expectation) StringTo(buf *bytes.Buffer) {
	// tail encodes which outputs apply to this op:
	// 0 = none, 1 = error only, 2 = (int64, error), 3 = (FileInfo, error).
	var tail int
	switch x.op {
	case MarkOp:
		buf.WriteString("Mark(")
		buf.WriteString(strconv.Quote(x.input3))
		buf.WriteString(")")
		tail = 0
	case StatOp:
		buf.WriteString("Stat()")
		tail = 3
	case ReadOp:
		buf.WriteString("Read([")
		buf.WriteString(formatInt(len(x.input0)))
		buf.WriteString(" bytes])")
		tail = 2
	case ReadAtOp:
		buf.WriteString("ReadAt([")
		buf.WriteString(formatInt(len(x.input0)))
		buf.WriteString(" bytes], ")
		buf.WriteString(formatInt64(x.input1))
		buf.WriteString(")")
		tail = 2
	case SeekOp:
		buf.WriteString("Seek(")
		buf.WriteString(formatInt64(x.input1))
		buf.WriteString(", ")
		buf.WriteString(whenceString(x.input2))
		buf.WriteString(")")
		tail = 2
	case CloseOp:
		buf.WriteString("Close()")
		tail = 1
	default:
		// Zero Expectation (e.g. from an exhausted MockReader).
		buf.WriteString("Nothing")
	}
	if x.hasOutput {
		switch tail {
		case 0:
			buf.WriteString(" => ()")
		case 1:
			buf.WriteString(" => (")
			buf.WriteString(formatAny(x.output2))
			buf.WriteString(")")
		case 2:
			buf.WriteString(" => (")
			buf.WriteString(formatInt64(x.output0))
			buf.WriteString(", ")
			buf.WriteString(formatAny(x.output2))
			buf.WriteString(")")
		case 3:
			buf.WriteString(" => (")
			buf.WriteString(formatAny(x.output1))
			buf.WriteString(", ")
			buf.WriteString(formatAny(x.output2))
			buf.WriteString(")")
		}
	}
}
// GoString implements fmt.GoStringer using a pooled buffer.
func (x Expectation) GoString() string {
	buf := bufferpool.Get()
	defer bufferpool.Put(buf)
	x.GoStringTo(buf)
	return buf.String()
}

// String implements fmt.Stringer using a pooled buffer.
func (x Expectation) String() string {
	buf := bufferpool.Get()
	defer bufferpool.Put(buf)
	x.StringTo(buf)
	return buf.String()
}

// Compile-time interface checks.
var (
	_ fmt.GoStringer = Expectation{}
	_ fmt.Stringer   = Expectation{}
)

// New returns a MockReader that expects exactly the given operations, in order.
func New(list ...Expectation) *MockReader {
	return &MockReader{expect: list}
}

// MockReader replays a scripted sequence of I/O operations, panicking with
// ExpectationFailedError on any call that deviates from the script.
// It is safe for concurrent use; mu guards expect and index.
type MockReader struct {
	mu     sync.Mutex
	expect []Expectation
	index  uint
}
// next atomically consumes and returns the current expectation with its
// index. A nil receiver or an exhausted expectation list yields a zero
// Expectation, which will fail to match any real operation.
func (r *MockReader) next() (uint, Expectation) {
	if r == nil {
		return 0, Expectation{}
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	index := r.index
	length := uint(len(r.expect))
	if index >= length {
		// Exhausted: report the one-past-the-end index with a zero value.
		return length, Expectation{}
	}
	r.index++
	return index, r.expect[index]
}
// Mark consumes the next expectation, which must be ExpectMark(str);
// otherwise it panics with ExpectationFailedError. A nil receiver is a no-op.
func (r *MockReader) Mark(str string) {
	if r == nil {
		return
	}
	index, expect := r.next()
	actual := Expectation{op: MarkOp, input3: str}
	if !expect.Matches(actual) {
		panic(ExpectationFailedError{index, expect, actual})
	}
	debug("mock index %d: matched %+v", index, expect)
}
// Stat consumes the next expectation, which must be a StatOp, and returns
// its canned (FileInfo, error). Any mismatch panics with ExpectationFailedError.
func (r *MockReader) Stat() (fs.FileInfo, error) {
	index, expect := r.next()
	actual := Expectation{op: StatOp}
	if !expect.Matches(actual) {
		panic(ExpectationFailedError{index, expect, actual})
	}
	debug("mock index %d: matched %+v", index, expect)
	fi := expect.output1
	err := expect.output2
	return fi, err
}

// Read consumes the next expectation, which must be a ReadOp with a buffer
// of the same length as p. The expectation's first n bytes are copied into
// p and (n, err) is returned.
func (r *MockReader) Read(p []byte) (int, error) {
	index, expect := r.next()
	actual := Expectation{op: ReadOp, input0: p}
	if !expect.Matches(actual) {
		panic(ExpectationFailedError{index, expect, actual})
	}
	debug("mock index %d: matched %+v", index, expect)
	n := int(expect.output0)
	err := expect.output2
	if n > 0 {
		// Fill the caller's buffer with the scripted data.
		copy(p[:n], expect.input0[:n])
	}
	return n, err
}

// ReadAt behaves like Read but additionally matches the requested offset.
func (r *MockReader) ReadAt(p []byte, offset int64) (int, error) {
	index, expect := r.next()
	actual := Expectation{op: ReadAtOp, input0: p, input1: offset}
	if !expect.Matches(actual) {
		panic(ExpectationFailedError{index, expect, actual})
	}
	debug("mock index %d: matched %+v", index, expect)
	n := int(expect.output0)
	err := expect.output2
	if n > 0 {
		copy(p[:n], expect.input0[:n])
	}
	return n, err
}

// Seek consumes the next expectation, which must be ExpectSeek(offset, whence, ...),
// and returns the scripted (newOffset, err).
func (r *MockReader) Seek(offset int64, whence int) (int64, error) {
	index, expect := r.next()
	actual := Expectation{op: SeekOp, input1: offset, input2: whence}
	if !expect.Matches(actual) {
		panic(ExpectationFailedError{index, expect, actual})
	}
	debug("mock index %d: matched %+v", index, expect)
	n := expect.output0
	err := expect.output2
	return n, err
}

// Close consumes the next expectation, which must be a CloseOp, and returns
// its scripted error.
func (r *MockReader) Close() error {
	index, expect := r.next()
	actual := Expectation{op: CloseOp}
	if !expect.Matches(actual) {
		panic(ExpectationFailedError{index, expect, actual})
	}
	debug("mock index %d: matched %+v", index, expect)
	err := expect.output2
	return err
}
var (
_ fs.File = (*MockReader)(nil)
_ io.Reader = (*MockReader)(nil)
_ io.ReaderAt = (*MockReader)(nil)
_ io.Seeker = (*MockReader)(nil)
_ io.Closer = (*MockReader)(nil)
) | internal/mockreader/mockreader.go | 0.523177 | 0.522872 | mockreader.go | starcoder |
package gnuplot
// Gnuplot scripts and report templates used by the benchmark reporter.
//
// Fixes relative to the original:
//   - the markdown results row was missing the "|" between the Max and
//     Median cells ("%.3f%.3f"), producing a 6-cell row under a 7-column
//     header; it now matches the header and the HTML table.
//   - "Functionl" typo in the emitted report title corrected.
const (
	// cpuPlotPNG renders CPU usage over time to cpu.png.
	cpuPlotPNG = `set term png size 1024,768
set output 'cpu.png'
set timefmt '%s'
set xdata time
set title 'CPU(Cores)'
set xlabel 'Time'
plot 'cpu.data' using 1:2 with lines`
	// memPlotPNG renders memory usage over time to mem.png.
	memPlotPNG = `set term png size 1024,768
set output 'mem.png'
set timefmt '%s'
set xdata time
set title 'Mem(Mb)'
set xlabel 'Time'
plot 'mem.data' using 1:2 with lines`
	// latencyPlotPNG renders per-message latency with a fitted trend line.
	latencyPlotPNG = `set term png size 1024,768
set output 'latency.png'
set title 'Latency(s)'
set xlabel 'Message'
f(x)=m*x+b
fit f(x) 'latency.data' using 1:2 via m,b
plot 'latency.data' using 1:2 with lines title 'Data', f(x) title 'Trend'`
	// *Dumb variants render the same plots as ASCII art for terminals.
	cpuPlotDumb = `set term dumb
set timefmt '%s'
set xdata time
set title 'CPU(Cores)'
set xlabel 'Time'
plot 'cpu.data' using 1:2 with lines`
	memPlotDumb = `set term dumb
set timefmt '%s'
set xdata time
set title 'Mem(Mb)'
set xlabel 'Time'
plot 'mem.data' using 1:2 with lines`
	latencyPlotDumb = `set term dumb
set title 'Latency(s)'
set xlabel 'Message'
plot 'latency.data' using 1:2 with lines`
	// html is the HTML report template; fmt verbs (13 in total) are filled
	// in the order: image, stressors, lines/s, duration, payload source,
	// total, size, elapsed, mean, min, max, median, config.
	html = `
<html>
<div>
<div><b>Options</b><div>
<div>Image: %s</div>
<div>Total Log Stressors: %d</div>
<div>Lines Per Second: %d</div>
<div>Run Duration: %s</div>
<div>Payload Source: %s</div>
</div>
<div>
Latency of logs collected based on the time the log was generated and ingested
</div>
<table>
<tr>
<th>Total</th>
<th>Size</th>
<th>Elapsed</th>
<th>Mean</th>
<th>Min</th>
<th>Max</th>
<th>Median</th>
</tr>
<tr>
<th>Msg</th>
<th></th>
<th>(s)</th>
<th>(s)</th>
<th>(s)</th>
<th>(s)</th>
<th>(s)</th>
</tr>
<tr>
<td>%d</td>
<td>%d</td>
<td>%s</td>
<td>%.3f</td>
<td>%.3f</td>
<td>%.3f</td>
<td>%.3f</td>
</tr>
</table>
<div>
<img src="cpu.png">
</div>
<div>
<img src="mem.png">
</div>
<div>
<img src="latency.png">
</div>
<div>
%s
</div>
</html>
`
	// markdown is the Markdown report template; same 13 verbs as html.
	markdown = `
# Collector Functional Benchmark Results
## Options
* Image: %s
* Total Log Stressors: %d
* Lines Per Second: %d
* Run Duration: %s
* Payload Source: %s
## Latency of logs collected based on the time the log was generated and ingested
Total Msg| Size | Elapsed (s) | Mean (s)| Min(s) | Max (s)| Median (s)
---------|------|-------------|---------|--------|--------|---
%d|%d|%s|%.3f|%.3f|%.3f|%.3f



## Config
<code style="white-space:pre;">
%s
</code>
`
) | internal/cmd/functional-benchmarker/reports/gnuplot/plot_scripts.go | 0.740831 | 0.439326 | plot_scripts.go | starcoder |
Package client provides a method to query an API endpoint to extract data about
the latest covid numbers and report these to the cmd line via stdout or save them as a csv or markdown file
*/
package client
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
)
//APIClient bundles the HTTP client and the printf-style request URL
//template used to query the API server (see Get for the fill-in arguments).
type APIClient struct {
	Client *http.Client
	RequestURL string
}

//RawData is the raw response timeseries: maps from API date strings
//(formatted "M/D/YY") to daily totals.
type RawData struct {
	Cases map[string]int `json:"cases"`
	Deaths map[string]int `json:"deaths"`
	Recovered map[string]int `json:"recovered"`
}

//APIResponse is the main response from the server. TimeSeries is not part
//of the wire format; it is populated by FormatResponse from RawData.
type APIResponse struct {
	Country string `json:"country"`
	Province []string `json:"province"`
	RawData RawData `json:"timeline"`
	TimeSeries TimeSeries
}

var (
	ErrorBadDateFormat = errors.New("Incorrect Date Format") //returned when an API date string cannot be parsed
)
//NewClient returns an APIClient targeting the given request URL template,
//backed by an HTTP client with a 10-second idle-connection timeout.
func NewClient(RequestURL string) *APIClient {
	transport := &http.Transport{IdleConnTimeout: 10 * time.Second}
	return &APIClient{
		Client:     &http.Client{Transport: transport},
		RequestURL: RequestURL,
	}
}
//Get queries the API server for a country's data covering the window back
//to from, decodes the response, and formats it into a filtered TimeSeries.
//
//Fixes: the response body is now always closed (it previously leaked the
//connection), and the error from FormatResponse is no longer discarded.
func (c *APIClient) Get(country string, from, to time.Time, latest bool) (APIResponse, error) {
	var data APIResponse
	totalDays := calcDays(from)
	resp, err := c.Client.Get(fmt.Sprintf(c.RequestURL, country, totalDays))
	if err != nil {
		return data, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return data, parseErrorMessage(resp)
	}
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return data, err
	}
	if err := data.FormatResponse(from, to, latest); err != nil {
		return data, err
	}
	return data, nil
}
//FormatResponse converts the raw per-date maps into a structured TimeSeries
//([]Day), then orders it and filters it to [from, to] (or the latest entry).
//Map iteration order is random, so the subsequent Order() call is required
//for a deterministic result.
func (r *APIResponse) FormatResponse(from, to time.Time, latest bool) error {
	var timeSeries TimeSeries
	for date := range r.RawData.Cases {
		formattedTime, err := cleanReturnedDate(date)
		if err != nil {
			return err
		}
		// Deaths/Recovered are looked up by the same date key; a date that
		// is missing from those maps yields the int zero value.
		timeSeries.Data = append(timeSeries.Data, Day{
			Country: r.Country,
			Date: formattedTime,
			Cases: r.RawData.Cases[date],
			Deaths: r.RawData.Deaths[date],
			Recovered: r.RawData.Recovered[date],
		})
	}
	r.TimeSeries = timeSeries
	r.TimeSeries.Order()
	r.TimeSeries.Filter(from, to, latest)
	return nil
}
// cleanReturnedDate converts an API date of the form "M/D/YY" (e.g. "3/7/20")
// into a time.Time. It returns ErrorBadDateFormat when the string does not
// have exactly three "/"-separated parts or when the month/day parts are not
// one or two characters long.
//
// Fix: the original computed strings.Repeat("0", 2-len(part)) without
// validating part length, so any part longer than two characters produced a
// negative repeat count and panicked.
func cleanReturnedDate(date string) (time.Time, error) {
	dateParts := strings.Split(date, "/")
	if len(dateParts) != 3 {
		return time.Time{}, ErrorBadDateFormat
	}
	month, day, year := dateParts[0], dateParts[1], dateParts[2]
	if len(month) == 0 || len(month) > 2 || len(day) == 0 || len(day) > 2 {
		return time.Time{}, ErrorBadDateFormat
	}
	cleanDate := fmt.Sprintf("20%s-%s%s-%s%s", year,
		strings.Repeat("0", 2-len(month)), month,
		strings.Repeat("0", 2-len(day)), day)
	return time.Parse("2006-01-02", cleanDate)
}
func calcDays(from time.Time) int {
return int(time.Now().Sub(from).Hours()/24) + 1
}
func parseErrorMessage(resp *http.Response) error {
var errMessage struct {
Message string `json:"message"`
}
errdecoder := json.NewDecoder(resp.Body)
err := errdecoder.Decode(&errMessage)
if err != nil {
return err
}
return errors.New(errMessage.Message)
} | client/client.go | 0.70202 | 0.413951 | client.go | starcoder |
package cephalobjects
// GroupType - Possible values for grouping keys in a DataPoint struct
type GroupType int
// Actual and Grouped enumerate the possible grouping keys (enum-like).
const (
	Actual GroupType = iota
	Grouped
)
// DistanceMetric - possible distances for calculating the distance matrix of a DataStore
type DistanceMetric int
// Possible distance metrics; values start at 1 so the zero value is invalid.
const (
	Euclidean DistanceMetric = iota + 1
	SquaredEuclidean
	Manhattan
	Maximum
	Mahalanobis
)
// LinkageCriteria - Hierarchical clustering linkage criteria
type LinkageCriteria int
// Possible linkage criteria; values start at 1 so the zero value is invalid.
const (
	Complete LinkageCriteria = iota + 1
	Single
	Average
)
// GrepFold shows row/column position for any entry in the DataMatrix Matrix
type GrepFold struct {
	Row int
	Col int
}
// DataMatrix represents a simple matrix-like structure with variable labels
// on cols and rows; Grep maps a label to its position in Matrix.
type DataMatrix struct {
	Variables []string
	Matrix [][]float64
	Grep map[string]GrepFold
}
// DataMatrixExtreme represents a single extreme value with the info on row and column
// of the extreme value, as well as representative column/row grep
type DataMatrixExtreme struct {
	Value float64
	Row int
	Col int
	RowName string
	ColName string
	Cumulative int
}
// DataPoint is the basic xy analytic data type with a simple annotation (A - actual, G - grouped)
type DataPoint struct {
	UID string
	X, Y float64
	A, G string
}
// DataStore is a bit complex annotated slice-like data type (other properties to be added)
type DataStore struct {
	Basic []DataPoint
	Distance DataMatrix
}
// Descriptors represent basic statistics from an array of DataPoints by X and Y coordinates
type Descriptors struct {
	MeanX, MeanY, VarX, VarY, SdX, SdY, CovarXY float64
}
// ModelSummary holds the usual result structure from a linear regression a (intercept), b (slope) and R squared
type ModelSummary struct {
	A, B, R2 float64
}
// AnovaSummary represents a basic analysis-of-variance table
// (sums of squares, degrees of freedom, mean squares, F statistic, p-value).
type AnovaSummary struct {
	SSM, SST, SSE float64
	Dfm, Dft, Dfe float64
	MSM, MST, MSE float64
	F float64
	P float64
}
//TimeSeriesDataLike default output of timeseries for json
type TimeSeriesDataLike struct {
	ID int `json:"ID"`
	Data []TimeSeriesDataPoint `json:"series_data"`
}
// TimeSeriesDataPoint default output of timeseries data for json
type TimeSeriesDataPoint struct {
ID int `json:"point_id"`
Datetime string `json:"date_time"`
Data float64 `json:"data_value"`
} | cephalobjects/cephalobjects.go | 0.664323 | 0.638751 | cephalobjects.go | starcoder |
package tetra3d
import (
"github.com/kvartborg/vector"
)
// TextureAnimation is an animation struct. There is no function to create a new TextureAnimation because instantiating a struct is actually cleaner
// and easier to read than doing so through a function for this simple of a struct. The TextureAnimation.Frames value is a []vector.Vector, with each
// Vector representing a frame of the animation (and the offset from the original, base position for all animated vertices).
type TextureAnimation struct {
	FPS float64 // The playback frames per second (or FPS) of the animation
	Frames []vector.Vector // A slice of vectors, with each indicating the offset of the frame from the original position for the mesh.
}
// TexturePlayer is a struct that allows you to animate a collection of vertices' UV values using a TextureAnimation.
type TexturePlayer struct {
	OriginalOffsets map[*Vertex]vector.Vector // OriginalOffsets is a map of vertices to their base UV offsets. All animating happens relative to these values.
	Animation *TextureAnimation // Animation is a pointer to the currently playing Animation.
	// Playhead increases as the TexturePlayer plays. The rounded-down value is the frame that the TexturePlayer
	// resides in (so a Playhead of 1.2 indicates that it is in frame 1, the second frame).
	Playhead float64
	Speed float64 // Speed indicates the playback speed and direction of the TexturePlayer, with a value of 1.0 being 100%.
	Playing bool // Playing indicates whether the TexturePlayer is currently playing or not.
}
// NewTexturePlayer returns a TexturePlayer animating the given vertices at
// 100% speed, with their current UV values captured as the base offsets.
func NewTexturePlayer(verts []*Vertex) *TexturePlayer {
	player := &TexturePlayer{Speed: 1}
	player.Reset(verts)
	return player
}
// Reset re-targets the TexturePlayer at a new selection of vertices. Each
// vertex's current UV value is captured as the base offset that all
// subsequent animation is applied relative to.
func (player *TexturePlayer) Reset(verts []*Vertex) {
	offsets := make(map[*Vertex]vector.Vector, len(verts))
	for _, vert := range verts {
		offsets[vert] = vert.UV.Clone()
	}
	player.OriginalOffsets = offsets
}
// Play starts playing the passed TextureAnimation. The playhead is rewound
// unless the player is already mid-way through that same animation.
func (player *TexturePlayer) Play(animation *TextureAnimation) {
	restart := !player.Playing || player.Animation != animation
	if restart {
		player.Animation = animation
		player.Playhead = 0
	}
	player.Playing = true
}
// Update advances the TexturePlayer by dt (presumably seconds, given the
// FPS scaling — confirm against callers) and, while playing, applies the UV
// offset of the frame the playhead currently falls in.
func (player *TexturePlayer) Update(dt float64) {
	if player.Animation != nil && player.Playing && len(player.Animation.Frames) > 0 {
		player.Playhead += dt * player.Animation.FPS * player.Speed
		playhead := int(player.Playhead)
		// Wrap the integer frame index into [0, len(Frames)) in both
		// directions so negative Speed values also loop correctly.
		for playhead >= len(player.Animation.Frames) {
			playhead -= len(player.Animation.Frames)
		}
		for playhead < 0 {
			playhead += len(player.Animation.Frames)
		}
		frameOffset := player.Animation.Frames[playhead]
		player.ApplyUVOffset(frameOffset[0], frameOffset[1])
	}
}
// ApplyUVOffset applies a specified UV offset to all vertices a player is assigned to. This offset is not additive, but rather is
// set once, regardless of how many times ApplyUVOffset is called.
func (player *TexturePlayer) ApplyUVOffset(offsetX, offsetY float64) {
for vert, ogOffset := range player.OriginalOffsets {
vert.UV[0] = ogOffset[0] + offsetX
vert.UV[1] = ogOffset[1] + offsetY
}
} | textureAnimation.go | 0.825203 | 0.69643 | textureAnimation.go | starcoder |
package aes
// m is the AES irreducible field polynomial used for all GF(2^8) reduction:
// m(x) = x^8 + x^4 + x^3 + x + 1 = 0x11B
const m uint = 0x11B
// getOrder returns the degree (order) of the polynomial represented by the
// bits of p, i.e. the index of its highest set bit. By convention p == 0
// reports order 0.
func getOrder(p uint) uint {
	if p == 0 {
		return 0
	}
	var order uint
	for q := p >> 1; q != 0; q >>= 1 {
		order++
	}
	return order
}
// polyMultiply returns the carry-less product of the GF(2)[x] polynomials a
// and b: each set bit i of b contributes a<<i, and contributions combine
// with XOR.
//
// This is a single O(bits(b)) shift-and-xor pass replacing the original
// O(bits(a)*bits(b)) double loop; it also no longer depends on getOrder.
func polyMultiply(a uint, b uint) uint {
	var c uint
	for i := uint(0); b>>i != 0; i++ {
		if (b>>i)&1 == 1 {
			c ^= a << i
		}
	}
	return c
}
// polyDivide divides polynomial p by polynomial m over GF(2) and returns
// (remainder, quotient). m must be non-zero.
//
// Changes from the original: the dead `order = 15` initialisation is gone,
// and the divisor's degree is computed instead of being hard-coded to 8, so
// the routine works for any modulus, not only degree-8 ones. Behaviour for
// the AES modulus 0x11B is unchanged.
func polyDivide(p uint, m uint) (uint, uint) {
	// Degree of the divisor (index of its highest set bit).
	var mOrder uint
	for q := m >> 1; q != 0; q >>= 1 {
		mOrder++
	}
	var quotient uint
	for {
		if p == 0 {
			return p, quotient
		}
		// Degree of the current remainder.
		var pOrder uint
		for q := p >> 1; q != 0; q >>= 1 {
			pOrder++
		}
		if pOrder < mOrder {
			return p, quotient
		}
		// Cancel the remainder's leading term with m shifted up to it.
		shift := pOrder - mOrder
		p ^= m << shift
		quotient ^= 1 << shift
	}
}
// galoisReduce reduces p modulo the AES irreducible polynomial m(x),
// returning (remainder, quotient).
func galoisReduce(p uint) (uint, uint) {
	// Reduces p modulo the irreducible polynomial m(x)
	return polyDivide(p, m)
}
func GaloisMultiply(a byte, b byte) byte {
// Multiply a by b to get c
c := polyMultiply(uint(a), uint(b))
// Reduce modulo m(x) and return
result, _ := galoisReduce(c)
return byte(result)
} | galois.go | 0.833189 | 0.62289 | galois.go | starcoder |
package vision
import (
"fmt"
"strconv"
)
// ParseInteger parses arg into an Integer. Supported inputs: base-10 numeric
// strings, bools (true -> 1, false -> 0), all built-in integer and float
// types (floats are truncated), and complex values with a zero imaginary
// part. It returns ErrInvalid for unparseable values and ErrUnsupported for
// unrecognised types.
//
// Fixes: the complex branch previously formatted the value with %f —
// producing e.g. "(1.000000+2.000000i)" — and fed that to ParseInt, which
// can never succeed; real-valued complex numbers now convert directly.
// The previously missing int16/uint16 cases are also handled.
func ParseInteger(arg interface{}) (Integer, Error) {
	switch obj := arg.(type) {
	case string:
		i, err := strconv.ParseInt(obj, 10, 64)
		if err != nil {
			return 0, ErrInvalid
		}
		return Integer(i), nil
	case bool:
		if obj {
			return Integer(1), nil
		}
		return Integer(0), nil
	case int:
		return Integer(obj), nil
	case int8:
		return Integer(obj), nil
	case int16:
		return Integer(obj), nil
	case int32:
		return Integer(obj), nil
	case int64:
		return Integer(obj), nil
	case uint:
		return Integer(obj), nil
	case uint8:
		return Integer(obj), nil
	case uint16:
		return Integer(obj), nil
	case uint32:
		return Integer(obj), nil
	case uint64:
		return Integer(obj), nil
	case float32:
		return Integer(obj), nil
	case float64:
		return Integer(obj), nil
	case complex64:
		if imag(obj) != 0 {
			return 0, ErrInvalid
		}
		return Integer(real(obj)), nil
	case complex128:
		if imag(obj) != 0 {
			return 0, ErrInvalid
		}
		return Integer(real(obj)), nil
	}
	return 0, ErrUnsupported
}
// IsPositive checks if the integer number is positive.
// NOTE(review): zero is treated as positive (i >= 0), making this the exact
// complement of IsNegative — confirm callers rely on that split before
// tightening it.
func (i Integer) IsPositive() bool {
	return i >= 0
}
// IsNegative checks if the integer number is strictly negative.
func (i Integer) IsNegative() bool {
	return i < 0
}
// IsBetween checks if value is between min and max, inclusive on both ends.
func (i Integer) IsBetween(min, max int) bool {
	return int(i) >= min && int(i) <= max
}
// IsLatitude checks if value is a valid latitude.
// NOTE(review): the integer is rendered with %d before matching
// regexLatitude (declared elsewhere); presumably the pattern accepts
// integral degrees — verify against the regex definition.
func (i Integer) IsLatitude() bool {
	s := fmt.Sprintf("%d", i)
	return regexLatitude.MatchString(s)
}
// IsLongitude checks if value is a valid longitude (same %d-then-regex
// approach as IsLatitude).
func (i Integer) IsLongitude() bool {
	s := fmt.Sprintf("%d", i)
	return regexLongitude.MatchString(s)
}
// IsUSPhoneNumber checks if the value is a valid US phone number.
func (i Integer) IsUSPhoneNumber() bool {
	s := fmt.Sprintf("%d", i)
	return regexUSPhoneNumber.MatchString(s)
}
// String returns the decimal string representation of the integer.
func (i Integer) String() string {
	return fmt.Sprintf("%d", i)
}
// Bool returns true only when the integer is exactly 1; every other value
// (including other non-zero values) yields false.
func (i Integer) Bool() Bool {
	if i == 1 {
		return Bool(true)
	}
	return Bool(false)
}
// Float returns the float type value of the integer.
func (i Integer) Float() Float {
	return Float(i)
}
// Val returns the build-in type value
func (i Integer) Val() int {
return int(i)
} | integer.go | 0.71889 | 0.409929 | integer.go | starcoder |
package utils
import (
"fmt"
"reflect"
"strconv"
)
// CoerceFloat converts an arbitrary value to a float32, reporting success in
// the second return value. Strings are parsed as decimal floats, bools map
// to 1/0, and named numeric types fall through to reflection.
//
// Bug fix: the reflection fallback previously called v.Int() for unsigned
// kinds, which panics (reflect.Value.Int is only valid for signed integer
// kinds); unsigned kinds now go through v.Uint().
func CoerceFloat(value interface{}) (float32, bool) {
	switch result := value.(type) {
	case int:
		return float32(result), true
	case int32:
		return float32(result), true
	case int64:
		return float32(result), true
	case uint:
		return float32(result), true
	case uint8:
		return float32(result), true
	case uint16:
		return float32(result), true
	case uint32:
		return float32(result), true
	case uint64:
		return float32(result), true
	case float32:
		return result, true
	case float64:
		return float32(result), true
	case bool:
		if result {
			return 1.0, true
		}
		return 0.0, true
	case string:
		val, err := strconv.ParseFloat(result, 64)
		if err != nil {
			return 0.0, false
		}
		return float32(val), true
	default:
		v := reflect.ValueOf(value)
		switch v.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			return float32(v.Int()), true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			return float32(v.Uint()), true
		case reflect.Float32, reflect.Float64:
			return float32(v.Float()), true
		}
		return 0.0, false
	}
}
// CoerceInt converts an arbitrary value to an int32, reporting success in
// the second return value. Strings are parsed as base-10 integers, floats
// are truncated, bools map to 1/0, and named numeric types fall through to
// reflection. Values outside int32 range wrap via conversion, as before.
//
// Bug fix: the reflection fallback previously called v.Int() for unsigned
// kinds, which panics; unsigned kinds now go through v.Uint().
func CoerceInt(value interface{}) (int32, bool) {
	switch result := value.(type) {
	case int:
		return int32(result), true
	case int32:
		return result, true
	case int64:
		return int32(result), true
	case uint:
		return int32(result), true
	case uint8:
		return int32(result), true
	case uint16:
		return int32(result), true
	case uint32:
		return int32(result), true
	case uint64:
		return int32(result), true
	case float32:
		return int32(result), true
	case float64:
		return int32(result), true
	case bool:
		if result {
			return 1, true
		}
		return 0, true
	case string:
		val, err := strconv.ParseInt(result, 10, 64)
		if err != nil {
			return 0, false
		}
		return int32(val), true
	default:
		v := reflect.ValueOf(value)
		switch v.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			return int32(v.Int()), true
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			return int32(v.Uint()), true
		case reflect.Float32, reflect.Float64:
			return int32(v.Float()), true
		}
		return 0, false
	}
}
// CoerceBoolean converts an arbitrary value to a bool, reporting success in
// the second return value. Numbers are true when non-zero; strings are false
// for "false" and "", true otherwise; named bool types go through
// reflection; anything else fails.
func CoerceBoolean(value interface{}) (bool, bool) {
	switch v := value.(type) {
	case bool:
		return v, true
	case string:
		if v == "false" || v == "" {
			return false, true
		}
		return true, true
	case int:
		return v != 0, true
	case int32:
		return v != 0, true
	case int64:
		return v != 0, true
	case uint:
		return v != 0, true
	case uint8:
		return v != 0, true
	case uint16:
		return v != 0, true
	case uint32:
		return v != 0, true
	case uint64:
		return v != 0, true
	case float32:
		return v != 0.0, true
	case float64:
		return v != 0.0, true
	default:
		rv := reflect.ValueOf(value)
		if rv.Kind() == reflect.Bool {
			return rv.Bool(), true
		}
		return false, false
	}
}
// CoerceString converts an arbitrary value to its string representation,
// reporting success in the second return value. Integers render in decimal,
// floats with the shortest exact 'f' form, bools as "true"/"false";
// fmt.Stringer implementations use their String method, and named string
// types go through reflection.
//
// Bug fix: float32 values were formatted with bitSize 64, which exposes
// float32->float64 conversion noise (e.g. "3.0999999046325684" for
// float32(3.1)); they are now formatted with bitSize 32.
func CoerceString(value interface{}) (string, bool) {
	switch result := value.(type) {
	case int:
		return strconv.FormatInt(int64(result), 10), true
	case int32:
		return strconv.FormatInt(int64(result), 10), true
	case int64:
		return strconv.FormatInt(result, 10), true
	case uint:
		return strconv.FormatInt(int64(result), 10), true
	case uint8:
		return strconv.FormatInt(int64(result), 10), true
	case uint16:
		return strconv.FormatInt(int64(result), 10), true
	case uint32:
		return strconv.FormatInt(int64(result), 10), true
	case uint64:
		return strconv.FormatInt(int64(result), 10), true
	case float32:
		return strconv.FormatFloat(float64(result), 'f', -1, 32), true
	case float64:
		return strconv.FormatFloat(result, 'f', -1, 64), true
	case bool:
		return strconv.FormatBool(result), true
	case string:
		return result, true
	case fmt.Stringer:
		return result.String(), true
	default:
		v := reflect.ValueOf(value)
		if v.Kind() == reflect.String {
			return v.String(), true
		}
		return "", false
	}
}
func CoerceEnum(value interface{}) (string, bool) {
switch result := value.(type) {
case int:
return strconv.FormatInt(int64(result), 32), true
case int32:
return strconv.FormatInt(int64(result), 32), true
case int64:
return strconv.FormatInt(result, 32), true
case float32:
return strconv.FormatFloat(float64(result), 'f', -1, 32), true
case float64:
return strconv.FormatFloat(result, 'f', -1, 32), true
case bool:
return strconv.FormatBool(result), true
case string:
return result, true
default:
v := reflect.ValueOf(value)
if v.Kind() == reflect.String {
return v.String(), true
}
return "", false
}
} | vendor/github.com/playlyfe/go-graphql/utils/coerce.go | 0.501709 | 0.468061 | coerce.go | starcoder |
package export
import (
"github.com/prometheus/client_golang/prometheus"
)
// QueryBrokerExporter contains all the Prometheus metrics that are possible
// to gather from the brokers. The `description` struct tags are purely
// informational; the Prometheus help text is set in NewQueryBrokerExporter.
type QueryBrokerExporter struct {
	QueryTime *prometheus.HistogramVec `description:"milliseconds taken to complete a query"`
	QueryBytes *prometheus.HistogramVec `description:"number of bytes returned in query response"`
	QueryNodeTime prometheus.Summary `description:"milliseconds taken to query individual historical/realtime processes"`
	QueryNodeBytes prometheus.Summary `description:"number of bytes returned from querying individual historical/realtime processes"`
	QueryNodetTtfb prometheus.Summary `description:"time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes"`
	QueryNodeBackpressure prometheus.Summary `description:"milliseconds that the channel to this process has spent suspended due to backpressure"`
	QueryCount *prometheus.GaugeVec `description:"number of total queries"`
	QuerySuccessCount *prometheus.GaugeVec `description:"number of queries successfully processed"`
	QueryFailedCount *prometheus.GaugeVec `description:"number of failed queries"`
	QueryInterruptedCount *prometheus.GaugeVec `description:"number of queries interrupted due to cancellation or timeout"`
	SQLQueryTime prometheus.Summary `description:"milliseconds taken to complete a SQL query"`
	SQLQueryBytes prometheus.Summary `description:"number of bytes returned in SQL query response"`
}
// NewQueryBrokerExporter returns a broker exporter with every metric
// constructed and registered on the default Prometheus registry. It panics
// (via MustRegister) if any metric collides with an existing registration,
// so it should be called at most once per process.
func NewQueryBrokerExporter() *QueryBrokerExporter {
	qb := &QueryBrokerExporter{
		QueryTime: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "druid",
			Subsystem: "broker",
			Name:      "query_time",
			Help:      "milliseconds taken to complete a query",
			Buckets:   []float64{10, 100, 500, 1000, 2000, 3000, 5000, 7000, 10000},
		}, []string{"dataSource"}),
		QueryBytes: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "druid",
			Subsystem: "broker",
			Name:      "query_bytes",
			Help:      "number of bytes returned in query response",
			Buckets:   []float64{10, 100, 500, 1000, 2000, 3000, 5000, 7000, 10000},
		}, []string{"dataSource"}),
		QueryNodeTime: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace:  "druid",
			Subsystem:  "broker",
			Name:       "query_node_time",
			Help:       "milliseconds taken to query individual historical/realtime processes",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		}),
		QueryNodeBytes: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace:  "druid",
			Subsystem:  "broker",
			Name:       "query_node_bytes",
			Help:       "number of bytes returned from querying individual historical/realtime processes",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		}),
		QueryNodetTtfb: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace:  "druid",
			Subsystem:  "broker",
			Name:       "query_node_ttfb",
			Help:       "time to first byte. Milliseconds elapsed until Broker starts receiving the response from individual historical/realtime processes",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		}),
		QueryNodeBackpressure: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace:  "druid",
			Subsystem:  "broker",
			Name:       "query_node_backpressure",
			Help:       "milliseconds that the channel to this process has spent suspended due to backpressure",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		}),
		QueryCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: "druid",
			Subsystem: "broker",
			Name:      "query_count",
			Help:      "number of total queries",
		}, []string{}),
		QuerySuccessCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: "druid",
			Subsystem: "broker",
			Name:      "query_success_count",
			Help:      "number of queries successfully processed",
		}, []string{}),
		QueryFailedCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: "druid",
			Subsystem: "broker",
			Name:      "query_failed_count",
			Help:      "number of failed queries",
		}, []string{}),
		QueryInterruptedCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: "druid",
			Subsystem: "broker",
			Name:      "query_interrupted_count",
			Help:      "number of queries interrupted due to cancellation or timeout",
		}, []string{}),
		SQLQueryTime: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace:  "druid",
			Subsystem:  "broker",
			Name:       "sql_query_time",
			Help:       "milliseconds taken to complete a SQL query",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		}),
		SQLQueryBytes: prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace:  "druid",
			Subsystem:  "broker",
			Name:       "sql_query_bytes",
			Help:       "number of bytes returned in SQL query response",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		}),
	}
	// Register every metric in one variadic call instead of twelve
	// separate MustRegister invocations; registration order is preserved.
	prometheus.MustRegister(
		qb.QueryTime,
		qb.QueryBytes,
		qb.QueryNodeTime,
		qb.QueryNodeBytes,
		qb.QueryNodetTtfb,
		qb.QueryNodeBackpressure,
		qb.QueryCount,
		qb.QuerySuccessCount,
		qb.QueryFailedCount,
		qb.QueryInterruptedCount,
		qb.SQLQueryTime,
		qb.SQLQueryBytes,
	)
	return qb
}
// SetQueryTime records a query wall-time observation for the given dataSource.
func (bc *QueryBrokerExporter) SetQueryTime(source string, val float64) {
	bc.QueryTime.With(prometheus.Labels{"dataSource": source}).Observe(val)
}
// SetQueryBytes records a query response-size observation for the given dataSource.
func (bc *QueryBrokerExporter) SetQueryBytes(source string, val float64) {
	bc.QueryBytes.With(prometheus.Labels{"dataSource": source}).Observe(val)
}
// SetQueryNodeTime records a per-node query time observation.
func (bc *QueryBrokerExporter) SetQueryNodeTime(val float64) {
	bc.QueryNodeTime.Observe(val)
}
// SetQueryNodeBytes records a per-node response-size observation.
func (bc *QueryBrokerExporter) SetQueryNodeBytes(val float64) {
	bc.QueryNodeBytes.Observe(val)
}
// SetQueryNodetTtfb records a per-node time-to-first-byte observation.
func (bc *QueryBrokerExporter) SetQueryNodetTtfb(val float64) {
	bc.QueryNodetTtfb.Observe(val)
}
// SetQueryNodeBackpressure records a per-node backpressure observation.
func (bc *QueryBrokerExporter) SetQueryNodeBackpressure(val float64) {
	bc.QueryNodeBackpressure.Observe(val)
}
// SetQueryCount adds val to the running query total.
// NOTE(review): the Set*Count metrics are gauges grown with Add; presumably
// callers feed per-interval deltas — confirm before computing rate() on them.
func (bc *QueryBrokerExporter) SetQueryCount(val float64) {
	bc.QueryCount.WithLabelValues().Add(val)
}
// SetQuerySuccessCount adds val to the successful-query total.
func (bc *QueryBrokerExporter) SetQuerySuccessCount(val float64) {
	bc.QuerySuccessCount.WithLabelValues().Add(val)
}
// SetQueryFailedCount adds val to the failed-query total.
func (bc *QueryBrokerExporter) SetQueryFailedCount(val float64) {
	bc.QueryFailedCount.WithLabelValues().Add(val)
}
// SetQueryInterruptedCount adds val to the interrupted-query total.
func (bc *QueryBrokerExporter) SetQueryInterruptedCount(val float64) {
	bc.QueryInterruptedCount.WithLabelValues().Add(val)
}
// SetSQLQueryTime records a SQL query wall-time observation.
func (bc *QueryBrokerExporter) SetSQLQueryTime(val float64) {
	bc.SQLQueryTime.Observe(val)
}
// SetSQLQueryBytes .
func (bc *QueryBrokerExporter) SetSQLQueryBytes(val float64) {
bc.SQLQueryBytes.Observe(val)
} | pkg/export/query_broker.go | 0.770378 | 0.467149 | query_broker.go | starcoder |
package tentsuyu
import (
"image"
"github.com/hajimehoshi/ebiten"
)
//BasicImageParts is easy to set up basic sprite image
type BasicImageParts struct {
	// Sx, Sy: top-left corner of the sub-image in the source sheet.
	// Width, Height: size of the sub-image.
	// DestWidth, DestHeight: on-screen size (0 means "same as source").
	Width, Height, Sx, Sy, DestWidth, DestHeight int
	// Reverse mirrors the image horizontally when true.
	Reverse bool
	// SourceRect is an optional pre-computed source rectangle.
	SourceRect *image.Rectangle
}
//NewBasicImageParts returns a pointer to new BasicImageParts.
//The destination dimensions default to the source dimensions.
func NewBasicImageParts(sx, sy, width, height int) *BasicImageParts {
	return &BasicImageParts{
		Sx:         sx,
		Sy:         sy,
		Width:      width,
		Height:     height,
		DestWidth:  width,
		DestHeight: height,
	}
}
//ReturnSourceRect returns the image.Rectangle for the subImage in ebiten.
//This replaces the overall ImageParts struct.
func (b *BasicImageParts) ReturnSourceRect() image.Rectangle {
	x0, y0 := b.Sx, b.Sy
	x1, y1 := b.Sx+b.Width, b.Sy+b.Height
	if b.Reverse {
		// Swap the horizontal edges to mirror the image.
		return image.Rect(x1, y0, x0, y1)
	}
	return image.Rect(x0, y0, x1, y1)
}
//SetDestinationDimensions can be used to set the size the image should be drawn to the screen
func (b *BasicImageParts) SetDestinationDimensions(width, height int) {
	b.DestWidth = width
	b.DestHeight = height
}
//ReverseX flips the image horizontally when reverse is true.
func (b *BasicImageParts) ReverseX(reverse bool) {
	b.Reverse = reverse
}
//Len returns 1 (a BasicImageParts always describes a single part).
func (b *BasicImageParts) Len() int {
	return 1
}
//Dst returns the destination rectangle; falls back to the source size
//when no destination dimensions have been set (both zero).
func (b *BasicImageParts) Dst(i int) (x0, y0, x1, y1 int) {
	if b.DestHeight == 0 && b.DestWidth == 0 {
		return 0, 0, b.Width, b.Height
	}
	return 0, 0, b.DestWidth, b.DestHeight
}
//Src cuts out the specified rectangle from the source image to display the sprite.
//When Reverse is set the horizontal edges are swapped to mirror the image.
func (b *BasicImageParts) Src(i int) (x0, y0, x1, y1 int) {
	x := b.Sx
	y := b.Sy
	if b.Reverse {
		return x + b.Width, y, x, y + b.Height
	}
	return x, y, x + b.Width, y + b.Height
}
//SubImage returns the sub image of the passed ebiten.Image based on the BasicImageParts properties
//Reduces the amount of coding needed in the actual game to get to drawing the image.
//Note: value receiver — does not mutate the BasicImageParts.
func (b BasicImageParts) SubImage(img *ebiten.Image) *ebiten.Image {
	if b.Reverse {
		// Swapped horizontal edges mirror the image.
		return img.SubImage(image.Rect(b.Sx+b.Width, b.Sy, b.Sx, b.Sy+b.Height)).(*ebiten.Image)
	}
	return img.SubImage(image.Rect(b.Sx, b.Sy, b.Sx+b.Width, b.Sy+b.Height)).(*ebiten.Image)
}
//SetScale sets the scale of the DrawImageOptions based on the given DestHeight and DestWidth of the BasicImageParts.
//Destination dimensions of zero fall back to the source dimensions (scale 1).
func (b *BasicImageParts) SetScale(op *ebiten.DrawImageOptions) {
	if b.DestWidth == 0 {
		b.DestWidth = b.Width
	}
	if b.DestHeight == 0 {
		b.DestHeight = b.Height
	}
	// Convert BEFORE dividing: integer division would truncate any
	// non-integral scale factor (e.g. DestWidth 15 / Width 10 -> 1 instead of 1.5).
	op.GeoM.Scale(float64(b.DestWidth)/float64(b.Width), float64(b.DestHeight)/float64(b.Height))
}
//BasicImagePartsFromSpriteSheet creates a BasicImageParts from a passed spritesheet on the passed frame.
//This is helpful to easily get the correct sx,sy,w,h without manually typing it in every time.
//NOTE(review): frame is not range-checked; an out-of-range frame panics — confirm callers guarantee validity.
func BasicImagePartsFromSpriteSheet(spriteSheet *SpriteSheet, frame int) *BasicImageParts {
	return &BasicImageParts{
		Sx:     spriteSheet.Frames[frame].Frame["x"],
		Sy:     spriteSheet.Frames[frame].Frame["y"],
		Width:  spriteSheet.Frames[frame].Frame["w"],
		Height: spriteSheet.Frames[frame].Frame["h"],
	}
} | basicimageparts.go | 0.755817 | 0.484746 | basicimageparts.go | starcoder
package parse
import "regexp"
// ParseCountry accepts a VIN string and returns an ISO country code representing
// the country the vehicle was manufactured in. It returns "" when no known
// WMI prefix pattern matches. Map iteration order is random, but the prefix
// patterns are mutually exclusive, so at most one can match.
func ParseCountry(vin string) string {
	for code, matcher := range countryMatchers {
		if matcher.MatchString(vin) {
			return code
		}
	}
	return ""
}
// countryMatchers maps ISO 3166-1 alpha-2 country codes to regexps matching
// the leading characters (WMI region/country code) of a VIN assigned to that country.
var countryMatchers = map[string]*regexp.Regexp{
	"ZA": regexp.MustCompile("^A[A-H]"),
	"CI": regexp.MustCompile("^A[J-N]"),
	"AO": regexp.MustCompile("^B[A-E]"),
	"KE": regexp.MustCompile("^B[F-K]"),
	"TZ": regexp.MustCompile("^B[L-R]"),
	"BJ": regexp.MustCompile("^C[A-E]"),
	"MG": regexp.MustCompile("^C[F-K]"),
	"TN": regexp.MustCompile("^C[L-R]"),
	"EG": regexp.MustCompile("^D[A-E]"),
	"MA": regexp.MustCompile("^D[F-K]"),
	"ZM": regexp.MustCompile("^D[L-R]"),
	"ET": regexp.MustCompile("^E[A-E]"),
	"NZ": regexp.MustCompile("(^E[F-K]|^7[A-E])"),
	"GH": regexp.MustCompile("^F[A-E]"),
	"NG": regexp.MustCompile("^F[F-K]"),
	"JP": regexp.MustCompile("^J[A-Z0]"),
	"LK": regexp.MustCompile("^K[A-E]"),
	"IL": regexp.MustCompile("^K[F-K]"),
	"KR": regexp.MustCompile("^K[L-R]"),
	"KZ": regexp.MustCompile("^K[S-Z0]"),
	"CN": regexp.MustCompile("^L[A-Z0]"),
	"IN": regexp.MustCompile("^M[A-E]"),
	"ID": regexp.MustCompile("^M[F-K]"),
	"TH": regexp.MustCompile("^M[L-R]"),
	"IR": regexp.MustCompile("^N[A-E]"),
	"PK": regexp.MustCompile("^N[F-K]"),
	"TR": regexp.MustCompile("^N[L-R]"),
	"PH": regexp.MustCompile("^P[A-E]"),
	"SG": regexp.MustCompile("^P[F-K]"),
	"MY": regexp.MustCompile("^P[L-R]"),
	"AE": regexp.MustCompile("^R[A-E]"),
	"TW": regexp.MustCompile("^R[F-K]"),
	"VN": regexp.MustCompile("^R[L-R]"),
	"SA": regexp.MustCompile("^R[S-Z0]"),
	"GB": regexp.MustCompile("^S[A-M]"),
	"DE": regexp.MustCompile("(^S[N-T])|(^W[A-Z0])"), // East & West Germany
	"PL": regexp.MustCompile("^S[U-Z]"),
	"LV": regexp.MustCompile("^S[1-4]"),
	"CH": regexp.MustCompile("^T[A-H]"),
	"CZ": regexp.MustCompile("^T[J-P]"),
	"HU": regexp.MustCompile("^T[R-V]"),
	"PT": regexp.MustCompile("^T[W-Z0-1]"),
	"DK": regexp.MustCompile("^U[H-M]"),
	"IE": regexp.MustCompile("^U[N-T]"),
	"RO": regexp.MustCompile("^U[U-Z]"),
	"SK": regexp.MustCompile("^U[5-7]"),
	"AT": regexp.MustCompile("^V[A-E]"),
	"FR": regexp.MustCompile("^V[F-R]"),
	"ES": regexp.MustCompile("^V[S-W]"),
	"RS": regexp.MustCompile("^V[X-Z0-2]"),
	"HR": regexp.MustCompile("^V[3-5]"),
	"EE": regexp.MustCompile("^V[6-Z0]"),
	"BG": regexp.MustCompile("^X[A-E]"),
	"GR": regexp.MustCompile("^X[F-K]"),
	"NL": regexp.MustCompile("^X[L-R]"),
	"RU": regexp.MustCompile("(^X[S-W])|(^X[3-Z0])"), // Russia & USSR
	"LU": regexp.MustCompile("^X[X-Z0-2]"),
	"BE": regexp.MustCompile("^Y[A-E]"),
	"FI": regexp.MustCompile("^Y[F-K]"),
	"MT": regexp.MustCompile("^Y[L-R]"),
	"SE": regexp.MustCompile("^Y[S-W]"),
	"NO": regexp.MustCompile("^Y[X-Z0-2]"),
	"BY": regexp.MustCompile("^Y[3-5]"),
	"UA": regexp.MustCompile("^Y[6-Z0]"),
	"IT": regexp.MustCompile("^Z[A-R]"),
	"SI": regexp.MustCompile("^Z[X-Z0-2]"),
	"LT": regexp.MustCompile("^Z[3-5]"),
	"US": regexp.MustCompile("(^1[A-Z0])|(^4[A-Z0])|(^5[A-Z0])"),
	"CA": regexp.MustCompile("^2[A-Z0]"),
	"MX": regexp.MustCompile("^3[A-Z0-7]"),
	"KY": regexp.MustCompile("^3[8-9]"),
	"AU": regexp.MustCompile("^6[A-W]"),
	"AR": regexp.MustCompile("^8[A-E]"),
	"CL": regexp.MustCompile("^8[F-K]"),
	"EC": regexp.MustCompile("^8[L-R]"),
	"PE": regexp.MustCompile("^8[S-W]"),
	"VE": regexp.MustCompile("^8[X-Z0-2]"),
	// Fixed: the class was "[A-E,3-9]", which also matched a literal ','
	// (never a valid VIN character).
	"BR": regexp.MustCompile("^9[A-E3-9]"),
	"CO": regexp.MustCompile("^9[F-K]"),
	"PY": regexp.MustCompile("^9[L-R]"),
	"UY": regexp.MustCompile("^9[S-W]"),
	"TT": regexp.MustCompile("^9[X-Z0-2]"),
}
package transform
import (
"errors"
"fmt"
)
const (
	// Worst-case header size: up to 8 chunks, each with a 4-byte
	// (mode + 24-bit primary index) header entry.
	_BWT_MAX_HEADER_SIZE = 8 * 4
)
// Utility class to en/de-code a BWT data block and its associated primary index(es)
// BWT stream format: Header (m bytes) Data (n bytes)
// Header: For each primary index,
//   mode (8 bits) + primary index (8,16 or 24 bits)
//   mode: bits 7-6 contain the size in bits of the primary index :
//             00: primary index size <=  6 bits (fits in mode byte)
//             01: primary index size <= 14 bits (1 extra byte)
//             10: primary index size <= 22 bits (2 extra bytes)
//             11: primary index size  > 22 bits (3 extra bytes)
//         bits 5-0 contain 6 most significant bits of primary index
//   primary index: remaining bits (up to 3 bytes)
// BWTBlockCodec a codec that encapsulates a Burrows Wheeler Transform and
// takes care of encoding/decoding information about the primary indexes in a header.
type BWTBlockCodec struct {
	// bwt performs the actual transform; this codec only adds the header.
	bwt *BWT
}
// NewBWTBlockCodec creates a new instance of BWTBlockCodec.
// The codec is returned even when BWT construction fails; callers must
// check the error before use.
func NewBWTBlockCodec() (*BWTBlockCodec, error) {
	codec := &BWTBlockCodec{}
	bwt, err := NewBWT()
	codec.bwt = bwt
	return codec, err
}
// NewBWTBlockCodecWithCtx creates a new instance of BWTBlockCodec using the
// provided context for the underlying BWT.
func NewBWTBlockCodecWithCtx(ctx *map[string]interface{}) (*BWTBlockCodec, error) {
	codec := &BWTBlockCodec{}
	bwt, err := NewBWTWithCtx(ctx)
	codec.bwt = bwt
	return codec, err
}
// Forward applies the function to the src and writes the result
// to the destination. Returns number of bytes read, number of bytes
// written and possibly an error.
//
// The header size depends on the primary indexes, which are only known
// after running the BWT; so the transform is first written at an estimated
// offset, then the data is shifted if the actual header size differs.
func (this *BWTBlockCodec) Forward(src, dst []byte) (uint, uint, error) {
	if len(src) == 0 {
		return 0, 0, nil
	}
	if &src[0] == &dst[0] {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	blockSize := len(src)
	if len(dst) < this.MaxEncodedLen(blockSize) {
		return 0, 0, fmt.Errorf("Output buffer is too small - size: %d, required %d",
			len(dst), this.MaxEncodedLen(blockSize))
	}
	chunks := GetBWTChunks(blockSize)
	// log = number of bits needed to represent any index into src.
	log := uint(1)
	for 1<<log <= len(src) {
		log++
	}
	// Estimate header size based on block size (upper bound per chunk)
	headerSizeBytes1 := uint(chunks) * ((2 + log + 7) >> 3)
	// Apply forward Transform
	iIdx, oIdx, err := this.bwt.Forward(src, dst[headerSizeBytes1:])
	if err != nil {
		return iIdx, oIdx, err
	}
	oIdx += headerSizeBytes1
	// Now compute the exact header size from the actual primary indexes.
	headerSizeBytes2 := uint(0)
	for i := 0; i < chunks; i++ {
		primaryIndex := this.bwt.PrimaryIndex(i)
		pIndexSizeBits := uint(6)
		for 1<<pIndexSizeBits <= primaryIndex {
			pIndexSizeBits++
		}
		// Compute block size based on primary index
		headerSizeBytes2 += ((2 + pIndexSizeBits + 7) >> 3)
	}
	if headerSizeBytes2 != headerSizeBytes1 {
		// Adjust space for header: shift transformed data to its final offset.
		copy(dst[headerSizeBytes2:], dst[headerSizeBytes1:headerSizeBytes1+uint(blockSize)])
		oIdx = oIdx - headerSizeBytes1 + headerSizeBytes2
	}
	idx := 0
	for i := 0; i < chunks; i++ {
		primaryIndex := this.bwt.PrimaryIndex(i)
		pIndexSizeBits := uint(6)
		for 1<<pIndexSizeBits <= primaryIndex {
			pIndexSizeBits++
		}
		// Compute primary index size
		pIndexSizeBytes := (2 + pIndexSizeBits + 7) >> 3
		// Write block header (mode + primary index). See top of file for format
		shift := (pIndexSizeBytes - 1) << 3
		blockMode := (pIndexSizeBits + 1) >> 3
		blockMode = (blockMode << 6) | ((primaryIndex >> shift) & 0x3F)
		dst[idx] = byte(blockMode)
		idx++
		// Remaining bytes of the primary index, most significant first.
		for shift >= 8 {
			shift -= 8
			dst[idx] = byte(primaryIndex >> shift)
			idx++
		}
	}
	return iIdx, oIdx, nil
}
// Inverse applies the reverse function to the src and writes the result
// to the destination. Returns number of bytes read, number of bytes
// written and possibly an error.
//
// It first parses the per-chunk (mode + primary index) header entries,
// then runs the inverse BWT over the remaining payload.
func (this *BWTBlockCodec) Inverse(src, dst []byte) (uint, uint, error) {
	if len(src) == 0 {
		return 0, 0, nil
	}
	if &src[0] == &dst[0] {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	srcIdx := uint(0)
	// blockSize tracks the payload size: total minus consumed header bytes.
	blockSize := uint(len(src))
	chunks := GetBWTChunks(len(src))
	for i := 0; i < chunks; i++ {
		// Read block header (mode + primary index). See top of file for format
		blockMode := uint(src[srcIdx])
		srcIdx++
		// Bits 7-6 of the mode encode how many bytes the index occupies (1-4 total).
		pIndexSizeBytes := 1 + ((blockMode >> 6) & 0x03)
		if blockSize < pIndexSizeBytes {
			return 0, 0, errors.New("Invalid compressed length in bitstream")
		}
		blockSize -= pIndexSizeBytes
		shift := (pIndexSizeBytes - 1) << 3
		// Bits 5-0 of the mode are the most significant 6 bits of the index.
		primaryIndex := (blockMode & 0x3F) << shift
		// Extract BWT primary index
		for i := uint(1); i < pIndexSizeBytes; i++ {
			shift -= 8
			primaryIndex |= uint(src[srcIdx]) << shift
			srcIdx++
		}
		if this.bwt.SetPrimaryIndex(i, primaryIndex) == false {
			return 0, 0, errors.New("Invalid primary index in bitstream")
		}
	}
	// Apply inverse Transform
	return this.bwt.Inverse(src[srcIdx:srcIdx+blockSize], dst)
}
// MaxEncodedLen returns the max size required for the encoding output buffer:
// the source length plus the worst-case header size.
func (this BWTBlockCodec) MaxEncodedLen(srcLen int) int {
	return srcLen + _BWT_MAX_HEADER_SIZE
} | transform/BWTBlockCodec.go | 0.749821 | 0.476153 | BWTBlockCodec.go | starcoder
package sketchy
// PointHeap is a min/max heap for Points using inter-point distance as the metric
type PointHeap struct {
	// size is the number of elements currently in the heap (== len(points)).
	size int
	// points stores the binary heap in the usual array layout
	// (children of i at 2i+1 and 2i+2).
	points []MetricPoint
	// isMinHeap selects the ordering: true = smallest Metric at root.
	isMinHeap bool
}
// NewMaxPointHeap returns an empty max-heap (largest Metric at the root).
func NewMaxPointHeap() *PointHeap {
	return &PointHeap{points: []MetricPoint{}}
}
// NewMinPointHeap returns an empty min-heap (smallest Metric at the root).
func NewMinPointHeap() *PointHeap {
	return &PointHeap{points: []MetricPoint{}, isMinHeap: true}
}
// Len returns the number of points currently in the heap.
func (m *PointHeap) Len() int {
	return m.size
}
// Push inserts p and sifts it up until the heap property is restored.
// The loop terminates at the root because parent(0) == 0, making the
// comparison compare the root with itself.
func (m *PointHeap) Push(p MetricPoint) {
	m.points = append(m.points, p)
	index := m.size
	m.size++
	if m.isMinHeap {
		for m.points[index].Metric < m.points[m.parent(index)].Metric {
			m.swap(index, m.parent(index))
			index = m.parent(index)
		}
	} else {
		for m.points[index].Metric > m.points[m.parent(index)].Metric {
			m.swap(index, m.parent(index))
			index = m.parent(index)
		}
	}
}
// Peek returns the root (min or max depending on heap kind) without
// removing it. Panics with an index-out-of-range error on an empty heap.
func (m *PointHeap) Peek() MetricPoint {
	return m.points[0]
}
// Pop removes and returns the root (min or max depending on heap kind).
// Panics on an empty heap.
//
// Fixed: the previous implementation dropped the root by reslicing
// points[1:], which shifts every element one slot left and scrambles all
// parent/child relationships; a single heapify(0) cannot repair that, so
// later pops could return elements out of order (and the shift was O(n)).
// The standard removal moves the last element to the root, truncates, and
// sifts down once.
func (m *PointHeap) Pop() MetricPoint {
	if m.size == 0 {
		panic("can't pop empty heap")
	}
	p := m.points[0]
	m.size--
	m.points[0] = m.points[m.size]
	m.points = m.points[:m.size]
	m.heapify(0)
	return p
}
// Report drains the heap, returning every point in pop order
// (ascending Metric for a min-heap, descending for a max-heap).
// The heap is empty afterwards.
func (m *PointHeap) Report() []MetricPoint {
	result := make([]MetricPoint, 0, m.size)
	for m.size > 0 {
		result = append(result, m.Pop())
	}
	return result
}
// ReportReversed drains the heap like Report but returns the points in
// the opposite order. The heap is empty afterwards.
func (m *PointHeap) ReportReversed() []MetricPoint {
	result := m.Report()
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}
// parent returns the index of i's parent; parent(0) == 0 by construction.
func (m *PointHeap) parent(i int) int {
	return (i - 1) / 2
}
// left returns the index of i's left child.
func (m *PointHeap) left(i int) int {
	return 2*i + 1
}
// right returns the index of i's right child.
func (m *PointHeap) right(i int) int {
	return 2*i + 2
}
// swap exchanges the points at indices i and j.
func (m *PointHeap) swap(i, j int) {
	m.points[i], m.points[j] = m.points[j], m.points[i]
}
// isLeaf reports whether i has no children.
// NOTE(review): the lower bound looks off by one for 0-based indexing
// (i == size/2 can be a leaf yet is reported as non-leaf); this is harmless
// because heapify also bounds-checks the child indices — confirm before reuse.
func (m *PointHeap) isLeaf(i int) bool {
	if i > (m.size/2) && i <= m.size {
		return true
	}
	return false
}
// heapify sifts the element at index i down until the heap property holds
// for its subtree, comparing against both children at each level. The
// min/max branches are mirror images differing only in the comparison.
func (m *PointHeap) heapify(i int) {
	if m.size <= 1 {
		return
	}
	if m.isLeaf(i) {
		return
	}
	l := m.left(i)
	r := m.right(i)
	if m.isMinHeap {
		// Pick the smallest of i and its (in-bounds) children.
		var min int
		if l < m.size && m.points[l].Metric < m.points[i].Metric {
			min = l
		} else {
			min = i
		}
		if r < m.size && m.points[r].Metric < m.points[min].Metric {
			min = r
		}
		if min != i {
			m.swap(i, min)
			m.heapify(min)
		}
	} else {
		// Pick the largest of i and its (in-bounds) children.
		var max int
		if l < m.size && m.points[l].Metric > m.points[i].Metric {
			max = l
		} else {
			max = i
		}
		if r < m.size && m.points[r].Metric > m.points[max].Metric {
			max = r
		}
		if max != i {
			m.swap(i, max)
			m.heapify(max)
		}
	}
} | heaps.go | 0.757346 | 0.502869 | heaps.go | starcoder
package torch
// #include "torch.hpp"
// #include <stdlib.h>
import "C"
import (
"bytes"
"encoding/binary"
"fmt"
"reflect"
"runtime"
"unsafe"
)
// Tensor holds a multi-dimensional array of elements of a single data type.
type Tensor struct {
	// context is the handle to the underlying C/torch tensor.
	context C.Torch_TensorContext
	// goData, when non-nil, is C-allocated memory owned by this Tensor and
	// freed by finalize.
	goData unsafe.Pointer
}
// NewTensor converts from a Go value to a Tensor. Valid values are scalars,
// slices, and arrays. Every element of a slice must have the same length so
// that the resulting Tensor has a valid shape.
func NewTensor(value interface{}) (*Tensor, error) {
	shape, dataType, err := shapeAndDataTypeOf(reflect.ValueOf(value))
	if err != nil {
		return nil, err
	}
	return NewTensorWithShape(value, shape, dataType)
}
// NewTensorWithShape converts a single dimensional Go array or slice into a
// Tensor with the given shape. The element data is copied into C-allocated
// memory, which is freed when the Tensor is garbage collected.
func NewTensorWithShape(value interface{}, shape []int64, dt DType) (*Tensor, error) {
	nflattened := numElements(shape)
	nbytes := typeOf(dt, nil).Size() * uintptr(nflattened)
	dataPtr := C.malloc(C.size_t(nbytes))
	dataSlice := (*[1 << 30]byte)(dataPtr)[:nbytes:nbytes]
	buf := bytes.NewBuffer(dataSlice[:0:nbytes])
	// Fixed: the encode error was previously discarded, so a value whose
	// structure did not match shape produced a tensor over partially-written
	// memory (and the C buffer leaked on failure).
	if err := encodeTensor(buf, reflect.ValueOf(value), shape); err != nil {
		C.free(dataPtr)
		return nil, err
	}
	ctx := createTensor(dataPtr, shape, dt)
	t := tensorWithContext(ctx)
	t.goData = dataPtr
	return t, nil
}
// tensorWithContext wraps a C tensor handle and registers a finalizer so the
// native resources are released when the Go value is collected.
func tensorWithContext(ctx C.Torch_TensorContext) *Tensor {
	t := &Tensor{
		context: ctx,
	}
	runtime.SetFinalizer(t, (*Tensor).finalize)
	return t
}
// DType returns tensors datatype
func (t *Tensor) DType() DType {
	return DType(C.Torch_TensorType(t.context))
}
// Value returns tensors value as a go type (nested slices for
// multi-dimensional tensors). Panics if the native data cannot be decoded.
func (t *Tensor) Value() interface{} {
	dt := t.DType()
	shape := t.Shape()
	typ := typeOf(dt, shape)
	val := reflect.New(typ)
	nflattened := numElements(shape)
	nbytes := typeOf(dt, nil).Size() * uintptr(nflattened)
	dataPtr := C.Torch_TensorValue(t.context)
	// View the C buffer as a Go byte slice without copying.
	dataSlice := (*[1 << 30]byte)(dataPtr)[:nbytes:nbytes]
	if err := decodeTensor(bytes.NewReader(dataSlice), shape, typ, val); err != nil {
		panic(fmt.Sprintf("unable to decode Tensor of type %v and shape %v - %v", dt, shape, err))
	}
	return reflect.Indirect(val).Interface()
}
// Shape returns tensors shape.
// NOTE(review): the returned slice aliases C-owned memory — confirm callers
// do not retain it past the Tensor's lifetime.
func (t *Tensor) Shape() []int64 {
	var size C.ulong
	shape := C.Torch_TensorShape(t.context, &size)
	slice := (*[1 << 30]int64)(unsafe.Pointer(shape))[:size:size]
	return slice
}
// finalize releases the native tensor and any Go-owned C buffer; invoked by
// the runtime via SetFinalizer.
func (t *Tensor) finalize() {
	C.Torch_DeleteTensor(t.context)
	if t.goData != nil {
		C.free(t.goData)
	}
}
// createTensor constructs the native tensor over the given C buffer.
// KeepAlive calls ensure shape and ptr are not collected before the C call
// has consumed them.
func createTensor(ptr unsafe.Pointer, shape []int64, dtype DType) C.Torch_TensorContext {
	var shapePtr *C.int64_t
	if len(shape) > 0 {
		shapePtr = (*C.int64_t)(unsafe.Pointer(&shape[0]))
	}
	ctx := C.Torch_NewTensor(ptr, shapePtr, C.int(len(shape)), C.Torch_DataType(dtype))
	runtime.KeepAlive(shape)
	runtime.KeepAlive(ptr)
	return ctx
}
// shapeAndDataTypeOf returns the data type and shape of the Tensor
// corresponding to a Go type. It returns an error for element types with no
// matching entry in the types table.
func shapeAndDataTypeOf(val reflect.Value) (shape []int64, dt DType, err error) {
	typ := val.Type()
	for typ.Kind() == reflect.Array || typ.Kind() == reflect.Slice {
		shape = append(shape, int64(val.Len()))
		if val.Len() > 0 {
			// In order to check tensor structure properly in general case we need to iterate over all slices of the tensor to check sizes match
			// Since we already going to iterate over all elements in encodeTensor() let's
			// 1) do the actual check in encodeTensor() to save some cpu cycles here
			// 2) assume the shape is represented by lengths of elements with zero index in each dimension
			val = val.Index(0)
		}
		typ = typ.Elem()
	}
	for _, t := range types {
		if typ.Kind() == t.typ.Kind() {
			return shape, DType(t.dataType), nil
		}
	}
	return shape, dt, fmt.Errorf("unsupported type %v", typ)
}
// decodeTensor decodes the Tensor from the buffer to ptr using the format
// specified in c_api.h. Use stringDecoder for String tensors.
// It recurses per dimension, with a fast path when only one dimension of
// numeric elements remains.
func decodeTensor(r *bytes.Reader, shape []int64, typ reflect.Type, ptr reflect.Value) error {
	switch typ.Kind() {
	case reflect.Bool:
		b, err := r.ReadByte()
		if err != nil {
			return err
		}
		ptr.Elem().SetBool(b == 1)
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
		if err := binary.Read(r, nativeEndian, ptr.Interface()); err != nil {
			return err
		}
	case reflect.Slice:
		val := reflect.Indirect(ptr)
		val.Set(reflect.MakeSlice(typ, int(shape[0]), int(shape[0])))
		// Optimization: if only one dimension is left we can use binary.Read() directly for this slice
		if len(shape) == 1 && val.Len() > 0 {
			switch val.Index(0).Kind() {
			case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
				return binary.Read(r, nativeEndian, val.Interface())
			}
		}
		// General case: decode each element of this dimension recursively.
		for i := 0; i < val.Len(); i++ {
			if err := decodeTensor(r, shape[1:], typ.Elem(), val.Index(i).Addr()); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("unsupported type %v", typ)
	}
	return nil
}
// encodeTensor writes v into w in native byte order, recursing per dimension.
// It also validates that every slice at each depth matches the expected
// length from shape (see the comment in shapeAndDataTypeOf).
func encodeTensor(w *bytes.Buffer, v reflect.Value, shape []int64) error {
	switch v.Kind() {
	case reflect.Bool:
		// Bools are encoded as a single byte: 1 for true, 0 for false.
		b := byte(0)
		if v.Bool() {
			b = 1
		}
		if err := w.WriteByte(b); err != nil {
			return err
		}
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
		if err := binary.Write(w, nativeEndian, v.Interface()); err != nil {
			return err
		}
	case reflect.Array, reflect.Slice:
		// If current dimension is a slice, verify that it has the expected size
		// Go's type system makes that guarantee for arrays.
		if v.Kind() == reflect.Slice {
			expected := int(shape[0])
			if v.Len() != expected {
				return fmt.Errorf("mismatched slice lengths: %d and %d", v.Len(), expected)
			}
		}
		// Optimisation: if only one dimension is left we can use binary.Write() directly for this slice
		if len(shape) == 1 && v.Len() > 0 {
			switch v.Index(0).Kind() {
			case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
				return binary.Write(w, nativeEndian, v.Interface())
			}
		}
		subShape := shape[1:]
		for i := 0; i < v.Len(); i++ {
			err := encodeTensor(w, v.Index(i), subShape)
			if err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("unsupported type %v", v.Type())
	}
	return nil
}
// typeOf converts from a DType and Shape to the equivalent Go type:
// the element type wrapped in one slice level per dimension.
// Panics on an unknown DType.
func typeOf(dt DType, shape []int64) reflect.Type {
	var elem reflect.Type
	for _, t := range types {
		if DType(t.dataType) == dt {
			elem = t.typ
			break
		}
	}
	if elem == nil {
		// TODO get tensor name
		panic(fmt.Sprintf("Unsupported DType %d", int(dt)))
	}
	for range shape {
		elem = reflect.SliceOf(elem)
	}
	return elem
}
// numElements returns the total element count for a shape: the product of
// all dimensions (1 for a scalar / empty shape).
func numElements(shape []int64) int64 {
	product := int64(1)
	for i := range shape {
		product *= shape[i]
	}
	return product
}
// nativeEndian is the byte order of the host CPU, detected at startup;
// used so encoded buffers match the layout the C side expects.
var nativeEndian binary.ByteOrder
func init() {
	// Write a known 16-bit pattern and inspect the byte layout.
	buf := [2]byte{}
	*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD)
	switch buf {
	case [2]byte{0xCD, 0xAB}:
		nativeEndian = binary.LittleEndian
	case [2]byte{0xAB, 0xCD}:
		nativeEndian = binary.BigEndian
	default:
		panic("Could not determine native endianness.")
	}
}
// PrintTensors prints tensors contents via the native library.
// KeepAlive prevents the inputs (and their finalizers) from being collected
// while the C call is using their contexts.
func PrintTensors(inputs ...*Tensor) {
	contexts := make([]C.Torch_TensorContext, len(inputs))
	for i, t := range inputs {
		contexts[i] = t.context
	}
	C.Torch_PrintTensors((*C.Torch_TensorContext)(&contexts[0]), C.ulong(len(contexts)))
	runtime.KeepAlive(inputs)
} | tensor.go | 0.771155 | 0.698664 | tensor.go | starcoder
package utils
import (
"encoding/binary"
"math"
)
// LIntToBytes encodes an int as 4 little-endian bytes.
// Note: the value is truncated to uint32, so on 64-bit platforms values
// outside the 32-bit range lose their high bits.
func LIntToBytes(val int) ([]byte) {
	return LUInt32ToBytes(uint32(val))
}
// LBytesToInt decodes 4 little-endian bytes as an int (via uint32).
func LBytesToInt(bytes []byte) int {
	val := LBytesToUInt32(bytes)
	return int(val)
}
// BIntToBytes encodes an int as 4 big-endian bytes (truncated to 32 bits).
func BIntToBytes(val int) ([]byte) {
	return BUInt32ToBytes(uint32(val))
}
// BBytesToInt decodes 4 big-endian bytes as an int (via uint32).
func BBytesToInt(bytes []byte) int {
	val := BBytesToUInt32(bytes)
	return int(val)
}
// LUIntToBytes encodes a uint as 4 little-endian bytes (truncated to 32 bits).
func LUIntToBytes(val uint) ([]byte) {
	return LUInt32ToBytes(uint32(val))
}
// LBytesToUInt decodes 4 little-endian bytes as a uint.
func LBytesToUInt(bytes []byte) uint {
	val := LBytesToUInt32(bytes)
	return uint(val)
}
// BUIntToBytes encodes a uint as 4 big-endian bytes (truncated to 32 bits).
func BUIntToBytes(val uint) ([]byte) {
	return BUInt32ToBytes(uint32(val))
}
// BBytesToUInt decodes 4 big-endian bytes as a uint.
func BBytesToUInt(bytes []byte) uint {
	val := BBytesToUInt32(bytes)
	return uint(val)
}
// StringToBytes returns the UTF-8 bytes of val.
func StringToBytes(val string) []byte {
	return []byte(val)
}
// BytesToString interprets dat as a UTF-8 string.
func BytesToString(dat []byte) string {
	return string(dat)
}
// Single-byte data types convert directly (no endianness involved).
// BoolToBytes encodes a bool as one byte: 0x1 for true, 0x0 for false.
func BoolToBytes(val bool) (dat []byte) {
	b := byte(0x0)
	if val {
		b = 0x1
	}
	return []byte{b}
}
// BytesToBool decodes one byte: exactly 0x1 means true, anything else false.
func BytesToBool(dat []byte) bool {
	return dat[0] == 0x1
}
// Int8ToBytes encodes an int8 as one byte (two's complement).
func Int8ToBytes(val int8) (dat []byte) {
	return []byte{byte(val)}
}
// BytesToInt8 decodes one byte as an int8.
func BytesToInt8(dat []byte) (val int8) {
	return int8(dat[0])
}
// UInt8ToBytes encodes a uint8 as one byte.
func UInt8ToBytes(val uint8) (dat []byte) {
	return []byte{byte(val)}
}
// BytesToUInt8 decodes one byte as a uint8.
func BytesToUInt8(dat []byte) (val uint8) {
	return uint8(dat[0])
}
// Little-endian (least significant byte first) encoders and decoders
// for fixed-width integers.
func LInt16ToBytes(val int16) []byte {
	buf := make([]byte, 2)
	binary.LittleEndian.PutUint16(buf, uint16(val))
	return buf
}
func LBytesToInt16(b []byte) int16 {
	return int16(binary.LittleEndian.Uint16(b))
}
func LUInt16ToBytes(val uint16) []byte {
	buf := make([]byte, 2)
	binary.LittleEndian.PutUint16(buf, val)
	return buf
}
func LBytesToUInt16(b []byte) uint16 {
	return binary.LittleEndian.Uint16(b)
}
func LInt32ToBytes(val int32) []byte {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, uint32(val))
	return buf
}
func LBytesToInt32(b []byte) int32 {
	return int32(binary.LittleEndian.Uint32(b))
}
func LUInt32ToBytes(val uint32) []byte {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, val)
	return buf
}
func LBytesToUInt32(b []byte) uint32 {
	return binary.LittleEndian.Uint32(b)
}
func LInt64ToBytes(val int64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(val))
	return buf
}
func LBytesToInt64(b []byte) int64 {
	return int64(binary.LittleEndian.Uint64(b))
}
func LUInt64ToBytes(val uint64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, val)
	return buf
}
func LBytesToUInt64(b []byte) uint64 {
	return binary.LittleEndian.Uint64(b)
}
// Big-endian (most significant byte first) encoders and decoders
// for fixed-width integers; mirrors of the little-endian set above.
func BInt16ToBytes(val int16) (dat []byte) {
	dat = make([]byte, 2)
	binary.BigEndian.PutUint16(dat, uint16(val))
	return
}
func BBytesToInt16(bytes []byte) (int16) {
	val := binary.BigEndian.Uint16(bytes)
	return int16(val)
}
func BUInt16ToBytes(val uint16) (dat []byte) {
	dat = make([]byte, 2)
	binary.BigEndian.PutUint16(dat, val)
	return
}
func BBytesToUInt16(bytes []byte) (uint16) {
	val := binary.BigEndian.Uint16(bytes)
	return val
}
func BInt32ToBytes(val int32) (dat []byte) {
	dat = make([]byte, 4)
	binary.BigEndian.PutUint32(dat, uint32(val))
	return
}
func BBytesToInt32(bytes []byte) (int32) {
	val := binary.BigEndian.Uint32(bytes)
	return int32(val)
}
func BUInt32ToBytes(val uint32) (dat []byte) {
	dat = make([]byte, 4)
	binary.BigEndian.PutUint32(dat, val)
	return
}
func BBytesToUInt32(bytes []byte) (uint32) {
	val := binary.BigEndian.Uint32(bytes)
	return val
}
func BInt64ToBytes(val int64) (dat []byte) {
	dat = make([]byte, 8)
	binary.BigEndian.PutUint64(dat, uint64(val))
	return
}
func BBytesToInt64(bytes []byte) (int64) {
	val := binary.BigEndian.Uint64(bytes)
	return int64(val)
}
func BUInt64ToBytes(val uint64) (dat []byte) {
	dat = make([]byte, 8)
	binary.BigEndian.PutUint64(dat, val)
	return
}
func BBytesToUInt64(bytes []byte) (uint64) {
	val := binary.BigEndian.Uint64(bytes)
	return val
}
// Little/big-endian float encoders and decoders: floats are converted to
// their IEEE-754 bit patterns and reuse the integer codecs above.
func LFloat32ToBytes(val float32) []byte {
	bits := math.Float32bits(val)
	return LUInt32ToBytes(bits)
}
func LBytesToFloat32(dat []byte) float32 {
	bits := LBytesToUInt32(dat)
	return math.Float32frombits(bits)
}
func LFloat64ToBytes(val float64) []byte {
	bits := math.Float64bits(val)
	return LUInt64ToBytes(bits)
}
func LBytesToFloat64(dat []byte) float64 {
	bits := LBytesToUInt64(dat)
	return math.Float64frombits(bits)
}
func BFloat32ToBytes(val float32) []byte {
	bits := math.Float32bits(val)
	return BUInt32ToBytes(bits)
}
func BBytesToFloat32(dat []byte) float32 {
	bits := BBytesToUInt32(dat)
	return math.Float32frombits(bits)
}
func BFloat64ToBytes(val float64) []byte {
	bits := math.Float64bits(val)
	return BUInt64ToBytes(bits)
}
func BBytesToFloat64(dat []byte) float64 {
	bits := BBytesToUInt64(dat)
	return math.Float64frombits(bits)
} | utils/bytes.go | 0.61115 | 0.541045 | bytes.go | starcoder
package viertris
import (
"fmt"
"image/color"
"math/rand"
)
// ActiveTris is the piece currently falling on a GameBoard: its kind,
// rotation, and board position (X, Y of the piece's pivot cell).
type ActiveTris struct {
	Board *GameBoard
	Rotation
	X BoardDimension
	Y BoardDimension
	TrisKind
}
// Offsets on an ActiveTris takes into account rotation
func (at *ActiveTris) Offsets() [4][2]int8 {
	return at.TestOffsets(at.Rotation)
}
// TestOffsets returns the piece's four cell offsets as they would be under
// the given rotation, without changing the piece's state.
//
// Each case applies one 90° step ((x,y) -> (y,-x)) and then falls through,
// so Rotation270 accumulates three steps, Rotation180 two, Rotation90 one.
func (at *ActiveTris) TestOffsets(rotation Rotation) [4][2]int8 {
	rawOff := at.TrisKind.Offsets()
	switch rotation {
	case Rotation270:
		for i, off := range rawOff {
			off[0], off[1] = off[1], -1*off[0]
			rawOff[i] = off
		}
		fallthrough
	case Rotation180:
		for i, off := range rawOff {
			off[0], off[1] = off[1], -1*off[0]
			rawOff[i] = off
		}
		fallthrough
	case Rotation90:
		for i, off := range rawOff {
			off[0], off[1] = off[1], -1*off[0]
			rawOff[i] = off
		}
		fallthrough
	case Rotation0:
		return rawOff
	default:
		panic(fmt.Sprintf("invalid rotation %v", at.Rotation))
	}
}
// RotateLeft rotates the piece counter-clockwise, unless any cell of the
// rotated piece would be occupied or off-screen (then it is a no-op).
func (at *ActiveTris) RotateLeft() {
	newRotation := at.Rotation.RotateLeft()
	off := at.TestOffsets(newRotation)
	for _, o := range off {
		x := int(at.X) + int(o[0])
		y := int(at.Y) + int(o[1])
		if at.Board.IsSet(x, y) || at.Board.IsOffscreen(x, y) {
			return
		}
	}
	at.Rotation = newRotation
}
// RotateRight rotates the piece clockwise with the same collision check
// as RotateLeft.
func (at *ActiveTris) RotateRight() {
	newRotation := at.Rotation.RotateRight()
	off := at.TestOffsets(newRotation)
	for _, o := range off {
		x := int(at.X) + int(o[0])
		y := int(at.Y) + int(o[1])
		if at.Board.IsSet(x, y) || at.Board.IsOffscreen(x, y) {
			return
		}
	}
	at.Rotation = newRotation
}
// MoveLeft shifts the piece one column left unless any cell has an occupied
// cell immediately to its left, or the piece is already at the left wall.
func (at *ActiveTris) MoveLeft() {
	minX := int(at.X)
	off := at.Offsets()
	for _, o := range off {
		x := int(at.X) + int(o[0])
		y := int(at.Y) + int(o[1])
		if at.Board.IsSet(x-1, y) {
			return
		}
		// Track the leftmost occupied column to detect the wall.
		if x < minX {
			minX = x
		}
	}
	if minX > 0 {
		at.X--
	}
}
// MoveRight shifts the piece one column right unless blocked by a set cell
// or the right wall (Board.Width - 1).
func (at *ActiveTris) MoveRight() {
	maxX := int(at.X)
	off := at.Offsets()
	for _, o := range off {
		x := int(at.X) + int(o[0])
		y := int(at.Y) + int(o[1])
		if at.Board.IsSet(x+1, y) {
			return
		}
		if x > maxX {
			maxX = x
		}
	}
	if maxX < int(at.Board.Width-1) {
		at.X++
	}
}
// MoveDown advances the piece one row and reports whether the piece was
// placed (true means it landed; the caller should then spawn a new piece).
// NOTE(review): the Y increment is gated on Board.CheckIfTileIsPlaced(),
// and the maxY bound allows maxY == Height-1 before incrementing — confirm
// the board's placement check prevents moving below the last row.
func (at *ActiveTris) MoveDown() bool {
	maxY := int(at.Y)
	off := at.Offsets()
	for _, o := range off {
		y := int(at.Y) + int(o[1])
		if y > maxY {
			maxY = y
		}
	}
	if maxY <= int(at.Board.Height-1) {
		placed := at.Board.CheckIfTileIsPlaced()
		if !placed {
			at.Y++
		}
		return placed
	}
	return false
}
// TrisKind identifies a tetromino shape; KindNone and KindFinal are
// sentinels bounding the valid playable kinds (KindT..KindJ).
type TrisKind uint8
const (
	KindNone   TrisKind = iota
	KindT      TrisKind = iota
	KindLine   TrisKind = iota
	KindSquare TrisKind = iota
	KindZ      TrisKind = iota
	KindS      TrisKind = iota
	KindL      TrisKind = iota
	KindJ      TrisKind = iota
	KindFinal  TrisKind = iota
)
// kindColors maps each kind to its draw color, indexed by TrisKind value.
var kindColors = []color.RGBA{
	KindNone:   {},
	KindT:      {200, 0, 0, 255},
	KindLine:   {0, 200, 0, 255},
	KindSquare: {0, 0, 200, 255},
	KindZ:      {200, 200, 0, 255},
	KindS:      {200, 200, 200, 255},
	KindL:      {200, 0, 200, 255},
	KindJ:      {0, 200, 200, 255},
	KindFinal:  {50, 50, 50, 255},
}
// Color returns the draw color for this kind.
func (tk TrisKind) Color() color.RGBA {
	return kindColors[tk]
}
// kindOffsets gives, per kind, the four (x, y) cell offsets relative to the
// piece's pivot cell, in the unrotated orientation. Indexed by TrisKind.
var kindOffsets = [][4][2]int8{
	KindT: {
		{0, 0},
		{-1, 0},
		{0, -1},
		{1, 0},
	},
	KindLine: {
		{0, 0},
		{0, -1},
		{0, 1},
		{0, 2},
	},
	KindSquare: {
		{0, 0},
		{0, 1},
		{1, 1},
		{1, 0},
	},
	KindS: {
		{0, 0},
		{1, 0},
		{0, 1},
		{-1, 1},
	},
	KindZ: {
		{0, 0},
		{-1, 0},
		{0, 1},
		{1, 1},
	},
	KindL: {
		{0, 0},
		{0, -1},
		{0, 1},
		{1, 1},
	},
	KindJ: {
		{0, 0},
		{0, -1},
		{0, 1},
		{-1, 1},
	},
}
// Offsets returns the unrotated cell offsets for this kind.
func (tk TrisKind) Offsets() [4][2]int8 {
	return kindOffsets[tk]
}
// RandomKind returns a uniformly random playable kind (KindT..KindJ),
// never KindNone or KindFinal.
func RandomKind() TrisKind {
	return TrisKind(rand.Intn(int(KindFinal-1)) + 1)
} | internal/scenes/viertris/tris.go | 0.652574 | 0.412116 | tris.go | starcoder
package meter
import (
"sync"
"sync/atomic"
"time"
"unsafe"
)
// MultiHistogram associates Histogram objects to keys which can be
// selected when recording. Is completely go-routine safe.
type MultiHistogram struct {
	// Size is used to initialize the Size member of the underlying Histogram
	// objects.
	Size int
	// SamplingSeed is used to initialize the SamplingSeed member of underlying
	// Histogram objects.
	SamplingSeed int64
	// dists is an atomically-swapped *map[string]*Histogram; the map is
	// treated as immutable once published (copy-on-write, see get).
	dists unsafe.Pointer
	// mutex serializes writers creating new histograms.
	mutex sync.Mutex
}
// Record adds the given value to the histogram associated with the given
// key. New keys are lazily created as required.
func (multi *MultiHistogram) Record(key string, value float64) {
	multi.get(key).Record(value)
}
// RecordDuration is like Record but takes a time.Duration value.
func (multi *MultiHistogram) RecordDuration(key string, value time.Duration) {
	multi.get(key).RecordDuration(value)
}
// RecordSince records the duration elapsed since t0 for the given key.
func (multi *MultiHistogram) RecordSince(key string, t0 time.Time) {
	multi.get(key).RecordSince(t0)
}
// ReadMeter calls ReadMeter on all the underlying histograms where all the
// keys are prefixed by the key name used in the calls to Record.
// Returns an empty map when nothing has been recorded yet.
func (multi *MultiHistogram) ReadMeter(delta time.Duration) map[string]float64 {
	result := make(map[string]float64)
	old := multi.load()
	if old == nil {
		return result
	}
	for prefix, dist := range *old {
		for suffix, value := range dist.ReadMeter(delta) {
			result[Join(prefix, suffix)] = value
		}
	}
	return result
}
// get returns the histogram registered under key, lazily creating it when
// absent. Reads are lock-free via the atomically loaded map; creation takes
// the mutex and publishes a fresh copy of the map (copy-on-write).
func (multi *MultiHistogram) get(key string) *Histogram {
	// Fast path: the histogram already exists.
	if dists := multi.load(); dists != nil {
		if hist, ok := (*dists)[key]; ok {
			return hist
		}
	}

	multi.mutex.Lock()
	defer multi.mutex.Unlock()

	// Re-check under the lock: another goroutine may have created it.
	old := multi.load()
	if old != nil {
		if hist, ok := (*old)[key]; ok {
			return hist
		}
	}

	// Publish a new map containing all previous entries plus the new one.
	fresh := make(map[string]*Histogram)
	if old != nil {
		for k, h := range *old {
			fresh[k] = h
		}
	}

	hist := &Histogram{
		Size:         multi.Size,
		SamplingSeed: multi.SamplingSeed,
	}
	fresh[key] = hist

	multi.store(&fresh)
	return hist
}
// load atomically reads the current (immutable) key-to-histogram map; nil
// until the first store.
func (multi *MultiHistogram) load() *map[string]*Histogram {
	return (*map[string]*Histogram)(atomic.LoadPointer(&multi.dists))
}

// store atomically publishes a new key-to-histogram map.
func (multi *MultiHistogram) store(dists *map[string]*Histogram) {
	atomic.StorePointer(&multi.dists, unsafe.Pointer(dists))
}
// GetMultiHistogram returns the histogram registered with the given key or
// creates a new one and registers it.
func GetMultiHistogram(prefix string) *MultiHistogram {
	return GetOrAdd(prefix, new(MultiHistogram)).(*MultiHistogram)
} | meter_histogram_multi.go | 0.727104 | 0.52141 | meter_histogram_multi.go | starcoder
package metrics
import "github.com/Jeffail/benthos/v3/internal/docs"
//------------------------------------------------------------------------------
// init registers the Prometheus metrics plugin spec in the global
// Constructors table under TypePrometheus.
func init() {
	Constructors[TypePrometheus] = TypeSpec{
		constructor: NewPrometheus,
		Summary: `
Host endpoints (` + "`/metrics` and `/stats`" + `) for Prometheus scraping.`,
		Description: `
Metrics paths will differ from [the standard list](/docs/components/metrics/about#metric_names) in order to comply with Prometheus naming restrictions, where dots are replaced with underscores (and underscores replaced with double underscores). This change is made _before_ the mapping from ` + "`path_mapping`" + ` is applied.`,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("prefix", "A string prefix to add to all metrics."),
			pathMappingDocs(true, true),
			docs.FieldAdvanced("push_url", "An optional [Push Gateway URL](#push-gateway) to push metrics to."),
			docs.FieldAdvanced("push_interval", "The period of time between each push when sending metrics to a Push Gateway."),
			docs.FieldAdvanced("push_job_name", "An identifier for push jobs."),
			docs.FieldAdvanced("push_basic_auth", "The Basic Authentication credentials.").WithChildren(
				docs.FieldCommon("username", "The Basic Authentication username."),
				docs.FieldCommon("password", "The Basic Authentication password."),
			),
		},
		Footnotes: `
## Push Gateway
The field ` + "`push_url`" + ` is optional and when set will trigger a push of
metrics to a [Prometheus Push Gateway](https://prometheus.io/docs/instrumenting/pushing/)
once Benthos shuts down. It is also possible to specify a
` + "`push_interval`" + ` which results in periodic pushes.
The Push Gateway is useful for when Benthos instances are short lived. Do not
include the "/metrics/jobs/..." path in the push URL.
If the Push Gateway requires HTTP Basic Authentication it can be configured with
` + "`push_basic_auth`.",
	}
}
//------------------------------------------------------------------------------
// PrometheusConfig is config for the Prometheus metrics type.
type PrometheusConfig struct {
	// Prefix is a string prefix added to all metric names.
	Prefix string `json:"prefix" yaml:"prefix"`
	// PathMapping is an optional mapping applied to metric paths.
	PathMapping string `json:"path_mapping" yaml:"path_mapping"`
	// PushURL, when set, enables pushing metrics to a Push Gateway.
	PushURL string `json:"push_url" yaml:"push_url"`
	// PushBasicAuth holds Basic Authentication credentials for the gateway.
	PushBasicAuth PrometheusPushBasicAuthConfig `json:"push_basic_auth" yaml:"push_basic_auth"`
	// PushInterval is the period between pushes to the gateway.
	PushInterval string `json:"push_interval" yaml:"push_interval"`
	// PushJobName is the identifier used for push jobs.
	PushJobName string `json:"push_job_name" yaml:"push_job_name"`
}

// PrometheusPushBasicAuthConfig contains parameters for establishing basic
// authentication against a push service.
type PrometheusPushBasicAuthConfig struct {
	// Username is the Basic Authentication username.
	Username string `json:"username" yaml:"username"`
	// Password is the Basic Authentication password.
	Password string `json:"password" yaml:"password"`
}
// NewPrometheusPushBasicAuthConfig creates a new NewPrometheusPushBasicAuthConfig with default values.
func NewPrometheusPushBasicAuthConfig() PrometheusPushBasicAuthConfig {
	// The zero value already carries the defaults (empty credentials).
	var conf PrometheusPushBasicAuthConfig
	return conf
}
// NewPrometheusConfig creates an PrometheusConfig struct with default values.
func NewPrometheusConfig() PrometheusConfig {
	conf := PrometheusConfig{}
	conf.Prefix = "benthos"
	conf.PathMapping = ""
	conf.PushURL = ""
	conf.PushBasicAuth = NewPrometheusPushBasicAuthConfig()
	conf.PushInterval = ""
	conf.PushJobName = "benthos_push"
	return conf
}
//------------------------------------------------------------------------------ | lib/metrics/prometheus_config.go | 0.787114 | 0.418875 | prometheus_config.go | starcoder |
package list
// unit is the sentinel value representing the empty list.
type unit struct{}

// List represents a lazily evaluated list type. Internally a non-empty list
// is a [2]interface{} cell holding a head thunk (func() interface{}) and a
// tail thunk (func() List); the empty list is the unit value.
type List interface{}

// Mzero returns an empty list
func Mzero() List {
	return unit{}
}

// Return returns a single-element list
func Return(i interface{}) List {
	return Returnf(func() interface{} { return i })
}

// Cons pushes an element onto the front of the list
func Cons(i interface{}, l List) List {
	return Consf(func() interface{} { return i }, l)
}

// Consf pushes an element-generating function onto the list
func Consf(f func() interface{}, l List) List {
	if l == nil {
		l = Mzero()
	}
	// Build the internal cell: [head thunk, tail thunk].
	return [2]interface{}{f, func() List { return l }}
}

// Returnf creates a list with a single element-generating function
func Returnf(f func() interface{}) List {
	return Consf(f, Mzero())
}
// Head returns the value from the front of the list, forcing its thunk.
// The head of an empty (or nil) list is the unit sentinel.
func Head(l List) interface{} {
	switch v := l.(type) {
	case nil:
		return unit{}
	case unit:
		return unit{}
	default:
		cell := v.([2]interface{})
		produce := cell[0].(func() interface{})
		return produce()
	}
}
// Tail returns the list without the front element. The tail of an empty
// (or nil) list is the empty list.
func Tail(l List) List {
	if l == nil {
		l = Mzero()
	}
	// Bug fix: this previously asserted l.(uint) — a typo for the internal
	// unit sentinel — so the empty-list check never matched and
	// Tail(Mzero()) panicked on the cell assertion below.
	if _, ok := l.(unit); ok {
		return unit{}
	}
	ll := l.([2]interface{})
	f := ll[1].(func() List)
	return f()
}
// HdTail returns (Head l, Tail l) — the first element and the remainder in
// a single call.
func HdTail(l List) (interface{}, List) {
	return Head(l), Tail(l)
}
// IsEmpty returns true if the list is empty (nil or the unit sentinel).
func IsEmpty(l List) bool {
	if l == nil {
		return true
	}
	// A list is empty exactly when it is the unit sentinel. The previous
	// code additionally asserted the head cell to uint (a typo for unit);
	// that branch could never match — cell[0] always holds a
	// func() interface{} — and reaching the cell assertion could panic on
	// malformed input, so the dead check is dropped.
	_, ok := l.(unit)
	return ok
}
// Map returns a list whose elements are f applied (lazily) to the elements
// of l; each element's thunk defers f until the element is forced.
func Map(f func(interface{}) interface{}, l List) List {
	if IsEmpty(l) {
		return Mzero()
	}
	cell := l.([2]interface{})
	headFn := cell[0].(func() interface{})
	tailFn := cell[1].(func() List)
	wrapped := func() interface{} {
		return f(headFn())
	}
	return Consf(wrapped, Map(f, tailFn()))
}
// MapM applies f to each element, forcing every element in sequence for
// its side effects.
func MapM(f func(interface{}), l List) {
	Seq(Map(func(i interface{}) interface{} {
		f(i)
		return nil
	}, l))
}
// Seq forces every element thunk in the list, discarding the values.
func Seq(l List) {
	for cur := l; !IsEmpty(cur); cur = Tail(cur) {
		Head(cur)
	}
}
// Foldl performs a left fold over the list; f receives (carry, element).
// Implemented recursively, so very long lists grow the call stack.
func Foldl(f func(carry interface{}, elem interface{}) interface{}, val interface{}, l List) interface{} {
	if IsEmpty(l) {
		return val
	}
	hd, tl := HdTail(l)
	return Foldl(f, f(val, hd), tl)
}

// Foldr performs a right fold over the list.
// Note: unlike a conventional foldr, f receives (carry, elem) — the folded
// tail first, then the element — matching Foldl's argument order.
func Foldr(f func(interface{}, interface{}) interface{}, val interface{}, l List) interface{} {
	if IsEmpty(l) {
		return val
	}
	hd, tl := HdTail(l)
	return f(Foldr(f, val, tl), hd)
}

// Foldl1 performs a left fold over the list using its head as the initial
// element
func Foldl1(f func(interface{}, interface{}) interface{}, l List) interface{} {
	hd, tl := HdTail(l)
	return Foldl(f, hd, tl)
}
// Index gets the specified element from the list (0-indexed). Out-of-range
// indices yield the empty-list sentinel rather than panicking.
func Index(idx uint, l List) interface{} {
	cur := l
	for i := uint(0); i < idx; i++ {
		if IsEmpty(cur) {
			return Mzero()
		}
		cur = Tail(cur)
	}
	if IsEmpty(cur) {
		return Mzero()
	}
	return Head(cur)
}
// Reverse returns the list reversed (forces every element).
func Reverse(l List) List {
	foldFunc := func(carry, elem interface{}) interface{} {
		return Cons(elem, carry)
	}
	return Foldl(foldFunc, Mzero(), l).(List)
}

// Append adds an element to the end of the list. O(n): implemented by
// reversing, consing, and reversing back.
func Append(i interface{}, l List) List {
	return Reverse(Cons(i, Reverse(l)))
}

// Concat joins two lists; the result contains the elements of back
// followed by the elements of front.
func Concat(back, front List) List {
	foldFunc := func(carry, elem interface{}) interface{} {
		return Cons(elem, carry)
	}
	return Foldr(foldFunc, front, back).(List)
}
// New generates a List holding the given elements in order.
func New(elems ...interface{}) List {
	// Cons from the last element backwards, which yields the elements in
	// their original order without a final Reverse pass.
	out := Mzero()
	for i := len(elems) - 1; i >= 0; i-- {
		out = Cons(elems[i], out)
	}
	return out
}
// ToSlice returns a slice of evaluated values from the list (forces every
// element).
func ToSlice(l List) []interface{} {
	appendFunc := func(lst, elem interface{}) interface{} {
		slice := lst.([]interface{})
		slice = append(slice, elem)
		return slice
	}
	return Foldl(appendFunc, []interface{}{}, l).([]interface{})
} | list/list.go | 0.818374 | 0.487795 | list.go | starcoder
package biff
import (
"encoding/json"
"fmt"
"go/ast"
"go/parser"
"io/ioutil"
"os"
"reflect"
"runtime"
"runtime/debug"
"strings"
)
// exit terminates the process after a failed assertion; declared as a
// variable so it can be stubbed out in tests.
var exit = func() {
	os.Exit(1)
}
// Deprecated: use the package-level AssertNotEqual instead.
//
// AssertNotEqual return true if `obtained` is not equal to `expected` otherwise
// it will print trace and exit.
func (a *A) AssertNotEqual(obtained, expected interface{}) bool {
	if !reflect.DeepEqual(expected, obtained) {
		l, r := printShould(expected)
		fmt.Printf(" %s is not equal %s\n", l, r)
		return true
	}
	printExpectedObtained(expected, obtained)
	return false
}

// AssertNotEqual return true if `obtained` is not equal to `expected` otherwise
// it will print trace and exit.
func AssertNotEqual(obtained, expected interface{}) bool {
	if !reflect.DeepEqual(expected, obtained) {
		l, r := printShould(expected)
		fmt.Printf(" %s is not equal %s\n", l, r)
		return true
	}
	printExpectedObtained(expected, obtained)
	return false
}

// Deprecated: use the package-level AssertEqual instead.
//
// AssertEqual return true if `obtained` is equal to `expected` otherwise it
// will print trace and exit.
func (a *A) AssertEqual(obtained, expected interface{}) bool {
	if reflect.DeepEqual(expected, obtained) {
		l, r := printShould(expected)
		fmt.Printf(" %s is %s\n", l, r)
		return true
	}
	printExpectedObtained(expected, obtained)
	return false
}

// AssertEqual return true if `obtained` is equal to `expected` otherwise it
// will print trace and exit.
func AssertEqual(obtained, expected interface{}) bool {
	if reflect.DeepEqual(expected, obtained) {
		l, r := printShould(expected)
		fmt.Printf(" %s is %s\n", l, r)
		return true
	}
	printExpectedObtained(expected, obtained)
	return false
}
// readFileLine returns the 1-indexed line of the named file, or "" when the
// file cannot be read or the line number is out of range. The previous
// version ignored the read error and indexed without a bounds check, so an
// unreadable file combined with a line number > 1 caused an index panic.
func readFileLine(filename string, line int) string {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return ""
	}
	lines := strings.Split(string(data), "\n")
	if line < 1 || line > len(lines) {
		return ""
	}
	return lines[line-1]
}
// Deprecated: use the package-level AssertEqualJson instead.
//
// AssertEqualJson return true if `obtained` is equal to `expected`. Prior to
// comparison, both values are JSON Marshaled/Unmarshaled to avoid JSON type
// issues like int vs float etc. Otherwise it will print trace and exit.
func (a *A) AssertEqualJson(obtained, expected interface{}) bool {
	// Round-trip both values through JSON so numeric types normalize
	// before the deep comparison. Marshal errors are deliberately ignored:
	// an unmarshalable value simply compares as nil.
	e := interface{}(nil)
	{
		b, _ := json.Marshal(expected)
		json.Unmarshal(b, &e)
	}
	o := interface{}(nil)
	{
		b, _ := json.Marshal(obtained)
		json.Unmarshal(b, &o)
	}
	if reflect.DeepEqual(e, o) {
		l, r := printShould(expected)
		fmt.Printf(" %s is same JSON as %s\n", l, r)
		return true
	}
	printExpectedObtained(e, o)
	return false
}

// AssertEqualJson return true if `obtained` is equal to `expected`. Prior to
// comparison, both values are JSON Marshaled/Unmarshaled to avoid JSON type
// issues like int vs float etc. Otherwise it will print trace and exit.
func AssertEqualJson(obtained, expected interface{}) bool {
	e := interface{}(nil)
	{
		b, _ := json.Marshal(expected)
		json.Unmarshal(b, &e)
	}
	o := interface{}(nil)
	{
		b, _ := json.Marshal(obtained)
		json.Unmarshal(b, &o)
	}
	if reflect.DeepEqual(e, o) {
		l, r := printShould(expected)
		fmt.Printf(" %s is same JSON as %s\n", l, r)
		return true
	}
	printExpectedObtained(e, o)
	return false
}
// Deprecated: use the package-level AssertNil instead.
//
// AssertNil return true if `obtained` is nil, otherwise it will print trace and
// exit.
func (a *A) AssertNil(obtained interface{}) bool {
	// Use the isNil helper rather than calling reflect.Value.IsNil
	// directly: IsNil panics for non-nilable kinds (ints, strings,
	// structs...), so the old check crashed on e.g. AssertNil(5) instead
	// of reporting a failure.
	if isNil(obtained) {
		l, _ := printShould(nil)
		fmt.Printf(" %s is nil\n", l)
		return true
	}
	printExpectedObtained(nil, obtained)
	return false
}
// AssertNil return true if `obtained` is nil, otherwise it will print trace and
// exit.
func AssertNil(obtained interface{}) bool {
if nil == obtained || reflect.ValueOf(obtained).IsNil() {
l, _ := printShould(nil)
fmt.Printf(" %s is nil\n", l)
return true
}
printExpectedObtained(nil, obtained)
return false
}
// Deprecated: use the package-level AssertNotNil instead.
//
// AssertNotNil return true if `obtained` is NOT nil, otherwise it will print trace
// and exit.
func (a *A) AssertNotNil(obtained interface{}) bool {
	if isNil(obtained) {
		line := getStackLine(2)
		fmt.Printf(""+
			" Expected: not nil\n"+
			" Obtained: %#v\n"+
			" at %s\n", obtained, line)
		exit()
		return false
	}
	l, _ := printShould(nil)
	v := fmt.Sprintf("%#v", obtained)
	// Only echo the concrete value when it differs from the caller's
	// source expression recovered by printShould.
	if v != l {
		v = " (" + v + ")"
	}
	fmt.Printf(" %s is not nil%s\n", l, v)
	return true
}

// AssertNotNil return true if `obtained` is NOT nil, otherwise it will print trace
// and exit.
func AssertNotNil(obtained interface{}) bool {
	if isNil(obtained) {
		line := getStackLine(2)
		fmt.Printf(""+
			" Expected: not nil\n"+
			" Obtained: %#v\n"+
			" at %s\n", obtained, line)
		exit()
		return false
	}
	l, _ := printShould(nil)
	v := fmt.Sprintf("%#v", obtained)
	if v != l {
		v = " (" + v + ")"
	}
	fmt.Printf(" %s is not nil%s\n", l, v)
	return true
}
// Deprecated: use the package-level AssertTrue instead.
//
// AssertTrue return true if `obtained` is true, otherwise it will print trace
// and exit.
func (a *A) AssertTrue(obtained interface{}) bool {
	if reflect.DeepEqual(true, obtained) {
		l, _ := printShould(nil)
		fmt.Printf(" %s is true\n", l)
		return true
	}
	printExpectedObtained(true, obtained)
	return false
}

// AssertTrue return true if `obtained` is true, otherwise it will print trace
// and exit.
func AssertTrue(obtained interface{}) bool {
	if reflect.DeepEqual(true, obtained) {
		l, _ := printShould(nil)
		fmt.Printf(" %s is true\n", l)
		return true
	}
	printExpectedObtained(true, obtained)
	return false
}
// Deprecated: use the package-level AssertFalse instead.
//
// AssertFalse return true if `obtained` is false, otherwise it will print trace
// and exit.
func (a *A) AssertFalse(obtained interface{}) bool {
	if reflect.DeepEqual(false, obtained) {
		l, _ := printShould(nil)
		fmt.Printf(" %s is false\n", l)
		return true
	}
	// Bug fix: the failure path previously reported `true` as the expected
	// value; an AssertFalse failure must report `false`.
	printExpectedObtained(false, obtained)
	return false
}
// AssertFalse return true if `obtained` is false, otherwise it will print trace
// and exit.
func AssertFalse(obtained interface{}) bool {
	if reflect.DeepEqual(false, obtained) {
		l, _ := printShould(nil)
		fmt.Printf(" %s is false\n", l)
		return true
	}
	// Bug fix: previously printed `true` as the expected value on failure.
	printExpectedObtained(false, obtained)
	return false
}
// Deprecated: use the package-level AssertInArray instead.
//
// AssertInArray return true if `item` match at least with one element of the
// array. Otherwise it will print trace and exit.
func (a *A) AssertInArray(array interface{}, item interface{}) bool {
	v := reflect.ValueOf(array)
	if v.Kind() != reflect.Array && v.Kind() != reflect.Slice {
		line := getStackLine(2)
		fmt.Printf("Expected second argument to be array:\n"+
			" Obtained: %#v\n"+
			" at %s\n", array, line)
		exit()
	}
	l := v.Len()
	for i := 0; i < l; i++ {
		e := v.Index(i)
		if reflect.DeepEqual(e.Interface(), item) {
			l, r := printShould(item)
			fmt.Printf(" %s[%d] is %s\n", l, i, r)
			return true
		}
	}
	line := getStackLine(2)
	fmt.Printf(""+
		" Expected item to be in array.\n"+
		" Item: %#v\n"+
		" Array: %#v\n"+
		" at %s\n", item, array, line)
	exit()
	return false
}

// AssertInArray return true if `item` match at least with one element of the
// array. Otherwise it will print trace and exit.
func AssertInArray(array interface{}, item interface{}) bool {
	v := reflect.ValueOf(array)
	if v.Kind() != reflect.Array && v.Kind() != reflect.Slice {
		line := getStackLine(2)
		fmt.Printf("Expected second argument to be array:\n"+
			" Obtained: %#v\n"+
			" at %s\n", array, line)
		exit()
	}
	l := v.Len()
	for i := 0; i < l; i++ {
		e := v.Index(i)
		if reflect.DeepEqual(e.Interface(), item) {
			l, r := printShould(item)
			fmt.Printf(" %s[%d] is %s\n", l, i, r)
			return true
		}
	}
	line := getStackLine(2)
	fmt.Printf(""+
		" Expected item to be in array.\n"+
		" Item: %#v\n"+
		" Array: %#v\n"+
		" at %s\n", item, array, line)
	exit()
	return false
}
// getStackLine returns a "function + location" pair taken from the current
// debug.Stack output, skipping linesToSkip logical frames (each frame spans
// two lines of the dump). Fragile by nature: it depends on the exact layout
// of debug.Stack.
func getStackLine(linesToSkip int) string {
	stack := debug.Stack()
	lines := make([]string, 0)
	index := 0
	for i := 0; i < len(stack); i++ {
		if stack[i] == []byte("\n")[0] {
			// NOTE(review): slicing to i-1 drops the final character of
			// every line — presumably meant to trim a trailing byte, but
			// on platforms without \r it trims one byte too many. Confirm
			// against the intended output format.
			lines = append(lines, string(stack[index:i-1]))
			index = i + 1
		}
	}
	return lines[linesToSkip*2+3] + " " + lines[linesToSkip*2+4]
}
// printExpectedObtained prints an expected/obtained mismatch together with
// the caller's source location, then terminates the process via exit.
func printExpectedObtained(expected, obtained interface{}) {
	line := getStackLine(3)
	fmt.Printf(""+
		" Expected: %#v\n"+
		" Obtained: %#v\n"+
		" at %s\n", expected, obtained, line)
	exit()
}
// printShould introspects the caller's source line to recover the literal
// argument expressions of the enclosing assert call. It returns the first
// argument's source text (falling back to "It") and the second argument's
// source text, suffixed with the formatted value when they differ.
func printShould(value interface{}) (arg0, arg1 string) {
	arg0 = "It"
	arg1 = fmt.Sprintf("%#v", value)
	func() {
		// Walk the current goroutine's stack to locate the frame two
		// levels above printShould — the user's assert call site.
		p := make([]runtime.StackRecord, 50)
		_, ok := runtime.GoroutineProfile(p)
		if !ok {
			return
		}
		frames := runtime.CallersFrames(p[0].Stack())
		// Make it compatible with latests golang versions (1.14 on)
		frame, more := frames.Next()
		for ; more; frame, more = frames.Next() {
			if frame.Function == "github.com/fulldump/biff.printShould" {
				break
			}
		}
		frame, _ = frames.Next()
		frame, _ = frames.Next()
		// Re-read the caller's source line and parse it as a Go
		// expression to extract the argument texts; bail out silently on
		// anything unparsable.
		l := readFileLine(frame.File, frame.Line)
		a, err := parser.ParseExpr(l)
		if nil != err {
			return
		}
		aFunc, ok := a.(*ast.CallExpr)
		if !ok {
			return
		}
		// token.Pos is 1-based, hence the -1 offsets into the line.
		a0 := aFunc.Args[0]
		arg0 = l[a0.Pos()-1 : a0.End()-1]
		if len(aFunc.Args) > 1 {
			a1 := aFunc.Args[1]
			arg1 = l[a1.Pos()-1 : a1.End()-1]
		}
	}()
	// Append the concrete value when it differs from the source text.
	v := fmt.Sprintf("%#v", value)
	if v != arg1 {
		arg1 = arg1 + " (" + v + ")"
	}
	return
}
// Source: https://sourcegraph.com/github.com/stretchr/testify/-/blob/assert/assertions.go#L520:6
// isNil checks if a specified object is nil or not, without Failing.
// Non-nilable kinds (ints, strings, structs...) simply report false.
func isNil(object interface{}) bool {
	if object == nil {
		return true
	}
	value := reflect.ValueOf(object)
	switch value.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface,
		reflect.Map, reflect.Ptr, reflect.Slice:
		// Only these kinds may legally be asked IsNil.
		return value.IsNil()
	default:
		return false
	}
}
// source: github.com/stretchr/testify/-/blob/assert/assertions.go#L524
// containsKind reports whether kind appears in the slice of kinds.
func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
	for i := 0; i < len(kinds); i++ {
		if kind == kinds[i] {
			return true
		}
	}
	return false
} | asserts.go | 0.575111 | 0.457682 | asserts.go | starcoder
Ported from Java com.google.gwt.dev.util.editdistance, which is:
Copyright 2010 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
*/
/*
Package berghelraoch is a modification of the original Berghel-Roach edit
distance (based on prior work by Ukkonen) described in
ACM Transactions on Information Systems, Vol. 14, No. 1,
January 1996, pages 94-106.
I observed that only O(d) prior computations are required
to compute edit distance. Rather than keeping all prior
f(k,p) results in a matrix, we keep only the two "outer edges"
in the triangular computation pattern that will be used in
subsequent rounds. We cannot reconstruct the edit path,
but many applications do not require that; for them, this
modification uses less space (and empirically, slightly
less time).
First, some history behind the algorithm necessary to understand
Berghel-Roach and our modification...
The traditional algorithm for edit distance uses dynamic programming,
building a matrix of distances for substrings:
D[i,j] holds the distance for string1[0..i]=>string2[0..j].
The matrix is initially populated with the trivial values
D[0,j]=j and D[i,0]=i; and then expanded with the rule:
D[i,j] = min( D[i-1,j]+1, // insertion
D[i,j-1]+1, // deletion
(D[i-1,j-1]
+ (string1[i]==string2[j])
? 0 // match
: 1 // substitution ) )
Ukkonen observed that each diagonal of the matrix must increase
by either 0 or 1 from row to row. If D[i,j] = p, then the
matching rule requires that D[i+x,j+x] = p for all x
where string1[i..i+x) matches string2[j..j+j+x). Ukkonen
defined a function f(k,p) as the highest row number in which p
appears on the k-th diagonal (those D[i,j] where k=(i-j), noting
that k may be negative). The final result of the edit
distance is the D[n,m] cell, on the (n-m) diagonal; it is
the value of p for which f(n-m, p) = m. The function f can
also be computed dynamically, according to a simple recursion:
f(k,p) {
contains_p = max(f(k-1,p-1), f(k,p-1)+1, f(k+1,p-1)+1)
while (string1[contains_p] == string2[contains_p + k])
contains_p++;
return contains_p;
}
The max() expression finds a row where the k-th diagonal must
contain p by virtue of an edit from the prior, same, or following
diagonal (corresponding to an insert, substitute, or delete);
we need not consider more distant diagonals because row-to-row
and column-to-column changes are at most +/- 1.
The original Ukkonen algorithm computed f(k,p) roughly as
follows:
for (p = 0; ; p++) {
compute f(k,p) for all valid k
if (f(n-m, p) == m) return p;
}
Berghel and Roach observed that many values of f(k,p) are
computed unnecessarily, and reorganized the computation into
a just-in-time sequence. In each iteration, we are primarily
interested in the terminating value f(main,p), where main=(n-m)
is the main diagonal. To compute that we need f(x,p-1) for
three values of x: main-1, main, and main+1. Those depend on
values for p-2, and so forth. We will already have computed
f(main,p-1) in the prior round, and thus f(main-1,p-2) and
f(main+1,p-2), and so forth. The only new values we need to compute
are on the edges: f(main-i,p-i) and f(main+i,p-i). Noting that
f(k,p) is only meaningful when abs(k) is no greater than p,
one of the Berghel-Roach reviewers noted that we can compute
the bounds for i:
(main+i ≤ p-i) implies (i ≤ (p-main)/2)
(where main+i is limited on the positive side) and similarly
(-(main-i) ≤ p-i) implies (i ≤ (p+main)/2).
(where main-i is limited on the negative side).
This reduces the computation sequence to
for (i = (p-main)/2; i > 0; i--) compute f(main+i,p-i);
for (i = (p+main)/2; i > 0; i--) compute f(main-i,p-i);
if (f(main, p) == m) return p;
The original Berghel-Roach algorithm recorded prior values
of f(k,p) in a matrix, using O(distance^2) space, enabling
reconstruction of the edit path, but if all we want is the
edit *distance*, we only need to keep O(distance) prior computations.
The requisite prior k-1, k, and k+1 values are conveniently
computed in the current round and the two preceding it.
For example, on the higher-diagonal side, we compute:
current[i] = f(main+i, p-i)
We keep the two prior rounds of results, where p was one and two
smaller. So, from the preceding round
last[i] = f(main+i, (p-1)-i)
and from the prior round, but one position back:
prior[i-1] = f(main+(i-1), (p-2)-(i-1))
In the current round, one iteration earlier:
current[i+1] = f(main+(i+1), p-(i+1))
Note that the distance in all of these evaluates to p-i-1,
and the diagonals are (main+i) and its neighbors... just
what we need. The lower-diagonal side behaves similarly.
We need to materialize values that are not computed in prior
rounds, for either of two reasons:
- Initially, we have no prior rounds, so we need to fill
all of the "last" and "prior" values for use in the
first round. The first round uses only on one side
of the main diagonal or the other.
- In every other round, we compute one more diagonal than before.
In all of these cases, the missing f(k,p) values are for abs(k) > p,
where a real value of f(k,p) is undefined. [The original Berghel-Roach
algorithm prefills its F matrix with these values, but we fill
them as we go, as needed.] We define
f(-p-1,p) = p, so that we start diagonal -p with row p,
f(p+1,p) = -1, so that we start diagonal p with row 0.
(We also allow f(p+2,p)=f(-p-2,p)=-1, causing those values to
have no effect in the starting row computation.]
We only expand the set of diagonals visited every other round,
when (p-main) or (p+main) is even. We keep track of even/oddness
to save some arithmetic. The first round is always even, as p=abs(main).
Note that we rename the "f" function to "computeRow" to be Googley.
*/
package berghelroach | triage/berghelroach/doc.go | 0.80784 | 0.68462 | doc.go | starcoder |
package image
import (
"bufio"
"image"
"io"
"os"
"strings"
)
// Options are the encoding and decoding parameters.
type Options interface {
	// Lossless reports whether lossless encoding was requested.
	Lossless() bool
	// Quality is the requested encoding quality.
	Quality() float32
}

// internalOptions is the default Options implementation returned by
// NewOptions.
type internalOptions struct {
	Options struct {
		Lossless bool
		Quality float32
	}
}

// Lossless implements Options.
func (opt *internalOptions) Lossless() bool {
	return opt.Options.Lossless
}

// Quality implements Options.
func (opt *internalOptions) Quality() float32 {
	return opt.Options.Quality
}
// NewOptions returns an Options value carrying the given lossless flag and
// quality setting.
func NewOptions(lossless bool, quality float32) Options {
	return &internalOptions{
		Options: struct {
			Lossless bool
			Quality float32
		}{
			Lossless: lossless,
			Quality: quality,
		},
	}
}
// A Format holds an image format's name, magic header and how to decode it.
// Name is the name of the format, like "jpeg" or "png".
// Extensions is the name extensions, like ".jpg" or ".jpeg".
// Magics is the magic prefix that identifies the format's encoding. The magic
// string can contain "?" wildcards that each match any one byte.
// Decode is the function that decodes the encoded image.
// DecodeConfig is the function that decodes just its configuration.
// Encode is the function that encodes an image using the given options.
type Format struct {
	Name string
	Extensions []string
	Magics []string
	DecodeConfig func(r io.Reader) (image.Config, error)
	Decode func(r io.Reader) (image.Image, error)
	Encode func(w io.Writer, m image.Image, opt Options) error
}

// formats is the list of registered formats.
var formats []Format
// RegisterFormat registers an image format for use by Encode and Decode.
// The Extensions and Magics slices are cloned so later mutation by the
// caller cannot affect the registry.
func RegisterFormat(fmt Format) {
	entry := fmt
	entry.Extensions = append([]string(nil), fmt.Extensions...)
	entry.Magics = append([]string(nil), fmt.Magics...)
	formats = append(formats, entry)
}
// A reader is an io.Reader that can also peek ahead without consuming.
type reader interface {
	io.Reader
	Peek(int) ([]byte, error)
}
// asReader converts an io.Reader to a peekable reader, wrapping in a
// bufio.Reader only when necessary.
func asReader(r io.Reader) reader {
	rr, ok := r.(reader)
	if !ok {
		return bufio.NewReader(r)
	}
	return rr
}
// match reports whether magic matches b byte-for-byte; a "?" in magic
// matches any single byte. Lengths must be equal.
func match(magic string, b []byte) bool {
	if len(b) != len(magic) {
		return false
	}
	for i := range b {
		if m := magic[i]; m != '?' && m != b[i] {
			return false
		}
	}
	return true
}
// sniffByName determines the format from the filename extension (matched
// case-insensitively, including the dot). Returns the zero Format when no
// registered format matches.
func sniffByName(filename string) Format {
	idx := strings.LastIndex(filename, ".")
	if idx < 0 {
		return Format{}
	}
	ext := strings.ToLower(filename[idx:])
	for _, f := range formats {
		for _, candidate := range f.Extensions {
			if candidate == ext {
				return f
			}
		}
	}
	return Format{}
}
// sniffByMagic determines the format of r's data by peeking at its leading
// bytes and comparing them against every registered magic prefix. Returns
// the zero Format when nothing matches.
func sniffByMagic(r reader) Format {
	for _, f := range formats {
		for _, magic := range f.Magics {
			if b, err := r.Peek(len(magic)); err == nil && match(magic, b) {
				return f
			}
		}
	}
	return Format{}
}
// Decode decodes an image that has been encoded in a registered format.
// The string returned is the format name used during format registration.
// Format registration is typically done by an init function in the codec-
// specific package.
func Decode(r io.Reader) (image.Image, string, error) {
	rr := asReader(r)
	f := sniffByMagic(rr)
	if f.Decode == nil {
		// No registered format matched the stream's magic bytes.
		return nil, "", image.ErrFormat
	}
	m, err := f.Decode(rr)
	return m, f.Name, err
}

// DecodeConfig decodes the color model and dimensions of an image that has
// been encoded in a registered format. The string returned is the format name
// used during format registration. Format registration is typically done by
// an init function in the codec-specific package.
func DecodeConfig(r io.Reader) (image.Config, string, error) {
	rr := asReader(r)
	f := sniffByMagic(rr)
	if f.DecodeConfig == nil {
		return image.Config{}, "", image.ErrFormat
	}
	c, err := f.DecodeConfig(rr)
	return c, f.Name, err
}
// Encode encodes an image as a registered format.
// The format is the format name used during format registration. Returns
// image.ErrFormat when no format with that name is registered.
func Encode(format string, w io.Writer, m image.Image, opt Options) error {
	for i := range formats {
		if formats[i].Name != format {
			continue
		}
		return formats[i].Encode(w, m, opt)
	}
	return image.ErrFormat
}
// Load opens the named file and decodes it with a registered format,
// returning the image, the format name and any error encountered.
func Load(filename string) (image.Image, string, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, "", err
	}
	defer f.Close()
	return Decode(f)
}
// Save creates the named file and encodes m into it, choosing the codec
// from the filename extension.
func Save(filename string, m image.Image, opt Options) (err error) {
	// NOTE(review): the destination is created (truncating any existing
	// file) before the format is resolved, so an unsupported extension
	// leaves behind an empty file. Consider sniffing the format first.
	f, err := os.Create(filename)
	if err != nil {
		return
	}
	defer f.Close()
	format := sniffByName(filename)
	if format.Encode == nil {
		return image.ErrFormat
	}
	if err = format.Encode(f, m, opt); err != nil {
		return
	}
	return
} | format.go | 0.760917 | 0.421373 | format.go | starcoder
package vector
import (
"fmt"
"math"
)
// Vec is a two dimensional vector
type Vec struct {
	X, Y float64
}

// New returns a new vector at (x, y)
func New(x, y float64) *Vec {
	return &Vec{x, y}
}

// Zero returns a zero-vector
func Zero() *Vec {
	return &Vec{0, 0}
}
// Unit returns a unit vector at the given angle (radians), built by
// rotating (1, 0).
func Unit(angle float64) *Vec {
	v := New(1, 0)
	v.Rotate(angle)
	return v
}

// Magnitude returns the magnitude (Euclidean length) of the vector.
func (v *Vec) Magnitude() float64 {
	return math.Sqrt(v.X * v.X + v.Y * v.Y)
}

// Angle returns the angle of a vector in [-PI, PI] (atan2 convention).
func (v Vec) Angle() float64 {
	return math.Atan2(v.Y, v.X)
}
// Add adds vector u to vector v in place.
func (v *Vec) Add(u *Vec) {
	v.X += u.X
	v.Y += u.Y
}

// Cap limits the length of a vector. If it is longer than max, it will be
// cut down to length max
func (v *Vec) Cap(max float64) {
	if max < v.Magnitude() {
		v.SetMagnitude(max)
	}
}

// Cross calculates the cross product of two vectors and returns a scalar
// (the z-component of the 3D cross product).
func (v Vec) Cross(u *Vec) float64 {
	return v.X*u.Y - u.X*v.Y
}

// Divide divides the vector by div in place.
func (v *Vec) Divide(div float64) {
	v.X /= div
	v.Y /= div
}

// Dot calculates the dot product of two vectors and returns a scalar
func (v Vec) Dot(u *Vec) float64 {
	return v.X*u.X + v.Y*u.Y
}

// Multiply scales a vector by mul in place.
func (v *Vec) Multiply(mul float64) {
	v.X *= mul
	v.Y *= mul
}

// Normalize normalizes the vector in place. Note: a zero vector divides by
// a zero magnitude, producing NaN components.
func (v *Vec) Normalize() {
	v.Divide(v.Magnitude())
}

// Project projects vector v onto vector u, storing the result in v:
// v = (v·u / |u|) * û.
func (v *Vec) Project(u *Vec) {
	l := v.Dot(u) / u.Magnitude()
	*v = *u
	v.Normalize()
	v.Multiply(l)
}
// Rotate rotates the vector by angle in radians (counter-clockwise for the
// standard orientation).
func (v *Vec) Rotate(angle float64) {
	sin, cos := math.Sincos(angle)
	// Bug fix: the original computed v.Y from the already-updated v.X,
	// applying the rotation matrix to a half-rotated vector. Snapshot the
	// components first.
	x, y := v.X, v.Y
	v.X = x*cos - y*sin
	v.Y = x*sin + y*cos
}
// SetMagnitude sets the magnitude of the vector to l, preserving direction.
func (v *Vec) SetMagnitude(l float64) {
	v.Normalize()
	v.Multiply(l)
}

// String renders the vector as a string
func (v *Vec) String() string {
	return fmt.Sprintf("Vector(%f, %f)", v.X, v.Y)
}

// Subtract subtracts vector u from vector v in place.
func (v *Vec) Subtract(u *Vec) {
	v.X -= u.X
	v.Y -= u.Y
} | vector.go | 0.936176 | 0.561936 | vector.go | starcoder
package psd
import (
"image"
"image/color"
psdColor "github.com/oov/psd/color"
)
type picker interface {
image.Image
setSource(rect image.Rectangle, src ...[]byte)
}
// findPicker returns an image.Image implementation able to interpret
// raw channel data for the given color mode, bit depth and alpha
// presence. It returns nil for unsupported combinations.
//
// NOTE(review): ColorModeBitmap is routed to findNGrayPicker, which
// has no depth-1 case even though pickerGray1 exists below; likewise
// an indexed mode is not handled here despite pickerPalette existing.
// Presumably those are wired up elsewhere - confirm with callers.
func findPicker(depth int, colorMode ColorMode, hasAlpha bool) picker {
	switch colorMode {
	case ColorModeBitmap, ColorModeGrayscale:
		return findNGrayPicker(depth, hasAlpha)
	case ColorModeRGB:
		return findNRGBAPicker(depth, hasAlpha)
	case ColorModeCMYK:
		return findNCMYKAPicker(depth, hasAlpha)
	}
	return nil
}
// findGrayPicker returns the alpha-less grayscale picker for the given
// bit depth (1, 8, 16 or 32), or nil when the depth is unsupported.
func findGrayPicker(depth int) picker {
	switch depth {
	case 1:
		return &pickerGray1{}
	case 8:
		return &pickerGray8{}
	case 16:
		return &pickerGray16{}
	case 32:
		return &pickerGray32{}
	}
	return nil
}
// findNGrayPicker returns the non-premultiplied grayscale picker that
// matches the bit depth and alpha presence, or nil for an unsupported
// depth.
func findNGrayPicker(depth int, hasAlpha bool) picker {
	switch {
	case depth == 8 && hasAlpha:
		return &pickerNGrayA8{}
	case depth == 8:
		return &pickerNGray8{}
	case depth == 16 && hasAlpha:
		return &pickerNGrayA16{}
	case depth == 16:
		return &pickerNGray16{}
	case depth == 32 && hasAlpha:
		return &pickerNGrayA32{}
	case depth == 32:
		return &pickerNGray32{}
	}
	return nil
}
// findNRGBAPicker returns the non-premultiplied RGB(A) picker that
// matches the bit depth and alpha presence, or nil for an unsupported
// depth.
func findNRGBAPicker(depth int, hasAlpha bool) picker {
	switch {
	case depth == 8 && hasAlpha:
		return &pickerNRGBA8{}
	case depth == 8:
		return &pickerNRGB8{}
	case depth == 16 && hasAlpha:
		return &pickerNRGBA16{}
	case depth == 16:
		return &pickerNRGB16{}
	case depth == 32 && hasAlpha:
		return &pickerNRGBA32{}
	case depth == 32:
		return &pickerNRGB32{}
	}
	return nil
}
// findNCMYKAPicker returns the non-premultiplied CMYK(A) picker that
// matches the bit depth and alpha presence, or nil for an unsupported
// depth (there is no 32-bit CMYK picker).
func findNCMYKAPicker(depth int, hasAlpha bool) picker {
	switch {
	case depth == 8 && hasAlpha:
		return &pickerNCMYKA8{}
	case depth == 8:
		return &pickerNCMYK8{}
	case depth == 16 && hasAlpha:
		return &pickerNCMYKA16{}
	case depth == 16:
		return &pickerNCMYK16{}
	}
	return nil
}
type pickerPalette struct {
Rect image.Rectangle
Src []byte
Palette color.Palette
}
func (p *pickerPalette) setSource(rect image.Rectangle, src ...[]byte) { p.Rect, p.Src = rect, src[0] }
func (p *pickerPalette) ColorModel() color.Model { return p.Palette }
func (p *pickerPalette) Bounds() image.Rectangle { return p.Rect }
func (p *pickerPalette) At(x, y int) color.Color {
pos := (y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X
return p.Palette[p.Src[pos]]
}
// pickerGray1 reads 1-bit-per-pixel data: 8 pixels are packed into
// each byte, most significant bit first, and each row is padded to a
// whole number of bytes.
type pickerGray1 struct {
	Rect image.Rectangle
	Y    []byte
}

func (p *pickerGray1) setSource(rect image.Rectangle, src ...[]byte) { p.Rect, p.Y = rect, src[0] }
func (p *pickerGray1) ColorModel() color.Model                       { return psdColor.Gray1Model }
func (p *pickerGray1) Bounds() image.Rectangle                       { return p.Rect }
func (p *pickerGray1) At(x, y int) color.Color {
	xx := x - p.Rect.Min.X
	// Row stride is ceil(width/8) bytes; xx>>3 selects the byte and
	// ^xx&7 the bit within it (MSB-first ordering).
	pos := (p.Rect.Dx()+7)>>3*(y-p.Rect.Min.Y) + xx>>3
	// A clear bit maps to Y=true - presumably PSD bitmap data stores
	// 0 as white; confirm against the format specification.
	return psdColor.Gray1{Y: p.Y[pos]&(1<<uint(^xx&7)) == 0}
}
type pickerGray8 struct {
Rect image.Rectangle
Y []byte
}
func (p *pickerGray8) setSource(rect image.Rectangle, src ...[]byte) { p.Rect, p.Y = rect, src[0] }
func (p *pickerGray8) ColorModel() color.Model { return color.GrayModel }
func (p *pickerGray8) Bounds() image.Rectangle { return p.Rect }
func (p *pickerGray8) At(x, y int) color.Color {
pos := (y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X
return color.Gray{Y: p.Y[pos]}
}
type pickerNGray8 struct {
Rect image.Rectangle
Y []byte
}
func (p *pickerNGray8) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.Y = rect, src[0]
}
func (p *pickerNGray8) ColorModel() color.Model { return psdColor.NGrayAModel }
func (p *pickerNGray8) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNGray8) At(x, y int) color.Color {
pos := (y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X
return psdColor.NGrayA{Y: p.Y[pos], A: 0xff}
}
type pickerNGrayA8 struct {
Rect image.Rectangle
Y, A []byte
}
func (p *pickerNGrayA8) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.Y, p.A = rect, src[0], src[1]
}
func (p *pickerNGrayA8) ColorModel() color.Model { return psdColor.NGrayAModel }
func (p *pickerNGrayA8) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNGrayA8) At(x, y int) color.Color {
pos := (y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X
return psdColor.NGrayA{Y: p.Y[pos], A: p.A[pos]}
}
type pickerGray16 struct {
Rect image.Rectangle
Y []byte
}
func (p *pickerGray16) setSource(rect image.Rectangle, src ...[]byte) { p.Rect, p.Y = rect, src[0] }
func (p *pickerGray16) ColorModel() color.Model { return color.Gray16Model }
func (p *pickerGray16) Bounds() image.Rectangle { return p.Rect }
func (p *pickerGray16) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 1
return color.Gray16{Y: readUint16(p.Y, pos)}
}
type pickerNGray16 struct {
Rect image.Rectangle
Y []byte
}
func (p *pickerNGray16) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.Y = rect, src[0]
}
func (p *pickerNGray16) ColorModel() color.Model { return psdColor.NGrayA32Model }
func (p *pickerNGray16) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNGray16) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 1
return psdColor.NGrayA32{Y: readUint16(p.Y, pos), A: 0xffff}
}
type pickerNGrayA16 struct {
Rect image.Rectangle
Y, A []byte
}
func (p *pickerNGrayA16) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.Y, p.A = rect, src[0], src[1]
}
func (p *pickerNGrayA16) ColorModel() color.Model { return psdColor.NGrayA32Model }
func (p *pickerNGrayA16) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNGrayA16) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 1
return psdColor.NGrayA32{Y: readUint16(p.Y, pos), A: readUint16(p.A, pos)}
}
type pickerGray32 struct {
Rect image.Rectangle
Y []byte
}
func (p *pickerGray32) setSource(rect image.Rectangle, src ...[]byte) { p.Rect, p.Y = rect, src[0] }
func (p *pickerGray32) ColorModel() color.Model { return psdColor.Gray32Model }
func (p *pickerGray32) Bounds() image.Rectangle { return p.Rect }
func (p *pickerGray32) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 2
return psdColor.Gray32{Y: readFloat32(p.Y, pos)}
}
type pickerNGray32 struct {
Rect image.Rectangle
Y []byte
}
func (p *pickerNGray32) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.Y = rect, src[0]
}
func (p *pickerNGray32) ColorModel() color.Model { return psdColor.NGrayA64Model }
func (p *pickerNGray32) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNGray32) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 2
return psdColor.NGrayA64{Y: readFloat32(p.Y, pos), A: 1}
}
type pickerNGrayA32 struct {
Rect image.Rectangle
Y, A []byte
}
func (p *pickerNGrayA32) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.Y, p.A = rect, src[0], src[1]
}
func (p *pickerNGrayA32) ColorModel() color.Model { return psdColor.NGrayA64Model }
func (p *pickerNGrayA32) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNGrayA32) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 2
return psdColor.NGrayA64{
Y: readFloat32(p.Y, pos),
A: readFloat32(p.A, pos),
}
}
type pickerNRGB8 struct {
Rect image.Rectangle
R, G, B []byte
}
func (p *pickerNRGB8) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.R, p.G, p.B = rect, src[0], src[1], src[2]
}
func (p *pickerNRGB8) ColorModel() color.Model { return color.NRGBAModel }
func (p *pickerNRGB8) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNRGB8) At(x, y int) color.Color {
pos := (y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X
return color.NRGBA{
R: p.R[pos],
G: p.G[pos],
B: p.B[pos],
A: 0xff,
}
}
type pickerNRGBA8 struct {
Rect image.Rectangle
R, G, B, A []byte
}
func (p *pickerNRGBA8) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.R, p.G, p.B, p.A = rect, src[0], src[1], src[2], src[3]
}
func (p *pickerNRGBA8) ColorModel() color.Model { return color.NRGBAModel }
func (p *pickerNRGBA8) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNRGBA8) At(x, y int) color.Color {
pos := (y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X
return color.NRGBA{p.R[pos], p.G[pos], p.B[pos], p.A[pos]}
}
type pickerNRGB16 struct {
Rect image.Rectangle
R, G, B []byte
}
func (p *pickerNRGB16) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.R, p.G, p.B = rect, src[0], src[1], src[2]
}
func (p *pickerNRGB16) ColorModel() color.Model { return color.NRGBA64Model }
func (p *pickerNRGB16) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNRGB16) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 1
return color.NRGBA64{
R: readUint16(p.R, pos),
G: readUint16(p.G, pos),
B: readUint16(p.B, pos),
A: 0xffff,
}
}
type pickerNRGBA16 struct {
Rect image.Rectangle
R, G, B, A []byte
}
func (p *pickerNRGBA16) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.R, p.G, p.B, p.A = rect, src[0], src[1], src[2], src[3]
}
func (p *pickerNRGBA16) ColorModel() color.Model { return color.NRGBA64Model }
func (p *pickerNRGBA16) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNRGBA16) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 1
return color.NRGBA64{
R: readUint16(p.R, pos),
G: readUint16(p.G, pos),
B: readUint16(p.B, pos),
A: readUint16(p.A, pos),
}
}
type pickerNRGB32 struct {
Rect image.Rectangle
R, G, B []byte
}
func (p *pickerNRGB32) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.R, p.G, p.B = rect, src[0], src[1], src[2]
}
func (p *pickerNRGB32) ColorModel() color.Model { return psdColor.NRGBA128Model }
func (p *pickerNRGB32) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNRGB32) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 2
return psdColor.NRGBA128{
R: readFloat32(p.R, pos),
G: readFloat32(p.G, pos),
B: readFloat32(p.B, pos),
A: 1.0,
}
}
type pickerNRGBA32 struct {
Rect image.Rectangle
R, G, B, A []byte
}
func (p *pickerNRGBA32) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.R, p.G, p.B, p.A = rect, src[0], src[1], src[2], src[3]
}
func (p *pickerNRGBA32) ColorModel() color.Model { return psdColor.NRGBA128Model }
func (p *pickerNRGBA32) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNRGBA32) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 2
return psdColor.NRGBA128{
R: readFloat32(p.R, pos),
G: readFloat32(p.G, pos),
B: readFloat32(p.B, pos),
A: readFloat32(p.A, pos),
}
}
type pickerNCMYK8 struct {
Rect image.Rectangle
C, M, Y, K []byte
}
func (p *pickerNCMYK8) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.C, p.M, p.Y, p.K = rect, src[0], src[1], src[2], src[3]
}
func (p *pickerNCMYK8) ColorModel() color.Model { return psdColor.NCMYKAModel }
func (p *pickerNCMYK8) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNCMYK8) At(x, y int) color.Color {
pos := (y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X
return psdColor.NCMYKA{
C: p.C[pos],
M: p.M[pos],
Y: p.Y[pos],
K: p.K[pos],
A: 0xff,
}
}
type pickerNCMYKA8 struct {
Rect image.Rectangle
C, M, Y, K, A []byte
}
func (p *pickerNCMYKA8) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.C, p.M, p.Y, p.K, p.A = rect, src[0], src[1], src[2], src[3], src[4]
}
func (p *pickerNCMYKA8) ColorModel() color.Model { return psdColor.NCMYKAModel }
func (p *pickerNCMYKA8) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNCMYKA8) At(x, y int) color.Color {
pos := (y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X
return psdColor.NCMYKA{
C: p.C[pos],
M: p.M[pos],
Y: p.Y[pos],
K: p.K[pos],
A: p.A[pos],
}
}
type pickerNCMYK16 struct {
Rect image.Rectangle
C, M, Y, K []byte
}
func (p *pickerNCMYK16) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.C, p.M, p.Y, p.K = rect, src[0], src[1], src[2], src[3]
}
func (p *pickerNCMYK16) ColorModel() color.Model { return psdColor.NCMYKA80Model }
func (p *pickerNCMYK16) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNCMYK16) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 1
return psdColor.NCMYKA80{
C: readUint16(p.C, pos),
M: readUint16(p.M, pos),
Y: readUint16(p.Y, pos),
K: readUint16(p.K, pos),
A: 0xffff,
}
}
type pickerNCMYKA16 struct {
Rect image.Rectangle
C, M, Y, K, A []byte
}
func (p *pickerNCMYKA16) setSource(rect image.Rectangle, src ...[]byte) {
p.Rect, p.C, p.M, p.Y, p.K, p.A = rect, src[0], src[1], src[2], src[3], src[4]
}
func (p *pickerNCMYKA16) ColorModel() color.Model { return psdColor.NCMYKA80Model }
func (p *pickerNCMYKA16) Bounds() image.Rectangle { return p.Rect }
func (p *pickerNCMYKA16) At(x, y int) color.Color {
pos := ((y-p.Rect.Min.Y)*p.Rect.Dx() + x - p.Rect.Min.X) << 1
return psdColor.NCMYKA80{
C: readUint16(p.C, pos),
M: readUint16(p.M, pos),
Y: readUint16(p.Y, pos),
K: readUint16(p.K, pos),
A: readUint16(p.A, pos),
}
} | vendor/github.com/oov/psd/picker.go | 0.716715 | 0.401306 | picker.go | starcoder |
package models
import (
"github.com/peake100/turnup-go/models/timeofday"
"github.com/peake100/turnup-go/values"
"time"
)
type SpikeChanceBreakdown [values.PricePeriodCount]float64
// ForDay returns the predicted spike chance for the given weekday and
// time of day. An error is returned when the combination does not map
// to a valid price period.
func (spikes *SpikeChanceBreakdown) ForDay(
	weekday time.Weekday, tod timeofday.ToD,
) (chance float64, err error) {
	period, periodErr := PricePeriodFromDay(weekday, tod)
	if periodErr != nil {
		return 0, periodErr
	}
	return spikes[period], nil
}
// ForTime returns the spike chance for the price period containing
// spikeTime. The breakdown holds no date information, so spikeTime is
// assumed to fall within the week this breakdown describes.
func (spikes *SpikeChanceBreakdown) ForTime(
	spikeTime time.Time,
) (chance float64, err error) {
	period, periodErr := PricePeriodFromTime(spikeTime)
	if periodErr != nil {
		return 0, periodErr
	}
	return spikes[period], nil
}
type HasSpikeChance interface {
HasSpikeRange
Chance() float64
Breakdown() *SpikeChanceBreakdown
}
type SpikeChance struct {
SpikeRange
chance float64
breakdown *SpikeChanceBreakdown
}
func (spike *SpikeChance) Chance() float64 {
return spike.chance
}
func (spike *SpikeChance) Breakdown() *SpikeChanceBreakdown {
return spike.breakdown
}
func (spike *SpikeChance) updatePeriodDensity(
update HasSpikeRange,
period PricePeriod,
weekChance float64,
) {
if update.Has() && period >= update.Start() && period <= update.End() {
spike.breakdown[period] += weekChance
}
}
// A probability heat-map of when a price spike might occur.
type SpikeChancesAll struct {
small *SpikeChance
big *SpikeChance
any *SpikeChance
}
func (spikes *SpikeChancesAll) Big() HasSpikeChance {
return spikes.big
}
func (spikes *SpikeChancesAll) Small() HasSpikeChance {
return spikes.small
}
func (spikes *SpikeChancesAll) Any() HasSpikeChance {
return spikes.any
}
// Converts from HasSpikeChancesAll to HasSpikeRangesAll
func (spikes *SpikeChancesAll) SpikeRangeAll() *SpikeRangeAll {
// Extract the embedded types and rewrap them
return &SpikeRangeAll{
big: &spikes.big.SpikeRange,
small: &spikes.small.SpikeRange,
any: &spikes.any.SpikeRange,
}
}
func (spikes *SpikeChancesAll) updateRanges(info *SpikeRangeAll) {
spikes.any.updateFromRange(info.any)
spikes.big.updateFromRange(info.big)
spikes.small.updateFromRange(info.small)
}
// We will updatePrices the density from the potential weeks.
func (spikes *SpikeChancesAll) updateDensities(
updateWeek *PotentialWeek,
) {
// The idea behind this heatmap is simple: take the bin width of a given potential
// week, and add it to a running tally of each price period a spike occurs on that
// week for. We need to run this AFTER all of the chances are normalized for every
// pattern, as the total likelihood for any spike may be under 1.
// If there is no spike, abort.
update := updateWeek.Spikes
weekChance := updateWeek.Chance()
if !update.any.Has() {
return
}
start := update.any.Start()
end := update.any.End()
for period := start; period <= end; period++ {
if update.any.has {
spikes.any.updatePeriodDensity(update.any, period, weekChance)
}
if update.big.has {
spikes.big.updatePeriodDensity(update.big, period, weekChance)
}
if update.small.has {
spikes.small.updatePeriodDensity(update.small, period, weekChance)
}
}
} | models/predictionSpikeInfo.go | 0.812496 | 0.461441 | predictionSpikeInfo.go | starcoder |
package decision_tree
import (
"sync"
"fmt"
"errors"
)
const GINI = "gini"
type splitFunction func([][]float64, []float64, int) (float64, float64)
type decisionTree struct {
root *treeNode // actual tree
context *treeContext // fitting context
}
type treeContext struct {
splitter splitFunction // what impurity critera to split by
curDepth int // how deep are we
maxDepth int // maximum tree depth
used []int // which columns have we used?
}
type treeNode struct {
left *treeNode // left subtree
right *treeNode // right subtree
impurity float64 // impurity value
splitColumn int // index of column to split by
splitVal float64 // value to split on
probability float64 // P(1|t)
size int // number of samples in this sub tree
}
type splitResult struct {
impurity float64 // impurity value from split criteria
splitColumn int // best column to split on
splitVal float64 // value to split on
}
/*
DecisionTree constructs an untrained binary decision tree.

arguments
---------
	maxDepth: maximum depth of the fitted tree
	splitMethod: impurity criterion used when splitting;
	             possible values: GINI ("gini")

returns
-------
	a new *decisionTree, or an error for an unknown splitMethod

Fixed: the doc previously described a minSamplesLeaf parameter that
does not exist, and the split method was compared against a magic
string instead of the declared GINI constant.
*/
func DecisionTree(maxDepth int, splitMethod string) (*decisionTree, error) {
	var splitter splitFunction
	switch splitMethod {
	case GINI:
		splitter = splitGINI
	default:
		return nil, errors.New("unknown splitting method")
	}
	tree := &decisionTree{
		context: &treeContext{
			splitter: splitter,
			maxDepth: maxDepth,
		},
	}
	return tree, nil
}
func (tree decisionTree) String() string {
return tree.root.String()
}
// Fit trains the decision tree on samples X with binary responses y.
// X and y must be non-empty and of equal length.
func (tree *decisionTree) Fit(X [][]float64, y []float64) error {
	if len(X) == 0 || len(X) != len(y) {
		return errors.New("X and y must be non-empty and the same length")
	}
	// One used-flag per feature column. Fixed: this was previously
	// sized by len(X) (the sample count), but bestSplit indexes
	// context.used by feature column up to len(X[0]), which could
	// panic or silently reuse columns.
	tree.context.used = make([]int, len(X[0]))
	tree.root = fitTree(X, y, tree.context)
	return nil
}
// Classify predicts a label for every sample in X and returns the
// predictions in the same order.
func (tree decisionTree) Classify(X [][]float64) []float64 {
	labels := make([]float64, 0, len(X))
	for _, sample := range X {
		labels = append(labels, tree.ClassifySample(sample))
	}
	return labels
}
// ClassifySample walks the tree for a single sample x and returns the
// predicted label: 1 when the reached leaf's positive probability
// exceeds 0.5, otherwise 0.
func (tree decisionTree) ClassifySample(x []float64) float64 {
	node := tree.root
	for !node.isLeaf() {
		// Descend left when the sample's split-column value is below
		// the node's threshold, right otherwise.
		if x[node.splitColumn] < node.splitVal {
			node = node.left
		} else {
			node = node.right
		}
	}
	if node.probability > 0.5 {
		return 1
	}
	return 0
}
func (n treeNode) isLeaf() bool {
return n.left == nil && n.right == nil
}
func (n treeNode) String() string {
var toString func(*treeNode, string) string
toString = func(n *treeNode, padding string) string {
var s string
if n.isLeaf() {
s = fmt.Sprintf("%s(%.3f +%d)", padding, n.probability, n.size)
} else {
s = fmt.Sprintf("%s%d < %.2f (%.3f +%d)",
padding,
n.splitColumn,
n.splitVal,
n.impurity,
n.size)
if n.left != nil {
s += "\n" + toString(n.left, padding + " ")
}
if n.right != nil {
s += "\n" + toString(n.right, padding + " ")
}
}
return s
}
return toString(&n, "")
}
/*
fitTree recursively fits a decision (sub)tree.

arguments
---------
	X: training samples for this subtree
	y: corresponding responses; should be in {0, 1}
	context: training context: how deep we are, stopping cases, and
	         which feature columns have been used on this path

returns
-------
	tree root node

NOTE(review): an empty y makes node.probability NaN (0/0); callers
must supply at least one sample.
*/
func fitTree(X [][]float64, y []float64, context *treeContext) *treeNode {
	node := new(treeNode)

	// P(label==1 | this node): the mean of the 0/1 responses.
	labelSum := 0.0
	for _, v := range y {
		labelSum += v
	}
	node.probability = labelSum / float64(len(y))
	node.size = len(X)

	// Split further only while:
	// 1) we have not exceeded maxDepth, and
	// 2) the node is impure (has both positive and negative samples).
	should_split := context.curDepth < context.maxDepth &&
		node.probability != 0 &&
		node.probability != 1
	if should_split {
		// Find the best split among columns not yet used on this path.
		result := bestSplit(X, y, context)

		// splitColumn == -1 signals that no usable column remained.
		couldSplit := result.splitColumn != -1
		if couldSplit {
			// Remember this node's depth: the recursive calls below
			// mutate context.curDepth, so it is restored per child.
			myDepth := context.curDepth

			// Record the chosen split on this node.
			node.impurity = result.impurity
			node.splitColumn = result.splitColumn
			node.splitVal = result.splitVal

			// Partition X/y in place: rows [0, ix) go left, the rest right.
			ix := splitDataset(X, y, node.splitColumn, node.splitVal)

			// Mark this column as consumed for everything below us.
			used := context.used
			used[node.splitColumn] = 1

			// Fit both subtrees, handing each its own copy of the
			// mutable context state.
			context.curDepth = myDepth + 1
			context.used = copySlice(used)
			node.left = fitTree(X[:ix], y[:ix], context)
			context.curDepth = myDepth + 1
			context.used = copySlice(used)
			node.right = fitTree(X[ix:], y[ix:], context)
		}
	}
	return node
}
/*
bestSplit finds the best column/value to split on given the current
context, evaluating every not-yet-used feature column concurrently.

NOTE: uses CPU-bound go-routines, increase runtime.GOMAXPROCS for
multicore processing and a generous speed-up
*/
func bestSplit(X [][]float64, y []float64, context *treeContext) splitResult {
	nFeatures := len(X[0])
	// Buffered to nFeatures so every worker can send without blocking
	// even though draining only starts after wg.Wait().
	results := make(chan splitResult, nFeatures)
	wg := new(sync.WaitGroup)
	for i := 0; i < nFeatures; i++ {
		if context.used[i] != 1 {
			wg.Add(1)
			// i is passed as an argument to avoid loop-variable capture.
			go func(i int) {
				defer wg.Done()
				impurity, val := context.splitter(X, y, i)
				results <- splitResult{impurity, i, val}
			}(i)
		}
	}
	wg.Wait()
	close(results)
	// Sentinel result: splitColumn -1 means "no usable column", and
	// impurity 1.1 exceeds any Gini value.
	// NOTE(review): this sentinel assumes every splitter's impurity is
	// bounded by 1 - revisit if criteria other than Gini are added.
	bestResult := splitResult{1.1, -1, 0.0}
	for result := range results {
		if result.impurity < bestResult.impurity {
			bestResult = result
		}
	}
	return bestResult
}
/*
splitDataset does an in-place partition of the dataset & responses
around splitVal in splitColumn: rows whose value is below splitVal are
moved to the front (left subtree), the rest toward the back.

returns
-------
	splitPoint: all samples with index lower than this belong in the
	left sub tree.

NOTE(review): the double swap (i<->rearIndex, then i<->i+1) and the
fact that splitPoint only advances in the else-branch look fragile;
verify with unit tests that the returned index matches the partition
for the edge cases (all rows left, all rows right, single row).
*/
func splitDataset(X [][]float64, y []float64, splitColumn int, splitVal float64) int {
	rearIndex := len(X) - 1
	splitPoint := 0
	for i := 0; i < rearIndex; i++ {
		if X[i][splitColumn] >= splitVal {
			// Send this row (and its response) to the back, then pull
			// the element now at i+1 down so it gets re-examined.
			X[i], X[rearIndex] = X[rearIndex], X[i]
			X[i], X[i+1] = X[i+1], X[i]
			y[i], y[rearIndex] = y[rearIndex], y[i]
			y[i], y[i+1] = y[i+1], y[i]
			rearIndex--
		} else {
			splitPoint = i
		}
	}
	return splitPoint + 1
}
// copySlice returns an independent copy of slice.
//
// Fixed: the function previously copied into newSlice but then
// returned the original slice, so callers (fitTree's per-branch
// "used" bookkeeping) kept sharing one backing array instead of
// receiving a snapshot.
func copySlice(slice []int) []int {
	newSlice := make([]int, len(slice))
	copy(newSlice, slice)
	return newSlice
} | decision_tree/decision_tree.go | 0.654564 | 0.443962 | decision_tree.go | starcoder
package pipe
import (
"crypto/rand"
"fmt"
"pipelined.dev/pipe/internal/runner"
"pipelined.dev/signal"
)
// pipeline components
type (
// Pump is a source of samples. Pump method returns a new buffer with signal data.
// If no data is available, io.EOF should be returned. If pump cannot provide data
// to fulfill buffer, it can trim the size of the buffer to align it with actual data.
// Buffer size can only be decreased.
Pump interface {
Pump(pipeID string) (func(signal.Float64) error, signal.SampleRate, int, error)
}
// Processor defines interface for pipe processors.
// Processor should return output in the same signal buffer as input.
// It is encouraged to implement in-place processing algorithms.
// Buffer size could be changed during execution, but only decrease allowed.
// Number of channels cannot be changed.
Processor interface {
Process(pipeID string, sampleRate signal.SampleRate, numChannels int) (func(signal.Float64) error, error)
}
// Sink is an interface for final stage in audio pipeline.
// This components must not change buffer content. Line can have
// multiple sinks and this will cause race condition.
Sink interface {
Sink(pipeID string, sampleRate signal.SampleRate, numChannels int) (func(signal.Float64) error, error)
}
)
// optional interfaces
type (
// Resetter is a component that must be resetted before new run.
// Reset hook is executed when Run happens.
Resetter interface {
Reset(string) error
}
// Interrupter is a component that has custom interruption logic.
// Interrupt hook is executed when Cancel happens.
Interrupter interface {
Interrupt(string) error
}
// Flusher is a component that must be flushed in the end of execution.
// Flush hook is executed in the end of the run. It will be skipped if Reset hook has failed.
Flusher interface {
Flush(string) error
}
)
// newUID returns a new random identifier in UUID-like
// 8-4-4-4-12 hex format.
//
// Fixed: the format string previously ended with "\n", so every id
// carried a trailing newline.
func newUID() string {
	b := make([]byte, 16)
	// crypto/rand.Read fills b or crashes the program per its
	// documentation, so the error is not actionable here.
	rand.Read(b)
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
// Line is a sound processing sequence of components.
// It has a single pump, zero or many processors executed sequentially
// and one or many sinks executed in parallel.
type Line struct {
Pump
Processors []Processor
Sinks []Sink
}
// Processors is a helper function to use in line constructors.
func Processors(processors ...Processor) []Processor {
return processors
}
// Sinks is a helper function to use in line constructors.
func Sinks(sinks ...Sink) []Sink {
return sinks
}
// Wait drains the channel and returns the first non-nil error
// received, or nil once the channel is closed without one.
func Wait(d <-chan error) error {
	for {
		err, ok := <-d
		if !ok {
			return nil
		}
		if err != nil {
			return err
		}
	}
}
// BindHooks of component.
func BindHooks(v interface{}) runner.Hooks {
return runner.Hooks{
Flush: flusher(v),
Interrupt: interrupter(v),
Reset: resetter(v),
}
}
// flusher returns the component's Flush hook when it implements
// Flusher, or nil otherwise.
func flusher(i interface{}) runner.Hook {
	if v, ok := i.(Flusher); ok {
		return v.Flush
	}
	return nil
}

// interrupter returns the component's Interrupt hook when it
// implements Interrupter, or nil otherwise.
func interrupter(i interface{}) runner.Hook {
	if v, ok := i.(Interrupter); ok {
		return v.Interrupt
	}
	return nil
}

// resetter returns the component's Reset hook when it implements
// Resetter, or nil otherwise.
func resetter(i interface{}) runner.Hook {
	if v, ok := i.(Resetter); ok {
		return v.Reset
	}
	return nil
} | line.go | 0.759404 | 0.412885 | line.go | starcoder
// The commit_log binary runs a simulation of the design for a commit-log
// based signer, with a simulated Kafka-like interface and a simulated
// master election package (which can be triggered to incorrectly report
// multiple masters), and with the core algorithm in the signer code.
// glog.Warning is used throughout for unexpected-but-recoverable situations,
// whereas glog.Error is used for any situation that would indicate data
// corruption.
package main
import (
"flag"
"fmt"
"math/rand"
"sync"
"time"
"github.com/golang/glog"
"github.com/google/trillian/docs/storage/commit_log/signer"
"github.com/google/trillian/docs/storage/commit_log/simelection"
"github.com/google/trillian/docs/storage/commit_log/simkafka"
)
var (
runElections = flag.Bool("run_elections", false, "Whether to use mastership election; if false, signers run in parallel")
signerCount = flag.Int("signer_count", 3, "Number of parallel signers to run")
leafInterval = flag.Duration("leaf_interval", 500*time.Millisecond, "Period between added leaves")
eventInterval = flag.Duration("event_interval", 1*time.Second, "Interval between events")
masterChangePercent = flag.Int("master_change", 20, "Percent chance of a change of master")
dualMasterPercent = flag.Int("dual_master", 8, "Percent chance of a dual master")
leafTogglePercent = flag.Int("leaf_toggle", 10, "Percent chance of toggling leaf generation")
)
// names supplies human-friendly identifiers for the first few signers.
var names = []string{"one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}

// signerName returns a readable name for signer i: a word from names
// when available, otherwise a generated "signer<i>" fallback.
func signerName(i int) string {
	if i >= len(names) {
		return fmt.Sprintf("signer%d", i)
	}
	return names[i]
}
// increment advances an uppercase base-26 "counter" string by one:
// "" -> "A", "A" -> "B", "Z" -> "AA", "AZ" -> "BA". A 'Z' carries into
// the position to its left.
func increment(s string) string {
	b := []byte(s)
	for i := len(b) - 1; i >= 0; i-- {
		if b[i] < 'Z' {
			b[i]++
			return string(b)
		}
		// Carry: this position wraps to 'A' and the loop moves left.
		b[i] = 'A'
	}
	// Every position carried (or s was empty): prepend a new 'A'.
	return "A" + string(b)
}
type lockedBool struct {
mu sync.RWMutex
val bool
}
func (ab *lockedBool) Get() bool {
ab.mu.RLock()
defer ab.mu.RUnlock()
return ab.val
}
func (ab *lockedBool) Set(v bool) {
ab.mu.Lock()
defer ab.mu.Unlock()
ab.val = v
}
func main() {
flag.Parse()
epochMillis := time.Now().UnixNano() / int64(time.Millisecond)
// Add leaves forever
generateLeaves := lockedBool{val: true}
go func() {
nextLeaf := "A"
for {
time.Sleep(*leafInterval)
if generateLeaves.Get() {
simkafka.Append("Leaves/<treeID>", nextLeaf)
nextLeaf = increment(nextLeaf)
}
}
}()
// Run a few signers forever
var election *simelection.Election
if *runElections {
election = &simelection.Election{}
} else {
// Mastership manipulations are irrelevant if no elections.
*masterChangePercent = 0
*dualMasterPercent = 0
}
signers := []*signer.Signer{}
for ii := 0; ii < *signerCount; ii++ {
signers = append(signers, signer.New(signerName(ii), election, epochMillis))
}
for _, s := range signers {
go func(s *signer.Signer) {
for {
time.Sleep(1 * time.Second)
s.Run()
}
}(s)
}
for {
choice := rand.Intn(100)
switch {
case choice < *masterChangePercent:
which := rand.Intn(len(signers))
who := signers[which].Name
glog.V(1).Infof("EVENT: Move mastership from %v to [%v]", election.Masters(), who)
election.SetMaster(who)
case choice < (*masterChangePercent + *dualMasterPercent):
if len(election.Masters()) > 1 {
// Already in dual-master mode
break
}
which1 := rand.Intn(len(signers))
who1 := signers[which1].Name
which2 := rand.Intn(len(signers))
who2 := signers[which2].Name
masters := []string{who1, who2}
glog.V(1).Infof("EVENT: Make multiple mastership, from %v to %v", election.Masters(), masters)
election.SetMasters(masters)
case choice < (*masterChangePercent + *dualMasterPercent + *leafTogglePercent):
val := generateLeaves.Get()
glog.V(1).Infof("EVENT: Toggle leaf generation from %v to %v", val, !val)
generateLeaves.Set(!val)
}
time.Sleep(*eventInterval)
// Show current status
output := simkafka.Status()
for _, s := range signers {
output += s.String()
}
fmt.Printf("\n%s\n", output)
}
} | docs/storage/commit_log/main.go | 0.533641 | 0.419886 | main.go | starcoder |
package box2d
import (
"math"
)
type EPAxisType uint8
// This structure is used to keep track of the best separating axis.
const (
EPAxisTypeUnknown EPAxisType = 0
EPAxisTypeEdgeA EPAxisType = 1
EPAxisTypeEdgeB EPAxisType = 2
)
type EPAxis struct {
Type EPAxisType
Index int
Separation float64
}
func MakeEPAxis() EPAxis {
return EPAxis{}
}
// This holds polygon B expressed in frame A.
type TempPolygon struct {
Vertices []Point
Normals []Point
Count int
}
// Reference face used for clipping
type ReferenceFace struct {
I1, I2 int
V1, V2 Point
Normal Point
SideNormal1 Point
SideOffset1 float64
SideNormal2 Point
SideOffset2 float64
}
func MakeReferenceFace() ReferenceFace {
return ReferenceFace{}
}
type EPColliderVertexType uint8
const (
EPColliderVertexTypeIsolated EPColliderVertexType = 0
EPColliderVertexTypeConcave EPColliderVertexType = 1
EPColliderVertexTypeConvex EPColliderVertexType = 2
)
// This class collides and edge and a polygon, taking into account edge adjacency.
type EPCollider struct {
PolygonB TempPolygon
Xf Transform
CentroidB Point
V0, V1, V2, V3 Point
Normal0, Normal1, Normal2 Point
Normal Point
Type1, Type2 uint8
LowerLimit, UpperLimit Point
Radius float64
Front bool
}
func MakeEPCollider() EPCollider {
return EPCollider{}
}
// Algorithm:
// 1. Classify v1 and v2
// 2. Classify polygon centroid as front or back
// 3. Flip normal if necessary
// 4. Initialize normal range to [-pi, pi] about face normal
// 5. Adjust normal range according to adjacent edges
// 6. Visit each separating axes, only accept axes within the range
// 7. Return if _any_ axis indicates separation
// 8. Clip
// Collide computes the contact manifold between edge A and polygon B.
// All work happens in edge A's local frame. Following the algorithm in
// the comment above: classify the ghost vertices, decide front/back from
// the polygon centroid, derive the admissible normal arc from adjacent
// edges, pick the separating axis with hysteresis, then clip the incident
// edge down to at most two contact points. On separation the function
// returns early with manifold.PointCount left at 0.
func (collider *EPCollider) Collide(manifold *Manifold, edgeA *EdgeShape, xfA Transform, polygonB *PolygonShape, xfB Transform) {
	collider.Xf = TransformMulT(xfA, xfB)
	collider.CentroidB = TransformPointMul(collider.Xf, polygonB.Centroid)
	collider.V0 = edgeA.Vertex0
	collider.V1 = edgeA.Vertex1
	collider.V2 = edgeA.Vertex2
	collider.V3 = edgeA.Vertex3
	hasVertex0 := edgeA.HasVertex0
	hasVertex3 := edgeA.HasVertex3
	edge1 := PointSub(collider.V2, collider.V1)
	edge1.Normalize()
	// Normal1 is the outward normal of segment V1-V2 (edge rotated -90 degrees).
	collider.Normal1.Set(edge1.Y, -edge1.X)
	offset1 := PointDot(collider.Normal1, PointSub(collider.CentroidB, collider.V1))
	offset0 := 0.0
	offset2 := 0.0
	convex1 := false
	convex2 := false
	// Is there a preceding edge?
	if hasVertex0 {
		edge0 := PointSub(collider.V1, collider.V0)
		edge0.Normalize()
		collider.Normal0.Set(edge0.Y, -edge0.X)
		convex1 = PointCross(edge0, edge1) >= 0.0
		offset0 = PointDot(collider.Normal0, PointSub(collider.CentroidB, collider.V0))
	}
	// Is there a following edge?
	if hasVertex3 {
		edge2 := PointSub(collider.V3, collider.V2)
		edge2.Normalize()
		collider.Normal2.Set(edge2.Y, -edge2.X)
		convex2 = PointCross(edge1, edge2) > 0.0
		offset2 = PointDot(collider.Normal2, PointSub(collider.CentroidB, collider.V2))
	}
	// Determine front or back collision. Determine collision normal limits.
	// Each case sets the search normal and the [LowerLimit, UpperLimit] arc
	// based on which adjacent segments exist and whether they are convex.
	if hasVertex0 && hasVertex3 {
		// Both adjacent segments exist.
		if convex1 && convex2 {
			collider.Front = offset0 >= 0.0 || offset1 >= 0.0 || offset2 >= 0.0
			if collider.Front {
				collider.Normal = collider.Normal1
				collider.LowerLimit = collider.Normal0
				collider.UpperLimit = collider.Normal2
			} else {
				collider.Normal = collider.Normal1.OperatorNegate()
				collider.LowerLimit = collider.Normal1.OperatorNegate()
				collider.UpperLimit = collider.Normal1.OperatorNegate()
			}
		} else if convex1 {
			collider.Front = offset0 >= 0.0 || (offset1 >= 0.0 && offset2 >= 0.0)
			if collider.Front {
				collider.Normal = collider.Normal1
				collider.LowerLimit = collider.Normal0
				collider.UpperLimit = collider.Normal1
			} else {
				collider.Normal = collider.Normal1.OperatorNegate()
				collider.LowerLimit = collider.Normal2.OperatorNegate()
				collider.UpperLimit = collider.Normal1.OperatorNegate()
			}
		} else if convex2 {
			collider.Front = offset2 >= 0.0 || (offset0 >= 0.0 && offset1 >= 0.0)
			if collider.Front {
				collider.Normal = collider.Normal1
				collider.LowerLimit = collider.Normal1
				collider.UpperLimit = collider.Normal2
			} else {
				collider.Normal = collider.Normal1.OperatorNegate()
				collider.LowerLimit = collider.Normal1.OperatorNegate()
				collider.UpperLimit = collider.Normal0.OperatorNegate()
			}
		} else {
			collider.Front = offset0 >= 0.0 && offset1 >= 0.0 && offset2 >= 0.0
			if collider.Front {
				collider.Normal = collider.Normal1
				collider.LowerLimit = collider.Normal1
				collider.UpperLimit = collider.Normal1
			} else {
				collider.Normal = collider.Normal1.OperatorNegate()
				collider.LowerLimit = collider.Normal2.OperatorNegate()
				collider.UpperLimit = collider.Normal0.OperatorNegate()
			}
		}
	} else if hasVertex0 {
		// Only the preceding segment exists.
		if convex1 {
			collider.Front = offset0 >= 0.0 || offset1 >= 0.0
			if collider.Front {
				collider.Normal = collider.Normal1
				collider.LowerLimit = collider.Normal0
				collider.UpperLimit = collider.Normal1.OperatorNegate()
			} else {
				collider.Normal = collider.Normal1.OperatorNegate()
				collider.LowerLimit = collider.Normal1
				collider.UpperLimit = collider.Normal1.OperatorNegate()
			}
		} else {
			collider.Front = offset0 >= 0.0 && offset1 >= 0.0
			if collider.Front {
				collider.Normal = collider.Normal1
				collider.LowerLimit = collider.Normal1
				collider.UpperLimit = collider.Normal1.OperatorNegate()
			} else {
				collider.Normal = collider.Normal1.OperatorNegate()
				collider.LowerLimit = collider.Normal1
				collider.UpperLimit = collider.Normal0.OperatorNegate()
			}
		}
	} else if hasVertex3 {
		// Only the following segment exists.
		if convex2 {
			collider.Front = offset1 >= 0.0 || offset2 >= 0.0
			if collider.Front {
				collider.Normal = collider.Normal1
				collider.LowerLimit = collider.Normal1.OperatorNegate()
				collider.UpperLimit = collider.Normal2
			} else {
				collider.Normal = collider.Normal1.OperatorNegate()
				collider.LowerLimit = collider.Normal1.OperatorNegate()
				collider.UpperLimit = collider.Normal1
			}
		} else {
			collider.Front = offset1 >= 0.0 && offset2 >= 0.0
			if collider.Front {
				collider.Normal = collider.Normal1
				collider.LowerLimit = collider.Normal1.OperatorNegate()
				collider.UpperLimit = collider.Normal1
			} else {
				collider.Normal = collider.Normal1.OperatorNegate()
				collider.LowerLimit = collider.Normal2.OperatorNegate()
				collider.UpperLimit = collider.Normal1
			}
		}
	} else {
		// Isolated segment: no adjacency information.
		collider.Front = offset1 >= 0.0
		if collider.Front {
			collider.Normal = collider.Normal1
			collider.LowerLimit = collider.Normal1.OperatorNegate()
			collider.UpperLimit = collider.Normal1.OperatorNegate()
		} else {
			collider.Normal = collider.Normal1.OperatorNegate()
			collider.LowerLimit = collider.Normal1
			collider.UpperLimit = collider.Normal1
		}
	}
	// Get polygonB in frameA
	collider.PolygonB.Count = polygonB.Count
	collider.PolygonB.Vertices = make([]Point, len(polygonB.Vertices))
	collider.PolygonB.Normals = make([]Point, len(polygonB.Normals))
	for i := 0; i < polygonB.Count; i++ {
		collider.PolygonB.Vertices[i] = TransformPointMul(collider.Xf, polygonB.Vertices[i])
		collider.PolygonB.Normals[i] = RotPointMul(collider.Xf.Q, polygonB.Normals[i])
	}
	collider.Radius = polygonB.Radius + edgeA.Radius
	manifold.PointCount = 0
	edgeAxis := collider.ComputeEdgeSeparation()
	// If no valid normal can be found then this edge should not collide.
	if edgeAxis.Type == EPAxisTypeUnknown {
		return
	}
	if edgeAxis.Separation > collider.Radius {
		return
	}
	polygonAxis := collider.ComputePolygonSeparation()
	if polygonAxis.Type != EPAxisTypeUnknown && polygonAxis.Separation > collider.Radius {
		return
	}
	// Use hysteresis for jitter reduction: the polygon axis only wins when
	// it is clearly better than the edge axis.
	k_relativeTol := 0.98
	k_absoluteTol := 0.001
	primaryAxis := MakeEPAxis()
	if polygonAxis.Type == EPAxisTypeUnknown {
		primaryAxis = edgeAxis
	} else if polygonAxis.Separation > k_relativeTol*edgeAxis.Separation+k_absoluteTol {
		primaryAxis = polygonAxis
	} else {
		primaryAxis = edgeAxis
	}
	// ie: the incident edge to be clipped; rf: the reference face.
	ie := make([]ClipVertex, 2)
	rf := MakeReferenceFace()
	if primaryAxis.Type == EPAxisTypeEdgeA {
		manifold.Type = ManifoldTypeFaceA
		// Search for the polygon normal that is most anti-parallel to the edge normal.
		bestIndex := 0
		bestValue := PointDot(collider.Normal, collider.PolygonB.Normals[0])
		for i := 1; i < collider.PolygonB.Count; i++ {
			value := PointDot(collider.Normal, collider.PolygonB.Normals[i])
			if value < bestValue {
				bestValue = value
				bestIndex = i
			}
		}
		// i1/i2 are the endpoints of the polygon's incident edge (wrapping).
		i1 := bestIndex
		i2 := 0
		if i1+1 < collider.PolygonB.Count {
			i2 = i1 + 1
		}
		ie[0].V = collider.PolygonB.Vertices[i1]
		ie[0].Id.IndexA = 0
		ie[0].Id.IndexB = uint8(i1)
		ie[0].Id.TypeA = ContactFeatureTypeFace
		ie[0].Id.TypeB = ContactFeatureTypeVertex
		ie[1].V = collider.PolygonB.Vertices[i2]
		ie[1].Id.IndexA = 0
		ie[1].Id.IndexB = uint8(i2)
		ie[1].Id.TypeA = ContactFeatureTypeFace
		ie[1].Id.TypeB = ContactFeatureTypeVertex
		if collider.Front {
			rf.I1 = 0
			rf.I2 = 1
			rf.V1 = collider.V1
			rf.V2 = collider.V2
			rf.Normal = collider.Normal1
		} else {
			rf.I1 = 1
			rf.I2 = 0
			rf.V1 = collider.V2
			rf.V2 = collider.V1
			rf.Normal = collider.Normal1.OperatorNegate()
		}
	} else {
		// Polygon face is the reference; the edge segment is incident.
		manifold.Type = ManifoldTypeFaceB
		ie[0].V = collider.V1
		ie[0].Id.IndexA = 0
		ie[0].Id.IndexB = uint8(primaryAxis.Index)
		ie[0].Id.TypeA = ContactFeatureTypeVertex
		ie[0].Id.TypeB = ContactFeatureTypeFace
		ie[1].V = collider.V2
		ie[1].Id.IndexA = 0
		ie[1].Id.IndexB = uint8(primaryAxis.Index)
		ie[1].Id.TypeA = ContactFeatureTypeVertex
		ie[1].Id.TypeB = ContactFeatureTypeFace
		rf.I1 = primaryAxis.Index
		if rf.I1+1 < collider.PolygonB.Count {
			rf.I2 = rf.I1 + 1
		} else {
			rf.I2 = 0
		}
		rf.V1 = collider.PolygonB.Vertices[rf.I1]
		rf.V2 = collider.PolygonB.Vertices[rf.I2]
		rf.Normal = collider.PolygonB.Normals[rf.I1]
	}
	rf.SideNormal1.Set(rf.Normal.Y, -rf.Normal.X)
	rf.SideNormal2 = rf.SideNormal1.OperatorNegate()
	rf.SideOffset1 = PointDot(rf.SideNormal1, rf.V1)
	rf.SideOffset2 = PointDot(rf.SideNormal2, rf.V2)
	// Clip incident edge against extruded edge1 side edges.
	clipPoints1 := make([]ClipVertex, 2)
	clipPoints2 := make([]ClipVertex, 2)
	np := 0
	// Clip to box side 1; both points must survive or there is no contact.
	np = ClipSegmentToLine(clipPoints1, ie, rf.SideNormal1, rf.SideOffset1, rf.I1)
	if np < _maxManifoldPoints {
		return
	}
	// Clip to negative box side 1
	np = ClipSegmentToLine(clipPoints2, clipPoints1, rf.SideNormal2, rf.SideOffset2, rf.I2)
	if np < _maxManifoldPoints {
		return
	}
	// Now clipPoints2 contains the clipped points.
	if primaryAxis.Type == EPAxisTypeEdgeA {
		manifold.LocalNormal = rf.Normal
		manifold.LocalPoint = rf.V1
	} else {
		manifold.LocalNormal = polygonB.Normals[rf.I1]
		manifold.LocalPoint = polygonB.Vertices[rf.I1]
	}
	// Keep only clipped points within the combined radius; for a face-B
	// manifold the contact feature roles are swapped back.
	pointCount := 0
	for i := 0; i < _maxManifoldPoints; i++ {
		separation := 0.0
		separation = PointDot(rf.Normal, PointSub(clipPoints2[i].V, rf.V1))
		if separation <= collider.Radius {
			cp := &manifold.Points[pointCount]
			if primaryAxis.Type == EPAxisTypeEdgeA {
				cp.LocalPoint = TransformPointMulT(collider.Xf, clipPoints2[i].V)
				cp.Id = clipPoints2[i].Id
			} else {
				cp.LocalPoint = clipPoints2[i].V
				cp.Id.TypeA = clipPoints2[i].Id.TypeB
				cp.Id.TypeB = clipPoints2[i].Id.TypeA
				cp.Id.IndexA = clipPoints2[i].Id.IndexB
				cp.Id.IndexB = clipPoints2[i].Id.IndexA
			}
			pointCount++
		}
	}
	manifold.PointCount = pointCount
}
// ComputeEdgeSeparation returns the edge-A axis: the smallest projection of
// polygon B's vertices onto the current search normal, measured from V1.
func (collider *EPCollider) ComputeEdgeSeparation() EPAxis {
	axis := MakeEPAxis()
	axis.Type = EPAxisTypeEdgeA
	// Index 0 encodes a front-side collision, 1 a back-side one.
	axis.Index = 1
	if collider.Front {
		axis.Index = 0
	}
	minSeparation := math.MaxFloat64
	for k := 0; k < collider.PolygonB.Count; k++ {
		proj := PointDot(collider.Normal, PointSub(collider.PolygonB.Vertices[k], collider.V1))
		if proj < minSeparation {
			minSeparation = proj
		}
	}
	axis.Separation = minSeparation
	return axis
}
// ComputePolygonSeparation finds the polygon face (edge-B axis) with the
// largest separation from the edge segment V1-V2. A face whose separation
// exceeds the combined radius short-circuits as a proven separating axis;
// candidate normals outside the admissible [lower, upper] arc (up to the
// angular slop tolerance) are skipped.
func (collider *EPCollider) ComputePolygonSeparation() EPAxis {
	best := MakeEPAxis()
	best.Type = EPAxisTypeUnknown
	best.Index = -1
	best.Separation = -math.MaxFloat64
	// Perpendicular to the search normal, used to select which limit
	// (upper or lower) bounds a candidate axis.
	perp := Point{X: -collider.Normal.Y, Y: collider.Normal.X}
	for idx := 0; idx < collider.PolygonB.Count; idx++ {
		candidate := collider.PolygonB.Normals[idx].OperatorNegate()
		vertex := collider.PolygonB.Vertices[idx]
		d1 := PointDot(candidate, PointSub(vertex, collider.V1))
		d2 := PointDot(candidate, PointSub(vertex, collider.V2))
		separation := math.Min(d1, d2)
		if separation > collider.Radius {
			// No collision: this face alone separates the shapes.
			best.Type = EPAxisTypeEdgeB
			best.Index = idx
			best.Separation = separation
			return best
		}
		// Adjacency: reject candidate normals outside the allowed arc.
		limit := collider.LowerLimit
		if PointDot(candidate, perp) >= 0.0 {
			limit = collider.UpperLimit
		}
		if PointDot(PointSub(candidate, limit), collider.Normal) < -_angularSlop {
			continue
		}
		if separation > best.Separation {
			best.Type = EPAxisTypeEdgeB
			best.Index = idx
			best.Separation = separation
		}
	}
	return best
}
func CollideEdgeAndPolygon(manifold *Manifold, edgeA *EdgeShape, xfA Transform, polygonB *PolygonShape, xfB Transform) {
collider := MakeEPCollider()
collider.Collide(manifold, edgeA, xfA, polygonB, xfB)
} | pkg/box2d/collide_edge.go | 0.684264 | 0.629746 | collide_edge.go | starcoder |
package restruct
import (
"encoding/binary"
"reflect"
)
// fieldFromIntf derives the field descriptor and reflect value for v,
// dereferencing a pointer if one was passed.
func fieldFromIntf(v interface{}) (field, reflect.Value) {
	rv := reflect.ValueOf(v)
	if rv.Kind() == reflect.Ptr {
		rv = rv.Elem()
	}
	return fieldFromType(rv.Type()), rv
}
/*
Unpack reads data from a byteslice into a value.
Two types of values are directly supported here: Unpackers and structs. You can
pass them by value or by pointer, although it is an error if Restruct is
unable to set a value because it is unaddressable.
For structs, each field will be read sequentially based on a straightforward
interpretation of the type. For example, an int32 will be read as a 32-bit
signed integer, taking 4 bytes of memory. Structures and arrays are laid out
flat with no padding or metadata.
Unexported fields are ignored, except for fields named _ - those fields will
be treated purely as padding. Padding will not be preserved through packing
and unpacking.
The behavior of deserialization can be customized using struct tags. The
following struct tag syntax is supported:
`struct:"[flags...]"`
Flags are comma-separated keys. The following are available:
type A bare type name, e.g. int32 or []string. For integer
types, it is possible to specify the number of bits,
allowing the definition of bitfields, by appending a
colon followed by the number of bits. For example,
uint32:20 would specify a field that is 20 bits long.
sizeof=[Field] Specifies that the field should be treated as a count of
the number of elements in Field.
sizefrom=[Field] Specifies that the field should determine the number of
elements in itself by reading the counter in Field.
skip=[Count] Skips Count bytes before the field. You can use this to
e.g. emulate C structure alignment.
big,msb Specifies big endian byte order. When applied to
structs, this will apply to all fields under the struct.
little,lsb Specifies little endian byte order. When applied to
structs, this will apply to all fields under the struct.
variantbool Specifies that the boolean `true` value should be
encoded as -1 instead of 1.
invertedbool Specifies that the `true` and `false` encodings for
boolean should be swapped.
*/
// Unpack decodes data into v according to the struct tags documented above.
// Decoding errors raised internally via panic are recovered and returned.
func Unpack(data []byte, order binary.ByteOrder, v interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(error); !ok {
				// Re-raise non-error panic values. The previous code panicked
				// with the zero-valued err (nil), which discarded the original
				// panic payload.
				panic(r)
			}
		}
	}()
	f, val := fieldFromIntf(v)
	ss := structstack{allowexpr: expressionsEnabled, buf: data}
	d := decoder{structstack: ss, order: order}
	d.read(f, val)
	return
}
/*
SizeOf returns the binary encoded size of the given value, in bytes.
*/
func SizeOf(v interface{}) (size int, err error) {
defer func() {
if r := recover(); r != nil {
err = r.(error)
}
}()
ss := structstack{allowexpr: expressionsEnabled}
f, val := fieldFromIntf(v)
return ss.fieldbytes(f, val), nil
}
/*
BitSize returns the binary encoded size of the given value, in bits.
*/
func BitSize(v interface{}) (size int, err error) {
defer func() {
if r := recover(); r != nil {
err = r.(error)
}
}()
ss := structstack{allowexpr: expressionsEnabled}
f, val := fieldFromIntf(v)
return ss.fieldbits(f, val), nil
}
/*
Pack writes data from a datastructure into a byteslice.
Two types of values are directly supported here: Packers and structs. You can
pass them by value or by pointer.
Each structure is serialized in the same way it would be deserialized with
Unpack. See Unpack documentation for the struct tag format.
*/
func Pack(order binary.ByteOrder, v interface{}) (data []byte, err error) {
defer func() {
if r := recover(); r != nil {
data = nil
err = r.(error)
}
}()
ss := structstack{allowexpr: expressionsEnabled, buf: []byte{}}
f, val := fieldFromIntf(v)
data = make([]byte, ss.fieldbytes(f, val))
ss.buf = data
e := encoder{structstack: ss, order: order}
e.write(f, val)
return
} | sgx-tools/vendor/github.com/go-restruct/restruct/packing.go | 0.770551 | 0.499939 | packing.go | starcoder |
The logger package defines the function that initializes a Rabia Logger and several logger-related functions. A
Logger can be set as a terminal logger (output to stdout only), or file logger (output to a file in the log folder),
or a terminal & file logger.
Notes:
1. If the logger write to a file, the file pointer is also returned and needs to be managed by the user. The user needs
to call .Sync() method of the file pointer to flush writes to the disk. Otherwise, the log file's last line may be
ill-formatted. See the last few lines of the Executor.Executor() function for an example
2. If we let the logger output to a file, the file's name carries a prefix based on the current configurations
in Conf. This strategy helps us organize logs that are generated by different benchmarking tests.
3. We choose the package zerolog logger for Rabia version 4 because it offers better visual effects for command-line
logs. Some of our developers, based outside the States, constantly have trouble installing zap, our logging module
for Rabia version 3.
4. Each entity (e.g., a client, a proxy, or a network layer) can have one or more loggers by having loggers of
different subId fields.
*/
package logger
import (
"fmt"
"github.com/rs/zerolog"
"os"
"path"
"rabia/internal/config"
)
/*
GetLogFilePathAndName builds the path of a to-file logger's log file inside
the project's "logs" folder. The file name encodes the benchmark parameters
currently held in Conf so logs from different test runs stay organized.

component: the name of the Goroutine that owns the logger (proxy, consensus,
network, client, server, ...); svrId: server id, usually Conf.SvrId;
subId: subroutine id, e.g. a consensus instance's id.
*/
func GetLogFilePathAndName(component string, svrId, subId uint32) string {
	c := config.Conf
	fileName := fmt.Sprintf("/logs/ns%d-nf%d-nc%d-nC%d-to%d-tt%d-cb%d-pb%d-pt%d-nb%d-nt%d--%s-%d-%d.log",
		c.NServers, c.NFaulty, c.NClients, c.NConcurrency,
		int64(c.ClientTimeout.Seconds()), c.ClientThinkTime, c.ClientBatchSize,
		c.ProxyBatchSize, c.ProxyBatchTimeout, c.NetworkBatchSize,
		c.NetworkBatchTimeout, component, svrId, subId)
	return path.Join(c.ProjectFolder, fileName)
}
/*
GetLogLevel converts Conf.LogLevel into zerolog's log level.
Unlike what an older comment suggested, an unparsable level does not fall
back to debug: it panics with the offending value and the parse error.
*/
func GetLogLevel() zerolog.Level {
	level, err := zerolog.ParseLevel(config.Conf.LogLevel)
	if err != nil {
		panic(fmt.Sprint(config.Conf.LogLevel, ", ", err))
	}
	return level
}
/*
Initializes a zerolog Logger.
component: the name of the Goroutine that owns the logger, e.g., proxy, consensus, network, client, server...
svrId: server id, usually equals to Conf.SvrId
subId: subroutine id -- e.g., a consensus instance's id
loggerType: valid options are "terminal", "file", and "both". If other options are given, the logger logs to the
terminal as well as a file.
Note: the name of the log file (if created) consists of the terminal arguments used in initializing the server. See
comments of function GetLogFilePathAndName.
*/
func InitLogger(component string, svrId, subId uint32, loggerType string) (zerolog.Logger, *os.File) {
// Creates a log folder if necessary
if err := os.MkdirAll(path.Join(config.Conf.ProjectFolder, "logs"), os.ModePerm); err != nil {
panic(err)
}
zerolog.SetGlobalLevel(GetLogLevel())
// Generates the log file's path and name
if loggerType == "terminal" {
multi := zerolog.MultiLevelWriter(zerolog.ConsoleWriter{Out: os.Stdout})
return zerolog.New(multi), nil
} else {
logFile := GetLogFilePathAndName(component, svrId, subId)
fileWriter, err := os.Create(logFile)
if err != nil {
panic(err)
}
if loggerType == "file" {
multi := zerolog.MultiLevelWriter(fileWriter)
return zerolog.New(multi), fileWriter
} else if loggerType == "both" {
multi := zerolog.MultiLevelWriter(zerolog.ConsoleWriter{Out: os.Stdout}, fileWriter)
return zerolog.New(multi), fileWriter
} else {
panic("invalid logger type")
}
}
} | internal/logger/logger.go | 0.667798 | 0.450601 | logger.go | starcoder |
package gart
import (
"fmt"
"math"
)
// Vector2d is a immutable struct for doing 2d vector operations.
type Vector2d struct {
X float64
Y float64
}
// NewVector2d creates a new vector from the x and y components
func NewVector2d(x, y float64) Vector2d {
return Vector2d{x, y}
}
// NewVector2dFromPolar creates a new vector from a given angle and maagnitude.
func NewVector2dFromPolar(angle, magnitude float64) Vector2d {
x := math.Cos(angle) * magnitude
y := math.Sin(angle) * magnitude
return NewVector2d(x, y)
}
// Addition
func (this Vector2d) Add(other Vector2d) Vector2d {
return NewVector2d(this.X+other.X, this.Y+other.Y)
}
func (this Vector2d) AddScalar(scalar float64) Vector2d {
return NewVector2d(this.X+scalar, this.Y+scalar)
}
// Subtraction
func (this Vector2d) Sub(other Vector2d) Vector2d {
return NewVector2d(this.X-other.X, this.Y-other.Y)
}
func (this Vector2d) SubScalar(scalar float64) Vector2d {
return NewVector2d(this.X-scalar, this.Y-scalar)
}
// Multiply
func (this Vector2d) Mult(other Vector2d) Vector2d {
return NewVector2d(this.X*other.X, this.Y*other.Y)
}
func (this Vector2d) MultScalar(scalar float64) Vector2d {
return NewVector2d(this.X*scalar, this.Y*scalar)
}
// Divide
func (this Vector2d) Div(other Vector2d) Vector2d {
return NewVector2d(this.X/other.X, this.Y/other.Y)
}
func (this Vector2d) DivScalar(scalar float64) Vector2d {
return NewVector2d(this.X/scalar, this.Y/scalar)
}
// Misc Math
// TODO: This function
// func (this Vector2d) Cross(other Vector2d) (Vector2d) {
// }
// Length returns the length of the vector.
func (this Vector2d) Length() float64 {
return math.Sqrt(this.X*this.X + this.Y*this.Y)
}
// LengthSq returns the length of the vector squared. This should provide
// performance benefits when in tight loops by not doing math.Sqrt().
func (this Vector2d) LengthSq() float64 {
return this.X*this.X + this.Y*this.Y
}
// Distance returns the distance computed between two vectors.
func (this Vector2d) Distance(other Vector2d) float64 {
dx := this.X - other.X
dy := this.Y - other.Y
return math.Sqrt(dx*dx + dy*dy)
}
// Normalize returns a normalized vector.
func (this Vector2d) Normalize() Vector2d {
len := this.Length()
return this.DivScalar(len)
}
// Direction gives the angle that this vector is facing (in radians)
func (this Vector2d) Direction() float64 {
return math.Atan2(this.Y, this.X)
}
// PolarCoordinates returns the angle and magnitude of the vector.
func (this Vector2d) PolarCoordinates() (float64, float64) {
return this.Direction(), this.Length()
}
func (this Vector2d) Rotate(theta float64) Vector2d {
x := this.X*math.Cos(theta) - this.Y*math.Sin(theta)
y := this.X*math.Sin(theta) + this.Y*math.Cos(theta)
return NewVector2d(x, y)
}
// Compare
func (this Vector2d) Equals(other Vector2d) bool {
// TODO: Make this use an optional elipson value
return this.X == other.X && this.Y == other.Y
}
//Misc
func (this Vector2d) String() string {
return fmt.Sprintf("Vector2d{%.3f, %.3f}", this.X, this.Y)
} | vector.go | 0.81571 | 0.885086 | vector.go | starcoder |
package trie
// A Navigator allows a trie to be navigated, by keeping previous state
// (for example the prefix and the nodes back up the trie).
type Navigator struct {
	// nodes[i] is the trie node reached after following prefix[:i];
	// nodes[0] is always the root.
	nodes []TrieNode
	// prefix holds the characters pushed so far.
	prefix []byte
}

// NewNavigator creates a navigator for the provided trie.
func NewNavigator(tn TrieNode) *Navigator {
	result := &Navigator{make([]TrieNode, 0, 20), make([]byte, 0, 20)}
	result.nodes = append(result.nodes, tn)
	return result
}

// Iterate through the trie, sending all valid words along the channel.
// Recursively explores every branch 'A'..'Z' from the current position,
// restoring the navigator's state (via Pop) after each branch.
func (n *Navigator) allValidWords(words chan string) {
	if !n.IsPrefix() {
		return
	}
	if n.IsWord() {
		words <- n.Word()
	}
	for c := byte('A'); c <= byte('Z'); c++ {
		n.Push(c)
		n.allValidWords(words)
		n.Pop()
	}
}

// ValidWordsChan returns a channel that receives all words from the current
// place in the trie. The navigator should not be used until all words have been
// read from the channel, since the traversal goroutine mutates its state.
func (n *Navigator) ValidWordsChan() chan string {
	c := make(chan string)
	go func() {
		n.allValidWords(c)
		close(c)
	}()
	return c
}

// All returns all words that start from the current place in the trie,
// appended to the provided result slice.
func (n *Navigator) All(result []string) []string {
	for w := range n.ValidWordsChan() {
		result = append(result, w)
	}
	return result
}
// Count returns the number of words at the current location. It drains the
// traversal channel, counting words without retaining them.
func (n *Navigator) Count() int {
	result := 0
	// `for range` (not `for _ = range`) is the idiomatic form when the
	// received value is unused.
	for range n.ValidWordsChan() {
		result++
	}
	return result
}
// IsWord reports whether we are at the end of a word.
func (n *Navigator) IsWord() bool {
	return n.lastNode().IsWord()
}

// lastNode returns the trie node for the current prefix (the top of the
// node stack).
func (n *Navigator) lastNode() TrieNode {
	return n.nodes[len(n.nodes)-1]
}

// IsPrefix reports whether the there are any words with the
// prefix represented by the current postion in the trie.
func (n *Navigator) IsPrefix() bool {
	return n.lastNode().IsPrefix()
}

// Word returns the current prefix.
func (n *Navigator) Word() string {
	return string(n.prefix)
}

// Push descends the trie along the given character.
func (n *Navigator) Push(c byte) {
	n.prefix = append(n.prefix, c)
	n.nodes = append(n.nodes, n.lastNode().Follow(c))
}

// PushString descends the trie, by pushing the characters
// of the given string.
func (n *Navigator) PushString(s string) {
	for _, c := range s {
		n.Push(byte(c))
	}
}

// Pop removes the last character of the current prefix, returning back
// up the trie.
func (n *Navigator) Pop() {
	n.prefix = n.prefix[:len(n.prefix)-1]
	n.nodes = n.nodes[:len(n.nodes)-1]
}

// Reset returns to the root of the trie, clearing the prefix and keeping
// only the root node on the stack.
func (n *Navigator) Reset() {
	n.prefix = n.prefix[:0]
	n.nodes = n.nodes[:1]
}
package randomock
// Policy configures the policy that RandoMock objects adhere to when they are
// called more times than they have return values.
type Policy int

const (
	// ErrorOutPolicy (the default) makes RandoMock panic once the scripted
	// return values for a key are exhausted. This only applies when a key
	// has more than one return value.
	ErrorOutPolicy Policy = iota
	// WrapAroundPolicy restarts from the first return value after the last.
	WrapAroundPolicy
	// RepeatLastPolicy keeps returning the final value after exhaustion.
	RepeatLastPolicy
)

var defaultPolicy = ErrorOutPolicy

// OutOfBound is the structure returned when an ErrorOutPolicy panics.
type OutOfBound struct {
}

func (o OutOfBound) Error() string {
	return "more calls to randomock than return values"
}

// OutOfBoundsError error is the error value returned when the policy is broken
var OutOfBoundsError = OutOfBound{}

// SetDefaultPolicy sets the default policy of all RandoMock instances going
// forward; instances that already exist keep their configured policy.
func SetDefaultPolicy(p Policy) {
	defaultPolicy = p
}

// results keeps track of the scripted return values for one RandoMock key.
type results struct {
	values []float64
	count  int
	policy Policy
}

// Add appends more scripted return values.
func (r *results) Add(values ...float64) {
	r.values = append(r.values, values...)
}

// Get returns the next scripted value. A single-value script repeats
// forever regardless of policy; multi-value scripts apply the policy
// once exhausted.
func (r *results) Get() float64 {
	if len(r.values) <= 1 {
		return r.values[0]
	}
	if r.count >= len(r.values) {
		switch r.policy {
		case WrapAroundPolicy:
			r.count = 0
		case RepeatLastPolicy:
			return r.values[len(r.values)-1]
		case ErrorOutPolicy:
			panic(OutOfBoundsError)
		}
	}
	next := r.values[r.count]
	r.count++
	return next
}
// RandoMock is a mock of Random that replays scripted return values per key,
// so different call sites can be scripted independently. Use it in tests.
type RandoMock struct {
	ret    map[string]*results
	policy Policy
}

// NewRandoMock creates a new RandoMock instance using the package-wide
// default policy.
func NewRandoMock() *RandoMock {
	return &RandoMock{ret: map[string]*results{}, policy: defaultPolicy}
}

// Add appends return values for key, creating the key's entry on first use.
// It returns the receiver so calls can be chained.
func (r *RandoMock) Add(key string, values ...float64) *RandoMock {
	entry, exists := r.ret[key]
	if !exists {
		entry = &results{policy: r.policy}
		r.ret[key] = entry
	}
	entry.Add(values...)
	return r
}

// SetPolicy sets the policy of the specific key and returns the receiver
// for chaining. The key must already have been registered via Add.
func (r *RandoMock) SetPolicy(key string, p Policy) *RandoMock {
	r.ret[key].policy = p
	return r
}

// Policy returns the policy of a specific key.
func (r *RandoMock) Policy(key string) Policy {
	return r.ret[key].policy
}
// ExpFloat64 is a mocked version of rand.ExpFloat64
func (r *RandoMock) ExpFloat64(key string) float64 {
return r.ret[key].Get()
}
// Float32 is a mocked version of rand.Float32
func (r *RandoMock) Float32(key string) float32 {
return float32(r.ret[key].Get())
}
// Float64 is a mocked version of rand.Float64
func (r *RandoMock) Float64(key string) float64 {
return r.ret[key].Get()
}
// Int is a mocked version of rand.Int
func (r *RandoMock) Int(key string) int {
return int(r.ret[key].Get())
}
// Int31 is a mocked version of rand.Int31
func (r *RandoMock) Int31(key string) int32 {
return int32(r.ret[key].Get())
}
// Int31n is a mocked version of rand.Int31n
func (r *RandoMock) Int31n(key string, n int32) int32 {
return int32(r.ret[key].Get())
}
// Int63 is a mocked version of rand.Int63
func (r *RandoMock) Int63(key string) int64 {
return int64(r.ret[key].Get())
}
// Int63n is a mocked version of rand.Int63n
func (r *RandoMock) Int63n(key string, n int64) int64 {
return int64(r.ret[key].Get())
}
// Intn is a mocked version of rand.Intn
func (r *RandoMock) Intn(key string, n int) int {
return int(r.ret[key].Get())
}
// NormFloat64 is a mocked version of rand.NormFloat64
func (r *RandoMock) NormFloat64(key string) float64 {
return r.ret[key].Get()
}
// Uint32 is a mocked version of rand.Uint32
func (r *RandoMock) Uint32(key string) uint32 {
return uint32(r.ret[key].Get())
}
// Uint64 is a mocked version of rand.Uint64
func (r *RandoMock) Uint64(key string) uint64 {
return uint64(r.ret[key].Get())
} | mock.go | 0.801392 | 0.457924 | mock.go | starcoder |
package evalprocessor
import "github.com/vjeantet/bitfan/processors/doc"
// Doc returns the processor's static self-description — name, import path,
// documentation text, option metadata, and event ports — for consumption by
// bitfan's documentation generator. (This file appears machine-generated;
// the literal content should not be edited by hand.)
func (p *processor) Doc() *doc.Processor {
	return &doc.Processor{
		Name:       "evalprocessor",
		ImportPath: "github.com/vjeantet/bitfan/processors/filter-eval",
		Doc:        "Modify or add event's field with the result of\n\n* an expression (math or compare)\n* a go template\n\n**Operators and types supported in expression :**\n\n* Modifiers: `+` `-` `/` `*` `&` `|` `^` `**` `%` `>>` `<<`\n* Comparators: `>` `>=` `<` `<=` `==` `!=` `=~` `!~`\n* Logical ops: `||` `&&`\n* Numeric constants, as 64-bit floating point (`12345.678`)\n* String constants (single quotes: `'foobar'`)\n* Date constants (single quotes, using any permutation of RFC3339, ISO8601, ruby date, or unix date; date parsing is automatically tried with any string constant)\n* Boolean constants: `true` `false`\n* Parenthesis to control order of evaluation `(` `)`\n* Arrays (anything separated by `,` within parenthesis: `(1, 2, 'foo')`)\n* Prefixes: `!` `-` `~`\n* Ternary conditional: `?` `:`\n* Null coalescence: `??`",
		DocShort:   "Evaluate expression",
		Options: &doc.ProcessorOptions{
			Doc: "",
			Options: []*doc.ProcessorOption{
				&doc.ProcessorOption{
					Name:           "processors.CommonOptions",
					Alias:          ",squash",
					Doc:            "",
					Required:       false,
					Type:           "processors.CommonOptions",
					DefaultValue:   nil,
					PossibleValues: []string{},
					ExampleLS:      "",
				},
				&doc.ProcessorOption{
					Name:           "Expressions",
					Alias:          "expressions",
					Doc:            "list of field to set with expression's result",
					Required:       false,
					Type:           "hash",
					DefaultValue:   nil,
					PossibleValues: []string{},
					ExampleLS:      "expressions => { \"usage\" => \"[usage] * 100\" }",
				},
				&doc.ProcessorOption{
					Name:           "Templates",
					Alias:          "templates",
					Doc:            "list of field to set with a go template location",
					Required:       false,
					Type:           "hash",
					DefaultValue:   nil,
					PossibleValues: []string{},
					ExampleLS:      "templates => { \"count\" => \"{{len .data}}\", \"mail\"=>\"mytemplate.tpl\" }",
				},
				&doc.ProcessorOption{
					Name:           "Var",
					Alias:          "var",
					Doc:            "You can set variable to be used in template by using ${var}.\neach reference will be replaced by the value of the variable found in Template's content\nThe replacement is case-sensitive.",
					Required:       false,
					Type:           "hash",
					DefaultValue:   nil,
					PossibleValues: []string{},
					ExampleLS:      "var => {\"hostname\"=>\"myhost\",\"varname\"=>\"varvalue\"}",
				},
			},
		},
		Ports: []*doc.ProcessorPort{
			&doc.ProcessorPort{
				Default: true,
				Name:    "PORT_SUCCESS",
				Number:  0,
				Doc:     "",
			},
		},
	}
}
package list
// takeDropFunctions is a text/template fragment expanded per generated list
// type. It emits Take/Drop/TakeLast/DropLast/TakeWhile/DropWhile methods,
// substituting {{.TName}} (type name) and {{.PName}} (element type). The raw
// string content is generated code and must be kept verbatim.
const takeDropFunctions = `
//-------------------------------------------------------------------------------------------------
// Take returns a new {{.TName}}List containing the leading n elements of the source list.
// If n is greater than the size of the list, the whole list is returned.
func (list {{.TName}}List) Take(n int) {{.TName}}List {
if n > len(list) {
return list
} else {
return list[0:n]
}
}
// Drop returns a new {{.TName}}List without the leading n elements of the source list.
// If n is greater than the size of the list, the whole list is returned.
func (list {{.TName}}List) Drop(n int) {{.TName}}List {
l := len(list)
if n > l {
return list[l:]
} else {
return list[n:]
}
}
// TakeLast returns a new {{.TName}}List containing the trailing n elements of the source list.
// If n is greater than the size of the list, the whole list is returned.
func (list {{.TName}}List) TakeLast(n int) {{.TName}}List {
l := len(list)
if n > l {
return list
} else {
return list[l-n:]
}
}
// DropLast returns a new {{.TName}}List without the trailing n elements of the source list.
// If n is greater than the size of the list, the whole list is returned.
func (list {{.TName}}List) DropLast(n int) {{.TName}}List {
l := len(list)
if n > l {
return list[l:]
} else {
return list[0:l-n]
}
}
// TakeWhile returns a new {{.TName}}List containing the leading elements of the source list. Whilst the
// predicate p returns true, elements are added to the result. Once predicate p returns false, all remaining
// elemense are excluded.
func (list {{.TName}}List) TakeWhile(p func({{.PName}}) bool) (result {{.TName}}List) {
for _, v := range list {
if p(v) {
result = append(result, v)
} else {
return
}
}
return
}
// DropWhile returns a new {{.TName}}List containing the trailing elements of the source list. Whilst the
// predicate p returns true, elements are excluded from the result. Once predicate p returns false, all remaining
// elemense are added.
func (list {{.TName}}List) DropWhile(p func({{.PName}}) bool) (result {{.TName}}List) {
adding := false
for _, v := range list {
if !p(v) || adding {
adding = true
result = append(result, v)
}
}
return
}
`
package fitness
// A CacheFunction represents a fitness function that caches data for efficiency.
type CacheFunction interface {
Function
SetBase(function CacheFunction)
Cache() []CacheData
SetCache([]CacheData)
}
// CacheData is a cacheable fitness computation, identified by a hash of its
// inputs and carrying the computed fitness value.
type CacheData interface {
	// Equals reports whether data represents the same cached inputs.
	Equals(data CacheData) bool
	// Hash returns a hash derived from the cached inputs.
	Hash() uint64
	// Data returns the cached fitness value.
	Data() float64
	// CachedHash returns the cache-slot hash previously stored with SetCachedHash.
	CachedHash() uint32
	// SetCachedHash stores a cache-slot hash on the data.
	// (Parameter renamed from the auto-generated "uint322".)
	SetCachedHash(hash uint32)
}
// TriangleCacheData stores the triangles vertices and its fitness, and is used to cache calculations.
type TriangleCacheData struct {
aX, aY int16
bX, bY int16
cX, cY int16
fitness float64
hash uint32
}
// Data returns the cached fitness value.
func (t TriangleCacheData) Data() float64 {
	return t.fitness
}
// Equals returns if the TriangleCacheData is equal to another, comparing all
// three vertex coordinates.
// NOTE(review): panics if other is not a *TriangleCacheData — the type
// assertion has no ok-check; confirm callers never mix cache data types.
func (t TriangleCacheData) Equals(other CacheData) bool {
	tri := other.(*TriangleCacheData)
	return t.aX == tri.aX && t.aY == tri.aY &&
		t.bX == tri.bX && t.bY == tri.bY &&
		t.cX == tri.cX && t.cY == tri.cY
}
// Hash calculates the hash code of a TriangleCacheData from the sums of its
// x and y coordinates.
func (t TriangleCacheData) Hash() uint64 {
	x := int(t.aX) + int(t.bX) + int(t.cX)
	y := int(t.aY) + int(t.bY) + int(t.cY)
	return uint64((97+x)*97 + y)
}
// CachedHash returns the cache hash previously set via SetCachedHash.
func (t TriangleCacheData) CachedHash() uint32 {
	return t.hash
}
// SetCachedHash stores the cache hash; pointer receiver so the write is
// visible to the caller.
func (t *TriangleCacheData) SetCachedHash(hash uint32) {
	t.hash = hash
}
type PolygonCacheData struct {
coords []int16
fitness float64
hash uint32
}
// CachedHash returns the cache hash previously set via SetCachedHash.
func (p PolygonCacheData) CachedHash() uint32 {
	return p.hash
}
// SetCachedHash stores the cache hash; pointer receiver so the write is
// visible to the caller.
func (p *PolygonCacheData) SetCachedHash(hash uint32) {
	p.hash = hash
}
// Data returns the cached fitness value.
func (p PolygonCacheData) Data() float64 {
	return p.fitness
}
// Equals returns if the PolygonCacheData is equal to another, comparing the
// coordinate slices element-wise. (Comment previously mis-named the type.)
// NOTE(review): panics if other is not a *PolygonCacheData — the type
// assertion has no ok-check.
func (p PolygonCacheData) Equals(other CacheData) bool {
	poly := other.(*PolygonCacheData)
	if len(poly.coords) != len(p.coords) {
		return false
	}
	for i, v := range poly.coords {
		if v != p.coords[i] {
			return false
		}
	}
	return true
}
// Hash calculates the hash code of a PolygonCacheData by folding its
// coordinates with multiplier 97. (Comment previously mis-named the type.)
func (p PolygonCacheData) Hash() uint64 {
	hash := uint64(1)
	for i := 0; i < len(p.coords); i++ {
		hash = hash*97 + uint64(p.coords[i])
	}
	return hash
}
package option
import (
"fmt"
"strconv"
)
// Option is a type that allows optional values. It is similar to a Sum type
// and can either be Some(value) or None.
type Option[T any] struct {
value *some[T]
}
func (o Option[T]) String() string {
if o.IsNone() {
return "None"
}
val := printv(o.Value())
return fmt.Sprintf("Some(%v)", val)
}
type some[T any] struct {
value T
}
// Some creates an option with a value.
func Some[T any](value T) Option[T] {
return Option[T]{value: &some[T]{value: value}}
}
// None creates an option with no value.
func None[T any]() Option[T] {
return Option[T]{value: nil}
}
// IsSome tests if the option contains a value.
func (o Option[T]) IsSome() bool {
return o.value != nil
}
// IsNone tests whether the option does not contain a value.
func (o Option[T]) IsNone() bool {
return o.value == nil
}
// Value returns the value in the option.
// If the option is None, it returns the zero value for the type.
func (o Option[T]) Value() T {
if o.IsNone() {
r := new(T)
return *r
}
return o.value.value
}
// Bind applies f to the value inside input when one is present; a None input
// short-circuits to None.
func Bind[T, R any](f func(T) Option[R], input Option[T]) Option[R] {
	if input.IsSome() {
		return f(input.Value())
	}
	return None[R]()
}
// Filter returns o if the value in o matches the predicate.
// Otherwise, it returns None.
func Filter[T any](predicate func(T) bool, o Option[T]) Option[T] {
if Exists(predicate, o) {
return o
}
return None[T]()
}
// Flatten takes a nested option and returns the inner option.
func Flatten[T any](oo Option[Option[T]]) Option[T] {
if oo.IsNone() {
return None[T]()
}
return oo.Value()
}
// Map applies f to the value of o and returns the result as an Option.
// If o is None, it returns None.
func Map[T, R any](f func(T) R, o Option[T]) Option[R] {
if o.IsNone() {
return None[R]()
}
return Some(f(o.Value()))
}
// Map2 applies f to the values in both o1 and o2 as the first and second parameters and returns the result as an Option.
// If either option is None, it returns None.
func Map2[T1, T2, R any](f func(T1, T2) R, o1 Option[T1], o2 Option[T2]) Option[R] {
if o1.IsNone() || o2.IsNone() {
return None[R]()
}
return Some(f(o1.Value(), o2.Value()))
}
// Map3 applies f to the values in o1, o2, and o3 as the first, second, and third parameters and returns the result as an Option.
// If any of the options are None, it returns None.
func Map3[T1, T2, T3, R any](f func(T1, T2, T3) R, o1 Option[T1], o2 Option[T2], o3 Option[T3]) Option[R] {
if o1.IsNone() || o2.IsNone() || o3.IsNone() {
return None[R]()
}
return Some(f(o1.Value(), o2.Value(), o3.Value()))
}
// OfNullable returns None if value is nil.
// Otherwise it returns Some of the value (after dereferencing the pointer).
func OfNullable[T any](value *T) Option[T] {
if value == nil {
return None[T]()
}
return Some(*value)
}
// Lift converts the function f that returns a value and an error
// to a function that returns an Option.
func Lift[T any](f func() (T, error)) func() Option[T] {
return func() Option[T] {
r, err := f()
if err != nil {
return None[T]()
}
return Some(r)
}
}
// Lift1 converts the function f that accepts a single input and returns a value and an error
// to a function that accepts a single input and returns an Option.
func Lift1[TInput, T any](f func(TInput) (T, error)) func(TInput) Option[T] {
return func(input TInput) Option[T] {
r, err := f(input)
if err != nil {
return None[T]()
}
return Some(r)
}
}
// Lift2 converts the function f that accepts two inputs and returns a value and an error
// to a function that accepts two inputs and returns an Option.
func Lift2[TInput1, TInput2, T any](f func(TInput1, TInput2) (T, error)) func(TInput1, TInput2) Option[T] {
return func(input1 TInput1, input2 TInput2) Option[T] {
r, err := f(input1, input2)
if err != nil {
return None[T]()
}
return Some(r)
}
}
// printv renders a value for display inside Some(...): strings and runes are
// quoted via strconv, everything else uses fmt's default formatting.
func printv(v any) string {
	if s, ok := v.(string); ok {
		return strconv.Quote(s)
	}
	if r, ok := v.(rune); ok {
		return strconv.QuoteRune(r)
	}
	return fmt.Sprint(v)
}
package goja
// Ported from Rhino (https://github.com/mozilla/rhino/blob/master/src/org/mozilla/javascript/DToA.java)
import (
"bytes"
"fmt"
"math"
"math/big"
"strconv"
)
const (
frac_mask = 0xfffff
exp_shift = 20
exp_msk1 = 0x100000
exp_shiftL = 52
exp_mask_shifted = 0x7ff
frac_maskL = 0xfffffffffffff
exp_msk1L = 0x10000000000000
exp_shift1 = 20
exp_mask = 0x7ff00000
bias = 1023
p = 53
bndry_mask = 0xfffff
log2P = 1
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
)
// lo0bits returns the number of trailing (low-order) zero bits in x.
// Returns 32 when x is zero, matching the unrolled original.
func lo0bits(x uint32) (k uint32) {
	if x == 0 {
		return 32
	}
	for x&1 == 0 {
		k++
		x >>= 1
	}
	return
}
// hi0bits returns the number of leading (high-order) zero bits in x.
// Returns 32 when x is zero, matching the unrolled original.
func hi0bits(x uint32) (k uint32) {
	if x == 0 {
		return 32
	}
	for x&0x80000000 == 0 {
		k++
		x <<= 1
	}
	return
}
// stuffBits writes val into bits[offset:offset+4] in big-endian byte order.
func stuffBits(bits []byte, offset int, val uint32) {
	for i := 0; i < 4; i++ {
		bits[offset+i] = byte(val >> (24 - 8*i))
	}
}
// d2b decomposes the double d into a big integer b and an exponent e such that
// |d| = b * 2^e, with b stripped of trailing zero bits; bits is the number of
// significant bits in b. The sign of d is ignored. Ported from Rhino's DToA.
func d2b(d float64) (b *big.Int, e int32, bits uint32) {
	dBits := math.Float64bits(d)
	d0 := uint32(dBits >> 32) // high word: sign, exponent, top 20 fraction bits
	d1 := uint32(dBits)       // low word: bottom 32 fraction bits
	z := d0 & frac_mask
	d0 &= 0x7fffffff /* clear sign bit, which we ignore */
	var de, k, i uint32
	var dbl_bits []byte
	// For normalized numbers (biased exponent non-zero), restore the implicit
	// leading 1 bit of the mantissa.
	if de = (d0 >> exp_shift); de != 0 {
		z |= exp_msk1
	}
	y := d1
	if y != 0 {
		// Low word non-zero: strip its trailing zeros and pack both words
		// big-endian into 8 bytes.
		dbl_bits = make([]byte, 8)
		k = lo0bits(y)
		y >>= k
		if k != 0 {
			stuffBits(dbl_bits, 4, y|z<<(32-k))
			z >>= k
		} else {
			stuffBits(dbl_bits, 4, y)
		}
		stuffBits(dbl_bits, 0, z)
		if z != 0 {
			i = 2
		} else {
			i = 1
		}
	} else {
		// Low word zero: all significant bits live in the high word.
		dbl_bits = make([]byte, 4)
		k = lo0bits(z)
		z >>= k
		stuffBits(dbl_bits, 0, z)
		k += 32
		i = 1
	}
	if de != 0 {
		// Normalized: unbias the exponent and account for the k stripped zeros.
		e = int32(de - bias - (p - 1) + k)
		bits = p - k
	} else {
		// Denormal: fixed minimum exponent; count the actual significant bits.
		e = int32(de - bias - (p - 1) + 1 + k)
		bits = 32*i - hi0bits(z)
	}
	b = (&big.Int{}).SetBytes(dbl_bits)
	return
}
func dtobasestr(num float64, radix int) string {
var negative bool
if num < 0 {
num = -num
negative = true
}
dfloor := math.Floor(num)
ldfloor := int64(dfloor)
var intDigits string
if dfloor == float64(ldfloor) {
if negative {
ldfloor = -ldfloor
}
intDigits = strconv.FormatInt(ldfloor, radix)
} else {
floorBits := math.Float64bits(num)
exp := int(floorBits>>exp_shiftL) & exp_mask_shifted
var mantissa int64
if exp == 0 {
mantissa = int64((floorBits & frac_maskL) << 1)
} else {
mantissa = int64((floorBits & frac_maskL) | exp_msk1L)
}
if negative {
mantissa = -mantissa
}
exp -= 1075
x := big.NewInt(mantissa)
if exp > 0 {
x.Lsh(x, uint(exp))
} else if exp < 0 {
x.Rsh(x, uint(-exp))
}
intDigits = x.Text(radix)
}
if num == dfloor {
// No fraction part
return intDigits
} else {
/* We have a fraction. */
var buffer bytes.Buffer
buffer.WriteString(intDigits)
buffer.WriteByte('.')
df := num - dfloor
dBits := math.Float64bits(num)
word0 := uint32(dBits >> 32)
word1 := uint32(dBits)
b, e, _ := d2b(df)
// JS_ASSERT(e < 0);
/* At this point df = b * 2^e. e must be less than zero because 0 < df < 1. */
s2 := -int32((word0 >> exp_shift1) & (exp_mask >> exp_shift1))
if s2 == 0 {
s2 = -1
}
s2 += bias + p
/* 1/2^s2 = (nextDouble(d) - d)/2 */
// JS_ASSERT(-s2 < e);
if -s2 >= e {
panic(fmt.Errorf("-s2 >= e: %d, %d", -s2, e))
}
mlo := big.NewInt(1)
mhi := mlo
if (word1 == 0) && ((word0 & bndry_mask) == 0) && ((word0 & (exp_mask & exp_mask << 1)) != 0) {
/* The special case. Here we want to be within a quarter of the last input
significant digit instead of one half of it when the output string's value is less than d. */
s2 += log2P
mhi = big.NewInt(1 << log2P)
}
b.Lsh(b, uint(e+s2))
s := big.NewInt(1)
s.Lsh(s, uint(s2))
/* At this point we have the following:
* s = 2^s2;
* 1 > df = b/2^s2 > 0;
* (d - prevDouble(d))/2 = mlo/2^s2;
* (nextDouble(d) - d)/2 = mhi/2^s2. */
bigBase := big.NewInt(int64(radix))
done := false
m := &big.Int{}
delta := &big.Int{}
for !done {
b.Mul(b, bigBase)
b.DivMod(b, s, m)
digit := byte(b.Int64())
b, m = m, b
mlo.Mul(mlo, bigBase)
if mlo != mhi {
mhi.Mul(mhi, bigBase)
}
/* Do we yet have the shortest string that will round to d? */
j := b.Cmp(mlo)
/* j is b/2^s2 compared with mlo/2^s2. */
delta.Sub(s, mhi)
var j1 int
if delta.Sign() <= 0 {
j1 = 1
} else {
j1 = b.Cmp(delta)
}
/* j1 is b/2^s2 compared with 1 - mhi/2^s2. */
if j1 == 0 && (word1&1) == 0 {
if j > 0 {
digit++
}
done = true
} else if j < 0 || (j == 0 && ((word1 & 1) == 0)) {
if j1 > 0 {
/* Either dig or dig+1 would work here as the least significant digit.
Use whichever would produce an output value closer to d. */
b.Lsh(b, 1)
j1 = b.Cmp(s)
if j1 > 0 { /* The even test (|| (j1 == 0 && (digit & 1))) is not here because it messes up odd base output such as 3.5 in base 3. */
digit++
}
}
done = true
} else if j1 > 0 {
digit++
done = true
}
// JS_ASSERT(digit < (uint32)base);
buffer.WriteByte(digits[digit])
}
return buffer.String()
}
} | vendor/github.com/dop251/goja/dtoa.go | 0.605682 | 0.416144 | dtoa.go | starcoder |
// Package conti provides business logic of trial account calculation
package conti
import (
"fmt"
"math"
)
const decimals float64 = 10000
// postTransactionsToAccounts calculates the balance (for balance categories and the cumulative
// sums for P/L categories) per category on the basis of records of transactions.
func postTransactionsToAccounts(catsIn []Categories, records []Transactions) (catsOut []Categories, err error) {
var (
sVal map[string]float64
cVal map[string]float64
pVal *map[string]float64
cSec map[string]string
Result Report
)
// Create a map of category-value pairs
cVal = catVal(catsIn)
// Create a map of category-value pairs with opening balance values
sVal = staVal(catsIn)
// Create a pointer to this map, i.e. a pointer to category's value
pVal = &cVal
// Create a map of category-section pairs to detect category's section
cSec = catSec(catsIn)
// Calculate a starting balance per section
for j0 := range catsIn {
Result.startBal(sVal, cSec, catsIn[j0].Cat)
}
// Calculate a starting profit (loss)
// Note: this result doesn't reflect the actual starting P/L value,
// as the retained profit or the loss carried forward is in
// 'Equity, P&L Account' with a user-defined key
Result.Profit.Profit.Sta = Result.Profit.Revenue.Sta - Result.Profit.Expense.Sta
// Allocate space for a slice of categories
catsOut = make([]Categories, len(catsIn))
for i := range records {
// Update a Source-account value. Set value through the pointer to
// category's value.
// Note: Some sections ('Revenues', 'Liabilities' and 'Equity') require
// special treatment. The applied sign depends on the type (section) of the source
// category.
if specialSection(cSec, records[i].Source) {
*pVal = addBal(cVal, records[i].Source, +records[i].Amount)
} else {
*pVal = addBal(cVal, records[i].Source, -records[i].Amount)
}
// Update a Purpose-account value. Set value through the pointer to
// category's value.
// Note: some sections ('Revenues', 'Liabilities' and 'Equity') require
// special treatment. The applied sign depends on the type (section) of the source
// category.
if specialSection(cSec, records[i].Purpose) {
*pVal = addBal(cVal, records[i].Purpose, -records[i].Amount)
} else {
*pVal = addBal(cVal, records[i].Purpose, +records[i].Amount)
}
}
// Assign values to each element of the slice of categories
for j := range catsIn {
catsOut[j].Cat = catsIn[j].Cat
catsOut[j].Sect = catsIn[j].Sect
catsOut[j].Name = catsIn[j].Name
catsOut[j].Bal.Sta = catsIn[j].Bal.Sta
catsOut[j].Bal.Dif = cVal[catsIn[j].Cat]
catsOut[j].Bal.End = catsOut[j].Bal.Sta + catsOut[j].Bal.Dif
// Round the balance change and the ending balance to the required number
// of decimals
catsOut[j].Bal.Dif = math.Round(catsOut[j].Bal.Dif*decimals) / decimals
catsOut[j].Bal.End = math.Round(catsOut[j].Bal.End*decimals) / decimals
// Calculate a difference between the balance at the end and the balance at
// the beginning per section
Result.changeBal(cSec, catsOut[j].Cat, catsOut[j].Bal.Dif)
}
Result.Profit.Profit.End = Result.Profit.Revenue.End - Result.Profit.Expense.End
// Calculate an ending balance per section
Result.finalBal()
Result.roundBal()
fmt.Println("Balance, Assets: ", Result.Balance.Assets)
fmt.Println("Balance, Liabilities: ", Result.Balance.Liabls)
fmt.Println("Balance, Equity: ", Result.Balance.Equity)
fmt.Println("Balance, Retained Result:", Result.Balance.Retained)
fmt.Println("P&L, Revenues:", Result.Profit.Revenue)
fmt.Println("P&L, Expenses:", Result.Profit.Expense)
fmt.Println("P&L, Profit: ", Result.Profit.Profit)
return
} | conti/calc.go | 0.705379 | 0.52007 | calc.go | starcoder |
package bst
import (
"math"
"sync"
)
// NewBSTree returns an empty binary search tree.
func NewBSTree() *BSTree {
return &BSTree{
RWMutex: sync.RWMutex{},
root: nil,
}
}
// Root returns the payload of the root node of the tree.
func (t *BSTree) Root() interface{} {
t.RLock()
defer t.RUnlock()
if t.root == nil {
return nil
}
return t.root.payload
}
// Height returns the height (max depth) of the tree. Returns -1 if the tree
// has no nodes. A (rooted) tree with only a node (the root) has a height of
// zero.
func (t *BSTree) Height() int {
t.RLock()
defer t.RUnlock()
return int(height(t.root))
}
// Upsert inserts or updates an item. Runs in O(lg n) time on average.
func (t *BSTree) Upsert(key int64, payload interface{}) {
t.Lock()
defer t.Unlock()
if existing := search(t.root, key); existing != nil {
existing.payload = payload
return
}
var (
parent *node
x = t.root
newNode = &node{
key: key,
payload: payload,
}
)
for x != nil {
parent = x
if newNode.key < parent.key {
x = x.left
} else {
x = x.right
}
}
newNode.parent = parent
switch {
case parent == nil:
t.root = newNode
case newNode.key < parent.key:
parent.left = newNode
default:
parent.right = newNode
}
}
// Search searches for a node based on its key and returns the payload.
func (t *BSTree) Search(key int64) interface{} {
t.RLock()
defer t.RUnlock()
if t.root == nil {
return nil
}
n := search(t.root, key)
if n == nil {
return nil
}
return n.payload
}
// Min returns the payload of the Node with the lowest key, or nil.
func (t *BSTree) Min() interface{} {
t.RLock()
defer t.RUnlock()
n := min(t.root)
if n == nil {
return nil
}
return n.payload
}
// Max returns the payload of the Node with the highest key, or nil.
func (t *BSTree) Max() interface{} {
t.RLock()
defer t.RUnlock()
n := max(t.root)
if n == nil {
return nil
}
return n.payload
}
// Successor returns the next highest neighbour (key-wise) of the Node with the
// passed key.
func (t *BSTree) Successor(key int64) interface{} {
t.RLock()
defer t.RUnlock()
n := successor(search(t.root, key))
if n == nil {
return nil
}
return n.payload
}
// Delete deletes a node with a given key. This runs in O(h) time with h being
// the height of the tree.
func (t *BSTree) Delete(key int64) {
t.Lock()
defer t.Unlock()
if n := search(t.root, key); n != nil {
t.delete(n)
}
}
// delete removes node from the tree. The caller must hold the write lock.
// This follows the standard (CLRS) three-case BST deletion via transplant.
func (t *BSTree) delete(node *node) {
	switch {
	// If the node has no left subtree, replace it with its right subtree.
	case node.left == nil:
		t.transplant(node, node.right)
	// If the node has a left subtree but no right one, replace it with its
	// left subtree. (The previous comment wrongly said "right subtree".)
	case node.right == nil:
		t.transplant(node, node.left)
	// Node has two children.
	default:
		// The node's successor must be the smallest key in the right subtree,
		// which has no left child.
		succ := min(node.right)
		// If the successor is not the node's direct right child, detach it
		// (replace it with its own right subtree) and hang the node's right
		// subtree under it.
		if succ.parent != node {
			t.transplant(succ, succ.right)
			succ.right = node.right
			succ.right.parent = succ
		}
		// Put the successor in the node's place and attach the left subtree.
		t.transplant(node, succ)
		succ.left = node.left
		// BUG FIX: the left child's parent must point at the successor, not at
		// the node being removed; the old code left a dangling parent link.
		succ.left.parent = succ
	}
}
func height(node *node) float64 {
if node == nil {
return -1
}
return 1 + math.Max(height(node.left), height(node.right))
}
func successor(node *node) *node {
if node == nil {
return nil
}
if node.right != nil {
return min(node.right)
}
parent := node.parent
for parent != nil && node == parent.right {
node = parent
parent = node.parent
}
return parent
}
func max(node *node) *node {
for node != nil && node.right != nil {
node = node.right
}
return node
}
func min(node *node) *node {
for node != nil && node.left != nil {
node = node.left
}
return node
}
func search(node *node, key int64) *node {
if node == nil || node.key == key {
return node
}
for node != nil && node.key != key {
if key > node.key {
node = node.right
} else {
node = node.left
}
}
return node
}
// transplant replaces one subtree of a node as a child of its parent, with
// another subtree.
func (t *BSTree) transplant(nodeA, nodeB *node) {
if nodeA == nil {
return
}
switch {
// If nodeA is the root, nodeB will be root now.
case nodeA.parent == nil:
t.root = nodeB
// If nodeA is a left-child, replace with nodeB.
case nodeA == nodeA.parent.left:
nodeA.parent.left = nodeB
// If nodeA is a right-child, replace with nodeB.
default:
nodeA.parent.right = nodeB
}
// Update parent relationship.
if nodeB != nil {
nodeB.parent = nodeA.parent
}
} | bst/tree.go | 0.789193 | 0.538559 | tree.go | starcoder |
package zsample
import (
"time"
)
func NewFloatSampler(size int, frequency time.Duration) *FloatSampler {
return &FloatSampler{
size: size,
frequency: frequency,
samples: make([]float64, size),
times: make([]time.Time, size),
}
}
type FloatSampler struct {
size int
frequency time.Duration
index int
samples []float64
times []time.Time
}
func (s *FloatSampler) Add(val float64) (added bool) {
n := time.Now()
if n.Before(s.times[s.index].Add(s.frequency)) {
return false
}
s.samples[s.index] = val
s.times[s.index] = n
s.index++
if s.index == s.size {
s.index = 0
}
return true
}
// ExtrapolateValueLinearNano linearly extrapolates the sampled value at time t.
// The slope is taken between the oldest retained sample (slot s.index, the next
// slot to be overwritten) and the newest one (slot s.index-1).
// BUG FIX: the extrapolation now multiplies the slope by the time elapsed since
// the base sample rather than by the absolute Unix-nanosecond timestamp, which
// made the old result wildly wrong. (Parameter also renamed from "time", which
// shadowed the time package.)
func (s FloatSampler) ExtrapolateValueLinearNano(t time.Time) float64 {
	prevI := s.index - 1
	if prevI < 0 {
		prevI += s.size
	}
	slope := (s.samples[s.index] - s.samples[prevI]) / float64(s.times[s.index].UnixNano()-s.times[prevI].UnixNano())
	return s.samples[s.index] + float64(t.UnixNano()-s.times[s.index].UnixNano())*slope
}
func (s FloatSampler) ExtrapolateTimeLinearNano(val float64) time.Time {
prevI := s.index - 1
if prevI < 0 {
prevI += s.size
}
t := float64(s.times[s.index].UnixNano()) + (val-s.samples[s.index])*float64(s.times[s.index].UnixNano()-s.times[prevI].UnixNano())/(s.samples[s.index]-s.samples[prevI])
return time.Unix(0, int64(t))
}
// ExtrapolateTimeLinearMillis estimates the time at which the sampled value
// will reach val, using the average ms-per-unit slope over consecutive sample
// pairs in the ring (the seam between the newest and oldest slot is skipped).
// The dead prevI computation present in the old code has been removed.
// NOTE(review): the sum has s.size-1 terms but is divided by s.size, biasing
// the slope slightly low; and a pair with equal sample values divides by zero
// (±Inf) — confirm whether either is intended before relying on precision.
func (s FloatSampler) ExtrapolateTimeLinearMillis(val float64) time.Time {
	var slope float64
	for i := 1; i < s.size; i++ {
		index := (s.index + i) % s.size
		prevIndex := (s.index + i - 1) % s.size
		slope += float64(s.times[index].UnixMilli()-s.times[prevIndex].UnixMilli()) / (s.samples[index] - s.samples[prevIndex])
	}
	slope = slope / float64(s.size)
	t := float64(s.times[s.index].UnixMilli()) + (val-s.samples[s.index])*slope
	return time.UnixMilli(int64(t))
}
func (s FloatSampler) EstimateSecondsUntil(value float64) time.Duration {
timeAtValue := s.ExtrapolateTimeLinearMillis(value)
return timeAtValue.Sub(time.Now()).Round(time.Second)
}
func NewIntSampler(size int, frequency time.Duration) *IntSampler {
return &IntSampler{
FloatSampler: *NewFloatSampler(size, frequency),
}
}
type IntSampler struct {
FloatSampler
}
func (s *IntSampler) Add(value int) (added bool) {
return s.FloatSampler.Add(float64(value))
}
func (s *IntSampler) EstimateSecondsUntil(val int) time.Duration {
return s.FloatSampler.EstimateSecondsUntil(float64(val))
} | zsample/zsample.go | 0.763043 | 0.538073 | zsample.go | starcoder |
package qsort
type smallsort64 func(data []uint64, base int, swap func(int, int))
type partition64 func(data []uint64, base int, swap func(int, int)) int
func quicksort64(data []uint64, base, cutoff int, smallsort smallsort64, partition partition64, swap func(int, int)) {
for len(data) > 1 {
if len(data) <= cutoff/8 {
smallsort(data, base, swap)
return
}
medianOfThree64(data, base, swap)
p := partition(data, base, swap)
if p < len(data)-p { // recurse on the smaller side
quicksort64(data[:p], base, cutoff, smallsort, partition, swap)
data = data[p+1:]
base = base + p + 1
} else {
quicksort64(data[p+1:], base+p+1, cutoff, smallsort, partition, swap)
data = data[:p]
}
}
}
// bubblesort64NoSwap1 sorts data in place, ascending, with a bubble-sort pass
// that carries the running maximum to the end of each sweep. The swap callback
// is intentionally never invoked (the "NoSwap" variant).
func bubblesort64NoSwap1(data []uint64, base int, swap func(int, int)) {
	for end := len(data); end > 1; end-- {
		cur := data[0]
		for k := 1; k < end; k++ {
			next := data[k]
			if next < cur {
				// cur is still the max of the prefix; shift next down.
				data[k-1] = next
			} else {
				// next becomes the new running maximum.
				data[k-1] = cur
				cur = next
			}
		}
		data[end-1] = cur
	}
}
func bubblesort64NoSwap2(data []uint64, base int, swap func(int, int)) {
for i := len(data); i > 1; i -= 2 {
x := data[0]
y := data[1]
if y < x {
x, y = y, x
}
for j := 2; j < i; j++ {
z := data[j]
w := uint64(0)
v := uint64(0)
if y <= z {
w = y
} else {
w = z
}
if y <= z {
y = z
}
if x <= z {
v = x
} else {
v = z
}
if x <= z {
x = w
}
data[j-2] = v
}
data[i-2] = x
data[i-1] = y
}
}
// insertionsort64 sorts data in place, ascending, mirroring every element move
// through callswap so an external permutation can be kept in sync.
func insertionsort64(data []uint64, base int, swap func(int, int)) {
	for i := 1; i < len(data); i++ {
		// Bubble data[i] left until it is no smaller than its neighbour.
		// (data[j] always holds the element being inserted.)
		j := i
		for j > 0 && data[j] < data[j-1] {
			data[j], data[j-1] = data[j-1], data[j]
			callswap(base, swap, j, j-1)
			j--
		}
	}
}
func medianOfThree64(data []uint64, base int, swap func(int, int)) {
end := len(data) - 1
mid := len(data) / 2
if data[0] < data[mid] {
data[mid], data[0] = data[0], data[mid]
callswap(base, swap, mid, 0)
}
if data[end] < data[0] {
data[0], data[end] = data[end], data[0]
callswap(base, swap, 0, end)
if data[0] < data[mid] {
data[mid], data[0] = data[0], data[mid]
callswap(base, swap, mid, 0)
}
}
}
func hoarePartition64(data []uint64, base int, swap func(int, int)) int {
i, j := 1, len(data)-1
if len(data) > 0 {
pivot := data[0]
for j < len(data) {
for i < len(data) && data[i] < pivot {
i++
}
for j > 0 && pivot < data[j] {
j--
}
if i >= j {
break
}
data[i], data[j] = data[j], data[i]
callswap(base, swap, i, j)
i++
j--
}
data[0], data[j] = data[j], data[0]
callswap(base, swap, 0, j)
}
return j
}
func hybridPartition64(data, scratch []uint64) int {
pivot, lo, hi, limit := 0, 1, len(data)-1, len(scratch)
p := distributeForward64(data, scratch, limit, lo, hi)
if hi-p <= limit {
scratch = scratch[limit-hi+p:]
} else {
lo = p + limit
for {
hi = distributeBackward64(data, data[lo+1-limit:], limit, lo, hi) - limit
if hi < lo {
p = hi
break
}
lo = distributeForward64(data, data[hi+1:], limit, lo, hi) + limit
if hi < lo {
p = lo - limit
break
}
}
}
copy(data[p+1:], scratch[:])
data[pivot], data[p] = data[p], data[pivot]
return p
} | qsort/sort8.go | 0.510741 | 0.424472 | sort8.go | starcoder |
package astits
import (
"fmt"
"github.com/asticode/go-astilog"
"github.com/pkg/errors"
)
// PSI table IDs
const (
PSITableTypeBAT = "BAT"
PSITableTypeDIT = "DIT"
PSITableTypeEIT = "EIT"
PSITableTypeNIT = "NIT"
PSITableTypeNull = "Null"
PSITableTypePAT = "PAT"
PSITableTypePMT = "PMT"
PSITableTypeRST = "RST"
PSITableTypeSDT = "SDT"
PSITableTypeSIT = "SIT"
PSITableTypeST = "ST"
PSITableTypeTDT = "TDT"
PSITableTypeTOT = "TOT"
PSITableTypeUnknown = "Unknown"
)
// PSIData represents a PSI data
// https://en.wikipedia.org/wiki/Program-specific_information
type PSIData struct {
PointerField int // Present at the start of the TS packet payload signaled by the payload_unit_start_indicator bit in the TS header. Used to set packet alignment bytes or content before the start of tabled payload data.
Sections []*PSISection
}
// PSISection represents a PSI section
type PSISection struct {
CRC32 uint32 // A checksum of the entire table excluding the pointer field, pointer filler bytes and the trailing CRC32.
Header *PSISectionHeader
Syntax *PSISectionSyntax
}
// PSISectionHeader represents a PSI section header
type PSISectionHeader struct {
PrivateBit bool // The PAT, PMT, and CAT all set this to 0. Other tables set this to 1.
SectionLength uint16 // The number of bytes that follow for the syntax section (with CRC value) and/or table data. These bytes must not exceed a value of 1021.
SectionSyntaxIndicator bool // A flag that indicates if the syntax section follows the section length. The PAT, PMT, and CAT all set this to 1.
TableID int // Table Identifier, that defines the structure of the syntax section and other contained data. As an exception, if this is the byte that immediately follow previous table section and is set to 0xFF, then it indicates that the repeat of table section end here and the rest of TS data payload shall be stuffed with 0xFF. Consequently the value 0xFF shall not be used for the Table Identifier.
TableType string
}
// PSISectionSyntax represents a PSI section syntax
type PSISectionSyntax struct {
Data *PSISectionSyntaxData
Header *PSISectionSyntaxHeader
}
// PSISectionSyntaxHeader represents a PSI section syntax header
type PSISectionSyntaxHeader struct {
CurrentNextIndicator bool // Indicates if data is current in effect or is for future use. If the bit is flagged on, then the data is to be used at the present moment.
LastSectionNumber uint8 // This indicates which table is the last table in the sequence of tables.
SectionNumber uint8 // This is an index indicating which table this is in a related sequence of tables. The first table starts from 0.
TableIDExtension uint16 // Informational only identifier. The PAT uses this for the transport stream identifier and the PMT uses this for the Program number.
VersionNumber uint8 // Syntax version number. Incremented when data is changed and wrapped around on overflow for values greater than 32.
}
// PSISectionSyntaxData represents a PSI section syntax data
type PSISectionSyntaxData struct {
EIT *EITData
NIT *NITData
PAT *PATData
PMT *PMTData
SDT *SDTData
TOT *TOTData
}
// parsePSIData parses a PSI data
func parsePSIData(i []byte) (d *PSIData, err error) {
// Init data
d = &PSIData{}
var offset int
// Pointer field
d.PointerField = int(i[offset])
offset += 1
// Pointer filler bytes
offset += d.PointerField
// Parse sections
var s *PSISection
var stop bool
for offset < len(i) && !stop {
if s, stop, err = parsePSISection(i, &offset); err != nil {
err = errors.Wrap(err, "astits: parsing PSI table failed")
return
}
d.Sections = append(d.Sections, s)
}
return
}
// parsePSISection parses a PSI section
func parsePSISection(i []byte, offset *int) (s *PSISection, stop bool, err error) {
// Init section
s = &PSISection{}
// Parse header
var offsetStart, offsetSectionsEnd, offsetEnd int
s.Header, offsetStart, _, offsetSectionsEnd, offsetEnd = parsePSISectionHeader(i, offset)
// Check whether we need to stop the parsing
if shouldStopPSIParsing(s.Header.TableType) {
stop = true
return
}
// Check whether there's a syntax section
if s.Header.SectionLength > 0 {
// Parse syntax
s.Syntax = parsePSISectionSyntax(i, offset, s.Header, offsetSectionsEnd)
// Process CRC32
if hasCRC32(s.Header.TableType) {
// Parse CRC32
s.CRC32 = parseCRC32(i[offsetSectionsEnd:offsetEnd])
*offset += 4
// Check CRC32
var c = computeCRC32(i[offsetStart:offsetSectionsEnd])
if c != s.CRC32 {
err = fmt.Errorf("astits: Table CRC32 %x != computed CRC32 %x", s.CRC32, c)
return
}
}
}
return
}
// parseCRC32 reads the trailing 4 bytes of i as a big-endian CRC32.
func parseCRC32(i []byte) uint32 {
	var crc uint32
	for _, b := range i[len(i)-4:] {
		crc = crc<<8 | uint32(b)
	}
	return crc
}
// computeCRC32 computes the CRC-32/MPEG-2 checksum used by PSI sections
// (poly 0x04C11DB7, init 0xffffffff, MSB-first, no reflection, no final xor).
// https://stackoverflow.com/questions/35034042/how-to-calculate-crc32-in-psi-si-packet
func computeCRC32(i []byte) (o uint32) {
	o = 0xffffffff
	for _, b := range i {
		// Fold the byte into the top of the register, then clock out 8 bits.
		o ^= uint32(b) << 24
		for bit := 0; bit < 8; bit++ {
			if o&0x80000000 != 0 {
				o = o<<1 ^ 0x04C11DB7
			} else {
				o <<= 1
			}
		}
	}
	return
}
// shouldStopPSIParsing checks whether the PSI parsing should be stopped
func shouldStopPSIParsing(tableType string) bool {
return tableType == PSITableTypeNull || tableType == PSITableTypeUnknown
}
// parsePSISectionHeader parses a PSI section header
func parsePSISectionHeader(i []byte, offset *int) (h *PSISectionHeader, offsetStart, offsetSectionsStart, offsetSectionsEnd, offsetEnd int) {
// Init
h = &PSISectionHeader{}
offsetStart = *offset
// Table ID
h.TableID = int(i[*offset])
*offset += 1
// Table type
h.TableType = psiTableType(h.TableID)
// Check whether we need to stop the parsing
if shouldStopPSIParsing(h.TableType) {
return
}
// Section syntax indicator
h.SectionSyntaxIndicator = i[*offset]&0x80 > 0
// Private bit
h.PrivateBit = i[*offset]&0x40 > 0
// Section length
h.SectionLength = uint16(i[*offset]&0xf)<<8 | uint16(i[*offset+1])
*offset += 2
// Offsets
offsetSectionsStart = *offset
offsetEnd = offsetSectionsStart + int(h.SectionLength)
offsetSectionsEnd = offsetEnd
if hasCRC32(h.TableType) {
offsetSectionsEnd -= 4
}
return
}
// hasCRC32 checks whether the table has a CRC32
func hasCRC32(tableType string) bool {
return tableType == PSITableTypePAT ||
tableType == PSITableTypePMT ||
tableType == PSITableTypeEIT ||
tableType == PSITableTypeNIT ||
tableType == PSITableTypeTOT ||
tableType == PSITableTypeSDT
}
// psiTableType returns the psi table type based on the table id
// Page: 28 | https://www.dvb.org/resources/public/standards/a38_dvb-si_specification.pdf
func psiTableType(tableID int) string {
	// Single-valued table IDs are dispatched directly on the value.
	switch tableID {
	case 0x4a:
		return PSITableTypeBAT
	case 0x7e:
		return PSITableTypeDIT
	case 0x40, 0x41:
		return PSITableTypeNIT
	case 0xff:
		return PSITableTypeNull
	case 0x00:
		return PSITableTypePAT
	case 0x02:
		return PSITableTypePMT
	case 0x71:
		return PSITableTypeRST
	case 0x42, 0x46:
		return PSITableTypeSDT
	case 0x7f:
		return PSITableTypeSIT
	case 0x72:
		return PSITableTypeST
	case 0x70:
		return PSITableTypeTDT
	case 0x73:
		return PSITableTypeTOT
	}
	// The EIT ids span a range that a value switch cannot express.
	if tableID >= 0x4e && tableID <= 0x6f {
		return PSITableTypeEIT
	}
	// TODO Remove this log
	astilog.Debugf("astits: unlisted PSI table ID %d", tableID)
	return PSITableTypeUnknown
}
// parsePSISectionSyntax parses a PSI section syntax
//
// The syntax header is only present for table types that define one
// (see hasPSISyntaxHeader); the data part is parsed in every case.
// NOTE(review): for types without a syntax header, s.Header is nil when
// passed to parsePSISectionSyntaxData — confirm those types never
// dereference it there.
func parsePSISectionSyntax(i []byte, offset *int, h *PSISectionHeader, offsetSectionsEnd int) (s *PSISectionSyntax) {
	// Init
	s = &PSISectionSyntax{}
	// Header
	if hasPSISyntaxHeader(h.TableType) {
		s.Header = parsePSISectionSyntaxHeader(i, offset)
	}
	// Parse data
	s.Data = parsePSISectionSyntaxData(i, offset, h, s.Header, offsetSectionsEnd)
	return
}
// hasPSISyntaxHeader reports whether sections of the given table type
// begin with a syntax header.
func hasPSISyntaxHeader(tableType string) bool {
	switch tableType {
	case PSITableTypeEIT, PSITableTypeNIT, PSITableTypePAT,
		PSITableTypePMT, PSITableTypeSDT:
		return true
	}
	return false
}
// parsePSISectionSyntaxHeader parses a PSI section syntax header
//
// Consumes 5 bytes from i, advancing *offset.
func parsePSISectionSyntaxHeader(i []byte, offset *int) (h *PSISectionSyntaxHeader) {
	// Init
	h = &PSISectionSyntaxHeader{}
	// Table ID extension (16 bits, big endian)
	h.TableIDExtension = uint16(i[*offset])<<8 | uint16(i[*offset+1])
	*offset += 2
	// Version number: 5 bits taken from bits 1-5 of this byte
	h.VersionNumber = uint8(i[*offset]&0x3f) >> 1
	// Current/Next indicator: lowest bit of the same byte
	h.CurrentNextIndicator = i[*offset]&0x1 > 0
	*offset += 1
	// Section number
	h.SectionNumber = uint8(i[*offset])
	*offset += 1
	// Last section number
	h.LastSectionNumber = uint8(i[*offset])
	*offset += 1
	return
}
// parsePSISectionSyntaxData parses a PSI section data
//
// Only EIT, NIT, PAT, PMT, SDT and TOT are currently decoded; the
// remaining table types are TODOs and leave d empty.
func parsePSISectionSyntaxData(i []byte, offset *int, h *PSISectionHeader, sh *PSISectionSyntaxHeader, offsetSectionsEnd int) (d *PSISectionSyntaxData) {
	// Init
	d = &PSISectionSyntaxData{}
	// Switch on table type
	switch h.TableType {
	case PSITableTypeBAT:
		// TODO Parse BAT
	case PSITableTypeDIT:
		// TODO Parse DIT
	case PSITableTypeEIT:
		d.EIT = parseEITSection(i, offset, offsetSectionsEnd, sh.TableIDExtension)
	case PSITableTypeNIT:
		d.NIT = parseNITSection(i, offset, sh.TableIDExtension)
	case PSITableTypePAT:
		d.PAT = parsePATSection(i, offset, offsetSectionsEnd, sh.TableIDExtension)
	case PSITableTypePMT:
		d.PMT = parsePMTSection(i, offset, offsetSectionsEnd, sh.TableIDExtension)
	case PSITableTypeRST:
		// TODO Parse RST
	case PSITableTypeSDT:
		d.SDT = parseSDTSection(i, offset, offsetSectionsEnd, sh.TableIDExtension)
	case PSITableTypeSIT:
		// TODO Parse SIT
	case PSITableTypeST:
		// TODO Parse ST
	case PSITableTypeTOT:
		d.TOT = parseTOTSection(i, offset)
	case PSITableTypeTDT:
		// TODO Parse TDT
	}
	return
}
// toData parses the PSI tables and returns a set of Data
//
// Only sections whose table type is actually decoded (EIT, NIT, PAT,
// PMT, SDT, TOT) yield a Data item; other sections are skipped.
func (d *PSIData) toData(firstPacket *Packet, pid uint16) (ds []*Data) {
	// Loop through sections
	for _, s := range d.Sections {
		// Switch on table type
		switch s.Header.TableType {
		case PSITableTypeEIT:
			ds = append(ds, &Data{EIT: s.Syntax.Data.EIT, FirstPacket: firstPacket, PID: pid})
		case PSITableTypeNIT:
			ds = append(ds, &Data{FirstPacket: firstPacket, NIT: s.Syntax.Data.NIT, PID: pid})
		case PSITableTypePAT:
			ds = append(ds, &Data{FirstPacket: firstPacket, PAT: s.Syntax.Data.PAT, PID: pid})
		case PSITableTypePMT:
			ds = append(ds, &Data{FirstPacket: firstPacket, PID: pid, PMT: s.Syntax.Data.PMT})
		case PSITableTypeSDT:
			ds = append(ds, &Data{FirstPacket: firstPacket, PID: pid, SDT: s.Syntax.Data.SDT})
		case PSITableTypeTOT:
			ds = append(ds, &Data{FirstPacket: firstPacket, PID: pid, TOT: s.Syntax.Data.TOT})
		}
	}
	return
} | data_psi.go | 0.609175 | 0.467271 | data_psi.go | starcoder |
package keys
import (
"bytes"
"encoding/hex"
"fmt"
"math/big"
"prencrypt/point"
"prencrypt/util"
)
// PublicKey is an elliptic-curve public key, represented by the curve
// point it corresponds to.
type PublicKey struct {
	Point *point.Point
}
// NewPublicKeyFromHex decodes a hex-encoded public key and parses it via
// NewPublicKeyFromBytes.
func NewPublicKeyFromHex(s string) (*PublicKey, error) {
	raw, err := hex.DecodeString(s)
	if err != nil {
		return nil, fmt.Errorf("cannot decode hex string: %v", err)
	}
	return NewPublicKeyFromBytes(raw)
}
// NewPublicKeyFromBytes parses a SEC1-encoded public key: either the
// 33-byte compressed form (0x02/0x03 prefix) or the 65-byte uncompressed
// form (0x04 prefix). The point is validated against the curve.
func NewPublicKeyFromBytes(b []byte) (*PublicKey, error) {
	// Guard the prefix access; the original panicked on empty input.
	if len(b) == 0 {
		return nil, fmt.Errorf("cannot parse public key")
	}
	switch b[0] {
	case 0x02, 0x03:
		if len(b) != 33 {
			return nil, fmt.Errorf("cannot parse public key")
		}
		x := new(big.Int).SetBytes(b[1:])
		var ybit uint
		switch b[0] {
		case 0x02:
			ybit = 0
		case 0x03:
			ybit = 1
		}
		if x.Cmp(util.Curve.Params().P) >= 0 {
			return nil, fmt.Errorf("cannot parse public key")
		}
		// Recover y from the curve equation y^2 = x^3 + b (mod p).
		var y, x3b big.Int
		x3b.Mul(x, x)
		x3b.Mul(&x3b, x)
		x3b.Add(&x3b, util.Curve.Params().B)
		x3b.Mod(&x3b, util.Curve.Params().P)
		if z := y.ModSqrt(&x3b, util.Curve.Params().P); z == nil {
			return nil, fmt.Errorf("cannot parse public key")
		}
		// Pick the square root whose parity matches the prefix bit.
		if y.Bit(0) != ybit {
			y.Sub(util.Curve.Params().P, &y)
		}
		// Only possible when no root of the requested parity exists (y == 0).
		if y.Bit(0) != ybit {
			return nil, fmt.Errorf("incorrectly encoded X and Y bit")
		}
		return &PublicKey{
			Point: &point.Point{
				Curve: util.Curve,
				X:     x,
				Y:     &y,
			},
		}, nil
	case 0x04:
		if len(b) != 65 {
			return nil, fmt.Errorf("cannot parse public key")
		}
		x := new(big.Int).SetBytes(b[1:33])
		y := new(big.Int).SetBytes(b[33:])
		if x.Cmp(util.Curve.Params().P) >= 0 || y.Cmp(util.Curve.Params().P) >= 0 {
			return nil, fmt.Errorf("cannot parse public key")
		}
		// Reject points not satisfying y^2 = x^3 + b (mod p). The previous
		// check compared y - x^2 - b against zero, which neither matched
		// the curve equation nor rejected off-curve points.
		x3b := new(big.Int).Mul(x, x)
		x3b.Mul(x3b, x)
		x3b.Add(x3b, util.Curve.Params().B)
		x3b.Mod(x3b, util.Curve.Params().P)
		y2 := new(big.Int).Mul(y, y)
		y2.Mod(y2, util.Curve.Params().P)
		if y2.Cmp(x3b) != 0 {
			return nil, fmt.Errorf("cannot parse public key")
		}
		return &PublicKey{
			Point: &point.Point{
				Curve: util.Curve,
				X:     x,
				Y:     y,
			},
		}, nil
	default:
		return nil, fmt.Errorf("cannot parse public key")
	}
}
// Bytes serializes the public key in SEC1 format: 33 bytes compressed
// (0x02/0x03 prefix encoding the parity of Y) or 65 bytes uncompressed
// (0x04 prefix). Coordinates shorter than 32 bytes are left-padded with
// zeros in a single copy instead of prepending one byte per iteration.
func (k *PublicKey) Bytes(compressed bool) []byte {
	x := k.Point.X.Bytes()
	if len(x) < 32 {
		padded := make([]byte, 32)
		copy(padded[32-len(x):], x)
		x = padded
	}
	if compressed {
		// If odd
		if k.Point.Y.Bit(0) != 0 {
			return bytes.Join([][]byte{{0x03}, x}, nil)
		}
		// If even
		return bytes.Join([][]byte{{0x02}, x}, nil)
	}
	y := k.Point.Y.Bytes()
	if len(y) < 32 {
		padded := make([]byte, 32)
		copy(padded[32-len(y):], y)
		y = padded
	}
	return bytes.Join([][]byte{{0x04}, x, y}, nil)
}
// Hex returns the hex encoding of the serialized public key
// (compressed or uncompressed, see Bytes).
func (k *PublicKey) Hex(compressed bool) string {
	return hex.EncodeToString(k.Bytes(compressed))
} | keys/publickey.go | 0.640748 | 0.479077 | publickey.go | starcoder |
Common 3D shapes.
*/
//-----------------------------------------------------------------------------
package sdf
import "math"
//-----------------------------------------------------------------------------
// Counter Bored Hole
//
// CounterBored_Hole3D returns a hole of radius r with a wider
// cylindrical recess (the counter bore) at its top.
func CounterBored_Hole3D(
	l float64, // total length
	r float64, // hole radius
	cb_r float64, // counter bore radius
	cb_d float64, // counter bore depth
) SDF3 {
	s0 := Cylinder3D(l, r, 0)
	s1 := Cylinder3D(cb_d, cb_r, 0)
	// shift the counter bore so it is flush with the top of the hole
	s1 = Transform3D(s1, Translate3d(V3{0, 0, (l - cb_d) / 2}))
	return Union3D(s0, s1)
}
// Chamfered Hole (45 degrees)
//
// Chamfered_Hole3D returns a hole of radius r with a 45-degree conical
// chamfer of radius ch_r at its top.
func Chamfered_Hole3D(
	l float64, // total length
	r float64, // hole radius
	ch_r float64, // chamfer radius
) SDF3 {
	s0 := Cylinder3D(l, r, 0)
	// a 45-degree chamfer: the cone widens by ch_r over a height of ch_r
	s1 := Cone3D(ch_r, r, r+ch_r, 0)
	s1 = Transform3D(s1, Translate3d(V3{0, 0, (l - ch_r) / 2}))
	return Union3D(s0, s1)
}
// Countersunk Hole (45 degrees)
//
// CounterSunk_Hole3D is a chamfered hole whose chamfer radius equals the
// hole radius.
func CounterSunk_Hole3D(
	l float64, // total length
	r float64, // hole radius
) SDF3 {
	return Chamfered_Hole3D(l, r, r)
}
//-----------------------------------------------------------------------------
// Return a rounded hex head for a nut or bolt.
// round selects which faces are dome-rounded by intersecting with a sphere.
func HexHead3D(
	r float64, // radius
	h float64, // height
	round string, // (t)top, (b)bottom, (tb)top/bottom
) SDF3 {
	// basic hex body
	corner_round := r * 0.08
	hex_2d := Polygon2D(Nagon(6, r-corner_round))
	hex_2d = Offset2D(hex_2d, corner_round)
	hex_3d := Extrude3D(hex_2d, h)
	// round out the top and/or bottom as required
	if round != "" {
		top_round := r * 1.6
		// distance from the axis to a flat of the hexagon
		d := r * math.Cos(DtoR(30))
		sphere_3d := Sphere3D(top_round)
		// place the sphere so its surface meets the head at the flats
		z_ofs := math.Sqrt(top_round*top_round-d*d) - h/2
		if round == "t" || round == "tb" {
			hex_3d = Intersect3D(hex_3d, Transform3D(sphere_3d, Translate3d(V3{0, 0, -z_ofs})))
		}
		if round == "b" || round == "tb" {
			hex_3d = Intersect3D(hex_3d, Transform3D(sphere_3d, Translate3d(V3{0, 0, z_ofs})))
		}
	}
	return hex_3d
}
// Return a cylindrical knurled head.
func KnurledHead3D(
	r float64, // radius
	h float64, // height
	pitch float64, // knurl pitch
) SDF3 {
	theta := DtoR(45)
	cylinder_round := r * 0.05
	// use a whole number of knurl pitches over the available height
	knurl_h := pitch * math.Floor((h-cylinder_round)/pitch)
	knurl_3d := Knurl3D(knurl_h, r, pitch, pitch*0.3, theta)
	return Union3D(Cylinder3D(h, r, cylinder_round), knurl_3d)
}
//-----------------------------------------------------------------------------
// Return a 2D knurl profile.
// The profile is a triangular tooth of the given pitch and height sitting
// on top of a cylinder of the given radius, used as the thread form.
func KnurlProfile(
	radius float64, // radius of knurled cylinder
	pitch float64, // pitch of the knurl
	height float64, // height of the knurl
) SDF2 {
	knurl := NewPolygon()
	knurl.Add(pitch/2, 0)
	knurl.Add(pitch/2, radius)
	knurl.Add(0, radius+height)
	knurl.Add(-pitch/2, radius)
	knurl.Add(-pitch/2, 0)
	//knurl.Render("knurl.dxf")
	return Polygon2D(knurl.Vertices())
}
// Return a knurled cylinder.
func Knurl3D(
	length float64, // length of cylinder
	radius float64, // radius of cylinder
	pitch float64, // knurl pitch
	height float64, // knurl height
	theta float64, // knurl helix angle
) SDF3 {
	// A knurl is the the intersection of left and right hand
	// multistart "threads". Work out the number of starts using
	// the desired helix angle.
	n := int(TAU * radius * math.Tan(theta) / pitch)
	// build the knurl profile.
	knurl_2d := KnurlProfile(radius, pitch, height)
	// create the left/right hand spirals
	knurl0_3d := Screw3D(knurl_2d, length, pitch, n)
	knurl1_3d := Screw3D(knurl_2d, length, pitch, -n)
	return Intersect3D(knurl0_3d, knurl1_3d)
}
//-----------------------------------------------------------------------------
// Return a washer.
// Panics when the dimensions are not sane: non-positive thickness, or an
// inner radius that is not smaller than the outer radius.
func Washer3D(
	t float64, // thickness
	r_inner float64, // inner radius
	r_outer float64, // outer radius
) SDF3 {
	if t <= 0 {
		panic("t <= 0")
	}
	if r_inner >= r_outer {
		panic("r_inner >= r_outer")
	}
	return Difference3D(Cylinder3D(t, r_outer, 0), Cylinder3D(t, r_inner, 0))
}
//----------------------------------------------------------------------------- | sdf/shapes3.go | 0.855021 | 0.467271 | shapes3.go | starcoder |
package op
import(
"fmt"
)
// Add returns a+b and counts the operation in the State.
// Both operands must share the same numeric dynamic type; a mismatched b
// panics on the type assertion (unchanged behavior). Unsupported types
// print a message and yield nil.
func (self *State)Add(a,b any)any{
	self.IncOperations(self.coeff["+"]+self.off["+"])
	// A type switch dispatches directly on the dynamic type instead of
	// formatting the type name with fmt.Sprintf("%T", ...) on every call.
	switch v := a.(type) {
	case int: return v+b.(int)
	case int8: return v+b.(int8)
	case int16: return v+b.(int16)
	case int32: return v+b.(int32)
	case int64: return v+b.(int64)
	case uint: return v+b.(uint)
	case uint8: return v+b.(uint8)
	case uint16: return v+b.(uint16)
	case uint32: return v+b.(uint32)
	case uint64: return v+b.(uint64)
	case float32: return v+b.(float32)
	case float64: return v+b.(float64)
	case complex64: return v+b.(complex64)
	case complex128: return v+b.(complex128)
	default: fmt.Println("Invalid type")
	}
	return nil
}
// Sub returns a-b and counts the operation in the State.
// Both operands must share the same numeric dynamic type; a mismatched b
// panics on the type assertion (unchanged behavior). Unsupported types
// print a message and yield nil.
func (self *State)Sub(a,b any)any{
	self.IncOperations(self.coeff["-"]+self.off["-"])
	// Type switch instead of fmt.Sprintf("%T", ...) string comparison.
	switch v := a.(type) {
	case int: return v-b.(int)
	case int8: return v-b.(int8)
	case int16: return v-b.(int16)
	case int32: return v-b.(int32)
	case int64: return v-b.(int64)
	case uint: return v-b.(uint)
	case uint8: return v-b.(uint8)
	case uint16: return v-b.(uint16)
	case uint32: return v-b.(uint32)
	case uint64: return v-b.(uint64)
	case float32: return v-b.(float32)
	case float64: return v-b.(float64)
	case complex64: return v-b.(complex64)
	case complex128: return v-b.(complex128)
	default: fmt.Println("Invalid type")
	}
	return nil
}
// Mul returns a*b and counts the operation in the State.
// Both operands must share the same numeric dynamic type; a mismatched b
// panics on the type assertion (unchanged behavior). Unsupported types
// print a message and yield nil.
func (self *State)Mul(a,b any)any{
	self.IncOperations(self.coeff["*"]+self.off["*"])
	// Type switch instead of fmt.Sprintf("%T", ...) string comparison.
	switch v := a.(type) {
	case int: return v*b.(int)
	case int8: return v*b.(int8)
	case int16: return v*b.(int16)
	case int32: return v*b.(int32)
	case int64: return v*b.(int64)
	case uint: return v*b.(uint)
	case uint8: return v*b.(uint8)
	case uint16: return v*b.(uint16)
	case uint32: return v*b.(uint32)
	case uint64: return v*b.(uint64)
	case float32: return v*b.(float32)
	case float64: return v*b.(float64)
	case complex64: return v*b.(complex64)
	case complex128: return v*b.(complex128)
	default: fmt.Println("Invalid type")
	}
	return nil
}
// Div returns a/b and counts the operation in the State.
// Both operands must share the same numeric dynamic type; a mismatched b
// panics on the type assertion, and integer division by zero panics
// (both unchanged behavior). Unsupported types print a message and
// yield nil.
func (self *State)Div(a,b any)any{
	self.IncOperations(self.coeff["/"]+self.off["/"])
	// Type switch instead of fmt.Sprintf("%T", ...) string comparison.
	switch v := a.(type) {
	case int: return v/b.(int)
	case int8: return v/b.(int8)
	case int16: return v/b.(int16)
	case int32: return v/b.(int32)
	case int64: return v/b.(int64)
	case uint: return v/b.(uint)
	case uint8: return v/b.(uint8)
	case uint16: return v/b.(uint16)
	case uint32: return v/b.(uint32)
	case uint64: return v/b.(uint64)
	case float32: return v/b.(float32)
	case float64: return v/b.(float64)
	case complex64: return v/b.(complex64)
	case complex128: return v/b.(complex128)
	default: fmt.Println("Invalid type")
	}
	return nil
}
// Mod returns the remainder a % b and counts the operation in the State.
// Only integer types are supported; both operands must share the same
// dynamic type, and b == 0 panics (unchanged behavior). Unsupported
// types print a message and yield nil.
func (self *State)Mod(a,b any)any{
	self.IncOperations(self.coeff["%"]+self.off["%"])
	// Type switch instead of fmt.Sprintf("%T", ...) string comparison.
	switch v := a.(type) {
	case int: return v%b.(int)
	case int8: return v%b.(int8)
	case int16: return v%b.(int16)
	case int32: return v%b.(int32)
	case int64: return v%b.(int64)
	case uint: return v%b.(uint)
	case uint8: return v%b.(uint8)
	case uint16: return v%b.(uint16)
	case uint32: return v%b.(uint32)
	case uint64: return v%b.(uint64)
	default: fmt.Println("Invalid type")
	}
	return nil
}
// Succ returns a incremented by 1 and counts the operation in the State.
// Unsupported types print a message and yield nil.
func (self *State)Succ(a any)any{
	self.IncOperations(self.coeff["++"]+self.off["++"])
	// Type switch instead of fmt.Sprintf("%T", ...) string comparison.
	switch v := a.(type) {
	case int: return v+1
	case int8: return v+1
	case int16: return v+1
	case int32: return v+1
	case int64: return v+1
	case uint: return v+1
	case uint8: return v+1
	case uint16: return v+1
	case uint32: return v+1
	case uint64: return v+1
	case float32: return v+1
	case float64: return v+1
	case complex64: return v+1
	case complex128: return v+1
	default: fmt.Println("Invalid type")
	}
	return nil
}
// Prec returns a decremented by 1 and counts the operation in the State.
// Unsupported types print a message and yield nil.
func (self *State)Prec(a any)any{
	self.IncOperations(self.coeff["--"]+self.off["--"])
	// Type switch instead of fmt.Sprintf("%T", ...) string comparison.
	switch v := a.(type) {
	case int: return v-1
	case int8: return v-1
	case int16: return v-1
	case int32: return v-1
	case int64: return v-1
	case uint: return v-1
	case uint8: return v-1
	case uint16: return v-1
	case uint32: return v-1
	case uint64: return v-1
	case float32: return v-1
	case float64: return v-1
	case complex64: return v-1
	case complex128: return v-1
	default: fmt.Println("Invalid type")
	}
	return nil
} | op/arithmetic_operators.go | 0.568416 | 0.538437 | arithmetic_operators.go | starcoder |
package dataset
import (
"math"
"github.com/adam-lavrik/go-imath/i64"
"github.com/adam-lavrik/go-imath/u64"
)
// DataSet represents a set of documents from the source query system
// It serves as an abstraction of the DBMS/Data Processor/... specific table/dataset/collection/...
type DataSet struct {
	// The Name/ID of the dataset
	Name string
	// The real number of documents in the dataset. Should be set if known;
	// nil means the exact count is unknown and ExpectedCount is used instead.
	Count *uint64
	// The expected number of documents
	ExpectedCount uint64
	// The merged pathinformation of all documents in the dataset
	// (presumably keyed by the path expression, see DataPath.Path — confirm with producer)
	Paths map[string]*DataPath
	// The parent DataSet, if it exists
	DerivedFrom *DataSet
}
// GetSize returns the actual number of documents when it is known,
// falling back to the expected count otherwise.
func (d *DataSet) GetSize() uint64 {
	if d.Count == nil {
		return d.ExpectedCount
	}
	return *d.Count
}
// A DataPath represents a single path within a document of a DataSet.
// Each *Type field is nil when no value of that type was observed.
type DataPath struct {
	// The Path expression of the Path
	Path string
	// Information about the existence and distribution of string values
	Stringtype *StringType
	// Information about the existence and distribution of float values
	Floattype *FloatType
	// Information about the existence and distribution of integer values
	Inttype *IntType
	// Information about the existence and distribution of bool values
	Booltype *BooleanType
	// Information about the existence and distribution of null values
	Nulltype *NullType
	// Information about the existence and distribution of object values
	Objecttype *ObjectType
	// Information about the existence and distribution of array values
	Arraytype *ArrayType
	// Information about the existence of the path
	Count *uint64
}
// Merge merges two DataPaths and accumulates their statistics.
// They have to represent the same path. If not, nil is returned.
// The receiver is updated in place and returned.
func (l *DataPath) Merge(r DataPath) *DataPath {
	if l.Path != r.Path {
		return nil
	}
	// Accumulate the existence count of the path itself, mirroring the
	// per-type merge behavior.
	if l.Count == nil {
		l.Count = r.Count
	} else if r.Count != nil {
		*l.Count += *r.Count
	}
	if l.Stringtype == nil {
		l.Stringtype = r.Stringtype
	} else if r.Stringtype != nil {
		l.Stringtype.merge(*r.Stringtype)
	}
	if l.Floattype == nil {
		l.Floattype = r.Floattype
	} else if r.Floattype != nil {
		l.Floattype.merge(*r.Floattype)
	}
	// Previously Inttype, Objecttype and Arraytype were silently dropped
	// on merge even though their merge methods exist; accumulate them too.
	if l.Inttype == nil {
		l.Inttype = r.Inttype
	} else if r.Inttype != nil {
		l.Inttype.merge(*r.Inttype)
	}
	if l.Booltype == nil {
		l.Booltype = r.Booltype
	} else if r.Booltype != nil {
		l.Booltype.merge(*r.Booltype)
	}
	if l.Nulltype == nil {
		l.Nulltype = r.Nulltype
	} else if r.Nulltype != nil {
		l.Nulltype.merge(*r.Nulltype)
	}
	if l.Objecttype == nil {
		l.Objecttype = r.Objecttype
	} else if r.Objecttype != nil {
		l.Objecttype.merge(*r.Objecttype)
	}
	if l.Arraytype == nil {
		l.Arraytype = r.Arraytype
	} else if r.Arraytype != nil {
		l.Arraytype.merge(*r.Arraytype)
	}
	return l
}
// HasFloatCount reports whether at least one float value was counted at the path.
func (l *DataPath) HasFloatCount() bool {
	return l.Floattype != nil && l.Floattype.Count != nil && *l.Floattype.Count > 0
}
// HasIntCount reports whether at least one integer value was counted at the path.
func (l *DataPath) HasIntCount() bool {
	return l.Inttype != nil && l.Inttype.Count != nil && *l.Inttype.Count > 0
}
// HasNumCount reports whether at least one numeric (float or integer) value was counted.
func (l *DataPath) HasNumCount() bool {
	return l.HasFloatCount() || l.HasIntCount()
}
// HasStringCount reports whether at least one string value was counted at the path.
func (l *DataPath) HasStringCount() bool {
	return l.Stringtype != nil && l.Stringtype.Count != nil && *l.Stringtype.Count > 0
}
// HasBoolCount reports whether any boolean statistic (total, false or true count) is positive.
func (l *DataPath) HasBoolCount() bool {
	return l.Booltype != nil && ((l.Booltype.Count != nil && *l.Booltype.Count > 0) || (l.Booltype.FalseCount != nil && *l.Booltype.FalseCount > 0) || (l.Booltype.TrueCount != nil && *l.Booltype.TrueCount > 0))
}
// StringType represents a possible String type of a DataPath.
// The type may be augmented with statistics about the distribution of the data
type StringType struct {
	// An optional count of how many documents have the given type at the path
	Count *uint64
	// Smallest observed value (lexicographic, per merge below)
	Min *string
	// Largest observed value (lexicographic, per merge below)
	Max *string
	// Number of distinct values
	Unique *uint64
	// Value prefixes; semantics set by the producer — TODO confirm
	Prefixes []string
}
// merge folds r's statistics into l: counts are summed and the min/max
// bounds are widened. nil fields on l adopt r's pointer.
// NOTE(review): Unique counts are summed, which over-counts values present
// on both sides, and Prefixes are not merged at all — confirm intent.
func (l *StringType) merge(r StringType) *StringType {
	if l.Count == nil {
		l.Count = r.Count
	} else if r.Count != nil {
		*l.Count += *r.Count
	}
	if l.Unique == nil {
		l.Unique = r.Unique
	} else if r.Unique != nil {
		*l.Unique += *r.Unique
	}
	if l.Min == nil {
		l.Min = r.Min
	} else if r.Min != nil {
		if *r.Min < *l.Min {
			l.Min = r.Min
		}
	}
	if l.Max == nil {
		l.Max = r.Max
	} else if r.Max != nil {
		if *r.Max > *l.Max {
			l.Max = r.Max
		}
	}
	return l
}
// FloatType represents a possible Float type of a DataPath.
// The type may be augmented with statistics about the distribution of the data
type FloatType struct {
	// An optional count of how many documents have the given type at the path
	Count *uint64
	// Smallest observed value
	Min *float64
	// Largest observed value
	Max *float64
	// Number of distinct values
	Unique *uint64
}
// merge folds r's statistics into l: counts are summed and min/max widened.
// NOTE(review): unlike StringType.merge, Min/Max are written through the
// existing pointers (*l.Min = ...) rather than reassigned, mutating the
// pointed-to values — confirm no aliasing of these pointers elsewhere.
// Unique counts are summed, which over-counts shared values.
func (l *FloatType) merge(r FloatType) *FloatType {
	if l.Count == nil {
		l.Count = r.Count
	} else if r.Count != nil {
		*l.Count += *r.Count
	}
	if l.Unique == nil {
		l.Unique = r.Unique
	} else if r.Unique != nil {
		*l.Unique += *r.Unique
	}
	if l.Min == nil {
		l.Min = r.Min
	} else if r.Min != nil {
		*l.Min = math.Min(*l.Min, *r.Min)
	}
	if l.Max == nil {
		l.Max = r.Max
	} else if r.Max != nil {
		*l.Max = math.Max(*l.Max, *r.Max)
	}
	return l
}
// IntType represents a possible integer type of a DataPath.
// The type may be augmented with statistics about the distribution of the data
type IntType struct {
	// An optional count of how many documents have the given type at the path
	Count *uint64
	// Smallest observed value
	Min *int64
	// Largest observed value
	Max *int64
	// Number of distinct values
	Unique *uint64
}
// merge folds r's statistics into l: counts are summed and min/max widened.
// Min/Max are written through the existing pointers, mutating the
// pointed-to values. Unique counts are summed (over-counts shared values).
func (l *IntType) merge(r IntType) *IntType {
	if l.Count == nil {
		l.Count = r.Count
	} else if r.Count != nil {
		*l.Count += *r.Count
	}
	if l.Unique == nil {
		l.Unique = r.Unique
	} else if r.Unique != nil {
		*l.Unique += *r.Unique
	}
	if l.Min == nil {
		l.Min = r.Min
	} else if r.Min != nil {
		*l.Min = i64.Min(*l.Min, *r.Min)
	}
	if l.Max == nil {
		l.Max = r.Max
	} else if r.Max != nil {
		*l.Max = i64.Max(*l.Max, *r.Max)
	}
	return l
}
// BooleanType represents a possible Boolean type of a DataPath.
// The type may be augmented with statistics about the distribution of the data
type BooleanType struct {
	// An optional count of how many documents have the given type at the path
	Count *uint64
	// Number of false values observed
	FalseCount *uint64
	// Number of true values observed
	TrueCount *uint64
}
// merge folds r's statistics into l by summing all three counters; nil
// fields on l adopt r's pointer.
func (l *BooleanType) merge(r BooleanType) *BooleanType {
	if l.Count == nil {
		l.Count = r.Count
	} else if r.Count != nil {
		*l.Count += *r.Count
	}
	if l.FalseCount == nil {
		l.FalseCount = r.FalseCount
	} else if r.FalseCount != nil {
		*l.FalseCount += *r.FalseCount
	}
	if l.TrueCount == nil {
		l.TrueCount = r.TrueCount
	} else if r.TrueCount != nil {
		*l.TrueCount += *r.TrueCount
	}
	return l
}
// NullType represents a possible Null type of a DataPath.
// The type may be augmented with statistics about the distribution of the data
type NullType struct {
	// An optional count of how many documents have the given type at the path
	Count *uint64
}
// merge folds r's count into l's; a nil count on l adopts r's pointer.
func (l *NullType) merge(r NullType) *NullType {
	if l.Count == nil {
		l.Count = r.Count
	} else if r.Count != nil {
		*l.Count += *r.Count
	}
	return l
}
// ObjectType represents a possible object type of a DataPath.
// The type may be augmented with statistics about the distribution of the data
type ObjectType struct {
	// An optional count of how many documents have the given type at the path
	Count *uint64
	// Smallest observed number of members
	MinMembers *uint64
	// Largest observed number of members
	MaxMembers *uint64
}
// merge folds r's statistics into l: counts are summed and the member
// bounds widened (written through the existing pointers).
func (l *ObjectType) merge(r ObjectType) *ObjectType {
	if l.Count == nil {
		l.Count = r.Count
	} else if r.Count != nil {
		*l.Count += *r.Count
	}
	if l.MinMembers == nil {
		l.MinMembers = r.MinMembers
	} else if r.MinMembers != nil {
		*l.MinMembers = u64.Min(*l.MinMembers, *r.MinMembers)
	}
	if l.MaxMembers == nil {
		l.MaxMembers = r.MaxMembers
	} else if r.MaxMembers != nil {
		*l.MaxMembers = u64.Max(*l.MaxMembers, *r.MaxMembers)
	}
	return l
}
// ArrayType represents a possible Array type of a DataPath.
// The type may be augmented with statistics about the distribution of the data
type ArrayType struct {
	// An optional count of how many documents have the given type at the path
	Count *uint64
	// Smallest observed array size
	MinSize *uint64
	// Largest observed array size
	MaxSize *uint64
}
// merge folds r's statistics into l: counts are summed and the size
// bounds widened (written through the existing pointers).
func (l *ArrayType) merge(r ArrayType) *ArrayType {
	if l.Count == nil {
		l.Count = r.Count
	} else if r.Count != nil {
		*l.Count += *r.Count
	}
	if l.MinSize == nil {
		l.MinSize = r.MinSize
	} else if r.MinSize != nil {
		*l.MinSize = u64.Min(*l.MinSize, *r.MinSize)
	}
	if l.MaxSize == nil {
		l.MaxSize = r.MaxSize
	} else if r.MaxSize != nil {
		*l.MaxSize = u64.Max(*l.MaxSize, *r.MaxSize)
	}
	return l
} | dataset/dataset.go | 0.741019 | 0.590248 | dataset.go | starcoder |
package quadedge
import (
"log"
"github.com/go-spatial/geom/winding"
"github.com/go-spatial/geom/planar"
"github.com/go-spatial/geom"
)
// Splice operator affects the two edge rings around the origin of a and b,
// and, independently, the two edge rings around the left faces of a and b.
// In each case, (i) if the two rings are distinct, Splice will combine
// them into one; (ii) if the two are the same ring, Splice will break it
// into two separate pieces.
// Thus, Splice can be used both to attach the two edges together, and
// to break them apart. See Guibas and Stolfi (1985) p.96 for more details
// and illustrations.
func Splice(a, b *Edge) {
	if a == nil || b == nil {
		return
	}
	// Duals of the edges following a and b around their origins.
	alpha := a.ONext().Rot()
	beta := b.ONext().Rot()
	// Exchange the origin-ring successors of a and b, and likewise the
	// successors of their duals, in one simultaneous swap.
	t1 := b.ONext()
	t2 := a.ONext()
	t3 := beta.ONext()
	t4 := alpha.ONext()
	a.next = t1
	b.next = t2
	alpha.next = t3
	beta.next = t4
}
// Connect Adds a new edge (e) connecting the destination of a to the
// origin of b, in such a way that all three have the same
// left face after the connection is complete.
// Additionally, the data pointers of the new edge are set.
// Returns nil when either input edge is nil.
func Connect(a, b *Edge, order winding.Order) *Edge {
	if b == nil || a == nil {
		return nil
	}
	// Debug-only validation of both input edges before connecting.
	if debug {
		log.Printf("\n\n\tvalidate a:\n%v\n", a.DumpAllEdges())
		if err := Validate(a, order); err != nil {
			if err1, ok := err.(ErrInvalid); ok {
				for i, estr := range err1 {
					log.Printf("err: %03v : %v", i, estr)
				}
			}
		}
		log.Printf("\n\n\tvalidate b:\n%v\n", b.DumpAllEdges())
		if err := Validate(b, order); err != nil {
			if err1, ok := err.(ErrInvalid); ok {
				for i, estr := range err1 {
					log.Printf("err: %03v : %v", i, estr)
				}
			}
		}
		log.Printf("-------------------------\n")
	}
	if debug {
		log.Printf("\n\n\tConnect\n\n")
		log.Printf("Connecting %v to %v:", wkt.MustEncode(*a.Dest()), wkt.MustEncode(*b.Orig()))
	}
	// Resolve which edge in b's origin ring the new edge should attach to.
	// NOTE(review): err is only inspected (and panics) when debug is on;
	// in normal operation a ResolveEdge failure is silently ignored —
	// confirm this is intended.
	bb, err := ResolveEdge(order, b, *a.Dest())
	if debug {
		if err != nil {
			panic(err)
		}
		log.Printf("splice e.Sym, bb: bb: %v", wkt.MustEncode(bb.AsLine()))
	}
	e := NewWithEndPoints(a.Dest(), bb.Orig())
	if debug {
		log.Printf("a: %v", wkt.MustEncode(a.AsLine()))
		log.Printf("a:LNext(): %v", wkt.MustEncode(a.LNext().AsLine()))
		log.Printf("a:LPrev(): %v", wkt.MustEncode(a.LPrev().AsLine()))
		log.Printf("splice e, a:LNext(): e: %v", wkt.MustEncode(e.AsLine()))
		log.Printf("splice e.Sym, b: b: %v", wkt.MustEncode(b.AsLine()))
	}
	// Attach e after a around a's left face, and its symmetric edge into
	// the resolved position in b's ring.
	Splice(e, a.LNext())
	Splice(e.Sym(), bb)
	// Debug-only validation of the result.
	if debug {
		log.Printf("\n\n\tvalidate e:\n%v\n", e.DumpAllEdges())
		if err := Validate(e, order); err != nil {
			if err1, ok := err.(ErrInvalid); ok {
				for i, estr := range err1 {
					log.Printf("err: %03v : %v", i, estr)
				}
			}
			log.Printf("Vertex Edges: %v", e.DumpAllEdges())
		}
		log.Printf("\n\n\tvalidate a:\n%v\n", a.DumpAllEdges())
		if err := Validate(a, order); err != nil {
			if err1, ok := err.(ErrInvalid); ok {
				for i, estr := range err1 {
					log.Printf("err: %03v : %v", i, estr)
				}
			}
			log.Printf("Vertex Edges: %v", e.DumpAllEdges())
		}
		log.Printf("\n\n\tvalidate b:\n%v\n", b.DumpAllEdges())
		if err := Validate(b, order); err != nil {
			if err1, ok := err.(ErrInvalid); ok {
				for i, estr := range err1 {
					log.Printf("err: %03v : %v", i, estr)
				}
			}
			log.Printf("Vertex Edges: %v", e.DumpAllEdges())
			panic("invalid edge b")
		}
		log.Printf("-------------------------\n")
	}
	return e
}
// Swap Essentially turns edge e counterclockwise inside its enclosing
// quadrilateral. The data pointers are modified accordingly.
func Swap(e *Edge) {
	// Neighbours of e and of its symmetric edge in their origin rings.
	a := e.OPrev()
	b := e.Sym().OPrev()
	// Detach e from both rings, then re-attach it one position around.
	Splice(e, a)
	Splice(e.Sym(), b)
	Splice(e, a.LNext())
	Splice(e.Sym(), b.LNext())
	// e now runs between the opposite vertices of the quadrilateral.
	e.EndPoints(a.Dest(), b.Dest())
}
// Delete will remove the edge from the ring
func Delete(e *Edge) {
	if e == nil {
		return
	}
	if debug {
		log.Printf("Deleting edge %p", e)
	}
	sym := e.Sym()
	// Unhook both directed halves from their origin rings.
	Splice(e, e.OPrev())
	Splice(sym, sym.OPrev())
}
// OnEdge determines if the point pt is on the edge e.
// Returns false when either endpoint of e is unset.
func OnEdge(pt geom.Point, e *Edge) bool {
	org := e.Orig()
	if org == nil {
		return false
	}
	dst := e.Dest()
	if dst == nil {
		return false
	}
	l := geom.Line{*org, *dst}
	return planar.IsPointOnLineSegment(cmp, pt, l)
}
// RightOf indicates if the point is right of the Edge
// If a point is below the line it is to its right
// If a point is above the line it is to its left
// Returns false when either endpoint of e is unset.
func RightOf(yflip bool, x geom.Point, e *Edge) bool {
	// yflip selects the y-positive-down winding convention.
	order := winding.Order{
		YPositiveDown: yflip,
	}
	org := e.Orig()
	if org == nil {
		return false
	}
	dst := e.Dest()
	if dst == nil {
		return false
	}
	// x is to the right of org->dst exactly when the triple winds clockwise.
	w := order.OfGeomPoints(*org, *dst, x)
	if debug {
		log.Printf(
			"%v right of %v ? (%v) %t",
			wkt.MustEncode(x),
			wkt.MustEncode(e.AsLine()),
			w.ShortString(),
			w.IsClockwise(),
		)
	}
	return w.IsClockwise()
} | planar/triangulate/delaunay/quadedge/topo.go | 0.565059 | 0.489259 | topo.go | starcoder |
package core
const (
	// Each constant represents a different chunk of random numbers in
	// the table. For example, the 64 random numbers for black bishops
	// would begin at the 4 chunk of 64 random numbers, or the 4*64=
	// 256 index.
	BlackPawn = iota
	WhitePawn
	BlackKnight
	WhiteKnight
	BlackBishop
	WhiteBishop
	BlackRook
	WhiteRook
	BlackQueen
	WhiteQueen
	BlackKing
	WhiteKing
	// These constants perform the same function
	// as the ones for pieces, but the direct offsets
	// can be hardcoded: 12 piece chunks * 64 squares = 768
	// entries precede them.
	CastleWKSHash = 768
	CastleWQSHash = 769
	CastleBKSHash = 770
	CastleBQSHash = 771
	// Base index of the 8 per-file en passant numbers (772-779).
	EnPassant = 772
	SideToMove = 780
)
// Create an initial zobrist hash for a board loaded from a
// fen string.
func initZobristHash(board *Board) (hash uint64) {
	// XOR in a random number for every piece on its square.
	for pos, piece := range board.Pieces {
		if piece != NoPiece {
			hash ^= getPieceHash(piece, pos)
		}
	}
	// XOR in each castling right that is still available.
	if board.CastlingRights&WhiteKingside != 0 {
		hash ^= Random64[CastleWKSHash]
	}
	if board.CastlingRights&WhiteQueenside != 0 {
		hash ^= Random64[CastleWQSHash]
	}
	if board.CastlingRights&BlackKingside != 0 {
		hash ^= Random64[CastleBKSHash]
	}
	if board.CastlingRights&BlackQueenside != 0 {
		hash ^= Random64[CastleBQSHash]
	}
	// The en passant file only participates when an en passant capture is
	// actually possible (see isValidZobristEPSq).
	if board.EPSquare != NoEPSquare && isValidZobristEPSq(board, board.EPSquare) {
		hash ^= getEPFileHash(board.EPSquare)
	}
	if board.WhiteToMove {
		hash ^= Random64[SideToMove]
	}
	return hash
}
// For our zobrist hashing algorithm, an en passant square is only
// valid and included as part of the hash, when there is a pawn of
// the opposite color that would be able to perform the en passant.
// So we have to do a little extra work to verify that's the case.
func isValidZobristEPSq(board *Board, EPsq int) bool {
	// Find the pawn that just double-stepped: it sits one rank above or
	// below the en passant square.
	epPawnPos := EPsq + 8
	if !hasBitSet(board.PieceBB[PawnBB], epPawnPos) {
		epPawnPos = EPsq - 8
	}
	pawnColor := getPieceColor(board.Pieces[epPawnPos])
	enemyColor := WhiteBB
	if pawnColor == WhiteBB {
		enemyColor = BlackBB
	}
	// Take the pawn that can be captured via en passant,
	// and move it one to the left and right. If the left or
	// right shifted bitboards intersect with any enemy pawns,
	// then it can be captured and the en passant square is
	// valid for zobrist hashing.
	epPawnBB := setSingleBit(epPawnPos)
	enemyPawns := board.PieceBB[enemyColor] & board.PieceBB[PawnBB]
	pawnShiftedLeft := (epPawnBB & ClearFile[FileA]) << 1
	pawnShiftedRight := (epPawnBB & ClearFile[FileH]) >> 1
	return pawnShiftedLeft&enemyPawns != 0 || pawnShiftedRight&enemyPawns != 0
}
// getPieceHash returns the zobrist random number for the given piece on
// the given square. Per the constants above, each piece kind occupies a
// 64-entry chunk: black kinds at even chunk indexes, white at odd.
func getPieceHash(piece uint8, pos int) uint64 {
	pieceType, pieceColor := GetPieceType(piece), getPieceColor(piece)
	if pieceColor == WhiteBB {
		return Random64[(pieceType*2+1)*64+pos]
	}
	return Random64[(pieceType*2)*64+pos]
}
// Use this function when debugging the engine as it checks that the piece
// given is valid. It enumerates every type/color pair explicitly and
// panics on anything unrecognized, unlike the arithmetic fast path above.
func getPieceHashDebug(piece uint8, pos int) uint64 {
	pieceType, pieceColor := GetPieceType(piece), getPieceColor(piece)
	if pieceType == PawnBB && pieceColor == BlackBB {
		return Random64[BlackPawn*64+pos]
	} else if pieceType == PawnBB && pieceColor == WhiteBB {
		return Random64[WhitePawn*64+pos]
	} else if pieceType == KnightBB && pieceColor == BlackBB {
		return Random64[BlackKnight*64+pos]
	} else if pieceType == KnightBB && pieceColor == WhiteBB {
		return Random64[WhiteKnight*64+pos]
	} else if pieceType == BishopBB && pieceColor == BlackBB {
		return Random64[BlackBishop*64+pos]
	} else if pieceType == BishopBB && pieceColor == WhiteBB {
		return Random64[WhiteBishop*64+pos]
	} else if pieceType == RookBB && pieceColor == BlackBB {
		return Random64[BlackRook*64+pos]
	} else if pieceType == RookBB && pieceColor == WhiteBB {
		return Random64[WhiteRook*64+pos]
	} else if pieceType == QueenBB && pieceColor == BlackBB {
		return Random64[BlackQueen*64+pos]
	} else if pieceType == QueenBB && pieceColor == WhiteBB {
		return Random64[WhiteQueen*64+pos]
	} else if pieceType == KingBB && pieceColor == BlackBB {
		return Random64[BlackKing*64+pos]
	} else if pieceType == KingBB && pieceColor == WhiteBB {
		return Random64[WhiteKing*64+pos]
	} else {
		panic("getting piece zobrist hash failed!")
	}
}
// getEPFileHash returns the zobrist random number for the file (a-h) of
// the given en passant square.
func getEPFileHash(EPsq int) uint64 {
	// The 0-7 file index is the coordinate's distance from 'a'; this is
	// algebraically identical to the previous 7-('h'-c) expression.
	file := int(PosToCoordinate(EPsq)[0] - 'a')
	return Random64[EnPassant+file]
}
// A table containing the random numbers used for zobrist hashing. Currently
// a pre-computed table is used for testing and and debugging purposes. Once
// this phase is complete, the 12*64+8+4+1 random numbers needed will be
// procedurally generated upon program startup.
var Random64 [781]uint64 = [781]uint64{
0x9D39247E33776D41, 0x2AF7398005AAA5C7, 0x44DB015024623547, 0x9C15F73E62A76AE2,
0x75834465489C0C89, 0x3290AC3A203001BF, 0x0FBBAD1F61042279, 0xE83A908FF2FB60CA,
0x0D7E765D58755C10, 0x1A083822CEAFE02D, 0x9605D5F0E25EC3B0, 0xD021FF5CD13A2ED5,
0x40BDF15D4A672E32, 0x011355146FD56395, 0x5DB4832046F3D9E5, 0x239F8B2D7FF719CC,
0x05D1A1AE85B49AA1, 0x679F848F6E8FC971, 0x7449BBFF801FED0B, 0x7D11CDB1C3B7ADF0,
0x82C7709E781EB7CC, 0xF3218F1C9510786C, 0x331478F3AF51BBE6, 0x4BB38DE5E7219443,
0xAA649C6EBCFD50FC, 0x8DBD98A352AFD40B, 0x87D2074B81D79217, 0x19F3C751D3E92AE1,
0xB4AB30F062B19ABF, 0x7B0500AC42047AC4, 0xC9452CA81A09D85D, 0x24AA6C514DA27500,
0x4C9F34427501B447, 0x14A68FD73C910841, 0xA71B9B83461CBD93, 0x03488B95B0F1850F,
0x637B2B34FF93C040, 0x09D1BC9A3DD90A94, 0x3575668334A1DD3B, 0x735E2B97A4C45A23,
0x18727070F1BD400B, 0x1FCBACD259BF02E7, 0xD310A7C2CE9B6555, 0xBF983FE0FE5D8244,
0x9F74D14F7454A824, 0x51EBDC4AB9BA3035, 0x5C82C505DB9AB0FA, 0xFCF7FE8A3430B241,
0x3253A729B9BA3DDE, 0x8C74C368081B3075, 0xB9BC6C87167C33E7, 0x7EF48F2B83024E20,
0x11D505D4C351BD7F, 0x6568FCA92C76A243, 0x4DE0B0F40F32A7B8, 0x96D693460CC37E5D,
0x42E240CB63689F2F, 0x6D2BDCDAE2919661, 0x42880B0236E4D951, 0x5F0F4A5898171BB6,
0x39F890F579F92F88, 0x93C5B5F47356388B, 0x63DC359D8D231B78, 0xEC16CA8AEA98AD76,
0x5355F900C2A82DC7, 0x07FB9F855A997142, 0x5093417AA8A7ED5E, 0x7BCBC38DA25A7F3C,
0x19FC8A768CF4B6D4, 0x637A7780DECFC0D9, 0x8249A47AEE0E41F7, 0x79AD695501E7D1E8,
0x14ACBAF4777D5776, 0xF145B6BECCDEA195, 0xDABF2AC8201752FC, 0x24C3C94DF9C8D3F6,
0xBB6E2924F03912EA, 0x0CE26C0B95C980D9, 0xA49CD132BFBF7CC4, 0xE99D662AF4243939,
0x27E6AD7891165C3F, 0x8535F040B9744FF1, 0x54B3F4FA5F40D873, 0x72B12C32127FED2B,
0xEE954D3C7B411F47, 0x9A85AC909A24EAA1, 0x70AC4CD9F04F21F5, 0xF9B89D3E99A075C2,
0x87B3E2B2B5C907B1, 0xA366E5B8C54F48B8, 0xAE4A9346CC3F7CF2, 0x1920C04D47267BBD,
0x87BF02C6B49E2AE9, 0x092237AC237F3859, 0xFF07F64EF8ED14D0, 0x8DE8DCA9F03CC54E,
0x9C1633264DB49C89, 0xB3F22C3D0B0B38ED, 0x390E5FB44D01144B, 0x5BFEA5B4712768E9,
0x1E1032911FA78984, 0x9A74ACB964E78CB3, 0x4F80F7A035DAFB04, 0x6304D09A0B3738C4,
0x2171E64683023A08, 0x5B9B63EB9CEFF80C, 0x506AACF489889342, 0x1881AFC9A3A701D6,
0x6503080440750644, 0xDFD395339CDBF4A7, 0xEF927DBCF00C20F2, 0x7B32F7D1E03680EC,
0xB9FD7620E7316243, 0x05A7E8A57DB91B77, 0xB5889C6E15630A75, 0x4A750A09CE9573F7,
0xCF464CEC899A2F8A, 0xF538639CE705B824, 0x3C79A0FF5580EF7F, 0xEDE6C87F8477609D,
0x799E81F05BC93F31, 0x86536B8CF3428A8C, 0x97D7374C60087B73, 0xA246637CFF328532,
0x043FCAE60CC0EBA0, 0x920E449535DD359E, 0x70EB093B15B290CC, 0x73A1921916591CBD,
0x56436C9FE1A1AA8D, 0xEFAC4B70633B8F81, 0xBB215798D45DF7AF, 0x45F20042F24F1768,
0x930F80F4E8EB7462, 0xFF6712FFCFD75EA1, 0xAE623FD67468AA70, 0xDD2C5BC84BC8D8FC,
0x7EED120D54CF2DD9, 0x22FE545401165F1C, 0xC91800E98FB99929, 0x808BD68E6AC10365,
0xDEC468145B7605F6, 0x1BEDE3A3AEF53302, 0x43539603D6C55602, 0xAA969B5C691CCB7A,
0xA87832D392EFEE56, 0x65942C7B3C7E11AE, 0xDED2D633CAD004F6, 0x21F08570F420E565,
0xB415938D7DA94E3C, 0x91B859E59ECB6350, 0x10CFF333E0ED804A, 0x28AED140BE0BB7DD,
0xC5CC1D89724FA456, 0x5648F680F11A2741, 0x2D255069F0B7DAB3, 0x9BC5A38EF729ABD4,
0xEF2F054308F6A2BC, 0xAF2042F5CC5C2858, 0x480412BAB7F5BE2A, 0xAEF3AF4A563DFE43,
0x19AFE59AE451497F, 0x52593803DFF1E840, 0xF4F076E65F2CE6F0, 0x11379625747D5AF3,
0xBCE5D2248682C115, 0x9DA4243DE836994F, 0x066F70B33FE09017, 0x4DC4DE189B671A1C,
0x51039AB7712457C3, 0xC07A3F80C31FB4B4, 0xB46EE9C5E64A6E7C, 0xB3819A42ABE61C87,
0x21A007933A522A20, 0x2DF16F761598AA4F, 0x763C4A1371B368FD, 0xF793C46702E086A0,
0xD7288E012AEB8D31, 0xDE336A2A4BC1C44B, 0x0BF692B38D079F23, 0x2C604A7A177326B3,
0x4850E73E03EB6064, 0xCFC447F1E53C8E1B, 0xB05CA3F564268D99, 0x9AE182C8BC9474E8,
0xA4FC4BD4FC5558CA, 0xE755178D58FC4E76, 0x69B97DB1A4C03DFE, 0xF9B5B7C4ACC67C96,
0xFC6A82D64B8655FB, 0x9C684CB6C4D24417, 0x8EC97D2917456ED0, 0x6703DF9D2924E97E,
0xC547F57E42A7444E, 0x78E37644E7CAD29E, 0xFE9A44E9362F05FA, 0x08BD35CC38336615,
0x9315E5EB3A129ACE, 0x94061B871E04DF75, 0xDF1D9F9D784BA010, 0x3BBA57B68871B59D,
0xD2B7ADEEDED1F73F, 0xF7A255D83BC373F8, 0xD7F4F2448C0CEB81, 0xD95BE88CD210FFA7,
0x336F52F8FF4728E7, 0xA74049DAC312AC71, 0xA2F61BB6E437FDB5, 0x4F2A5CB07F6A35B3,
0x87D380BDA5BF7859, 0x16B9F7E06C453A21, 0x7BA2484C8A0FD54E, 0xF3A678CAD9A2E38C,
0x39B0BF7DDE437BA2, 0xFCAF55C1BF8A4424, 0x18FCF680573FA594, 0x4C0563B89F495AC3,
0x40E087931A00930D, 0x8CFFA9412EB642C1, 0x68CA39053261169F, 0x7A1EE967D27579E2,
0x9D1D60E5076F5B6F, 0x3810E399B6F65BA2, 0x32095B6D4AB5F9B1, 0x35CAB62109DD038A,
0xA90B24499FCFAFB1, 0x77A225A07CC2C6BD, 0x513E5E634C70E331, 0x4361C0CA3F692F12,
0xD941ACA44B20A45B, 0x528F7C8602C5807B, 0x52AB92BEB9613989, 0x9D1DFA2EFC557F73,
0x722FF175F572C348, 0x1D1260A51107FE97, 0x7A249A57EC0C9BA2, 0x04208FE9E8F7F2D6,
0x5A110C6058B920A0, 0x0CD9A497658A5698, 0x56FD23C8F9715A4C, 0x284C847B9D887AAE,
0x04FEABFBBDB619CB, 0x742E1E651C60BA83, 0x9A9632E65904AD3C, 0x881B82A13B51B9E2,
0x506E6744CD974924, 0xB0183DB56FFC6A79, 0x0ED9B915C66ED37E, 0x5E11E86D5873D484,
0xF678647E3519AC6E, 0x1B85D488D0F20CC5, 0xDAB9FE6525D89021, 0x0D151D86ADB73615,
0xA865A54EDCC0F019, 0x93C42566AEF98FFB, 0x99E7AFEABE000731, 0x48CBFF086DDF285A,
0x7F9B6AF1EBF78BAF, 0x58627E1A149BBA21, 0x2CD16E2ABD791E33, 0xD363EFF5F0977996,
0x0CE2A38C344A6EED, 0x1A804AADB9CFA741, 0x907F30421D78C5DE, 0x501F65EDB3034D07,
0x37624AE5A48FA6E9, 0x957BAF61700CFF4E, 0x3A6C27934E31188A, 0xD49503536ABCA345,
0x088E049589C432E0, 0xF943AEE7FEBF21B8, 0x6C3B8E3E336139D3, 0x364F6FFA464EE52E,
0xD60F6DCEDC314222, 0x56963B0DCA418FC0, 0x16F50EDF91E513AF, 0xEF1955914B609F93,
0x565601C0364E3228, 0xECB53939887E8175, 0xBAC7A9A18531294B, 0xB344C470397BBA52,
0x65D34954DAF3CEBD, 0xB4B81B3FA97511E2, 0xB422061193D6F6A7, 0x071582401C38434D,
0x7A13F18BBEDC4FF5, 0xBC4097B116C524D2, 0x59B97885E2F2EA28, 0x99170A5DC3115544,
0x6F423357E7C6A9F9, 0x325928EE6E6F8794, 0xD0E4366228B03343, 0x565C31F7DE89EA27,
0x30F5611484119414, 0xD873DB391292ED4F, 0x7BD94E1D8E17DEBC, 0xC7D9F16864A76E94,
0x947AE053EE56E63C, 0xC8C93882F9475F5F, 0x3A9BF55BA91F81CA, 0xD9A11FBB3D9808E4,
0x0FD22063EDC29FCA, 0xB3F256D8ACA0B0B9, 0xB03031A8B4516E84, 0x35DD37D5871448AF,
0xE9F6082B05542E4E, 0xEBFAFA33D7254B59, 0x9255ABB50D532280, 0xB9AB4CE57F2D34F3,
0x693501D628297551, 0xC62C58F97DD949BF, 0xCD454F8F19C5126A, 0xBBE83F4ECC2BDECB,
0xDC842B7E2819E230, 0xBA89142E007503B8, 0xA3BC941D0A5061CB, 0xE9F6760E32CD8021,
0x09C7E552BC76492F, 0x852F54934DA55CC9, 0x8107FCCF064FCF56, 0x098954D51FFF6580,
0x23B70EDB1955C4BF, 0xC330DE426430F69D, 0x4715ED43E8A45C0A, 0xA8D7E4DAB780A08D,
0x0572B974F03CE0BB, 0xB57D2E985E1419C7, 0xE8D9ECBE2CF3D73F, 0x2FE4B17170E59750,
0x11317BA87905E790, 0x7FBF21EC8A1F45EC, 0x1725CABFCB045B00, 0x964E915CD5E2B207,
0x3E2B8BCBF016D66D, 0xBE7444E39328A0AC, 0xF85B2B4FBCDE44B7, 0x49353FEA39BA63B1,
0x1DD01AAFCD53486A, 0x1FCA8A92FD719F85, 0xFC7C95D827357AFA, 0x18A6A990C8B35EBD,
0xCCCB7005C6B9C28D, 0x3BDBB92C43B17F26, 0xAA70B5B4F89695A2, 0xE94C39A54A98307F,
0xB7A0B174CFF6F36E, 0xD4DBA84729AF48AD, 0x2E18BC1AD9704A68, 0x2DE0966DAF2F8B1C,
0xB9C11D5B1E43A07E, 0x64972D68DEE33360, 0x94628D38D0C20584, 0xDBC0D2B6AB90A559,
0xD2733C4335C6A72F, 0x7E75D99D94A70F4D, 0x6CED1983376FA72B, 0x97FCAACBF030BC24,
0x7B77497B32503B12, 0x8547EDDFB81CCB94, 0x79999CDFF70902CB, 0xCFFE1939438E9B24,
0x829626E3892D95D7, 0x92FAE24291F2B3F1, 0x63E22C147B9C3403, 0xC678B6D860284A1C,
0x5873888850659AE7, 0x0981DCD296A8736D, 0x9F65789A6509A440, 0x9FF38FED72E9052F,
0xE479EE5B9930578C, 0xE7F28ECD2D49EECD, 0x56C074A581EA17FE, 0x5544F7D774B14AEF,
0x7B3F0195FC6F290F, 0x12153635B2C0CF57, 0x7F5126DBBA5E0CA7, 0x7A76956C3EAFB413,
0x3D5774A11D31AB39, 0x8A1B083821F40CB4, 0x7B4A38E32537DF62, 0x950113646D1D6E03,
0x4DA8979A0041E8A9, 0x3BC36E078F7515D7, 0x5D0A12F27AD310D1, 0x7F9D1A2E1EBE1327,
0xDA3A361B1C5157B1, 0xDCDD7D20903D0C25, 0x36833336D068F707, 0xCE68341F79893389,
0xAB9090168DD05F34, 0x43954B3252DC25E5, 0xB438C2B67F98E5E9, 0x10DCD78E3851A492,
0xDBC27AB5447822BF, 0x9B3CDB65F82CA382, 0xB67B7896167B4C84, 0xBFCED1B0048EAC50,
0xA9119B60369FFEBD, 0x1FFF7AC80904BF45, 0xAC12FB171817EEE7, 0xAF08DA9177DDA93D,
0x1B0CAB936E65C744, 0xB559EB1D04E5E932, 0xC37B45B3F8D6F2BA, 0xC3A9DC228CAAC9E9,
0xF3B8B6675A6507FF, 0x9FC477DE4ED681DA, 0x67378D8ECCEF96CB, 0x6DD856D94D259236,
0xA319CE15B0B4DB31, 0x073973751F12DD5E, 0x8A8E849EB32781A5, 0xE1925C71285279F5,
0x74C04BF1790C0EFE, 0x4DDA48153C94938A, 0x9D266D6A1CC0542C, 0x7440FB816508C4FE,
0x13328503DF48229F, 0xD6BF7BAEE43CAC40, 0x4838D65F6EF6748F, 0x1E152328F3318DEA,
0x8F8419A348F296BF, 0x72C8834A5957B511, 0xD7A023A73260B45C, 0x94EBC8ABCFB56DAE,
0x9FC10D0F989993E0, 0xDE68A2355B93CAE6, 0xA44CFE79AE538BBE, 0x9D1D84FCCE371425,
0x51D2B1AB2DDFB636, 0x2FD7E4B9E72CD38C, 0x65CA5B96B7552210, 0xDD69A0D8AB3B546D,
0x604D51B25FBF70E2, 0x73AA8A564FB7AC9E, 0x1A8C1E992B941148, 0xAAC40A2703D9BEA0,
0x764DBEAE7FA4F3A6, 0x1E99B96E70A9BE8B, 0x2C5E9DEB57EF4743, 0x3A938FEE32D29981,
0x26E6DB8FFDF5ADFE, 0x469356C504EC9F9D, 0xC8763C5B08D1908C, 0x3F6C6AF859D80055,
0x7F7CC39420A3A545, 0x9BFB227EBDF4C5CE, 0x89039D79D6FC5C5C, 0x8FE88B57305E2AB6,
0xA09E8C8C35AB96DE, 0xFA7E393983325753, 0xD6B6D0ECC617C699, 0xDFEA21EA9E7557E3,
0xB67C1FA481680AF8, 0xCA1E3785A9E724E5, 0x1CFC8BED0D681639, 0xD18D8549D140CAEA,
0x4ED0FE7E9DC91335, 0xE4DBF0634473F5D2, 0x1761F93A44D5AEFE, 0x53898E4C3910DA55,
0x734DE8181F6EC39A, 0x2680B122BAA28D97, 0x298AF231C85BAFAB, 0x7983EED3740847D5,
0x66C1A2A1A60CD889, 0x9E17E49642A3E4C1, 0xEDB454E7BADC0805, 0x50B704CAB602C329,
0x4CC317FB9CDDD023, 0x66B4835D9EAFEA22, 0x219B97E26FFC81BD, 0x261E4E4C0A333A9D,
0x1FE2CCA76517DB90, 0xD7504DFA8816EDBB, 0xB9571FA04DC089C8, 0x1DDC0325259B27DE,
0xCF3F4688801EB9AA, 0xF4F5D05C10CAB243, 0x38B6525C21A42B0E, 0x36F60E2BA4FA6800,
0xEB3593803173E0CE, 0x9C4CD6257C5A3603, 0xAF0C317D32ADAA8A, 0x258E5A80C7204C4B,
0x8B889D624D44885D, 0xF4D14597E660F855, 0xD4347F66EC8941C3, 0xE699ED85B0DFB40D,
0x2472F6207C2D0484, 0xC2A1E7B5B459AEB5, 0xAB4F6451CC1D45EC, 0x63767572AE3D6174,
0xA59E0BD101731A28, 0x116D0016CB948F09, 0x2CF9C8CA052F6E9F, 0x0B090A7560A968E3,
0xABEEDDB2DDE06FF1, 0x58EFC10B06A2068D, 0xC6E57A78FBD986E0, 0x2EAB8CA63CE802D7,
0x14A195640116F336, 0x7C0828DD624EC390, 0xD74BBE77E6116AC7, 0x804456AF10F5FB53,
0xEBE9EA2ADF4321C7, 0x03219A39EE587A30, 0x49787FEF17AF9924, 0xA1E9300CD8520548,
0x5B45E522E4B1B4EF, 0xB49C3B3995091A36, 0xD4490AD526F14431, 0x12A8F216AF9418C2,
0x001F837CC7350524, 0x1877B51E57A764D5, 0xA2853B80F17F58EE, 0x993E1DE72D36D310,
0xB3598080CE64A656, 0x252F59CF0D9F04BB, 0xD23C8E176D113600, 0x1BDA0492E7E4586E,
0x21E0BD5026C619BF, 0x3B097ADAF088F94E, 0x8D14DEDB30BE846E, 0xF95CFFA23AF5F6F4,
0x3871700761B3F743, 0xCA672B91E9E4FA16, 0x64C8E531BFF53B55, 0x241260ED4AD1E87D,
0x106C09B972D2E822, 0x7FBA195410E5CA30, 0x7884D9BC6CB569D8, 0x0647DFEDCD894A29,
0x63573FF03E224774, 0x4FC8E9560F91B123, 0x1DB956E450275779, 0xB8D91274B9E9D4FB,
0xA2EBEE47E2FBFCE1, 0xD9F1F30CCD97FB09, 0xEFED53D75FD64E6B, 0x2E6D02C36017F67F,
0xA9AA4D20DB084E9B, 0xB64BE8D8B25396C1, 0x70CB6AF7C2D5BCF0, 0x98F076A4F7A2322E,
0xBF84470805E69B5F, 0x94C3251F06F90CF3, 0x3E003E616A6591E9, 0xB925A6CD0421AFF3,
0x61BDD1307C66E300, 0xBF8D5108E27E0D48, 0x240AB57A8B888B20, 0xFC87614BAF287E07,
0xEF02CDD06FFDB432, 0xA1082C0466DF6C0A, 0x8215E577001332C8, 0xD39BB9C3A48DB6CF,
0x2738259634305C14, 0x61CF4F94C97DF93D, 0x1B6BACA2AE4E125B, 0x758F450C88572E0B,
0x959F587D507A8359, 0xB063E962E045F54D, 0x60E8ED72C0DFF5D1, 0x7B64978555326F9F,
0xFD080D236DA814BA, 0x8C90FD9B083F4558, 0x106F72FE81E2C590, 0x7976033A39F7D952,
0xA4EC0132764CA04B, 0x733EA705FAE4FA77, 0xB4D8F77BC3E56167, 0x9E21F4F903B33FD9,
0x9D765E419FB69F6D, 0xD30C088BA61EA5EF, 0x5D94337FBFAF7F5B, 0x1A4E4822EB4D7A59,
0x6FFE73E81B637FB3, 0xDDF957BC36D8B9CA, 0x64D0E29EEA8838B3, 0x08DD9BDFD96B9F63,
0x087E79E5A57D1D13, 0xE328E230E3E2B3FB, 0x1C2559E30F0946BE, 0x720BF5F26F4D2EAA,
0xB0774D261CC609DB, 0x443F64EC5A371195, 0x4112CF68649A260E, 0xD813F2FAB7F5C5CA,
0x660D3257380841EE, 0x59AC2C7873F910A3, 0xE846963877671A17, 0x93B633ABFA3469F8,
0xC0C0F5A60EF4CDCF, 0xCAF21ECD4377B28C, 0x57277707199B8175, 0x506C11B9D90E8B1D,
0xD83CC2687A19255F, 0x4A29C6465A314CD1, 0xED2DF21216235097, 0xB5635C95FF7296E2,
0x22AF003AB672E811, 0x52E762596BF68235, 0x9AEBA33AC6ECC6B0, 0x944F6DE09134DFB6,
0x6C47BEC883A7DE39, 0x6AD047C430A12104, 0xA5B1CFDBA0AB4067, 0x7C45D833AFF07862,
0x5092EF950A16DA0B, 0x9338E69C052B8E7B, 0x455A4B4CFE30E3F5, 0x6B02E63195AD0CF8,
0x6B17B224BAD6BF27, 0xD1E0CCD25BB9C169, 0xDE0C89A556B9AE70, 0x50065E535A213CF6,
0x9C1169FA2777B874, 0x78EDEFD694AF1EED, 0x6DC93D9526A50E68, 0xEE97F453F06791ED,
0x32AB0EDB696703D3, 0x3A6853C7E70757A7, 0x31865CED6120F37D, 0x67FEF95D92607890,
0x1F2B1D1F15F6DC9C, 0xB69E38A8965C6B65, 0xAA9119FF184CCCF4, 0xF43C732873F24C13,
0xFB4A3D794A9A80D2, 0x3550C2321FD6109C, 0x371F77E76BB8417E, 0x6BFA9AAE5EC05779,
0xCD04F3FF001A4778, 0xE3273522064480CA, 0x9F91508BFFCFC14A, 0x049A7F41061A9E60,
0xFCB6BE43A9F2FE9B, 0x08DE8A1C7797DA9B, 0x8F9887E6078735A1, 0xB5B4071DBFC73A66,
0x230E343DFBA08D33, 0x43ED7F5A0FAE657D, 0x3A88A0FBBCB05C63, 0x21874B8B4D2DBC4F,
0x1BDEA12E35F6A8C9, 0x53C065C6C8E63528, 0xE34A1D250E7A8D6B, 0xD6B04D3B7651DD7E,
0x5E90277E7CB39E2D, 0x2C046F22062DC67D, 0xB10BB459132D0A26, 0x3FA9DDFB67E2F199,
0x0E09B88E1914F7AF, 0x10E8B35AF3EEAB37, 0x9EEDECA8E272B933, 0xD4C718BC4AE8AE5F,
0x81536D601170FC20, 0x91B534F885818A06, 0xEC8177F83F900978, 0x190E714FADA5156E,
0xB592BF39B0364963, 0x89C350C893AE7DC1, 0xAC042E70F8B383F2, 0xB49B52E587A1EE60,
0xFB152FE3FF26DA89, 0x3E666E6F69AE2C15, 0x3B544EBE544C19F9, 0xE805A1E290CF2456,
0x24B33C9D7ED25117, 0xE74733427B72F0C1, 0x0A804D18B7097475, 0x57E3306D881EDB4F,
0x4AE7D6A36EB5DBCB, 0x2D8D5432157064C8, 0xD1E649DE1E7F268B, 0x8A328A1CEDFE552C,
0x07A3AEC79624C7DA, 0x84547DDC3E203C94, 0x990A98FD5071D263, 0x1A4FF12616EEFC89,
0xF6F7FD1431714200, 0x30C05B1BA332F41C, 0x8D2636B81555A786, 0x46C9FEB55D120902,
0xCCEC0A73B49C9921, 0x4E9D2827355FC492, 0x19EBB029435DCB0F, 0x4659D2B743848A2C,
0x963EF2C96B33BE31, 0x74F85198B05A2E7D, 0x5A0F544DD2B1FB18, 0x03727073C2E134B1,
0xC7F6AA2DE59AEA61, 0x352787BAA0D7C22F, 0x9853EAB63B5E0B35, 0xABBDCDD7ED5C0860,
0xCF05DAF5AC8D77B0, 0x49CAD48CEBF4A71E, 0x7A4C10EC2158C4A6, 0xD9E92AA246BF719E,
0x13AE978D09FE5557, 0x730499AF921549FF, 0x4E4B705B92903BA4, 0xFF577222C14F0A3A,
0x55B6344CF97AAFAE, 0xB862225B055B6960, 0xCAC09AFBDDD2CDB4, 0xDAF8E9829FE96B5F,
0xB5FDFC5D3132C498, 0x310CB380DB6F7503, 0xE87FBB46217A360E, 0x2102AE466EBB1148,
0xF8549E1A3AA5E00D, 0x07A69AFDCC42261A, 0xC4C118BFE78FEAAE, 0xF9F4892ED96BD438,
0x1AF3DBE25D8F45DA, 0xF5B4B0B0D2DEEEB4, 0x962ACEEFA82E1C84, 0x046E3ECAAF453CE9,
0xF05D129681949A4C, 0x964781CE734B3C84, 0x9C2ED44081CE5FBD, 0x522E23F3925E319E,
0x177E00F9FC32F791, 0x2BC60A63A6F3B3F2, 0x222BBFAE61725606, 0x486289DDCC3D6780,
0x7DC7785B8EFDFC80, 0x8AF38731C02BA980, 0x1FAB64EA29A2DDF7, 0xE4D9429322CD065A,
0x9DA058C67844F20C, 0x24C0E332B70019B0, 0x233003B5A6CFE6AD, 0xD586BD01C5C217F6,
0x5E5637885F29BC2B, 0x7EBA726D8C94094B, 0x0A56A5F0BFE39272, 0xD79476A84EE20D06,
0x9E4C1269BAA4BF37, 0x17EFEE45B0DEE640, 0x1D95B0A5FCF90BC6, 0x93CBE0B699C2585D,
0x65FA4F227A2B6D79, 0xD5F9E858292504D5, 0xC2B5A03F71471A6F, 0x59300222B4561E00,
0xCE2F8642CA0712DC, 0x7CA9723FBB2E8988, 0x2785338347F2BA08, 0xC61BB3A141E50E8C,
0x150F361DAB9DEC26, 0x9F6A419D382595F4, 0x64A53DC924FE7AC9, 0x142DE49FFF7A7C3D,
0x0C335248857FA9E7, 0x0A9C32D5EAE45305, 0xE6C42178C4BBB92E, 0x71F1CE2490D20B07,
0xF1BCC3D275AFE51A, 0xE728E8C83C334074, 0x96FBF83A12884624, 0x81A1549FD6573DA5,
0x5FA7867CAF35E149, 0x56986E2EF3ED091B, 0x917F1DD5F8886C61, 0xD20D8C88C8FFE65F,
0x31D71DCE64B2C310, 0xF165B587DF898190, 0xA57E6339DD2CF3A0, 0x1EF6E6DBB1961EC9,
0x70CC73D90BC26E24, 0xE21A6B35DF0C3AD7, 0x003A93D8B2806962, 0x1C99DED33CB890A1,
0xCF3145DE0ADD4289, 0xD0E4427A5514FB72, 0x77C621CC9FB3A483, 0x67A34DAC4356550B,
0xF8D626AAAF278509,
}
package maps
import (
"constraints"
"github.com/dairaga/gs"
"github.com/dairaga/gs/funcs"
"github.com/dairaga/gs/slices"
)
// M is a generic map with comparable key type K and arbitrary value type V.
type M[K comparable, V any] map[K]V
// Pair is a two-dimensional tuple containing a key and its value in a map.
type Pair[K comparable, V any] struct {
	// Zero-size field; because it makes Pair a three-field struct, unkeyed
	// composite literals such as Pair{k, v} do not compile — construct
	// pairs via P or with keyed literals.
	_     struct{}
	Key   K // the map key
	Value V // the value stored under Key
}
// P constructs a Pair holding the given key k and value v.
func P[K comparable, V any](k K, v V) Pair[K, V] {
	return Pair[K, V]{Key: k, Value: v}
}
// From builds a map from the given pairs. When several pairs share the same
// key, the last one wins.
func From[K comparable, V any](a ...Pair[K, V]) (ret M[K, V]) {
	ret = make(M[K, V], len(a))
	for _, p := range a {
		ret[p.Key] = p.Value
	}
	return ret
}
// Zip combines the two given slices into a map, pairing elements by index.
// Extra elements of the longer slice are ignored; duplicate keys in a keep
// only the value paired with their last occurrence.
func Zip[K comparable, V any](a slices.S[K], b slices.S[V]) M[K, V] {
	n := funcs.Min(len(a), len(b))
	zipped := make(M[K, V], n)
	for i, key := range a[:n] {
		zipped[key] = b[i]
	}
	return zipped
}
// -----------------------------------------------------------------------------
// TODO: refactor following functions to methods when go 1.19 releases.
// Fold threads an accumulator of type U through every element of m, starting
// from z and applying op once per key/value pair. Map iteration order is
// random, so op should be order-insensitive for deterministic results.
func Fold[K comparable, V, U any](m M[K, V], z U, op func(U, K, V) U) U {
	acc := z
	for key, val := range m {
		acc = op(acc, key, val)
	}
	return acc
}
// Collect returns a new slice holding p's result for every element of m on
// which p is defined (i.e. returns true). Because map iteration order is
// random, the element order may differ between runs.
func Collect[K comparable, V, T any](m M[K, V], p func(K, V) (T, bool)) slices.S[T] {
	out := slices.Empty[T]()
	for key, val := range m {
		if mapped, ok := p(key, val); ok {
			out = append(out, mapped)
		}
	}
	return out
}
// CollectMap returns a new map holding p's key/value result for every
// element of m on which p is defined (i.e. returns true).
func CollectMap[K1, K2 comparable, V1, V2 any](m M[K1, V1], p func(K1, V1) (K2, V2, bool)) M[K2, V2] {
	out := make(M[K2, V2])
	for key, val := range m {
		if k2, v2, ok := p(key, val); ok {
			out[k2] = v2
		}
	}
	return out
}
// FlatMapSlice applies op to every element of m and concatenates the
// resulting slices into one. Element order depends on the random map
// iteration order.
func FlatMapSlice[K comparable, V any, T any](m M[K, V], op func(K, V) slices.S[T]) slices.S[T] {
	out := slices.Empty[T]()
	for key, val := range m {
		out = append(out, op(key, val)...)
	}
	return out
}
// FlatMap applies op to every element of m and merges the resulting maps
// into one via M.Merge.
func FlatMap[K1, K2 comparable, V1, V2 any](m M[K1, V1], op func(K1, V1) M[K2, V2]) M[K2, V2] {
	out := make(M[K2, V2])
	for key, val := range m {
		out = out.Merge(op(key, val))
	}
	return out
}
// MapSlice returns a new slice holding op's result for every element of m.
// Element order depends on the random map iteration order.
func MapSlice[K comparable, V, T any](m M[K, V], op func(K, V) T) slices.S[T] {
	out := make(slices.S[T], 0, len(m))
	for key, val := range m {
		out = append(out, op(key, val))
	}
	return out
}
// Map returns a new map holding op's key/value result for every element of
// m, inserting each result via M.Put.
func Map[K1, K2 comparable, V1, V2 any](m M[K1, V1], op func(K1, V1) (K2, V2)) M[K2, V2] {
	out := make(M[K2, V2])
	for key, val := range m {
		out = out.Put(op(key, val))
	}
	return out
}
// GroupMap partitions m into groups keyed by the discriminator function key;
// each element is transformed into a V2 via val before being appended to its
// group. The order within a group depends on the random map iteration order.
func GroupMap[K1, K2 comparable, V1, V2 any](m M[K1, V1], key func(K1, V1) K2, val func(K1, V1) V2) M[K2, slices.S[V2]] {
	groups := make(M[K2, slices.S[V2]])
	for k, v := range m {
		g := key(k, v)
		groups[g] = append(groups[g], val(k, v))
	}
	return groups
}
// GroupBy partitions m into a map of sub-maps keyed by the discriminator
// function key; each sub-map keeps the original key/value pairs of its group.
func GroupBy[K, K1 comparable, V any](m M[K, V], key func(K, V) K1) M[K1, M[K, V]] {
	groups := make(M[K1, M[K, V]])
	for k, v := range m {
		g := key(k, v)
		if groups[g] == nil {
			groups[g] = make(M[K, V])
		}
		groups[g][k] = v
	}
	return groups
}
// GroupMapReduce partitions m by the discriminator function key, transforms
// each element via val, and then reduces every group to a single value with
// op. Reduction order within a group follows the random map iteration order,
// so op should be associative/commutative for deterministic results.
func GroupMapReduce[K1, K2 comparable, V1, V2 any](m M[K1, V1], key func(K1, V1) K2, val func(K1, V1) V2, op func(V2, V2) V2) M[K2, V2] {
	reduced := make(M[K2, V2])
	for k2, group := range GroupMap(m, key, val) {
		reduced[k2] = group.Reduce(op).Get()
	}
	return reduced
}
// PartitionMap applies op to each element of m and splits the results into
// two maps: keys whose result is a Left go into the first returned map (with
// the Left value), keys whose result is a Right go into the second (with the
// Right value).
func PartitionMap[K comparable, V, A, B any](m M[K, V], op func(K, V) gs.Either[A, B]) (M[K, A], M[K, B]) {
	lefts := make(M[K, A])
	rights := make(M[K, B])
	for k, v := range m {
		e := op(k, v)
		if e.IsRight() {
			rights[k] = e.Right()
		} else {
			lefts[k] = e.Left()
		}
	}
	return lefts, rights
}
// MaxBy returns a pair of key and value that maximizes the ordering
// function op, delegating to slices.MaxBy over the map's pair slice.
func MaxBy[K comparable, V any, B constraints.Ordered](m M[K, V], op func(K, V) B) gs.Option[Pair[K, V]] {
	rank := func(p Pair[K, V]) B {
		return op(p.Key, p.Value)
	}
	return slices.MaxBy(m.Slice(), rank)
}
// MinBy returns a minimum pair of key and value according to result of ordering function op.
func MinBy[K comparable, V any, B constraints.Ordered](m M[K, V], op func(K, V) B) gs.Option[Pair[K, V]] {
return slices.MinBy(
m.Slice(),
func(pair Pair[K, V]) B {
return op(pair.Key, pair.Value)
},
)
} | maps/maps.go | 0.648132 | 0.594669 | maps.go | starcoder |
package main
import (
"math"
"math/rand"
)
// ReflectionDir returns the direction of incident mirrored about
// surfaceNormal: I - 2(I·N)N. The formula assumes surfaceNormal is unit
// length — TODO confirm with callers.
func ReflectionDir(incident, surfaceNormal V3) V3 {
	IdotN := incident.Dot(surfaceNormal)
	return incident.Sub(surfaceNormal.Mul(2 * IdotN))
}
// RefractionDir returns the direction of incident after refraction at a
// surface with normal surfaceNormal, passing from a medium with refractive
// index eta1 into one with index eta2 (vector form of Snell's law).
//
// NOTE(review): the sign conventions here are muddled (see TODO below): when
// c > 0 the normal is flipped but c itself is not negated, and the sqrt
// argument goes negative beyond the critical angle, yielding NaN — confirm
// that callers rule out total internal reflection before calling.
func RefractionDir(incident, surfaceNormal V3, eta1, eta2 Float) V3 {
	// TODO: get this crap about incident directions figured out once
	// and for all
	// This works.
	// wikipedia assumes incident ray is from cam to point,
	// PBR assumes incident ray is from point to cam.
	r := eta1 / eta2 // ratio of refractive indices
	c := incident.Dot(surfaceNormal)
	if c > 0 {
		// in-out: the ray meets the surface from inside, so flip the normal.
		surfaceNormal = surfaceNormal.Mul(-1)
	}
	// Snell's law, vector form: t = -r*I + (r*c - sqrt(1 - r²(1 - c²)))*N.
	rightside := r*c - Float(math.Sqrt(float64(1-(r*r)*(1-c*c))))
	return incident.Mul(-r).Add(surfaceNormal.Mul(rightside))
}
// Schlick returns the reflectance coefficient R using Schlick's
// approximation of the Fresnel terms: R = R0 + (1-R0)(1-|cosθ|)^5 with
// R0 = ((η1-η2)/(η1+η2))². Beyond the critical angle (only possible when
// η1 > η2) the surface is totally reflective and R is 1.
func Schlick(incident, surfaceNormal V3, eta1, eta2 Float) Float {
	cosTheta := float64(incident.Dot(surfaceNormal))
	if eta1 > eta2 && math.Acos(cosTheta) >= math.Asin(eta2/eta1) {
		// At or beyond the critical angle for total internal reflection.
		return 1
	}
	r0 := Float(math.Pow(float64((eta1-eta2)/(eta1+eta2)), 2))
	costerm := 1 - Float(math.Abs(cosTheta)) // (1 - (-I dot N))
	// Five explicit multiplications avoid another math.Pow round trip.
	return r0 + (1-r0)*(costerm*costerm*costerm*costerm*costerm)
}
// randHemi produces a uniform random direction on the hemisphere around
// normal: it samples a height z uniformly in [0,1) and a uniform azimuth
// phi, then maps the local (x, y, z) sample through an orthonormal basis
// built around normal.
// NOTE: createBasis() may not be robust.
func randHemi(normal V3, rng *rand.Rand) V3 {
	b := createBasis(normal)
	z := rng.Float64()
	r := math.Sqrt(1.0 - z*z) // radius of the circle at height z
	phi := rng.Float64() * 2 * math.Pi
	y, x := math.Sincos(phi)
	x *= r
	y *= r
	// Transform the local sample into world space: x*b0 + y*b1 + z*b2.
	return b[0].Mul(x).Add(b[1].Mul(y).Add(b[2].Mul(z)))
}
// randSphere produces a uniform random direction on the unit sphere, by
// sampling a height z uniformly in [-1,1) and a uniform azimuth phi.
func randSphere(rng *rand.Rand) V3 {
	z := rng.Float64()*2 - 1 // uniform height in [-1,1)
	radius := math.Sqrt(1.0 - z*z)
	phi := rng.Float64() * 2 * math.Pi
	sinPhi, cosPhi := math.Sincos(phi)
	return V3{Float(cosPhi * radius), Float(sinPhi * radius), Float(z)}
}
// RandomBounceHemisphere returns a random direction in the hemisphere
// around normal, by normalizing a sample from the [-1,1)³ cube and flipping
// it into the hemisphere when it points the wrong way.
// NOTE(review): this distribution is not uniform over the hemisphere
// (directions toward the cube corners are over-represented); see randHemi
// for a uniform sampler.
//
// Deprecated: use randHemi instead.
func RandomBounceHemisphere(normal V3, rng *rand.Rand) V3 {
	rval := V3{
		Float(rng.Float64())*2 - 1,
		Float(rng.Float64())*2 - 1,
		Float(rng.Float64())*2 - 1}.Normalize()
	if normal.Dot(rval) < 0 {
		rval = rval.Mul(-1)
	}
	return rval
}
// RandomBounceSphere gets a
// DEPRECATED
func RandomBounceSphere(rng *rand.Rand) V3 {
return V3{
Float(rng.Float64())*2 - 1,
Float(rng.Float64())*2 - 1,
Float(rng.Float64())*2 - 1}.Normalize()
} | reflect.go | 0.636014 | 0.536374 | reflect.go | starcoder |
package joejson
import (
"encoding/json"
"fmt"
)
// TypeFeature is the value for a Feature's 'type' member.
const TypeFeature string = "Feature"
// Feature represents a spatially bounded 'thing'.
type Feature struct {
	// ID is an optional Feature identifier ('id'). Marshalling accepts only
	// string or numeric values (or nil); anything else is rejected.
	ID any
	// Properties is an optional JSON object ('properties').
	Properties map[string]any
	// Bbox optionally includes information on the coordinate range for the Feature Geometry.
	Bbox []Position
	// geometry is an unexported field representing one of
	// (Point|LineString|MultiPoint|MultiLineString|Polygon|MultiPolygon|GeometryCollection).
	// Set it through the WithXxx methods and read it through the AsXxx methods.
	geometry any
}
// GeometryType reports the GeoJSON type name of the Feature's geometry, or
// the empty string when no (or an unrecognized) geometry is set.
func (f Feature) GeometryType() string {
	t := ""
	switch f.geometry.(type) {
	case Point:
		t = GeometryTypePoint
	case MultiPoint:
		t = GeometryTypeMultiPoint
	case LineString:
		t = GeometryTypeLineString
	case MultiLineString:
		t = GeometryTypeMultiLineString
	case Polygon:
		t = GeometryTypePolygon
	case MultiPolygon:
		t = GeometryTypeMultiPolygon
	case GeometryCollection:
		t = GeometryTypeGeometryCollection
	}
	return t
}
// WithPoint sets the Feature's Geometry to the provided Point. The receiver
// is a value, so the original Feature is unchanged; the updated copy is
// returned.
func (f Feature) WithPoint(g Point) Feature {
	f.geometry = g
	return f
}

// AsPoint casts the Feature's Geometry to a Point. The boolean reports
// whether the geometry actually holds a Point.
func (f Feature) AsPoint() (Point, bool) {
	p, ok := f.geometry.(Point)
	return p, ok
}
// WithMultiPoint sets the Feature's Geometry to the provided MultiPoint.
// The receiver is a value, so the original Feature is unchanged; the updated
// copy is returned.
func (f Feature) WithMultiPoint(g MultiPoint) Feature {
	f.geometry = g
	return f
}

// AsMultiPoint casts the Feature's Geometry to a MultiPoint. The boolean
// reports whether the geometry actually holds a MultiPoint.
func (f Feature) AsMultiPoint() (MultiPoint, bool) {
	p, ok := f.geometry.(MultiPoint)
	return p, ok
}
// WithLineString sets the Feature's Geometry to the provided LineString.
// The receiver is a value, so the original Feature is unchanged; the updated
// copy is returned.
func (f Feature) WithLineString(g LineString) Feature {
	f.geometry = g
	return f
}

// AsLineString casts the Feature's Geometry to a LineString. The boolean
// reports whether the geometry actually holds a LineString.
func (f Feature) AsLineString() (LineString, bool) {
	p, ok := f.geometry.(LineString)
	return p, ok
}
// WithMultiLineString sets the Feature's Geometry to the provided
// MultiLineString. The receiver is a value, so the original Feature is
// unchanged; the updated copy is returned.
func (f Feature) WithMultiLineString(g MultiLineString) Feature {
	f.geometry = g
	return f
}

// AsMultiLineString casts the Feature's Geometry to a MultiLineString. The
// boolean reports whether the geometry actually holds a MultiLineString.
func (f Feature) AsMultiLineString() (MultiLineString, bool) {
	p, ok := f.geometry.(MultiLineString)
	return p, ok
}
// WithPolygon sets the Feature's Geometry to the provided Polygon. The
// receiver is a value, so the original Feature is unchanged; the updated
// copy is returned.
func (f Feature) WithPolygon(g Polygon) Feature {
	f.geometry = g
	return f
}

// AsPolygon casts the Feature's Geometry to a Polygon. The boolean reports
// whether the geometry actually holds a Polygon.
func (f Feature) AsPolygon() (Polygon, bool) {
	p, ok := f.geometry.(Polygon)
	return p, ok
}
// WithMultiPolygon sets the Feature's Geometry to the provided MultiPolygon.
// The receiver is a value, so the original Feature is unchanged; the updated
// copy is returned.
func (f Feature) WithMultiPolygon(g MultiPolygon) Feature {
	f.geometry = g
	return f
}

// AsMultiPolygon casts the Feature's Geometry to a MultiPolygon. The boolean
// reports whether the geometry actually holds a MultiPolygon.
func (f Feature) AsMultiPolygon() (MultiPolygon, bool) {
	p, ok := f.geometry.(MultiPolygon)
	return p, ok
}
// WithGeometryCollection sets the Feature's Geometry to the provided
// GeometryCollection. The receiver is a value, so the original Feature is
// unchanged; the updated copy is returned.
func (f Feature) WithGeometryCollection(g GeometryCollection) Feature {
	f.geometry = g
	return f
}

// AsGeometryCollection casts the Feature's Geometry to a GeometryCollection.
// The boolean reports whether the geometry actually holds one.
func (f Feature) AsGeometryCollection() (GeometryCollection, bool) {
	p, ok := f.geometry.(GeometryCollection)
	return p, ok
}
// MarshalJSON is a custom JSON marshaller. It rejects IDs that are not
// strings, numbers, or nil, then encodes the Feature as a GeoJSON Feature
// object; a nil geometry is encoded as JSON null.
func (f Feature) MarshalJSON() ([]byte, error) {
	switch f.ID.(type) {
	case string, uint, uint8, uint16, uint32, uint64, int, int8, int16, int32, int64, float32, float64, nil:
	default:
		return nil, fmt.Errorf(`invalid type "%T" for id, expected string or numeric`, f.ID)
	}
	// The anonymous struct exposes the unexported geometry field to the
	// encoder and pins the constant "type" member to TypeFeature.
	return json.Marshal(&struct {
		ID         any            `json:"id,omitempty"`
		Type       string         `json:"type"`
		Geometry   any            `json:"geometry"`
		Properties map[string]any `json:"properties,omitempty"`
		BBox       []Position     `json:"bbox,omitempty"`
	}{
		f.ID,
		TypeFeature,
		f.geometry,
		f.Properties,
		f.Bbox,
	})
}
// UnmarshalJSON is a custom JSON unmarshaller. It accepts only string,
// numeric (decoded as float64), or null IDs, and tolerates a missing or
// null geometry member as RFC 7946 allows for unlocated Features —
// MarshalJSON emits "geometry": null for a nil geometry, so this also keeps
// the round trip intact.
func (f *Feature) UnmarshalJSON(b []byte) error {
	var tmp struct {
		ID         any             `json:"id"`
		Geometry   json.RawMessage `json:"geometry"`
		Properties map[string]any  `json:"properties"`
		Bbox       []Position      `json:"bbox"`
	}
	if err := json.Unmarshal(b, &tmp); err != nil {
		return err
	}
	switch tmp.ID.(type) {
	case string, float64, nil:
	default:
		return fmt.Errorf(`invalid type "%T" for id, expected string or numeric`, tmp.ID)
	}
	f.ID = tmp.ID
	f.Properties = tmp.Properties
	f.Bbox = tmp.Bbox
	// RFC 7946 §3.2: an unlocated Feature has a JSON null geometry member.
	if len(tmp.Geometry) == 0 || string(tmp.Geometry) == "null" {
		f.geometry = nil
		return nil
	}
	var err error
	f.geometry, err = unmarshalGeometry(tmp.Geometry)
	return err
}
func unmarshalGeometry(bs []byte) (any, error) {
var tmp struct {
Type string `json:"type"`
}
if err := json.Unmarshal(bs, &tmp); err != nil {
return nil, err
}
switch tmp.Type {
case GeometryTypePoint:
var g Point
if err := json.Unmarshal(bs, &g); err != nil {
return nil, err
}
return g, nil
case GeometryTypeMultiPoint:
var g MultiPoint
if err := json.Unmarshal(bs, &g); err != nil {
return nil, err
}
return g, nil
case GeometryTypeLineString:
var g LineString
if err := json.Unmarshal(bs, &g); err != nil {
return nil, err
}
return g, nil
case GeometryTypeMultiLineString:
var g MultiLineString
if err := json.Unmarshal(bs, &g); err != nil {
return nil, err
}
return g, nil
case GeometryTypePolygon:
var g Polygon
if err := json.Unmarshal(bs, &g); err != nil {
return nil, err
}
return g, nil
case GeometryTypeMultiPolygon:
var g MultiPolygon
if err := json.Unmarshal(bs, &g); err != nil {
return nil, err
}
return g, nil
case GeometryTypeGeometryCollection:
var g GeometryCollection
if err := json.Unmarshal(bs, &g); err != nil {
return nil, err
}
return g, nil
default:
return nil, fmt.Errorf("unknown geometry type: %q", tmp.Type)
}
} | feature.go | 0.749362 | 0.582402 | feature.go | starcoder |
package main
import (
"github.com/otyg/threagile/model"
"github.com/otyg/threagile/model/confidentiality"
)
// insecureHandlingOfSensitiveData is the receiver type for this risk rule;
// the underlying string carries no state.
type insecureHandlingOfSensitiveData string

// RiskRule is the package-level instance of this rule — presumably looked up
// by name by the rule loader; confirm against the plugin mechanism.
var RiskRule insecureHandlingOfSensitiveData
// Category returns this rule's static risk-category metadata: identifier,
// descriptions, STRIDE classification (information disclosure), mitigation
// guidance, references (ASVS, cheat sheet), and CWE 200.
func (r insecureHandlingOfSensitiveData) Category() model.RiskCategory {
	return model.RiskCategory{
		Id:          "insecure-handling-of-sensitive-data",
		Title:       "Insecure Handling of Sensitive Data",
		Description: "Sensitive data must be handled with care to avoid exposure. The processes handling the data must be sufficiently protected, this is especially important on assets storing sensitive data but even assets which only stores the data in memory must be protected.",
		Impact:      "Sensitive data might be exposed if stored or processed by a component not sufficiently protected",
		ASVS:        "[v4.0.2-V8 - Data Protection Verification Requirements](https://github.com/OWASP/ASVS/blob/v4.0.3_release/4.0/en/0x16-V8-Data-Protection.md)",
		CheatSheet:  "[Proactive Controls #8: protect-data-everywhere](https://cheatsheetseries.owasp.org/IndexProactiveControls.html#8-protect-data-everywhere)",
		Action:      "Data protection",
		Mitigation:  "Ensure all components has a confidentiality rating matching the data stored or processed",
		Check:       "Referenced ASVS chapters, cheat sheet and CWE",
		Function:    model.Architecture,
		STRIDE:      model.InformationDisclosure,
		DetectionLogic: "Data assets confidentiality rating is checked against the confidentiality rating of each technical asset storing or processing the data asset.",
		RiskAssessment: "Impact is based on the classification of the data asset, likelihood and breach probability is based on classification of the technical asset and if the data is stored or processed",
		FalsePositives: "Technical assets processing the data can be classed as false positives after individual review if the data is transient. Typical examples are reverse proxies and other network elements.",
		ModelFailurePossibleReason: true,
		CWE:                        200,
	}
}
func (r insecureHandlingOfSensitiveData) SupportedTags() []string {
return []string{"PII"}
}
// GenerateRisks inspects every in-scope technical asset and emits a risk for
// each data asset whose confidentiality classification exceeds that of the
// asset storing or processing it. Stored data uses the base rating; merely
// processed (transient) data is rated one notch lower.
func (r insecureHandlingOfSensitiveData) GenerateRisks() []model.Risk {
	risks := make([]model.Risk, 0)
	for _, technicalAsset := range model.SortedTechnicalAssetsByTitle() {
		// Strictly confidential assets already carry the highest rating, and
		// out-of-scope assets are never reported on.
		if technicalAsset.Confidentiality == confidentiality.StrictlyConfidential || technicalAsset.OutOfScope {
			continue
		}
		// Base likelihood/probability derive from the technical asset's own
		// confidentiality rating: the weaker the rating, the likelier a leak.
		var baseLikelihood model.RiskExploitationLikelihood
		var baseProbability model.DataBreachProbability
		switch technicalAsset.Confidentiality {
		case confidentiality.Confidential:
			baseLikelihood = model.Unlikely
			baseProbability = model.Improbable
		case confidentiality.Restricted:
			baseLikelihood = model.Likely
			baseProbability = model.Possible
		case confidentiality.Internal:
			baseLikelihood = model.VeryLikely
			baseProbability = model.Possible
		default:
			baseLikelihood = model.Frequent
			baseProbability = model.Probable
		}
		// Track stored data assets so the processed loop does not duplicate them.
		storedDataAssetsAtRisk := make(map[string]bool)
		for _, dataAsset := range technicalAsset.DataAssetsStoredSorted() {
			if technicalAsset.Confidentiality < dataAsset.Confidentiality {
				storedDataAssetsAtRisk[dataAsset.Id] = true
				risks = append(risks, createRisk(dataAsset.Confidentiality, technicalAsset, impactForConfidentiality(dataAsset.Confidentiality), baseLikelihood, dataAsset.Id, baseProbability))
			}
		}
		for _, dataAsset := range technicalAsset.DataAssetsProcessedSorted() {
			if storedDataAssetsAtRisk[dataAsset.Id] || technicalAsset.Confidentiality >= dataAsset.Confidentiality {
				continue
			}
			// Processed-only data is transient, so the risk is one notch lower
			// than for stored data. BUGFIX: the original decremented the shared
			// likelihood/probability variables inside this loop, so every
			// additional processed data asset lowered the rating yet again.
			likelihood := baseLikelihood
			if likelihood > model.Unlikely {
				likelihood = likelihood - 1
			}
			probability := baseProbability
			if probability > model.Improbable {
				probability = probability - 1
			}
			risks = append(risks, createRisk(dataAsset.Confidentiality, technicalAsset, impactForConfidentiality(dataAsset.Confidentiality), likelihood, dataAsset.Id, probability))
		}
	}
	return risks
}

// impactForConfidentiality maps a data asset's confidentiality classification
// to the exploitation impact used for the generated risk. Unlisted (e.g.
// public) classifications fall through to the zero-value impact, matching the
// original switch without a default case.
func impactForConfidentiality(c confidentiality.Confidentiality) model.RiskExploitationImpact {
	var impact model.RiskExploitationImpact
	switch c {
	case confidentiality.Internal:
		impact = model.LowImpact
	case confidentiality.Restricted:
		impact = model.MediumImpact
	case confidentiality.Confidential:
		impact = model.HighImpact
	case confidentiality.StrictlyConfidential:
		impact = model.VeryHighImpact
	}
	return impact
}
func createRisk(class confidentiality.Confidentiality, technicalAsset model.TechnicalAsset, impact model.RiskExploitationImpact, probability model.RiskExploitationLikelihood, mostCriticalDataId string, dataProbability model.DataBreachProbability) model.Risk {
title := "<b>Potential insecure handling of " + class.String() + " data</b> at <b>" + technicalAsset.Title + "</b>"
risk := model.Risk{
Category: RiskRule.Category(),
Severity: model.CalculateSeverity(probability, impact),
ExploitationLikelihood: probability,
ExploitationImpact: impact,
Title: title,
MostRelevantTechnicalAssetId: technicalAsset.Id,
MostRelevantDataAssetId: mostCriticalDataId,
DataBreachProbability: dataProbability,
DataBreachTechnicalAssetIDs: []string{technicalAsset.Id},
}
risk.SyntheticId = risk.Category.Id + "@" + mostCriticalDataId + "@" + technicalAsset.Id
return risk
} | risks/insecure-handling-of-sensitive-data/insecure-handling-of-sensitive-data.go | 0.763396 | 0.578418 | insecure-handling-of-sensitive-data.go | starcoder |
package neighbors
import (
"fmt"
"runtime"
"sort"
"github.com/pa-m/sklearn/base"
"github.com/pa-m/sklearn/metrics"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat"
)
// KNeighborsRegressor is a Regression based on k-nearest neighbors.
// The target is predicted by local interpolation of the targets
// associated of the nearest neighbors in the training set.
type KNeighborsRegressor struct {
NearestNeighbors
K int
Weight string
Scale bool
Distance Distance
// Runtime members
Xscaled, Y *mat.Dense
}
// NewKNeighborsRegressor returns an initialized *KNeighborsRegressor
func NewKNeighborsRegressor(K int, Weights string) base.Regressor {
return &KNeighborsRegressor{NearestNeighbors: *NewNearestNeighbors(), K: K, Weight: Weights}
}
// Fit stores copies of the training inputs X and targets Y, defaults the
// distance metric to Euclidean when none was configured, and delegates index
// construction to the embedded NearestNeighbors.
// It panics if K was not set to a positive value before fitting.
func (m *KNeighborsRegressor) Fit(X, Y *mat.Dense) base.Transformer {
	m.Xscaled = mat.DenseCopyOf(X)
	m.Y = mat.DenseCopyOf(Y)
	if m.Distance == nil {
		m.Distance = EuclideanDistance
	}
	if m.K <= 0 {
		panic(fmt.Errorf("K<=0"))
	}
	m.NearestNeighbors.Fit(X)
	return m
}
// Predict fills Y with one prediction per row of X: for each query row the K
// nearest training samples are looked up and every output column is set to
// the (optionally distance-weighted) mean of those neighbours' targets.
// The per-sample work is spread across all CPUs.
func (m *KNeighborsRegressor) Predict(X, Y *mat.Dense) base.Regressor {
	NFitSamples, _ := m.Xscaled.Dims()
	NX, _ := X.Dims()
	_, outputs := m.Y.Dims()
	NCPU := runtime.NumCPU()
	// "distance" weighting gives closer neighbours larger influence; any
	// other Weight value results in a uniform (unweighted) mean.
	isWeightDistance := m.Weight == "distance"
	// distances and indices are NX x K matrices of neighbour info per query row.
	distances, indices := m.KNeighbors(X, m.K)
	base.Parallelize(NCPU, NX, func(th, start, end int) {
		// NOTE(review): d2/idx and the sort.Slice call below are dead code —
		// both slices stay zero-valued and the sort result is never read; the
		// neighbours actually come from KNeighbors above. Left in place so
		// the package-level "sort" import stays used; confirm before deleting.
		d2 := make([]float64, NFitSamples)
		idx := make([]int, NFitSamples)
		weights := make([]float64, m.K)
		ys := make([]float64, m.K)
		epsilon := 1e-15 // guards against division by zero for exact matches
		for ik := range weights {
			weights[ik] = 1.
		}
		for sample := start; sample < end; sample++ {
			// sort idx to get first K nearest
			sort.Slice(idx, func(i, j int) bool { return d2[idx[i]] < d2[idx[j]] })
			// set Y(sample,output) to weighted average of K nearest
			for o := 0; o < outputs; o++ {
				for ik := range ys {
					ys[ik] = m.Y.At(int(indices.At(sample, ik)), o)
					if isWeightDistance {
						weights[ik] = 1. / (epsilon + distances.At(sample, ik))
					}
				}
				Y.Set(sample, o, stat.Mean(ys, weights))
			}
		}
	})
	return m
}
// Transform for KNeighborsRegressor
func (m *KNeighborsRegressor) Transform(X, Y *mat.Dense) (Xout, Yout *mat.Dense) {
NSamples, NOutputs := Y.Dims()
Xout = X
Yout = mat.NewDense(NSamples, NOutputs, nil)
m.Predict(X, Yout)
return
}
// Score for KNeighborsRegressor
func (m *KNeighborsRegressor) Score(X, Y *mat.Dense) float64 {
NSamples, NOutputs := Y.Dims()
Ypred := mat.NewDense(NSamples, NOutputs, nil)
m.Predict(X, Ypred)
return metrics.R2Score(Y, Ypred, nil, "").At(0, 0)
} | neighbors/regression.go | 0.821725 | 0.451508 | regression.go | starcoder |
package tile
import (
"fmt"
"math"
)
const threeSixty float64 = 360.0
const oneEighty float64 = 180.0
const radius float64 = 6378137.0
const webMercatorLatLimit float64 = 85.05112877980659
type GenerateTilesConsumerFunc func(tile *Tile)
type GenerateTilesOptions struct {
Bounds *LngLatBbox
Zooms []uint
ConsumerFunc GenerateTilesConsumerFunc
InvertedY bool
}
//Tile struct is the main object we deal with, represents a standard X/Y/Z tile
type Tile struct {
X, Y, Z uint
}
//LngLat holds a standard geographic coordinate pair in decimal degrees
type LngLat struct {
Lng, Lat float64
}
//LngLatBbox bounding box of a tile, in decimal degrees
type LngLatBbox struct {
West, South, East, North float64
}
// Intersects returns true if this bounding box overlaps the other bounding
// box in both latitude and longitude. Boxes that merely touch at an edge do
// not count as intersecting.
func (b *LngLatBbox) Intersects(o *LngLatBbox) bool {
	if o.North <= b.South || o.South >= b.North {
		return false
	}
	return o.East > b.West && o.West < b.East
}
//Bbox holds Spherical Mercator bounding box of a tile
type Bbox struct {
Left, Bottom, Right, Top float64
}
//XY holds a Spherical Mercator point
type XY struct {
X, Y float64
}
func deg2rad(deg float64) float64 {
return deg * (math.Pi / oneEighty)
}
func rad2deg(rad float64) float64 {
return rad * (oneEighty / math.Pi)
}
// min returns the smaller of two unsigned integers.
func min(a uint, b uint) uint {
	if b < a {
		return b
	}
	return a
}
// pow computes a**b for integers using binary (square-and-multiply)
// exponentiation in O(log b) multiplications.
//
// BUGFIX: negative exponents made the original loop forever — an arithmetic
// right shift on a negative int never reaches zero (b>>=1 sticks at -1).
// They are not representable in integer arithmetic, so they now return 0.
func pow(a, b int) int {
	if b < 0 {
		return 0
	}
	result := 1
	for b != 0 {
		if b&1 != 0 {
			result *= a
		}
		b >>= 1
		a *= a
	}
	return result
}
// GetTile returns a tile for a given longitude latitude and zoom level
func GetTile(lng float64, lat float64, zoom uint) *Tile {
latRad := deg2rad(lat)
n := math.Pow(2.0, float64(zoom))
x := uint(math.Floor((lng + oneEighty) / threeSixty * n))
y := uint(math.Floor((1.0 - math.Log(math.Tan(latRad)+(1.0/math.Cos(latRad)))/math.Pi) / 2.0 * n))
return &Tile{x, y, zoom}
}
// GenerateTiles walks every tile that intersects opts.Bounds at each of the
// requested zoom levels and passes it to opts.ConsumerFunc. Bounding boxes
// that cross the antimeridian (west > east) are split into two boxes first,
// and latitudes are clamped to the Web Mercator limit.
func GenerateTiles(opts *GenerateTilesOptions) {
	bounds := opts.Bounds
	zooms := opts.Zooms
	consumer := opts.ConsumerFunc
	var boxes []*LngLatBbox
	if bounds.West > bounds.East {
		// Antimeridian crossing: handle the two hemispheres separately.
		boxes = []*LngLatBbox{
			{-180.0, bounds.South, bounds.East, bounds.North},
			{bounds.West, bounds.South, 180.0, bounds.North},
		}
	} else {
		boxes = []*LngLatBbox{bounds}
	}
	for _, box := range boxes {
		// Clamp the individual boxes to web mercator limits
		clampedBox := &LngLatBbox{
			West:  math.Max(-180.0, box.West),
			South: math.Max(-webMercatorLatLimit, box.South),
			East:  math.Min(180.0, box.East),
			North: math.Min(webMercatorLatLimit, box.North),
		}
		for _, z := range zooms {
			ll := GetTile(clampedBox.West, clampedBox.South, z)
			ur := GetTile(clampedBox.East, clampedBox.North, z)
			// BUGFIX/cleanup: ll.X and ur.Y are uint, so the original "< 0"
			// clamps were always false and have been dropped.
			n := uint(1) << z // number of tiles per axis at this zoom level
			for i := ll.X; i < min(ur.X+1, n); i++ {
				for j := ur.Y; j < min(ll.Y+1, n); j++ {
					y := j
					if opts.InvertedY {
						// TMS scheme flips the Y axis; previously computed via
						// math.Pow: https://gist.github.com/tmcw/4954720
						y = n - 1 - y
					}
					consumer(&Tile{Z: z, X: i, Y: y})
				}
			}
		}
	}
}
// Equals reports whether two tiles address the same X/Y/Z cell.
func (tile *Tile) Equals(t2 *Tile) bool {
	if tile.X != t2.X {
		return false
	}
	if tile.Y != t2.Y {
		return false
	}
	return tile.Z == t2.Z
}
//Ul returns the upper left corner of the tile decimal degrees
func (tile *Tile) Ul() *LngLat {
n := math.Pow(2.0, float64(tile.Z))
lonDeg := float64(tile.X)/n*threeSixty - oneEighty
latRad := math.Atan(math.Sinh(math.Pi * float64(1-(2*float64(tile.Y)/n))))
latDeg := rad2deg(latRad)
return &LngLat{lonDeg, latDeg}
}
//Bounds returns a LngLatBbox for a given tile
func (tile *Tile) Bounds() *LngLatBbox {
a := tile.Ul()
shifted := Tile{tile.X + 1, tile.Y + 1, tile.Z}
b := shifted.Ul()
return &LngLatBbox{a.Lng, b.Lat, b.Lng, a.Lat}
}
// Parent returns the tile one zoom level above (i.e. at a lower zoom number)
// the given tile. The root tile {0,0,0} is its own parent.
func (tile *Tile) Parent() *Tile {
	if tile.Z == 0 && tile.X == 0 && tile.Y == 0 {
		return tile
	}
	// Unsigned integer division already floors, so the four odd/even cases of
	// the original (computed via math.Mod on floats) collapse into a single
	// expression: (X-1)/2 == X/2 for odd X, and likewise for Y. This also
	// removes the unreachable trailing "return nil".
	return &Tile{tile.X / 2, tile.Y / 2, tile.Z - 1}
}
// Children returns the four tiles one zoom level below (i.e. at a higher zoom
// number) that together cover this tile, starting top-left and proceeding
// clockwise.
func (tile *Tile) Children() []*Tile {
	x2, y2, z1 := tile.X*2, tile.Y*2, tile.Z+1
	return []*Tile{
		{x2, y2, z1},
		{x2 + 1, y2, z1},
		{x2 + 1, y2 + 1, z1},
		{x2, y2 + 1, z1},
	}
}
// ToString returns a string representation of the tile.
func (tile *Tile) ToString() string {
return fmt.Sprintf("{%d/%d/%d}", tile.Z, tile.X, tile.Y)
}
//ToXY transforms WGS84 DD to Spherical Mercator meters
func ToXY(ll *LngLat) *XY {
x := radius * deg2rad(ll.Lng)
intrx := (math.Pi * 0.25) + (0.5 * deg2rad(ll.Lat))
y := radius * math.Log(math.Tan(intrx))
return &XY{x, y}
} | tile/tile.go | 0.848659 | 0.595787 | tile.go | starcoder |
package poly
import (
"container/list"
"fmt"
"math"
)
type Monomial struct {
Coef float64
Degree int
}
type Polynomial struct {
monoms *list.List
degree int
}
func New(monoms ...Monomial) *Polynomial {
p := &Polynomial{
monoms: list.New(),
}
for _, m := range monoms {
if m.Coef == 0 {
continue
}
p.addMonom(m)
}
return p
}
var (
Zero = New()
One = New(Monomial{Coef: 1, Degree: 0})
)
func (p *Polynomial) addMonom(monom Monomial) {
for it := p.monoms.Front(); it != nil; it = it.Next() {
m := it.Value.(Monomial)
switch {
case monom.Degree > m.Degree:
continue
case monom.Degree < m.Degree:
p.monoms.InsertBefore(monom, it)
case monom.Degree == m.Degree:
coef := m.Coef + monom.Coef
if coef == 0 {
p.monoms.Remove(it)
} else {
it.Value = Monomial{
Coef: coef,
Degree: monom.Degree,
}
}
p.degree = p.monoms.Back().Value.(Monomial).Degree
}
return
}
p.monoms.PushBack(monom)
p.degree = monom.Degree
}
func (p *Polynomial) subMonom(monom Monomial) {
p.addMonom(Monomial{
Coef: -monom.Coef,
Degree: monom.Degree,
})
}
// AddMonom returns a new polynomial equal to p plus the given monomial.
// p itself is left unmodified.
//
// BUGFIX: the original added monom.Coef to the coefficient of *every* term of
// p regardless of degree, and returned the zero polynomial when p had no
// terms at all. It now combines only with the term of matching degree (via
// the same degree-aware insertion the mutating addMonom uses).
func (p *Polynomial) AddMonom(monom Monomial) *Polynomial {
	o := New()
	// Copy p's terms; they are already sorted by ascending degree.
	for it := p.monoms.Front(); it != nil; it = it.Next() {
		o.addMonom(it.Value.(Monomial))
	}
	if monom.Coef != 0 {
		o.addMonom(monom)
	}
	return o
}
func (p *Polynomial) SubMonom(monom Monomial) *Polynomial {
return p.AddMonom(Monomial{
Coef: -monom.Coef,
Degree: monom.Degree,
})
}
// MulMonom returns a new polynomial equal to p multiplied by the given
// monomial. Multiplying by zero, or multiplying the zero polynomial, yields
// the zero polynomial.
func (p *Polynomial) MulMonom(monom Monomial) *Polynomial {
	o := New()
	if monom.Coef == 0 || p.monoms.Len() == 0 {
		return o
	}
	// p's terms are kept sorted by ascending degree, and adding a constant
	// degree offset preserves that order, so products can be appended directly.
	for it := p.monoms.Front(); it != nil; it = it.Next() {
		m := it.Value.(Monomial)
		o.monoms.PushBack(Monomial{
			Coef:   m.Coef * monom.Coef,
			Degree: m.Degree + monom.Degree,
		})
	}
	// Cleanup: the original also updated o.degree inside the loop, but that
	// value was always overwritten here — the last element has the highest degree.
	o.degree = o.monoms.Back().Value.(Monomial).Degree
	return o
}
func (p1 *Polynomial) Add(p2 *Polynomial) *Polynomial {
o := New()
if p1.monoms.Len() == 0 && p2.monoms.Len() == 0 {
return o
}
it1, it2 := p1.monoms.Front(), p2.monoms.Front()
loop:
for it1 != nil || it2 != nil {
var m Monomial
switch {
case it1 != nil && it2 != nil:
m1 := it1.Value.(Monomial)
m2 := it2.Value.(Monomial)
switch {
case m1.Degree == m2.Degree:
it1 = it1.Next()
it2 = it2.Next()
coef := m1.Coef + m2.Coef
if coef == 0 {
continue loop
}
m = Monomial{
Coef: coef,
Degree: m1.Degree,
}
case m1.Degree < m2.Degree:
it1 = it1.Next()
m = m1
default:
it2 = it2.Next()
m = m2
}
case it1 != nil:
m = it1.Value.(Monomial)
it1 = it1.Next()
case it2 != nil:
m = it2.Value.(Monomial)
it2 = it2.Next()
}
o.monoms.PushBack(m)
}
if m := o.monoms.Back(); m != nil {
o.degree = m.Value.(Monomial).Degree
}
return o
}
func (p1 *Polynomial) Sub(p2 *Polynomial) *Polynomial {
o := New()
if p1.monoms.Len() == 0 && p2.monoms.Len() == 0 {
return o
}
it1, it2 := p1.monoms.Front(), p2.monoms.Front()
loop:
for it1 != nil || it2 != nil {
var m Monomial
switch {
case it1 != nil && it2 != nil:
m1 := it1.Value.(Monomial)
m2 := it2.Value.(Monomial)
switch {
case m1.Degree == m2.Degree:
it1 = it1.Next()
it2 = it2.Next()
coef := m1.Coef - m2.Coef
if coef == 0 {
continue loop
}
m = Monomial{
Coef: coef,
Degree: m1.Degree,
}
case m1.Degree < m2.Degree:
it1 = it1.Next()
m = m1
default:
it2 = it2.Next()
m = m2
m.Coef = -m.Coef
}
case it1 != nil:
m = it1.Value.(Monomial)
it1 = it1.Next()
case it2 != nil:
m = it2.Value.(Monomial)
m.Coef = -m.Coef
it2 = it2.Next()
}
o.monoms.PushBack(m)
}
if m := o.monoms.Back(); m != nil {
o.degree = m.Value.(Monomial).Degree
}
return o
}
func (p1 *Polynomial) Mul(p2 *Polynomial) *Polynomial {
o := New()
for it1 := p1.monoms.Front(); it1 != nil; it1 = it1.Next() {
m1 := it1.Value.(Monomial)
for it2 := p2.monoms.Front(); it2 != nil; it2 = it2.Next() {
m2 := it2.Value.(Monomial)
o.addMonom(Monomial{
Coef: m1.Coef * m2.Coef,
Degree: m1.Degree + m2.Degree,
})
}
}
return o
}
// DivMod performs polynomial long division of p1 by p2, returning quotient q
// and remainder r such that p1 = q*p2 + r.
//
// NOTE(review): the loop uses "r.degree != 0" as a stand-in for "r is not the
// zero polynomial", so division stops as soon as the remainder is constant —
// even a non-zero constant divisible by a constant divisor stays in r. There
// is also no guard against a zero-polynomial divisor; confirm callers never
// pass one before relying on this in new code.
func (p1 *Polynomial) DivMod(p2 *Polynomial) (q, r *Polynomial) {
	q = New()
	r = p1
	d := p2
	for r.degree != 0 && r.degree >= d.degree {
		// Divide the leading terms to get the next quotient term t, then
		// subtract t*d from the running remainder.
		m1 := r.monoms.Back().Value.(Monomial)
		m2 := d.monoms.Back().Value.(Monomial)
		t := Monomial{
			Coef:   m1.Coef / m2.Coef,
			Degree: m1.Degree - m2.Degree,
		}
		q.addMonom(t)
		r = r.Sub(d.MulMonom(t))
	}
	return
}
func (p1 *Polynomial) Div(p2 *Polynomial) *Polynomial {
q, _ := p1.DivMod(p2)
return q
}
func (p1 *Polynomial) Mod(p2 *Polynomial) *Polynomial {
_, r := p1.DivMod(p2)
return r
}
// Equal reports whether p1 and p2 contain exactly the same terms — the same
// coefficients and degrees in the same order.
func (p1 *Polynomial) Equal(p2 *Polynomial) bool {
	if p1.degree != p2.degree || p1.monoms.Len() != p2.monoms.Len() {
		return false
	}
	// Both lists are sorted by ascending degree, so a pairwise walk suffices.
	for it1, it2 := p1.monoms.Front(), p2.monoms.Front(); it1 != nil; it1, it2 = it1.Next(), it2.Next() {
		if it1.Value.(Monomial) != it2.Value.(Monomial) {
			return false
		}
	}
	return true
}
// Gcd computes a greatest common divisor of p1 and p2 via the Euclidean
// algorithm (repeated polynomial division, keeping the remainder).
//
// NOTE(review): the loop tracks gcd as the most recent *remainder* (b after
// the swap), so on exit gcd holds the first remainder of degree 0 rather than
// the last non-zero divisor; the textbook algorithm returns a once b becomes
// zero. When p2 is constant the loop never runs and the zero polynomial is
// returned. Confirm the intended contract before relying on this.
func (p1 *Polynomial) Gcd(p2 *Polynomial) *Polynomial {
	a, b := p1, p2
	var gcd *Polynomial
	for b.Degree() != 0 {
		_, r := a.DivMod(b)
		a = b
		b = r
		gcd = b
	}
	if gcd == nil {
		gcd = New()
	}
	return gcd
}
func (p *Polynomial) Degree() int {
return p.degree
}
func (p *Polynomial) Calc(x float64) float64 {
var fx float64
for it := p.monoms.Front(); it != nil; it = it.Next() {
m := it.Value.(Monomial)
fx += m.Coef * math.Pow(x, float64(m.Degree))
}
return fx
}
// String renders the polynomial in conventional human-readable form with the
// highest-degree term first, e.g. "2.00x^3 - x + 1.00". The zero polynomial
// prints as "0".
func (p *Polynomial) String() string {
	if p.monoms.Len() == 0 {
		return "0"
	}
	var res string
	// Walk from the back so the highest-degree term is printed first.
	for it := p.monoms.Back(); it != nil; it = it.Prev() {
		m := it.Value.(Monomial)
		if it != p.monoms.Back() {
			// Between terms, the sign doubles as the separator (" + " / " - ").
			if m.Coef > 0 {
				res += " + "
			} else {
				res += " - "
			}
		} else if m.Coef < 0 {
			// The leading term carries only a bare minus sign.
			res += "-"
		}
		// Coefficients of magnitude 1 are left implicit, except for the
		// constant term (degree 0), which must always show its value.
		if math.Abs(m.Coef) != 1 || m.Degree == 0 {
			res += fmt.Sprintf("%.2f", math.Abs(m.Coef))
		}
		if m.Degree != 0 {
			if m.Degree == 1 {
				res += "x"
			} else {
				res += fmt.Sprintf("x^%d", m.Degree)
			}
		}
	}
	return res
}
package reflecttools
import (
"errors"
"fmt"
"reflect"
"strings"
)
const dotPathSep = "."
// SetPtrToStruct is used to inject an object into the specified field on another object. The target object and the
// supplied value must both be pointers to structs; the named target field must either be of an interface type that
// the value implements, or of a type the value is directly assignable to.
func SetPtrToStruct(target interface{}, field string, valuePointer interface{}) error {
	if !IsPointerToStruct(target) {
		return errors.New("target is not a pointer to a struct")
	}
	if !IsPointerToStruct(valuePointer) {
		return errors.New("value supplied to set on the target is not a pointer to a struct")
	}
	tv := reflect.ValueOf(target).Elem()
	vp := reflect.ValueOf(valuePointer)
	if !HasFieldOfName(target, field) {
		return fmt.Errorf("target does not have a field called %s", field)
	}
	tfv := tv.FieldByName(field)
	if !tfv.CanSet() {
		return fmt.Errorf("field %s on target cannot be set. Check that the field been exported", field)
	}
	if tfv.Kind() == reflect.Interface {
		if !vp.Type().Implements(tfv.Type()) {
			return fmt.Errorf("supplied value (type %s) does not implement the interface (%s) required by the target field %s", vp.Elem().Type().Name(), tfv.Type().Name(), field)
		}
		tfv.Set(vp)
		// BUGFIX: return once the interface field has been set instead of
		// falling through to a second, redundant assignability check and Set.
		return nil
	}
	if !vp.Type().AssignableTo(tfv.Type()) {
		// BUGFIX: the original silently returned nil here, leaving the field
		// untouched without reporting the failure to the caller.
		return fmt.Errorf("supplied value (type %s) is not assignable to field %s (type %s)", vp.Type(), field, tfv.Type())
	}
	tfv.Set(vp)
	return nil
}
// SetFieldPtrToStruct assigns the supplied object to the supplied reflect Value (which represents a field on a struct). Returns an
// error if the supplied type is an interface and the target field cannot be set o
func SetFieldPtrToStruct(field reflect.Value, valuePointer interface{}) error {
vp := reflect.ValueOf(valuePointer)
if !field.CanSet() {
return fmt.Errorf("field cannot be set")
}
if field.Kind() == reflect.Interface {
if vp.Type().Implements(field.Type()) {
field.Set(vp)
} else {
return fmt.Errorf("supplied value (type %s) does not implement the interface (%s) required by the target field", vp.Elem().Type().Name(), field.Type().Name())
}
}
if vp.Type().AssignableTo(field.Type()) {
field.Set(vp)
}
return nil
}
// NilPointer returns true if the supplied reflect value is a pointer that does not point a valid value.
func NilPointer(v reflect.Value) bool {
return v.Kind() == reflect.Ptr && !v.Elem().IsValid()
}
// NilMap returns true is the supplied reflect value is a Map and is nil.
func NilMap(v reflect.Value) bool {
return v.Kind() == reflect.Map && v.IsNil()
}
// IsPointerToStruct returns true if the supplied interface is a pointer to a struct.
func IsPointerToStruct(p interface{}) bool {
	pv := reflect.ValueOf(p)
	// A non-pointer (including an untyped nil, whose Kind is Invalid) fails
	// immediately; otherwise the pointed-to kind must be a struct.
	return pv.Kind() == reflect.Ptr && pv.Elem().Kind() == reflect.Struct
}
//IsPointer returns true if the supplied object is a pointer type
func IsPointer(p interface{}) bool {
pv := reflect.ValueOf(p)
pvk := pv.Kind()
return pvk == reflect.Ptr
}
// HasFieldOfName assumes the supplied interface is a pointer to a struct and checks to see if the underlying struct
// has a field of the supplied name. It does not check whether the field is writable.
func HasFieldOfName(i interface{}, fieldName string) bool {
	return reflect.ValueOf(i).Elem().FieldByName(fieldName).IsValid()
}
// StructOrPointerHasFieldOfName checks whether the supplied object has a field of the specified name
func StructOrPointerHasFieldOfName(i interface{}, fieldName string) bool {
if IsPointer(i) {
return HasFieldOfName(i, fieldName)
}
r := reflect.ValueOf(i)
f := r.FieldByName(fieldName)
return f.IsValid()
}
// HasWritableFieldOfName assumes the supplied interface is a pointer to a struct and checks to see if the underlying struct
// has a writable field of the supplied name.
func HasWritableFieldOfName(i interface{}, fieldName string) bool {
r := reflect.ValueOf(i).Elem()
f := r.FieldByName(fieldName)
return f.IsValid() && f.CanSet()
}
// TypeOfField assumes the supplied interface is a pointer to a struct and that the supplied field name exists on that struct, then
// finds the reflect type of that field.
func TypeOfField(i interface{}, name string) reflect.Type {
r := reflect.ValueOf(i).Elem()
return r.FieldByName(name).Type()
}
// SetInt64 assumes that the supplied interface is a pointer to a struct and has a writable int64 field of the supplied name, then
// sets the field of the supplied value.
func SetInt64(i interface{}, name string, v int64) {
t := FieldValue(i, name)
t.SetInt(v)
}
// SetFloat64 assumes that the supplied interface is a pointer to a struct and has a writable float64 field of the supplied name, then
// sets the field of the supplied value.
func SetFloat64(i interface{}, name string, v float64) {
t := FieldValue(i, name)
t.SetFloat(v)
}
// SetUint64 assumes that the supplied interface is a pointer to a struct and has a writable uint64 field of the supplied name, then
// sets the field of the supplied value.
func SetUint64(i interface{}, name string, v uint64) {
t := FieldValue(i, name)
t.SetUint(v)
}
// SetBool assumes that the supplied interface is a pointer to a struct and has a writable bool field of the supplied name, then
// sets the field of the supplied value.
func SetBool(i interface{}, name string, b bool) {
t := FieldValue(i, name)
t.SetBool(b)
}
// SetString assumes that the supplied interface is a pointer to a struct and has a writable string field of the supplied name, then
// sets the field of the supplied value.
func SetString(i interface{}, name string, s string) {
t := FieldValue(i, name)
t.SetString(s)
}
// SetSliceElem assumes that the supplied interface is a slice or array and then sets the element at the index to the supplied value
func SetSliceElem(i interface{}, fieldName string, value interface{}, index int) {
t := FieldValue(i, fieldName)
se := t.Index(index)
se.Set(reflect.ValueOf(value))
}
// FieldValue assumes the supplied interface is a pointer to a struct, an interface or a struct and has a valid field of the supplied
// name, then returns the reflect value of that field.
func FieldValue(i interface{}, name string) reflect.Value {
var r reflect.Value
r = reflect.ValueOf(i)
k := r.Kind()
if k == reflect.Interface || k == reflect.Ptr {
r = r.Elem()
}
return r.FieldByName(name)
}
// TargetFieldIsArray assumes the supplied interface is a pointer to a struct, an interface or a struct
// and has a valid field of the supplied name, then returns true if the reflect type of that field is Array. Note that
// this method will return false for Slice fields.
func TargetFieldIsArray(i interface{}, name string) bool {
return TypeOfField(i, name).Kind() == reflect.Array
}
// IsSliceOrArray returns true if the supplied value is a slice or an array
func IsSliceOrArray(i interface{}) bool {
pv := reflect.ValueOf(i)
pvk := pv.Kind()
return pvk == reflect.Array || pvk == reflect.Slice
}
// ExtractDotPath converts a dot-delimited path into a string array of its constiuent parts. E.g. "a.b.c" becomes
// ["a","b","c"]
func ExtractDotPath(path string) []string {
return strings.SplitN(path, dotPathSep, -1)
}
// FindNestedField take the output of ExtractDotPath and uses it to traverse an object graph to find a value. Apart from
// final value, each intermediate step in the graph must be a struct or pointer to a struct.
func FindNestedField(path []string, v interface{}) (reflect.Value, error) {
pl := len(path)
head := path[0]
if pl == 1 {
if !StructOrPointerHasFieldOfName(v, head) {
var zero reflect.Value
return zero, fmt.Errorf("field %s does not exist on target object of type %T", head, v)
}
return FieldValue(v, head), nil
}
fv := FieldValue(v, head)
next := fv.Interface()
if !IsPointerToStruct(next) && fv.Kind() != reflect.Struct {
m := fmt.Sprintf("%s is not a struct or a pointer to a struct", head)
var zero reflect.Value
return zero, errors.New(m)
}
return FindNestedField(path[1:], next)
}
// IsZero returns true if i is set to the zero value of i's type
func IsZero(i interface{}) bool {
return i == reflect.Zero(reflect.TypeOf(i)).Interface()
} | reflecttools/reflecttools.go | 0.745213 | 0.547464 | reflecttools.go | starcoder |
package functions
import (
"fmt"
"reflect"
)
// Slices bundles helper methods for working with slices whose static type has
// been erased to interface{}: length, typed element access and reversal.
type Slices struct {
}

// Length returns the number of elements in the supplied slice value.
func (s Slices) Length(slice interface{}) int {
	return reflect.ValueOf(slice).Len()
}

// StringAt returns the element at index, or an error if slice is not a
// []string. The index itself is not bounds-checked.
func (s Slices) StringAt(slice interface{}, index int) (string, error) {
	if actual, ok := slice.([]string); ok {
		return actual[index], nil
	}
	return "", fmt.Errorf("unexpected slice type %T", slice)
}

// IntAt returns the element at index, or an error if slice is not an []int.
func (s Slices) IntAt(slice interface{}, index int) (int, error) {
	if actual, ok := slice.([]int); ok {
		return actual[index], nil
	}
	return 0, fmt.Errorf("unexpected slice type %T", slice)
}

// BoolAt returns the element at index, or an error if slice is not a []bool.
func (s Slices) BoolAt(slice interface{}, index int) (bool, error) {
	if actual, ok := slice.([]bool); ok {
		return actual[index], nil
	}
	return false, fmt.Errorf("unexpected slice type %T", slice)
}

// FloatAt returns the element at index, or an error if slice is not a []float64.
func (s Slices) FloatAt(slice interface{}, index int) (float64, error) {
	if actual, ok := slice.([]float64); ok {
		return actual[index], nil
	}
	return 0, fmt.Errorf("unexpected slice type %T", slice)
}

// ReverseStrings returns a new []string with the elements of slice in reverse
// order, or an error if slice is not a []string.
func (s Slices) ReverseStrings(slice interface{}) ([]string, error) {
	actual, ok := slice.([]string)
	if !ok {
		// BUGFIX: "exptected" typo corrected in the error message.
		return []string{}, fmt.Errorf("unexpected type, expected []string but got %T", slice)
	}
	reversed := make([]string, len(actual))
	for i, v := range actual {
		reversed[len(reversed)-1-i] = v
	}
	return reversed, nil
}

// ReverseFloats returns a new []float64 with the elements of slice in reverse
// order, or an error if slice is not a []float64.
func (s Slices) ReverseFloats(slice interface{}) ([]float64, error) {
	actual, ok := slice.([]float64)
	if !ok {
		return []float64{}, fmt.Errorf("unexpected type, expected []float64 but got %T", slice)
	}
	reversed := make([]float64, len(actual))
	for i, v := range actual {
		reversed[len(reversed)-1-i] = v
	}
	return reversed, nil
}

// ReverseInts returns a new []int with the elements of slice in reverse
// order, or an error if slice is not an []int.
func (s Slices) ReverseInts(slice interface{}) ([]int, error) {
	actual, ok := slice.([]int)
	if !ok {
		// BUGFIX: the original error message claimed []float64 was the
		// expected type; it is []int ("exptected" typo fixed too).
		return []int{}, fmt.Errorf("unexpected type, expected []int but got %T", slice)
	}
	reversed := make([]int, len(actual))
	for i, v := range actual {
		reversed[len(reversed)-1-i] = v
	}
	return reversed, nil
}
package main
import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync"

	"github.com/TomasCruz/projecteuler"
	"github.com/TomasCruz/projecteuler/001-100/091-100/096/sudoku"
)
/*
Problem 96; Su Doku
Su Doku (Japanese meaning number place) is the name given to a popular puzzle concept.
Its origin is unclear, but credit must be attributed to Leonhard Euler who invented a similar,
and much more difficult, puzzle idea called Latin Squares. The objective of Su Doku puzzles, however,
is to replace the blanks (or zeros) in a 9 by 9 grid in such that each row, column, and 3 by 3 box contains
each of the digits 1 to 9. Below is an example of a typical starting puzzle grid and its solution grid.
A well constructed Su Doku puzzle has a unique solution and can be solved by logic, although it may be
necessary to employ "guess and test" methods in order to eliminate options (there is much contested opinion
over this). The complexity of the search determines the difficulty of the puzzle; the example above is
considered easy because it can be solved by straight forward direct deduction.
The 6K text file, sudoku.txt (right click and 'Save Link/Target As...'), contains fifty different Su Doku
puzzles ranging in difficulty, but all with unique solutions (the first puzzle in the file is the example
above).
By solving all fifty puzzles find the sum of the 3-digit numbers found in the top left corner of each
solution grid; for example, 483 is the 3-digit number found in the top left corner of the solution grid above.
*/
func main() {
var fileName string
if len(os.Args) > 1 {
fileName = os.Args[1]
} else {
fileName = "p096_sudoku.txt"
}
projecteuler.Timed(calc, fileName)
}
func calc(args ...interface{}) (result string, err error) {
fileName := args[0].(string)
var textNumbers []string
if textNumbers, err = projecteuler.FileToStrings(fileName); err != nil {
fmt.Println(err)
return
}
var sudokuStrings []string
for i := 0; i < len(textNumbers)/10; i++ {
sudokuStrings = append(sudokuStrings, strings.Join(textNumbers[10*i+1:10*i+10], ""))
}
length := len(sudokuStrings)
puzzles := make([]*sudoku.Sudoku, length)
threadPerCoreGuard := make(chan int, runtime.GOMAXPROCS(runtime.NumCPU()))
solvingResult := make(chan int, length)
resInt := 0
solvedCount := 0
for i := 0; i < length; i++ {
threadPerCoreGuard <- 1
sud := sudoku.NewSudoku(sudokuStrings[i])
puzzles[i] = &sud
go func(sud *sudoku.Sudoku) {
sud.Solve()
<-threadPerCoreGuard
solvingResult <- sud.FirstThree()
solvedCount++
if solvedCount == length {
close(solvingResult)
}
}(puzzles[i])
}
for s := range solvingResult {
resInt += s
}
result = strconv.Itoa(resInt)
return
} | 001-100/091-100/096/main.go | 0.646014 | 0.4474 | main.go | starcoder |
package evaluator
import (
"strconv"
"github.com/lyraproj/issue/issue"
"github.com/lyraproj/pcore/px"
"github.com/lyraproj/pcore/types"
"github.com/lyraproj/puppet-evaluator/pdsl"
"github.com/lyraproj/puppet-parser/parser"
)
func evalArithmeticExpression(e pdsl.Evaluator, expr *parser.ArithmeticExpression) px.Value {
return calculate(expr, e.Eval(expr.Lhs()), e.Eval(expr.Rhs()))
}
func calculate(expr *parser.ArithmeticExpression, a px.Value, b px.Value) px.Value {
op := expr.Operator()
switch a := a.(type) {
case *types.Hash, *types.Array, *types.UriValue:
switch op {
case `+`:
return concatenate(expr, a, b)
case `-`:
return collectionDelete(expr, a, b)
case `<<`:
if av, ok := a.(*types.Array); ok {
return av.Add(b)
}
}
case px.Float:
return lhsFloatArithmetic(expr, a.Float(), b)
case px.Integer:
return lhsIntArithmetic(expr, a.Int(), b)
case px.StringValue:
s := a.String()
if iv, err := strconv.ParseInt(s, 0, 64); err == nil {
return lhsIntArithmetic(expr, iv, b)
}
if fv, err := strconv.ParseFloat(s, 64); err == nil {
return lhsFloatArithmetic(expr, fv, b)
}
panic(evalError(pdsl.NotNumeric, expr.Lhs(), issue.H{`value`: s}))
}
panic(evalError(pdsl.OperatorNotApplicable, expr, issue.H{`operator`: op, `left`: a.PType()}))
}
func lhsIntArithmetic(expr *parser.ArithmeticExpression, ai int64, b px.Value) px.Value {
op := expr.Operator()
switch b := b.(type) {
case px.Integer:
return types.WrapInteger(intArithmetic(expr, ai, b.Int()))
case px.Float:
return types.WrapFloat(floatArithmetic(expr, float64(ai), b.Float()))
case px.StringValue:
s := b.String()
if iv, err := strconv.ParseInt(s, 0, 64); err == nil {
return types.WrapInteger(intArithmetic(expr, ai, iv))
}
if fv, err := strconv.ParseFloat(s, 64); err == nil {
return types.WrapFloat(floatArithmetic(expr, float64(ai), fv))
}
panic(evalError(pdsl.NotNumeric, expr.Rhs(), issue.H{`value`: s}))
default:
panic(evalError(pdsl.OperatorNotApplicableWhen, expr, issue.H{`operator`: op, `left`: `Integer`, `right`: b.PType()}))
}
}
// lhsFloatArithmetic applies the operator of expr with a float left-hand
// operand af. The result is always a Float; an integer or numeric-string
// rhs is converted to float64 first. A non-numeric string raises NotNumeric
// for the rhs; any other rhs type raises OperatorNotApplicableWhen.
func lhsFloatArithmetic(expr *parser.ArithmeticExpression, af float64, b px.Value) px.Value {
	op := expr.Operator()
	switch b := b.(type) {
	case px.Float:
		return types.WrapFloat(floatArithmetic(expr, af, b.Float()))
	case px.Integer:
		return types.WrapFloat(floatArithmetic(expr, af, float64(b.Int())))
	case px.StringValue:
		s := b.String()
		if iv, err := strconv.ParseInt(s, 0, 64); err == nil {
			return types.WrapFloat(floatArithmetic(expr, af, float64(iv)))
		}
		if fv, err := strconv.ParseFloat(s, 64); err == nil {
			return types.WrapFloat(floatArithmetic(expr, af, fv))
		}
		panic(evalError(pdsl.NotNumeric, expr.Rhs(), issue.H{`value`: s}))
	default:
		panic(evalError(pdsl.OperatorNotApplicableWhen, expr, issue.H{`operator`: op, `left`: `Float`, `right`: b.PType()}))
	}
}
// floatArithmetic applies the operator of expr to two float64 operands.
// Only `+`, `-`, `*` and `/` are defined for floats; any other operator
// raises an OperatorNotApplicable evaluation error.
func floatArithmetic(expr *parser.ArithmeticExpression, a float64, b float64) float64 {
	op := expr.Operator()
	switch op {
	case `/`:
		return a / b
	case `*`:
		return a * b
	case `-`:
		return a - b
	case `+`:
		return a + b
	}
	panic(evalError(pdsl.OperatorNotApplicable, expr, issue.H{`operator`: op, `left`: `Float`}))
}
// intArithmetic applies the operator of expr to two int64 operands.
// Besides the four basic operations it supports `%` (remainder) and the
// bit shifts `<<`/`>>`; any other operator raises OperatorNotApplicable.
func intArithmetic(expr *parser.ArithmeticExpression, a int64, b int64) int64 {
	op := expr.Operator()
	switch op {
	case `>>`:
		return a >> uint(b)
	case `<<`:
		return a << uint(b)
	case `%`:
		return a % b
	case `/`:
		return a / b
	case `*`:
		return a * b
	case `-`:
		return a - b
	case `+`:
		return a + b
	}
	panic(evalError(pdsl.OperatorNotApplicable, expr, issue.H{`operator`: op, `left`: `Integer`}))
}
// concatenate implements the `+` operator for collection and URI operands:
//   - Array + Array/Hash appends all of the rhs elements; any other rhs is
//     appended as a single element.
//   - Hash + Array merges the array interpreted as key/value pairs;
//     Hash + Hash merges the two hashes.
//   - URI + String/URI resolves the rhs as a reference against the lhs URL.
//
// Any other combination raises OperatorNotApplicableWhen.
func concatenate(expr *parser.ArithmeticExpression, a px.Value, b px.Value) px.Value {
	switch a := a.(type) {
	case *types.Array:
		switch b := b.(type) {
		case *types.Array:
			return a.AddAll(b)
		case *types.Hash:
			return a.AddAll(b)
		default:
			return a.Add(b)
		}
	case *types.Hash:
		switch b := b.(type) {
		case *types.Array:
			return a.Merge(types.WrapHashFromArray(b))
		case *types.Hash:
			return a.Merge(b)
		}
	case *types.UriValue:
		switch b := b.(type) {
		case px.StringValue:
			return types.WrapURI(a.URL().ResolveReference(types.ParseURI(b.String())))
		case *types.UriValue:
			return types.WrapURI(a.URL().ResolveReference(b.URL()))
		}
	}
	panic(evalError(pdsl.OperatorNotApplicableWhen, expr, issue.H{`operator`: expr.Operator(), `left`: a.PType(), `right`: b.PType()}))
}
func collectionDelete(expr *parser.ArithmeticExpression, a px.Value, b px.Value) px.Value {
switch a := a.(type) {
case *types.Array:
switch b := b.(type) {
case *types.Array:
return a.DeleteAll(b)
case *types.Hash:
return a.DeleteAll(b)
default:
return a.Delete(b)
}
case *types.Hash:
switch b := b.(type) {
case *types.Array:
return a.DeleteAll(b)
case *types.Hash:
return a.DeleteAll(b.Keys())
default:
return a.Delete(b)
}
default:
panic(evalError(pdsl.OperatorNotApplicable, expr, issue.H{`operator`: expr.Operator(), `left`: a.PType}))
}
} | evaluator/arithmetic.go | 0.676834 | 0.406302 | arithmetic.go | starcoder |
package storage
import (
"fmt"
"sort"
"zircon/apis"
)
// MemoryStorage keeps all chunk data in process memory; everything is lost
// when the process exits. It backs ConfigureMemoryStorage.
type MemoryStorage struct {
	// isClosed is set by Close; any later use panics via assertOpen.
	isClosed bool
	// chunks maps chunk -> version -> stored data for that version.
	chunks map[apis.ChunkNum]map[apis.Version][]byte
	// latest records the latest version marked for each chunk.
	latest map[apis.ChunkNum]apis.Version
}
// Creates an in-memory-only location to store data, and construct an interface by which a chunkserver can store chunks
//
// Never fails; the error return exists only to match the ChunkStorage
// constructor signature.
func ConfigureMemoryStorage() (ChunkStorage, error) {
	return &MemoryStorage{
		chunks: map[apis.ChunkNum]map[apis.Version][]byte{},
		latest: map[apis.ChunkNum]apis.Version{},
	}, nil
}
// returns semi-fake storage usage stats for testing
//
// The result approximates bytes used: 32 bytes per hash-table entry plus
// the maximum chunk size for every stored chunk version.
func (m *MemoryStorage) StatsForTesting() int {
	// Use the shared guard instead of duplicating the closed-check inline,
	// keeping the panic message consistent with every other method.
	m.assertOpen()
	chunkCount := 0
	for _, versions := range m.chunks {
		chunkCount += len(versions)
	}
	entryCount := len(m.chunks) + len(m.latest) + chunkCount
	// let's approximate 32 bytes per hash table entry
	// and 8 MB per chunk of data
	return entryCount*32 + chunkCount*int(apis.MaxChunkSize)
}
// assertOpen panics if Close has already been called; the storage methods
// invoke it to reject any use after Close.
func (m *MemoryStorage) assertOpen() {
	if m.isClosed {
		panic("attempt to use closed MemoryStorage")
	}
}
// ListChunksWithData returns every chunk that has at least one stored
// version. The order of the result is unspecified (map iteration order).
func (m *MemoryStorage) ListChunksWithData() ([]apis.ChunkNum, error) {
	m.assertOpen()
	result := make([]apis.ChunkNum, 0, len(m.chunks))
	for chunk, versions := range m.chunks {
		if len(versions) == 0 {
			continue
		}
		result = append(result, chunk)
	}
	return result, nil
}
// ListVersions returns every version stored for the given chunk, in
// ascending order. A chunk with no stored data yields nil and no error.
func (m *MemoryStorage) ListVersions(chunk apis.ChunkNum) ([]apis.Version, error) {
	m.assertOpen()
	versionMap := m.chunks[chunk]
	if versionMap == nil {
		return nil, nil
	}
	result := make([]apis.Version, 0, len(versionMap))
	// Idiomatic key-only range (was `for k, _ := range`).
	for version := range versionMap {
		result = append(result, version)
	}
	sort.Slice(result, func(i, j int) bool {
		return result[i] < result[j]
	})
	return result, nil
}
// ReadVersion returns a copy of the data stored for the chunk/version pair,
// or an error if that combination does not exist. The copy prevents callers
// from mutating the stored bytes.
func (m *MemoryStorage) ReadVersion(chunk apis.ChunkNum, version apis.Version) ([]byte, error) {
	m.assertOpen()
	versionMap, ok := m.chunks[chunk]
	if !ok {
		return nil, fmt.Errorf("no such chunk/version combination: %d/%d", chunk, version)
	}
	data, found := versionMap[version]
	if !found {
		return nil, fmt.Errorf("no such chunk/version combination: %d/%d", chunk, version)
	}
	result := make([]byte, len(data))
	copy(result, data)
	return result, nil
}
// WriteVersion stores a copy of data for the chunk/version pair. It fails
// if the data exceeds apis.MaxChunkSize or if that pair already exists.
func (m *MemoryStorage) WriteVersion(chunk apis.ChunkNum, version apis.Version, data []byte) error {
	m.assertOpen()
	if len(data) > apis.MaxChunkSize {
		// Bug fix: version is numeric, so the verb must be %d (the old %s
		// produced a %!s(...) artifact in the message).
		return fmt.Errorf("chunk is too large: %d/%d = data[%d]", chunk, version, len(data))
	}
	versionMap := m.chunks[chunk]
	if versionMap == nil {
		versionMap = map[apis.Version][]byte{}
		m.chunks[chunk] = versionMap
	}
	existing, exists := versionMap[version]
	if exists {
		return fmt.Errorf("chunk/version combination already exists: %d/%d = data[%d]", chunk, version, len(existing))
	}
	// Store a copy so later caller-side mutation of data cannot corrupt us.
	ndata := make([]byte, len(data))
	copy(ndata, data)
	versionMap[version] = ndata
	return nil
}
// DeleteVersion removes the data stored for the chunk/version pair, and
// drops the chunk entry entirely once its last version is gone. Returns an
// error if the pair does not exist.
func (m *MemoryStorage) DeleteVersion(chunk apis.ChunkNum, version apis.Version) error {
	m.assertOpen()
	versionMap, ok := m.chunks[chunk]
	if ok {
		_, ok = versionMap[version]
	}
	if !ok {
		return fmt.Errorf("chunk/version combination does not exist: %d/%d", chunk, version)
	}
	delete(versionMap, version)
	if len(versionMap) == 0 {
		delete(m.chunks, chunk)
	}
	return nil
}
// ListChunksWithLatest returns every chunk that has a latest version
// recorded. The order of the result is unspecified (map iteration order).
func (m *MemoryStorage) ListChunksWithLatest() ([]apis.ChunkNum, error) {
	m.assertOpen()
	result := make([]apis.ChunkNum, 0, len(m.latest))
	// Idiomatic key-only range (was `for k, _ := range`).
	for chunk := range m.latest {
		result = append(result, chunk)
	}
	return result, nil
}
// GetLatestVersion returns the recorded latest version for the chunk, or
// an error when none has been set.
func (m *MemoryStorage) GetLatestVersion(chunk apis.ChunkNum) (apis.Version, error) {
	m.assertOpen()
	version, found := m.latest[chunk]
	if !found {
		return 0, fmt.Errorf("no latest version for chunk: %d", chunk)
	}
	return version, nil
}
// SetLatestVersion records latest as the latest version for the chunk,
// overwriting any previous value. It never fails (beyond the closed check).
func (m *MemoryStorage) SetLatestVersion(chunk apis.ChunkNum, latest apis.Version) error {
	m.assertOpen()
	m.latest[chunk] = latest
	return nil
}
// DeleteLatestVersion removes the latest-version record for the chunk, or
// returns an error if no record exists.
func (m *MemoryStorage) DeleteLatestVersion(chunk apis.ChunkNum) error {
	m.assertOpen()
	// Guard-clause form: no else after a terminating return.
	if _, found := m.latest[chunk]; !found {
		return fmt.Errorf("cannot delete nonexistent latest version for chunk: %d", chunk)
	}
	delete(m.latest, chunk)
	return nil
}
func (m *MemoryStorage) Close() {
m.chunks = nil
m.latest = nil
m.isClosed = true
} | src/zircon/chunkserver/storage/memory.go | 0.702326 | 0.423279 | memory.go | starcoder |
package apivideosdk
import (
//"encoding/json"
)
// VideoWatermark struct for VideoWatermark
type VideoWatermark struct {
	// id of the watermark
	Id *string `json:"id,omitempty"`
	// Distance expressed in px or % between the top-border of the video and the watermark-image.
	Top *string `json:"top,omitempty"`
	// Distance expressed in px or % between the left-border of the video and the watermark-image.
	Left *string `json:"left,omitempty"`
	// Distance expressed in px or % between the bottom-border of the video and the watermark-image.
	Bottom *string `json:"bottom,omitempty"`
	// Distance expressed in px or % between the right-border of the video and the watermark-image.
	Right *string `json:"right,omitempty"`
	// Width of the watermark-image relative to the video if expressed in %. Otherwise a fixed width. NOTE: To keep intrinsic watermark-image width use initial
	Width *string `json:"width,omitempty"`
	// Height of the watermark-image relative to the video if expressed in %. Otherwise a fixed height. NOTE: To keep intrinsic watermark-image height use initial
	Height *string `json:"height,omitempty"`
	// Opacity expressed in % only to specify the degree of the watermark-image transparency with the video.
	Opacity *string `json:"opacity,omitempty"`
}
// NewVideoWatermark instantiates a new VideoWatermark object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewVideoWatermark() *VideoWatermark {
	return &VideoWatermark{}
}
// NewVideoWatermarkWithDefaults instantiates a new VideoWatermark object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewVideoWatermarkWithDefaults() *VideoWatermark {
	return &VideoWatermark{}
}
// GetId returns the Id field value if set, zero value otherwise.
func (o *VideoWatermark) GetId() string {
	if o == nil || o.Id == nil {
		return ""
	}
	return *o.Id
}

// GetIdOk returns a tuple with the Id field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VideoWatermark) GetIdOk() (*string, bool) {
	if o == nil || o.Id == nil {
		return nil, false
	}
	return o.Id, true
}

// HasId returns a boolean if a field has been set.
func (o *VideoWatermark) HasId() bool {
	return o != nil && o.Id != nil
}

// SetId gets a reference to the given string and assigns it to the Id field.
func (o *VideoWatermark) SetId(v string) {
	o.Id = &v
}
// GetTop returns the Top field value if set, zero value otherwise.
func (o *VideoWatermark) GetTop() string {
	if o == nil || o.Top == nil {
		return ""
	}
	return *o.Top
}

// GetTopOk returns a tuple with the Top field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VideoWatermark) GetTopOk() (*string, bool) {
	if o == nil || o.Top == nil {
		return nil, false
	}
	return o.Top, true
}

// HasTop returns a boolean if a field has been set.
func (o *VideoWatermark) HasTop() bool {
	return o != nil && o.Top != nil
}

// SetTop gets a reference to the given string and assigns it to the Top field.
func (o *VideoWatermark) SetTop(v string) {
	o.Top = &v
}
// GetLeft returns the Left field value if set, zero value otherwise.
func (o *VideoWatermark) GetLeft() string {
	if o == nil || o.Left == nil {
		return ""
	}
	return *o.Left
}

// GetLeftOk returns a tuple with the Left field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VideoWatermark) GetLeftOk() (*string, bool) {
	if o == nil || o.Left == nil {
		return nil, false
	}
	return o.Left, true
}

// HasLeft returns a boolean if a field has been set.
func (o *VideoWatermark) HasLeft() bool {
	return o != nil && o.Left != nil
}

// SetLeft gets a reference to the given string and assigns it to the Left field.
func (o *VideoWatermark) SetLeft(v string) {
	o.Left = &v
}
// GetBottom returns the Bottom field value if set, zero value otherwise.
func (o *VideoWatermark) GetBottom() string {
	if o == nil || o.Bottom == nil {
		return ""
	}
	return *o.Bottom
}

// GetBottomOk returns a tuple with the Bottom field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VideoWatermark) GetBottomOk() (*string, bool) {
	if o == nil || o.Bottom == nil {
		return nil, false
	}
	return o.Bottom, true
}

// HasBottom returns a boolean if a field has been set.
func (o *VideoWatermark) HasBottom() bool {
	return o != nil && o.Bottom != nil
}

// SetBottom gets a reference to the given string and assigns it to the Bottom field.
func (o *VideoWatermark) SetBottom(v string) {
	o.Bottom = &v
}
// GetRight returns the Right field value if set, zero value otherwise.
func (o *VideoWatermark) GetRight() string {
	if o == nil || o.Right == nil {
		return ""
	}
	return *o.Right
}

// GetRightOk returns a tuple with the Right field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VideoWatermark) GetRightOk() (*string, bool) {
	if o == nil || o.Right == nil {
		return nil, false
	}
	return o.Right, true
}

// HasRight returns a boolean if a field has been set.
func (o *VideoWatermark) HasRight() bool {
	return o != nil && o.Right != nil
}

// SetRight gets a reference to the given string and assigns it to the Right field.
func (o *VideoWatermark) SetRight(v string) {
	o.Right = &v
}
// GetWidth returns the Width field value if set, zero value otherwise.
func (o *VideoWatermark) GetWidth() string {
	if o == nil || o.Width == nil {
		return ""
	}
	return *o.Width
}

// GetWidthOk returns a tuple with the Width field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VideoWatermark) GetWidthOk() (*string, bool) {
	if o == nil || o.Width == nil {
		return nil, false
	}
	return o.Width, true
}

// HasWidth returns a boolean if a field has been set.
func (o *VideoWatermark) HasWidth() bool {
	return o != nil && o.Width != nil
}

// SetWidth gets a reference to the given string and assigns it to the Width field.
func (o *VideoWatermark) SetWidth(v string) {
	o.Width = &v
}
// GetHeight returns the Height field value if set, zero value otherwise.
func (o *VideoWatermark) GetHeight() string {
	if o == nil || o.Height == nil {
		return ""
	}
	return *o.Height
}

// GetHeightOk returns a tuple with the Height field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VideoWatermark) GetHeightOk() (*string, bool) {
	if o == nil || o.Height == nil {
		return nil, false
	}
	return o.Height, true
}

// HasHeight returns a boolean if a field has been set.
func (o *VideoWatermark) HasHeight() bool {
	return o != nil && o.Height != nil
}

// SetHeight gets a reference to the given string and assigns it to the Height field.
func (o *VideoWatermark) SetHeight(v string) {
	o.Height = &v
}
// GetOpacity returns the Opacity field value if set, zero value otherwise.
func (o *VideoWatermark) GetOpacity() string {
	if o == nil || o.Opacity == nil {
		return ""
	}
	return *o.Opacity
}

// GetOpacityOk returns a tuple with the Opacity field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VideoWatermark) GetOpacityOk() (*string, bool) {
	if o == nil || o.Opacity == nil {
		return nil, false
	}
	return o.Opacity, true
}

// HasOpacity returns a boolean if a field has been set.
func (o *VideoWatermark) HasOpacity() bool {
	return o != nil && o.Opacity != nil
}

// SetOpacity gets a reference to the given string and assigns it to the Opacity field.
func (o *VideoWatermark) SetOpacity(v string) {
	o.Opacity = &v
}
type NullableVideoWatermark struct {
value *VideoWatermark
isSet bool
}
func (v NullableVideoWatermark) Get() *VideoWatermark {
return v.value
}
func (v *NullableVideoWatermark) Set(val *VideoWatermark) {
v.value = val
v.isSet = true
}
func (v NullableVideoWatermark) IsSet() bool {
return v.isSet
}
func (v *NullableVideoWatermark) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableVideoWatermark(val *VideoWatermark) *NullableVideoWatermark {
return &NullableVideoWatermark{value: val, isSet: true}
} | model_video_watermark.go | 0.786869 | 0.457924 | model_video_watermark.go | starcoder |
package mssqlx
import (
"context"
"database/sql"
"github.com/jmoiron/sqlx"
)
// Stmtx wraps over sqlx.Stmt, routing its execution methods through
// retryFunc so that transient failures can be retried.
type Stmtx struct {
	*sqlx.Stmt
}
// Exec executes a prepared statement with the given arguments and returns a Result summarizing the effect of the statement.
// It is shorthand for ExecContext with context.Background().
func (s *Stmtx) Exec(args ...interface{}) (sql.Result, error) {
	return s.ExecContext(context.Background(), args...)
}
// ExecContext executes a prepared statement with the given arguments and returns a Result summarizing the effect of the statement.
// The execution is routed through retryFunc under the "stmt_exec" label.
func (s *Stmtx) ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error) {
	res, err := retryFunc("stmt_exec", func() (interface{}, error) {
		return s.Stmt.ExecContext(ctx, args...)
	})
	if err != nil {
		return nil, err
	}
	return res.(sql.Result), nil
}
// Query executes a prepared query statement with the given arguments and returns the query results as a *Rows.
// It is shorthand for QueryContext with context.Background().
func (s *Stmtx) Query(args ...interface{}) (*sql.Rows, error) {
	return s.QueryContext(context.Background(), args...)
}
// QueryContext executes a prepared query statement with the given arguments and returns the query results as a *Rows.
// The execution is routed through retryFunc under the "stmt_query" label.
func (s *Stmtx) QueryContext(ctx context.Context, args ...interface{}) (*sql.Rows, error) {
	res, err := retryFunc("stmt_query", func() (interface{}, error) {
		return s.Stmt.QueryContext(ctx, args...)
	})
	if err != nil {
		return nil, err
	}
	return res.(*sql.Rows), nil
}
// Queryx executes a prepared query statement with the given arguments and returns the query results as a *Rows.
// It is shorthand for QueryxContext with context.Background().
func (s *Stmtx) Queryx(args ...interface{}) (*sqlx.Rows, error) {
	return s.QueryxContext(context.Background(), args...)
}
// QueryxContext executes a prepared query statement with the given arguments and returns the query results as a *Rows.
func (s *Stmtx) QueryxContext(ctx context.Context, args ...interface{}) (result *sqlx.Rows, err error) {
r, err := retryFunc("stmt_query", func() (interface{}, error) {
return s.Stmt.QueryxContext(ctx, args...)
})
if err == nil {
result = r.(*sqlx.Rows)
}
return
} | stmtx.go | 0.712232 | 0.409339 | stmtx.go | starcoder |
package day19
import (
"math"
"strconv"
"strings"
"github.com/heindsight/aoc21/utils/set"
"github.com/heindsight/aoc21/utils/input"
"github.com/heindsight/aoc21/utils/numeric"
)
// Vector is a point or displacement in 3-D integer space.
type Vector [3]int

// Sub returns the component-wise difference p - q.
func (p Vector) Sub(q Vector) Vector {
	d := Vector{}
	for i := range p {
		d[i] = p[i] - q[i]
	}
	return d
}

// Add returns the component-wise sum p + q.
func (p Vector) Add(q Vector) Vector {
	d := Vector{}
	for i := range p {
		d[i] = p[i] + q[i]
	}
	return d
}

// Length returns the Euclidean length of p.
func (p Vector) Length() float64 {
	s := 0.0
	for _, x := range p {
		// Square directly instead of calling math.Pow(x, 2): cheaper and
		// exact for integer inputs.
		f := float64(x)
		s += f * f
	}
	return math.Sqrt(s)
}

// Manhattan returns the Manhattan (taxicab) length of p.
func (p Vector) Manhattan() int {
	s := 0
	for _, x := range p {
		s += numeric.Abs(x)
	}
	return s
}

// Dot returns the dot product of p and q.
func (p Vector) Dot(q Vector) int {
	v := 0
	for i := range p {
		v += p[i] * q[i]
	}
	return v
}
// Matrix is a 3x3 integer matrix stored as three row vectors.
type Matrix [3]Vector

// Mul returns the matrix-vector product m·p.
func (m *Matrix) Mul(p Vector) Vector {
	q := Vector{}
	for i, row := range m {
		q[i] = row.Dot(p)
	}
	return q
}
// Rotations lists all 24 axis-aligned rotation matrices — every orientation
// a scanner could have relative to another.
var Rotations = []Matrix{
	{{1, 0, 0}, {0, 1, 0}, {0, 0, 1}},
	{{1, 0, 0}, {0, 0, 1}, {0, -1, 0}},
	{{1, 0, 0}, {0, 0, -1}, {0, 1, 0}},
	{{1, 0, 0}, {0, -1, 0}, {0, 0, -1}},
	{{0, 1, 0}, {1, 0, 0}, {0, 0, -1}},
	{{0, 1, 0}, {0, 0, 1}, {1, 0, 0}},
	{{0, 1, 0}, {0, 0, -1}, {-1, 0, 0}},
	{{0, 1, 0}, {-1, 0, 0}, {0, 0, 1}},
	{{0, 0, 1}, {1, 0, 0}, {0, 1, 0}},
	{{0, 0, 1}, {0, 1, 0}, {-1, 0, 0}},
	{{0, 0, 1}, {0, -1, 0}, {1, 0, 0}},
	{{0, 0, 1}, {-1, 0, 0}, {0, -1, 0}},
	{{0, 0, -1}, {1, 0, 0}, {0, -1, 0}},
	{{0, 0, -1}, {0, 1, 0}, {1, 0, 0}},
	{{0, 0, -1}, {0, -1, 0}, {-1, 0, 0}},
	{{0, 0, -1}, {-1, 0, 0}, {0, 1, 0}},
	{{0, -1, 0}, {1, 0, 0}, {0, 0, 1}},
	{{0, -1, 0}, {0, 0, 1}, {-1, 0, 0}},
	{{0, -1, 0}, {0, 0, -1}, {1, 0, 0}},
	{{0, -1, 0}, {-1, 0, 0}, {0, 0, -1}},
	{{-1, 0, 0}, {0, 1, 0}, {0, 0, -1}},
	{{-1, 0, 0}, {0, 0, 1}, {0, 1, 0}},
	{{-1, 0, 0}, {0, 0, -1}, {0, -1, 0}},
	{{-1, 0, 0}, {0, -1, 0}, {0, 0, 1}},
}

// Identity is the identity rotation (the first entry of Rotations).
var Identity = Rotations[0]
// Scanner holds the beacons seen by one scanner plus, per beacon, the set
// of its distances to every other beacon of the same scanner. Distances are
// invariant under rotation and translation, so the sets act as fingerprints
// for matching beacons across scanners.
type Scanner struct {
	Beacons []Vector
	// Distances maps each beacon to the set of its pairwise distances.
	Distances map[Vector]set.Set
}

// makeScanner returns an empty Scanner with its distance map initialized.
func makeScanner() Scanner {
	s := Scanner{}
	s.Distances = make(map[Vector]set.Set)
	return s
}

// AddBeacon records beacon p and updates the pairwise distance sets of p
// and of every previously added beacon.
func (s *Scanner) AddBeacon(p Vector) {
	s.Distances[p] = set.NewSet()
	for _, q := range s.Beacons {
		d := p.Sub(q).Length()
		s.Distances[p].Add(d)
		s.Distances[q].Add(d)
	}
	s.Beacons = append(s.Beacons, p)
}

// MatchBeacons pairs beacons of s with beacons of other whose distance
// fingerprints share at least 11 values: if the two scanners see the same
// 12 beacons, each shared beacon has 11 pairwise distances in common.
func (s *Scanner) MatchBeacons(other *Scanner) map[Vector]Vector {
	matches := make(map[Vector]Vector)
	for p, dists_p := range s.Distances {
		for q, dists_q := range other.Distances {
			common := dists_p.Intersection(dists_q)
			if common.Length() >= 11 {
				matches[p] = q
			}
		}
	}
	return matches
}

// Align returns a copy of s with every beacon mapped through transform.
// Distance sets are copied unchanged, since distances are preserved by a
// rigid transformation.
func (s *Scanner) Align(transform Transformation) Scanner {
	transformed := Scanner{}
	transformed.Beacons = make([]Vector, len(s.Beacons))
	transformed.Distances = make(map[Vector]set.Set, len(s.Distances))
	for i, b := range s.Beacons {
		q := transform.Transform(b)
		transformed.Beacons[i] = q
		transformed.Distances[q] = s.Distances[b].Copy()
	}
	return transformed
}
// Transformation maps coordinates between scanner reference frames: a
// rotation followed by a translation.
type Transformation struct {
	Rotation Matrix
	Translation Vector
}

// Transform applies t to p: rotate first, then translate.
func (t *Transformation) Transform(p Vector) Vector {
	return t.Rotation.Mul(p).Add(t.Translation)
}
// findXForm searches the 24 candidate rotations for a single rigid
// transformation mapping every key of matches onto its value. For each
// rotation the translation is fixed by the first pair and then verified
// against all remaining pairs. Returns false if no rotation is consistent.
func findXForm(matches map[Vector]Vector) (Transformation, bool) {
	var xform Transformation
	for _, xform.Rotation = range Rotations {
		first := true
		found := false
		for p, q := range matches {
			if first {
				// Derive the translation from the first pair under this rotation.
				rotated := xform.Rotation.Mul(p)
				xform.Translation = q.Sub(rotated)
				first = false
				found = true
			} else if xform.Transform(p) != q {
				found = false
				break
			}
		}
		if found {
			return xform, true
		}
	}
	return Transformation{}, false
}
// findAlignment tries to align scanner against each already-aligned scanner
// (most recently aligned first). An alignment requires at least 12 matched
// beacons plus a single consistent rigid transformation over them.
func findAlignment(scanner *Scanner, aligned []Scanner) (Transformation, bool) {
	for j := len(aligned) - 1; j >= 0; j-- {
		matches := scanner.MatchBeacons(&aligned[j])
		if len(matches) >= 12 {
			transform, found_transform := findXForm(matches)
			if found_transform {
				return transform, true
			}
		}
	}
	return Transformation{}, false
}
// alignScanners brings every scanner into scanner 0's reference frame. It
// repeatedly attempts to align each remaining scanner against the set of
// already-aligned ones until all are placed, returning the aligned scanners
// and the transformation applied to each (scanner 0 gets the identity).
// Panics if a full pass makes no progress, which would otherwise spin
// forever.
func alignScanners(scanners []Scanner) ([]Scanner, []Transformation) {
	aligned := make([]Scanner, 1, len(scanners))
	transformations := make([]Transformation, 1, len(scanners))
	aligned[0] = scanners[0]
	transformations[0] = Transformation{Rotation: Identity}
	found_alignment := set.NewSet()
	found_alignment.Add(0)
	for len(aligned) < len(scanners) {
		start_size := len(aligned)
		for i := 1; i < len(scanners); i++ {
			if found_alignment.Contains(i) {
				continue
			}
			transform, found := findAlignment(&scanners[i], aligned)
			if found {
				aligned = append(aligned, scanners[i].Align(transform))
				transformations = append(transformations, transform)
				found_alignment.Add(i)
			}
		}
		if len(aligned) == start_size {
			panic("No new alignments found! Would loop forever")
		}
	}
	return aligned, transformations
}
func readScanners() []Scanner {
scanners := []Scanner{}
var scanner *Scanner
for line := range input.ReadLines() {
if len(line) == 0 {
continue
}
if strings.HasPrefix(line, "---") {
scanners = append(scanners, makeScanner())
scanner = &scanners[len(scanners)-1]
continue
}
beacon := Vector{}
for i, val := range strings.Split(line, ",") {
v, err := strconv.Atoi(val)
if err != nil {
panic(err)
}
beacon[i] = v
}
scanner.AddBeacon(beacon)
}
return scanners
} | solutions/day19/day19.go | 0.622918 | 0.542076 | day19.go | starcoder |
// Package testt provides utilities for functions that consume *testing.T.
package testt
import (
"errors"
"fmt"
"reflect"
"runtime"
"sync"
"testing"
)
// ExpectFatal fails the test if the specified function does _not_ fail fatally,
// i.e. does not call any of t.{FailNow, Fatal, Fatalf}.
// If it does fail fatally, returns the fatal error message it logged.
// It is recommended the error message be checked to distinguish the
// expected failure from unrelated failures that may have occurred.
func ExpectFatal(t testing.TB, fn func(t testing.TB)) string {
	t.Helper()
	if msg := CaptureFatal(t, fn); msg != nil {
		return *msg
	}
	// fn returned without a fatal failure: report that on the real t.
	t.Fatalf("%s did not fail fatally as expected", funcName(fn))
	return ""
}
// CaptureFatal returns fatal error message if the specified function fails
// fatally, i.e. calls any of t.{FailNow, Fatal, Fatalf}.
// If it does fail fatally, returns the fatal error message it logged.
// A nil result means fn completed without a fatal failure.
func CaptureFatal(t testing.TB, fn func(t testing.TB)) (msg *string) {
	t.Helper()
	// Defer and recover to capture the expected fatal message: the fakeT
	// passed to fn panics with a `failure` value instead of stopping the
	// real test.
	defer func() {
		switch r := recover().(type) {
		case failure:
			// panic from fatal fakeT failure, return the message
			m := string(r)
			msg = &m
		case nil:
			// no panic at all, do nothing
		default:
			// another panic was detected, re-raise
			panic(r)
		}
	}()
	fn(&fakeT{realT: t})
	return nil
}
func funcName(i interface{}) string {
return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
}
// ExpectError determines whether t.Errorf or t.Error was called at least
// once during a test, and returns the set of strings that were specified
// as arguments to the error calls.
// If fn raises no error at all, the real test fails fatally.
func ExpectError(t testing.TB, fn func(testing.TB)) []string {
	t.Helper()
	// fakeT records Error/Errorf messages instead of reporting them.
	ft := &fakeT{realT: t}
	fn(ft)
	if ft.errs == nil {
		t.Fatalf("%s did not raise an error as was expected", funcName(fn))
	}
	return ft.errs
}
// ParallelFatal runs the provided functions in parallel. It waits for every
// function to complete and if any fails fatally, i.e. calls any of t.{FailNow,
// Fatal, Fatalf}, then it fails fatally itself.
func ParallelFatal(t testing.TB, fns ...func(testing.TB)) {
	t.Helper()
	var (
		mu     sync.Mutex
		wg     sync.WaitGroup
		fnErrs = make(map[string]error)
	)
	for _, fn := range fns {
		fn := fn
		wg.Add(1)
		go func() {
			defer wg.Done()
			msg := CaptureFatal(t, fn)
			if msg == nil {
				return
			}
			mu.Lock()
			fnErrs[funcName(fn)] = errors.New(*msg)
			mu.Unlock()
		}()
	}
	wg.Wait()
	if len(fnErrs) > 0 {
		t.Fatalf("ParallelFatal: %d functions failed fatally: %v", len(fnErrs), fnErrs)
	}
}
// fakeT is a testing.TB implementation handed to a function under test so
// that fatal failures and errors can be intercepted instead of terminating
// the real test.
type fakeT struct {
	// Any methods not explicitly implemented here will panic when called.
	testing.TB

	realT testing.TB

	// errs collects the messages passed to Error and Errorf.
	errs []string
}

// failure is a unique type to distinguish test failures from other panics.
type failure string

// FailNow implements the testing.TB FailNow method by panicking with an
// empty failure, which CaptureFatal recovers.
func (ft *fakeT) FailNow() {
	ft.fatal("")
}

// Fatal implements the testing.TB Fatal method by panicking with the
// formatted message, which CaptureFatal recovers.
func (ft *fakeT) Fatal(args ...interface{}) {
	ft.fatal(fmt.Sprintln(args...))
}

// Fatalf implements the testing.TB Fatalf method by panicking with the
// formatted message, which CaptureFatal recovers.
func (ft *fakeT) Fatalf(format string, args ...interface{}) {
	ft.fatal(fmt.Sprintf(format, args...))
}

// fatal panics with msg wrapped in the failure type so CaptureFatal can
// tell an expected fatal failure apart from any other panic.
func (ft *fakeT) fatal(msg string) {
	panic(failure(msg))
}

// Log implements the testing.TB Log method by delegating to the real *testing.T.
func (ft *fakeT) Log(args ...interface{}) {
	ft.realT.Log(args...)
}

// Logf implements the testing.TB Logf method by delegating to the real *testing.T.
func (ft *fakeT) Logf(format string, args ...interface{}) {
	ft.realT.Logf(format, args...)
}

// Errorf implements the testing.TB Errorf method, but rather than reporting
// the error catches it in the errs field of the fakeT.
func (ft *fakeT) Errorf(format string, args ...interface{}) {
	ft.errs = append(ft.errs, fmt.Sprintf(format, args...))
}

// Error implements the testing.TB Error method, but rather than reporting
// the error catches it in the errs field of the fakeT.
func (ft *fakeT) Error(args ...interface{}) {
	ft.errs = append(ft.errs, fmt.Sprintln(args...))
}

// Helper implements the testing.TB Helper method as a noop.
func (*fakeT) Helper() {}
package xarray
import (
"bytes"
"encoding/json"
"github.com/go-xe2/x/core/rwmutex"
"github.com/go-xe2/x/type/t"
"github.com/go-xe2/x/utils/xrand"
"math"
"sort"
)
// TArray is a dynamic array of interface{} values with optional
// concurrent-safety: operations are guarded by mu, which is a no-op mutex
// when the array was created with unsafe=true.
type TArray struct {
	mu *rwmutex.RWMutex
	array []interface{}
}
// New creates an empty TArray. Pass unsafe=true to disable internal locking.
func New(unsafe ...bool) *TArray {
	return NewArraySize(0, 0, unsafe...)
}

// NewArray is an alias of New.
func NewArray(unsafe ...bool) *TArray {
	return NewArraySize(0, 0, unsafe...)
}

// NewArraySize creates a TArray with the given initial length and capacity.
func NewArraySize(size int, cap int, unsafe ...bool) *TArray {
	return &TArray{
		mu: rwmutex.New(unsafe...),
		array: make([]interface{}, size, cap),
	}
}

// NewFrom creates a TArray that shares the given slice (no copy); it is an
// alias of NewArrayFrom.
func NewFrom(array []interface{}, unsafe ...bool) *TArray {
	return NewArrayFrom(array, unsafe...)
}

// NewFromCopy creates a TArray backed by a copy of the given slice; it is
// an alias of NewArrayFromCopy.
func NewFromCopy(array []interface{}, unsafe ...bool) *TArray {
	return NewArrayFromCopy(array, unsafe...)
}

// NewArrayFrom creates a TArray that shares the given slice's backing
// array; later mutations through either reference are visible to both.
func NewArrayFrom(array []interface{}, unsafe ...bool) *TArray {
	return &TArray{
		mu: rwmutex.New(unsafe...),
		array: array,
	}
}

// NewArrayFromCopy creates a TArray backed by a copy of the given slice,
// isolating it from the caller's slice.
func NewArrayFromCopy(array []interface{}, unsafe ...bool) *TArray {
	newTArray := make([]interface{}, len(array))
	copy(newTArray, array)
	return &TArray{
		mu: rwmutex.New(unsafe...),
		array: newTArray,
	}
}
// Get returns the value at index. Panics if index is out of range.
func (a *TArray) Get(index int) interface{} {
	a.mu.RLock()
	defer a.mu.RUnlock()
	value := a.array[index]
	return value
}

// Set assigns value at index and returns the array for chaining. Panics if
// index is out of range.
func (a *TArray) Set(index int, value interface{}) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.array[index] = value
	return a
}

// SetTArray replaces the underlying slice with the given one (no copy).
func (a *TArray) SetTArray(array []interface{}) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.array = array
	return a
}

// Replace overwrites elements from the front with values from array,
// stopping at the shorter of the two lengths.
func (a *TArray) Replace(array []interface{}) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	max := len(array)
	if max > len(a.array) {
		max = len(a.array)
	}
	for i := 0; i < max; i++ {
		a.array[i] = array[i]
	}
	return a
}
// Sum returns the sum of all elements converted to int via t.Int.
func (a *TArray) Sum() (sum int) {
	a.mu.RLock()
	defer a.mu.RUnlock()
	for _, v := range a.array {
		sum += t.Int(v)
	}
	return
}

// SortFunc sorts the array in place using the given less function and
// returns the array for chaining.
func (a *TArray) SortFunc(less func(v1, v2 interface{}) bool) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	sort.Slice(a.array, func(i, j int) bool {
		return less(a.array[i], a.array[j])
	})
	return a
}
// InsertBefore inserts value at position index, shifting existing elements
// right. Panics if index is out of range.
func (a *TArray) InsertBefore(index int, value interface{}) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	// Copy the tail into a fresh slice first: the append below may reuse
	// (and overwrite) the shared backing array.
	rear := append([]interface{}{}, a.array[index:]...)
	a.array = append(a.array[0:index], value)
	a.array = append(a.array, rear...)
	return a
}

// InsertAfter inserts value just after position index, shifting subsequent
// elements right. Panics if index is out of range.
func (a *TArray) InsertAfter(index int, value interface{}) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	// Copy the tail into a fresh slice first: the append below may reuse
	// (and overwrite) the shared backing array.
	rear := append([]interface{}{}, a.array[index+1:]...)
	a.array = append(a.array[0:index+1], value)
	a.array = append(a.array, rear...)
	return a
}
// Remove deletes and returns the element at index. Removing the first or
// last element is O(1) re-slicing; interior removals shift the tail left.
// Panics if index is out of range.
func (a *TArray) Remove(index int) interface{} {
	a.mu.Lock()
	defer a.mu.Unlock()
	if index == 0 {
		value := a.array[0]
		a.array = a.array[1:]
		return value
	} else if index == len(a.array)-1 {
		value := a.array[index]
		a.array = a.array[:index]
		return value
	}
	value := a.array[index]
	a.array = append(a.array[:index], a.array[index+1:]...)
	return value
}
// PushLeft prepends the given values to the front of the array.
func (a *TArray) PushLeft(value ...interface{}) *TArray {
	a.mu.Lock()
	a.array = append(value, a.array...)
	a.mu.Unlock()
	return a
}

// PushRight appends the given values to the end of the array.
func (a *TArray) PushRight(value ...interface{}) *TArray {
	a.mu.Lock()
	a.array = append(a.array, value...)
	a.mu.Unlock()
	return a
}
// PopRand removes and returns one element chosen at random. The random
// index is drawn and the element removed under a single lock, so concurrent
// callers cannot race between the length read and the removal (previously
// len was read before the locked Remove call). Panics when the array is
// empty, assuming xrand.Intn panics for n <= 0 as math/rand does — confirm.
func (a *TArray) PopRand() interface{} {
	a.mu.Lock()
	defer a.mu.Unlock()
	index := xrand.Intn(len(a.array))
	value := a.array[index]
	a.array = append(a.array[:index], a.array[index+1:]...)
	return value
}
// PopRands removes and returns up to size randomly chosen elements
// (capped at the current length).
func (a *TArray) PopRands(size int) []interface{} {
	a.mu.Lock()
	defer a.mu.Unlock()
	if size > len(a.array) {
		size = len(a.array)
	}
	array := make([]interface{}, size)
	for i := 0; i < size; i++ {
		index := xrand.Intn(len(a.array))
		array[i] = a.array[index]
		a.array = append(a.array[:index], a.array[index+1:]...)
	}
	return array
}
// PopLeft removes and returns the first element. Panics when the array is
// empty.
func (a *TArray) PopLeft() interface{} {
	a.mu.Lock()
	defer a.mu.Unlock()
	value := a.array[0]
	a.array = a.array[1:]
	return value
}

// PopRight removes and returns the last element. Panics when the array is
// empty.
func (a *TArray) PopRight() interface{} {
	a.mu.Lock()
	defer a.mu.Unlock()
	index := len(a.array) - 1
	value := a.array[index]
	a.array = a.array[:index]
	return value
}

// PopLefts removes and returns up to size elements from the front (capped
// at the current length). The returned slice shares the old backing array.
func (a *TArray) PopLefts(size int) []interface{} {
	a.mu.Lock()
	defer a.mu.Unlock()
	length := len(a.array)
	if size > length {
		size = length
	}
	value := a.array[0:size]
	a.array = a.array[size:]
	return value
}

// PopRights removes and returns up to size elements from the end (capped
// at the current length). The returned slice shares the old backing array.
func (a *TArray) PopRights(size int) []interface{} {
	a.mu.Lock()
	defer a.mu.Unlock()
	index := len(a.array) - size
	if index < 0 {
		index = 0
	}
	value := a.array[index:]
	a.array = a.array[:index]
	return value
}
// Range returns elements in [start, end); end defaults to the array length
// and is clamped to it. A start past the effective end yields nil; a
// negative start is clamped to 0. In concurrent-safe mode a copy is
// returned; otherwise the result shares the backing array.
func (a *TArray) Range(start int, end ...int) []interface{} {
	a.mu.RLock()
	defer a.mu.RUnlock()
	offsetEnd := len(a.array)
	if len(end) > 0 && end[0] < offsetEnd {
		offsetEnd = end[0]
	}
	if start > offsetEnd {
		return nil
	}
	if start < 0 {
		start = 0
	}
	array := ([]interface{})(nil)
	if a.mu.IsSafe() {
		array = make([]interface{}, offsetEnd-start)
		copy(array, a.array[start:offsetEnd])
	} else {
		array = a.array[start:offsetEnd]
	}
	return array
}
// SubSlice returns a slice of the array starting at offset with the given
// optional length, following PHP array_slice style semantics:
//   - a negative offset counts backwards from the end of the array;
//   - a negative length selects |length| elements ending at offset;
//   - when the array is concurrent-safe a defensive copy is returned,
//     otherwise the returned slice aliases the underlying array.
func (a *TArray) SubSlice(offset int, length ...int) []interface{} {
	a.mu.RLock()
	defer a.mu.RUnlock()
	// Default size is "to the end of the array" unless a length is given.
	size := len(a.array)
	if len(length) > 0 {
		size = length[0]
	}
	if offset > len(a.array) {
		return nil
	}
	if offset < 0 {
		// Negative offset: count from the end; still out of range yields nil.
		offset = len(a.array) + offset
		if offset < 0 {
			return nil
		}
	}
	if size < 0 {
		// Negative size: take |size| elements ending at offset.
		offset += size
		size = -size
		if offset < 0 {
			return nil
		}
	}
	// Clamp the selected window to the array bounds.
	end := offset + size
	if end > len(a.array) {
		end = len(a.array)
		size = len(a.array) - offset
	}
	if a.mu.IsSafe() {
		s := make([]interface{}, size)
		copy(s, a.array[offset:])
		return s
	} else {
		return a.array[offset:end]
	}
}
// Append is an alias of PushRight: it appends the values to the array.
func (a *TArray) Append(value ...interface{}) *TArray {
	return a.PushRight(value...)
}
// Len returns the number of items in the array.
func (a *TArray) Len() int {
	a.mu.RLock()
	defer a.mu.RUnlock()
	return len(a.array)
}
// Slice returns the underlying data of the array: a copy when the array is
// concurrent-safe, otherwise the underlying slice itself.
func (a *TArray) Slice() []interface{} {
	if !a.mu.IsSafe() {
		return a.array
	}
	a.mu.RLock()
	defer a.mu.RUnlock()
	cp := make([]interface{}, len(a.array))
	copy(cp, a.array)
	return cp
}
// Clone returns a new array holding a copy of the current array's items,
// preserving the concurrent-safety setting.
func (a *TArray) Clone() (newTArray *TArray) {
	a.mu.RLock()
	defer a.mu.RUnlock()
	cp := make([]interface{}, len(a.array))
	copy(cp, a.array)
	return NewArrayFrom(cp, !a.mu.IsSafe())
}
// Clear removes all items from the array.
func (a *TArray) Clear() *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	if len(a.array) > 0 {
		a.array = make([]interface{}, 0)
	}
	return a
}
// Contains reports whether value exists in the array.
func (a *TArray) Contains(value interface{}) bool {
	return a.Search(value) >= 0
}
// Search returns the index of the first occurrence of value, or -1 when the
// value is not present. Comparison uses == and therefore panics for
// uncomparable item types, matching the container's general contract.
func (a *TArray) Search(value interface{}) int {
	if len(a.array) == 0 {
		return -1
	}
	a.mu.RLock()
	defer a.mu.RUnlock()
	for pos, item := range a.array {
		if item == value {
			return pos
		}
	}
	return -1
}
// Unique removes duplicate items in place, keeping the first occurrence of
// each value, and returns the array.
//
// Fix: after deleting a.array[j] the following elements shift left by one,
// so index j must be re-examined; the previous version incremented j past
// the shifted element and could leave duplicates behind (e.g. [1,1,1]
// reduced only to [1,1]).
func (a *TArray) Unique() *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	for i := 0; i < len(a.array)-1; i++ {
		for j := i + 1; j < len(a.array); j++ {
			if a.array[i] == a.array[j] {
				a.array = append(a.array[:j], a.array[j+1:]...)
				// Revisit index j, which now holds the next element.
				j--
			}
		}
	}
	return a
}
// LockFunc locks the array for writing and passes the underlying slice to
// handler; the lock is held (and released on panic) for the whole call.
func (a *TArray) LockFunc(handler func(array []interface{})) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	handler(a.array)
	return a
}
// RLockFunc locks the array for reading and passes the underlying slice to
// handler; the read lock is held (and released on panic) for the whole call.
func (a *TArray) RLockFunc(handler func(array []interface{})) *TArray {
	a.mu.RLock()
	defer a.mu.RUnlock()
	handler(a.array)
	return a
}
// Merge appends the items of the given container into a. Any of the known
// container array types is unwrapped via its Slice method; anything else is
// converted on a best-effort basis through t.Interfaces.
func (a *TArray) Merge(array interface{}) *TArray {
	switch v := array.(type) {
	case *TArray:
		a.Append(t.Interfaces(v.Slice())...)
	case *TIntArray:
		a.Append(t.Interfaces(v.Slice())...)
	case *TStringArray:
		a.Append(t.Interfaces(v.Slice())...)
	case *TSortedArray:
		a.Append(t.Interfaces(v.Slice())...)
	case *TSortedIntArray:
		a.Append(t.Interfaces(v.Slice())...)
	case *TSortedStringArray:
		a.Append(t.Interfaces(v.Slice())...)
	default:
		a.Append(t.Interfaces(array)...)
	}
	return a
}
// Fill writes value into num consecutive positions starting at startIndex;
// positions beyond the current length are appended (a negative startIndex is
// treated as 0, and indexes past the end append at the end, not at
// startIndex).
func (a *TArray) Fill(startIndex int, num int, value interface{}) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	if startIndex < 0 {
		startIndex = 0
	}
	for i := startIndex; i < startIndex+num; i++ {
		if i >= len(a.array) {
			a.array = append(a.array, value)
		} else {
			a.array[i] = value
		}
	}
	return a
}
// Chunk splits the array into consecutive sub-slices of at most size items
// each; the returned slices alias the underlying array. A size below 1
// yields nil.
func (a *TArray) Chunk(size int) [][]interface{} {
	if size < 1 {
		return nil
	}
	a.mu.RLock()
	defer a.mu.RUnlock()
	total := len(a.array)
	var out [][]interface{}
	for from := 0; from < total; from += size {
		to := from + size
		if to > total {
			to = total
		}
		out = append(out, a.array[from:to])
	}
	return out
}
// Pad grows the array to |size| items using val as filler: a positive size
// pads on the right, a negative size on the left. When the array is already
// long enough (or size is 0) nothing changes.
func (a *TArray) Pad(size int, val interface{}) *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	if size == 0 || (size > 0 && size < len(a.array)) || (size < 0 && size > -len(a.array)) {
		return a
	}
	want := size
	if want < 0 {
		want = -want
	}
	fill := make([]interface{}, want-len(a.array))
	for i := range fill {
		fill[i] = val
	}
	if size > 0 {
		a.array = append(a.array, fill...)
	} else {
		a.array = append(fill, a.array...)
	}
	return a
}
// Rand returns a randomly picked item; the array itself is left unchanged.
// NOTE(review): xrand.Intn panics when the array is empty — confirm callers
// guarantee a non-empty array.
func (a *TArray) Rand() interface{} {
	a.mu.RLock()
	defer a.mu.RUnlock()
	return a.array[xrand.Intn(len(a.array))]
}
// Rands returns up to size randomly picked, distinct-position items from the
// array; the array itself is left unchanged.
//
// Fix: a non-positive size (including size 0 against a non-empty array)
// previously caused an index-out-of-range panic on the first loop iteration;
// it now returns an empty slice.
func (a *TArray) Rands(size int) []interface{} {
	a.mu.RLock()
	defer a.mu.RUnlock()
	if size > len(a.array) {
		size = len(a.array)
	}
	n := make([]interface{}, size)
	if size <= 0 {
		return n
	}
	// Walk a random permutation and stop once size items are collected.
	for i, v := range xrand.Perm(len(a.array)) {
		n[i] = a.array[v]
		if i == size-1 {
			break
		}
	}
	return n
}
// Shuffle randomly reorders the items of the array in place.
func (a *TArray) Shuffle() *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	for src, dst := range xrand.Perm(len(a.array)) {
		a.array[src], a.array[dst] = a.array[dst], a.array[src]
	}
	return a
}
// Reverse reverses the order of the items in place.
func (a *TArray) Reverse() *TArray {
	a.mu.Lock()
	defer a.mu.Unlock()
	n := len(a.array)
	for i := 0; i < n/2; i++ {
		j := n - 1 - i
		a.array[i], a.array[j] = a.array[j], a.array[i]
	}
	return a
}
// Join concatenates the string form of every item, separated by glue.
func (a *TArray) Join(glue string) string {
	a.mu.RLock()
	defer a.mu.RUnlock()
	var sb bytes.Buffer
	for i, item := range a.array {
		if i > 0 {
			sb.WriteString(glue)
		}
		sb.WriteString(t.String(item))
	}
	return sb.String()
}
// CountValues returns a map from each distinct item to its number of
// occurrences in the array.
func (a *TArray) CountValues() map[interface{}]int {
	a.mu.RLock()
	defer a.mu.RUnlock()
	counts := make(map[interface{}]int)
	for _, item := range a.array {
		counts[item]++
	}
	return counts
}
// String returns the array serialized as JSON; marshalling errors are
// deliberately ignored and yield an empty string.
func (a *TArray) String() string {
	a.mu.RLock()
	defer a.mu.RUnlock()
	jsonContent, _ := json.Marshal(a.array)
	return string(jsonContent)
}
// MarshalJSON implements json.Marshaler by serializing the underlying slice
// while holding the read lock.
func (a *TArray) MarshalJSON() ([]byte, error) {
	a.mu.RLock()
	defer a.mu.RUnlock()
	return json.Marshal(a.array)
}
package main
import (
"sort"
"strconv"
"github.com/james-wallis/adventofcode/utils"
)
// ReadLinesAndConvertToInts reads a file, splits it into lines and converts
// every line to an int64 (the file is assumed to contain only integers).
// It returns the first read or parse error encountered.
func ReadLinesAndConvertToInts(path string) ([]int64, error) {
	lines, err := utils.ReadLines(path)
	if err != nil {
		return nil, err
	}
	var numbers []int64
	for _, line := range lines {
		n, parseErr := strconv.ParseInt(line, 10, 64)
		if parseErr != nil {
			return nil, parseErr
		}
		numbers = append(numbers, n)
	}
	return numbers, nil
}
// addInts returns the sum of x and y.
func addInts(x int64, y int64) int64 {
	sum := x + y
	return sum
}
// multiplyInts returns the product of x and y.
func multiplyInts(x int64, y int64) int64 {
	product := x * y
	return product
}
// intIs2020 reports whether x equals the puzzle target value 2020.
func intIs2020(x int64) bool {
	const target = 2020
	return x == target
}
// findNumberToMake2020 returns the first entry of numbers that sums with
// numToAddTo to exactly 2020, or -1 when no such entry exists.
func findNumberToMake2020(numToAddTo int64, numbers []int64) (result int64) {
	for _, candidate := range numbers {
		if numToAddTo+candidate == 2020 {
			return candidate
		}
	}
	return -1
}
// CalculateWhichTwoNumbersMake2020 returns two distinct entries of the input
// that sum to 2020, or (-1, -1) when no such pair exists. O(n^2).
//
// Fix: the inner loop now starts at i+1, so an entry can no longer be paired
// with itself (previously a lone 1010 incorrectly matched itself); this also
// halves the comparisons.
func CalculateWhichTwoNumbersMake2020(numbers []int64) (int64, int64) {
	for i := 0; i < len(numbers); i++ {
		for j := i + 1; j < len(numbers); j++ {
			if numbers[i]+numbers[j] == 2020 {
				return numbers[i], numbers[j]
			}
		}
	}
	return -1, -1
}
// CalculateWhichThreeNumbersMake2020 returns three distinct entries of the
// input that sum to 2020, or (-1, -1, -1) when no such triple exists. O(n^3).
//
// Fix: the inner loops now start at i+1 and j+1 respectively, so the same
// entry can no longer be used more than once in a triple; this also reduces
// redundant comparisons.
func CalculateWhichThreeNumbersMake2020(numbers []int64) (int64, int64, int64) {
	for i := 0; i < len(numbers); i++ {
		for j := i + 1; j < len(numbers); j++ {
			for k := j + 1; k < len(numbers); k++ {
				if numbers[i]+numbers[j]+numbers[k] == 2020 {
					return numbers[i], numbers[j], numbers[k]
				}
			}
		}
	}
	return -1, -1, -1
}
// OptimisedTwoNumbersMake2020 finds two distinct entries summing to 2020
// using sort + two pointers (O(n log n) overall), returning (0, 0) when no
// pair exists. NOTE: the input slice is sorted in place.
//
// Fix: the walk now stops when the pointers meet (start < end), so an entry
// can no longer be paired with itself; the previous version iterated a fixed
// number of times and allowed start == end.
func OptimisedTwoNumbersMake2020(numbers []int64) (int64, int64) {
	sort.Slice(numbers, func(i, j int) bool { return numbers[i] < numbers[j] })
	start, end := 0, len(numbers)-1
	for start < end {
		sum := numbers[start] + numbers[end]
		switch {
		case sum == 2020:
			return numbers[start], numbers[end]
		case sum > 2020:
			end--
		default:
			start++
		}
	}
	return 0, 0
}
package pir
import (
"errors"
"math"
"sync"
"github.com/ncw/gmp"
"github.com/sachaservan/paillier"
"github.com/sachaservan/pir/dpf"
)
// DBMetadata contains information on the layout
// and size information for a slot database type
type DBMetadata struct {
	SlotBytes int // size of each slot, in bytes
	DBSize int // total number of slots in the database
}
// Database is a set of slots arranged in a grid of size width x height
// where each slot has size slotBytes
type Database struct {
	DBMetadata
	Slots []*Slot
	Keywords []uint // set of keywords (optional); used by keyword-based queries
}
// SecretSharedQueryResult contains shares of the resulting slots
type SecretSharedQueryResult struct {
	SlotBytes int
	Shares []*Slot
}
// EncryptedSlot is an array of ciphertext bytes
type EncryptedSlot struct {
	Cts []*paillier.Ciphertext
}
// DoublyEncryptedSlot is an array of doubly encrypted ciphertexts
// which decrypt to a set of ciphertexts
// that then decrypt to a slot
type DoublyEncryptedSlot struct {
	Cts []*paillier.Ciphertext // note: level2 ciphertexts (see Paillier)
}
// EncryptedQueryResult is an array of encrypted slots
type EncryptedQueryResult struct {
	Slots []*EncryptedSlot
	Pk *paillier.PublicKey
	SlotBytes int
	NumBytesPerCiphertext int // how many plaintext bytes each ciphertext encodes
}
// DoublyEncryptedQueryResult is an array of encrypted slots
type DoublyEncryptedQueryResult struct {
	Slots []*DoublyEncryptedSlot
	Pk *paillier.PublicKey
	SlotBytes int
	NumBytesPerCiphertext int
}
// NewDatabase returns an empty database
func NewDatabase() *Database {
	return &Database{}
}
// PrivateSecretSharedQuery uses the provided PIR query to retrieve a slot
// row: it expands the query's DPF into per-row selection bits and applies
// them to the database.
func (db *Database) PrivateSecretSharedQuery(query *QueryShare, nprocs int) (*SecretSharedQueryResult, error) {
	expanded := db.ExpandSharedQuery(query, nprocs)
	return db.PrivateSecretSharedQueryWithExpandedBits(query, expanded, nprocs)
}
// PrivateSecretSharedQueryWithExpandedBits returns the query result without
// re-expanding the query DPF: for every row whose selection bit is set, the
// row's slots are XORed into the per-column accumulators.
//
// Fix: the row count previously computed math.Ceil over the result of
// *integer* division (float64(db.DBSize / query.GroupSize)), making the
// Ceil a no-op and silently dropping the final partial row; the in-loop
// slot bounds check shows a ceiling was intended.
func (db *Database) PrivateSecretSharedQueryWithExpandedBits(query *QueryShare, bits []bool, nprocs int) (*SecretSharedQueryResult, error) {
	// width of a row = query.GroupSize; height = ceil(DBSize / GroupSize)
	dimWidth := query.GroupSize
	dimHeight := int(math.Ceil(float64(db.DBSize) / float64(query.GroupSize)))
	// one XOR accumulator per column
	results := make([]*Slot, dimWidth)
	for col := 0; col < dimWidth; col++ {
		results[col] = &Slot{
			Data: make([]byte, db.SlotBytes),
		}
	}
	for row := 0; row < dimHeight; row++ {
		if bits[row] {
			for col := 0; col < dimWidth; col++ {
				slotIndex := row*dimWidth + col
				// xor if bit is set and within bounds (the last row may be partial)
				if slotIndex < len(db.Slots) {
					XorSlots(results[col], db.Slots[slotIndex])
				} else {
					break
				}
			}
		}
	}
	return &SecretSharedQueryResult{db.SlotBytes, results}, nil
}
// ExpandSharedQuery expands the query's DPF into one selection bit per
// database row. Keyword-based queries evaluate the DPF on each row's
// keyword; index-based queries evaluate it on the row index.
//
// Fix: the row count previously computed math.Ceil over the result of
// *integer* division, making the Ceil a no-op and dropping the final
// partial row. The duplicated DPF-evaluation code in the sequential and
// parallel paths is now shared via a local closure.
func (db *Database) ExpandSharedQuery(query *QueryShare, nprocs int) []bool {
	var wg sync.WaitGroup
	dimHeight := int(math.Ceil(float64(db.DBSize) / float64(query.GroupSize)))
	// num bits to represent the index
	numBits := uint(math.Log2(float64(dimHeight)) + 1)
	if query.IsKeywordBased {
		numBits = uint(32)
	}
	// init server DPF
	pf := dpf.ServerInitialize(query.PrfKeys, numBits)
	bits := make([]bool, dimHeight)
	// evaluate computes the DPF share parity for row i under the given key.
	evaluate := func(i int, key uint) {
		if query.IsTwoParty {
			res := pf.Evaluate2P(query.ShareNumber, query.KeyTwoParty, key)
			// IMPORTANT: take mod 2 of uint *before* casting to float64, otherwise there is an overflow edge case!
			bits[i] = (int(math.Abs(float64(res%2))) == 0)
		} else {
			res := pf.EvaluateMP(query.KeyMultiParty, key)
			// IMPORTANT: take mod 2 of uint *before* casting to float64, otherwise there is an overflow edge case!
			bits[i] = (int(math.Abs(float64(res%2))) == 0)
		}
	}
	for i := 0; i < dimHeight; i++ {
		// key is the row index, or the row's keyword for FSS-based queries
		key := uint(i)
		if query.IsKeywordBased {
			key = db.Keywords[i]
		}
		// don't spin up goroutines in the single-thread case
		if nprocs == 1 {
			evaluate(i, key)
			continue
		}
		wg.Add(1)
		go func(i int, key uint) {
			defer wg.Done()
			evaluate(i, key)
		}(i, key)
		// NOTE(review): this throttles by waiting every nprocs iterations,
		// preserved from the original code — confirm the batching is intended.
		if i%nprocs == 0 || i+1 == dimHeight {
			wg.Wait()
		}
	}
	return bits
}
// PrivateEncryptedQuery uses the provided PIR query to retreive a slot row (encrypted)
// the tricky details are in regards to converting slot bytes to ciphertexts, specifically
// the encryption scheme might not have a message space large enough to accomodate
// all the bytes in a slot, thus requiring the bytes to be split up into several ciphertexts
func (db *Database) PrivateEncryptedQuery(query *EncryptedQuery, nprocs int) (*EncryptedQueryResult, error) {
	// width of databse given query.height
	dimWidth := query.DBWidth
	dimHeight := query.DBHeight
	// how many ciphertexts are needed to represent a slot
	msgSpaceBytes := float64(len(query.Pk.N.Bytes()) - 2)
	numCiphertextsPerSlot := int(math.Ceil(float64(db.SlotBytes) / msgSpaceBytes))
	// NOTE(review): numBytesPerCiphertext is written from multiple goroutines
	// below without synchronization — benign only if every worker writes the
	// same value; confirm or guard with sync.Once.
	numBytesPerCiphertext := 0
	// mapping of results; one for each process
	slotRes := make([][]*EncryptedSlot, nprocs)
	// how many rows each process gets
	numRowsPerProc := int(float64(dimHeight) / float64(nprocs))
	var wg sync.WaitGroup
	for i := 0; i < nprocs; i++ {
		slotRes[i] = make([]*EncryptedSlot, dimWidth)
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// each worker processes a contiguous band of rows
			start := i * numRowsPerProc
			end := i*numRowsPerProc + numRowsPerProc
			// handle the edge case
			if i+1 == nprocs {
				end = dimHeight
			}
			// initialize the slots: each column accumulator starts as an
			// encryption of zero so homomorphic additions can fold into it
			for col := 0; col < dimWidth; col++ {
				slotRes[i][col] = &EncryptedSlot{
					Cts: make([]*paillier.Ciphertext, numCiphertextsPerSlot),
				}
				for j := range slotRes[i][col].Cts {
					slotRes[i][col].Cts[j] = nullCiphertext(query.Pk, paillier.EncLevelOne)
				}
			}
			for row := start; row < end; row++ {
				for col := 0; col < dimWidth; col++ {
					slotIndex := row*dimWidth + col
					if slotIndex >= len(db.Slots) {
						continue
					}
					// convert the slot into big.Int array
					intArr, numBytesPerInt, err := db.Slots[slotIndex].ToGmpIntArray(numCiphertextsPerSlot)
					if err != nil {
						panic(err)
					}
					// set the number of bytes that each ciphertest represents
					if numBytesPerCiphertext == 0 {
						numBytesPerCiphertext = numBytesPerInt
					}
					// homomorphic select: multiply the slot value by the row's
					// encrypted selection bit and add it into the accumulator
					for j, val := range intArr {
						sel := query.Pk.ConstMult(query.EBits[row], val)
						slotRes[i][col].Cts[j] = query.Pk.Add(slotRes[i][col].Cts[j], sel)
					}
				}
			}
		}(i)
	}
	wg.Wait()
	// fold the per-worker partial sums into a single result row
	slots := slotRes[0]
	for i := 1; i < nprocs; i++ {
		for j := 0; j < dimWidth; j++ {
			addEncryptedSlots(query.Pk, slots[j], slotRes[i][j])
		}
	}
	queryResult := &EncryptedQueryResult{
		Pk: query.Pk,
		Slots: slots,
		NumBytesPerCiphertext: numBytesPerCiphertext,
		SlotBytes: db.SlotBytes,
	}
	return queryResult, nil
}
// PrivateDoublyEncryptedQuery executes a row PIR query followed by a column
// PIR query, recursively applying PrivateEncryptedQuery to the row result.
func (db *Database) PrivateDoublyEncryptedQuery(query *DoublyEncryptedQuery, nprocs int) (*DoublyEncryptedQueryResult, error) {
	if query.Row.GroupSize == 0 || query.Row.GroupSize > db.DBSize {
		return nil, errors.New("invalid group size provided in query")
	}
	if query.Col.GroupSize == 0 || query.Col.GroupSize > query.Row.DBWidth {
		return nil, errors.New("invalid group size provided in query")
	}
	// first recover the (encrypted) row, then select the column within it
	rowResult, err := db.PrivateEncryptedQuery(query.Row, nprocs)
	if err != nil {
		return nil, err
	}
	return db.PrivateEncryptedQueryOverEncryptedResult(query.Col, rowResult, nprocs)
}
// PrivateEncryptedQueryOverEncryptedResult executes the (column) query over an
// already-encrypted query result, producing doubly encrypted slots.
// NOTE(review): this panics instead of returning an error when the row size is
// not a multiple of the group size — consider surfacing it as an error.
func (db *Database) PrivateEncryptedQueryOverEncryptedResult(query *EncryptedQuery, result *EncryptedQueryResult, nprocs int) (*DoublyEncryptedQueryResult, error) {
	// number of ciphertexts needed to encrypt a slot
	numCiphertextsPerSlot := len(result.Slots[0].Cts)
	if len(result.Slots)%query.GroupSize != 0 {
		panic("row has a size that is not a multiple of the group size")
	}
	// need to encrypt each of the ciphertexts representing one slot
	// res is a 2D array where each row is an encrypted slot composed of possibly multiple ciphertexts
	res := make([][]*paillier.Ciphertext, query.GroupSize)
	// initialize the slots: level-2 encryptions of zero act as accumulators
	for i := 0; i < query.GroupSize; i++ {
		res[i] = make([]*paillier.Ciphertext, numCiphertextsPerSlot)
		for j := 0; j < numCiphertextsPerSlot; j++ {
			res[i][j] = nullCiphertext(query.Pk, paillier.EncLevelTwo)
		}
	}
	// group memeber (position of the current column within its group)
	member := 0
	// apply the PIR column query to get the desired column ciphertext
	for col := 0; col < len(result.Slots); col++ {
		if col%query.GroupSize == 0 {
			member = 0
		}
		// "selection" bit for the group this column belongs to
		bitIndex := int(col / query.GroupSize)
		bitCt := query.EBits[bitIndex]
		slotCiphertexts := result.Slots[col].Cts
		// homomorphic select: treat the level-1 ciphertext value as the
		// plaintext of a level-2 operation, scaled by the selection bit
		for j, slotCiphertext := range slotCiphertexts {
			ctVal := slotCiphertext.C
			sel := query.Pk.ConstMult(bitCt, ctVal)
			res[member][j] = query.Pk.Add(res[member][j], sel)
		}
		member++
	}
	resSlots := make([]*DoublyEncryptedSlot, query.GroupSize)
	for i, cts := range res {
		resSlots[i] = &DoublyEncryptedSlot{
			Cts: cts,
		}
	}
	queryResult := &DoublyEncryptedQueryResult{
		Pk: query.Pk,
		Slots: resSlots,
		NumBytesPerCiphertext: result.NumBytesPerCiphertext,
		SlotBytes: db.SlotBytes,
	}
	return queryResult, nil
}
// BuildForData constructs a PIR database of slots, one slot per input
// string, automatically choosing the bandwidth-optimal slot size.
func (db *Database) BuildForData(data []string) {
	db.BuildForDataWithSlotSize(data, GetRequiredSlotSize(data))
}
// BuildForDataWithSlotSize constructs a PIR database where each input
// string occupies one slot of the specified size (longer strings are
// truncated to slotSize bytes).
func (db *Database) BuildForDataWithSlotSize(data []string, slotSize int) {
	db.SlotBytes = slotSize
	db.DBSize = len(data)
	db.Slots = make([]*Slot, len(data))
	for i, s := range data {
		buf := make([]byte, slotSize)
		copy(buf, s)
		db.Slots[i] = &Slot{
			Data: buf,
		}
	}
}
// SetKeywords set the keywords (uints) associated with each row of the database
// NOTE(review): the slice is stored without copying — the caller must not
// mutate it afterwards; confirm callers respect this.
func (db *Database) SetKeywords(keywords []uint) {
	db.Keywords = keywords
}
// IndexToCoordinates returns the 2D (row, column) coordinates for an index;
// a PIR query should use the first value to recover the row and the second
// value to recover the column in the response. The height parameter is
// unused but kept for interface compatibility.
func (dbmd *DBMetadata) IndexToCoordinates(index, width, height int) (int, int) {
	row := index / width
	col := index % width
	return row, col
}
// GetDimentionsForDatabase returns the (width, height) layout for the given
// height constraint; height is the desired number of rows and groupSize is
// the number of *adjacent* slots that constitute a "group" (default = 1).
//
// Fixes: both math.Ceil calls previously operated on the result of *integer*
// division, making the Ceil a no-op; the initial dead assignment to
// dimHeight has also been removed.
func (dbmd *DBMetadata) GetDimentionsForDatabase(height int, groupSize int) (int, int) {
	dimWidth := int(math.Ceil(float64(dbmd.DBSize) / float64(height*groupSize)))
	if dimWidth == 0 {
		dimWidth = 1
	}
	// trim the height to fit the database without extra rows
	dimHeight := int(math.Ceil(float64(dbmd.DBSize) / float64(dimWidth*groupSize)))
	return dimWidth * groupSize, dimHeight
}
// GetSqrtOfDBSize returns floor(sqrt(DBSize)) + 1.
func (dbmd *DBMetadata) GetSqrtOfDBSize() int {
	root := math.Sqrt(float64(dbmd.DBSize))
	return int(root + 1)
}
// GetOptimalDBDimentions returns the bandwidth-optimal (width, height) for a
// PIR database: height ~ sqrt(total bytes), width = ceil(dbSize / height).
func GetOptimalDBDimentions(slotSize int, dbSize int) (int, int) {
	h := math.Max(1, math.Sqrt(float64(dbSize*slotSize)))
	height := int(h)
	width := int(math.Ceil(float64(dbSize) / float64(height)))
	return width, height
}
// GetOptimalWeightedDBDimentions returns the optimal DB dimensions for PIR
// with the height scaled up by weight (>= 1) and the width shrunk
// accordingly.
func GetOptimalWeightedDBDimentions(slotSize int, dbSize int, weight int) (int, int) {
	w, h := GetOptimalDBDimentions(slotSize, dbSize)
	return w / weight, int(math.Ceil(float64(h * weight)))
}
// addEncryptedSlots homomorphically adds slot b into slot a, component-wise.
func addEncryptedSlots(pk *paillier.PublicKey, a, b *EncryptedSlot) {
	for i := range b.Cts {
		a.Cts[i] = pk.Add(a.Cts[i], b.Cts[i])
	}
}
// nullCiphertext returns an encryption of zero with the randomness fixed to 1
// at the given level; used as the additive identity when accumulating
// homomorphic sums.
func nullCiphertext(pk *paillier.PublicKey, level paillier.EncryptionLevel) *paillier.Ciphertext {
	return pk.EncryptWithRAtLevel(gmp.NewInt(0), gmp.NewInt(1), level)
}
package exporter
import (
"fmt"
"time"
)
// Keep track of labels values for a metric.
type LabelValueTracker interface {
	// Observe records the label values; returns true when seen for the first time.
	Observe(labels map[string]string) (bool, error)
	// DeleteByLabels deletes matching series (missing labels act as wildcards).
	DeleteByLabels(labels map[string]string) ([]map[string]string, error)
	// DeleteByRetention deletes series not updated within the retention period.
	DeleteByRetention(retention time.Duration) []map[string]string
}
// Represents the label values for a single time series, i.e. if a time series was created with
// myVec.WithLabelValues("404", "GET").Add(42)
// then a labelValues with values = []{"404", "GET"} and the current timestamp is created.
type observedLabelValues struct {
	values []string
	lastUpdate time.Time
}
// Represents a list of labels for all time series ever observed (unless they are deleted).
type observedLabels struct {
	labelNames []string
	values []*observedLabelValues
}
// NewLabelValueTracker creates a tracker for the given label names; the
// names are copied so later mutation by the caller has no effect.
func NewLabelValueTracker(labelNames []string) LabelValueTracker {
	names := append([]string(nil), labelNames...)
	return &observedLabels{
		labelNames: names,
		values: []*observedLabelValues{},
	}
}
// Observe records the given label values, returning true when this value
// combination is seen for the first time; known combinations only have
// their last-update timestamp refreshed.
func (observed *observedLabels) Observe(labels map[string]string) (bool, error) {
	checks := []error{
		observed.assertLabelNamesExist(labels),
		observed.assertLabelNamesComplete(labels),
		observed.assertLabelValuesNotEmpty(labels),
	}
	for _, err := range checks {
		if err != nil {
			return false, fmt.Errorf("error observing label values: %v", err)
		}
	}
	return observed.addOrUpdate(observed.makeLabelValues(labels)), nil
}
// DeleteByLabels deletes every observed time series matching labels and
// returns the deleted label sets; labels omitted from the map act as
// wildcards.
func (observed *observedLabels) DeleteByLabels(labels map[string]string) ([]map[string]string, error) {
	checks := []error{
		observed.assertLabelNamesExist(labels),
		observed.assertLabelValuesNotEmpty(labels),
		// assertLabelNamesComplete is deliberately skipped: missing labels
		// represent wildcards when deleting.
	}
	for _, err := range checks {
		if err != nil {
			return nil, fmt.Errorf("error deleting label values: %v", err)
		}
	}
	wanted := observed.makeLabelValues(labels)
	deleted := make([]map[string]string, 0)
	kept := make([]*observedLabelValues, 0, len(observed.values))
	for _, ov := range observed.values {
		if equalsIgnoreEmpty(wanted, ov.values) {
			deleted = append(deleted, observed.values2map(ov))
		} else {
			kept = append(kept, ov)
		}
	}
	observed.values = kept
	return deleted, nil
}
// DeleteByRetention deletes every observed time series whose last update is
// older than the retention period and returns the deleted label sets.
func (observed *observedLabels) DeleteByRetention(retention time.Duration) []map[string]string {
	cutoff := time.Now().Add(-retention)
	deleted := make([]map[string]string, 0)
	kept := make([]*observedLabelValues, 0, len(observed.values))
	for _, ov := range observed.values {
		if ov.lastUpdate.Before(cutoff) {
			deleted = append(deleted, observed.values2map(ov))
		} else {
			kept = append(kept, ov)
		}
	}
	observed.values = kept
	return deleted
}
// values2map converts an observed value tuple back into a name->value map
// using the tracker's declared label-name order.
func (observed *observedLabels) values2map(observedValues *observedLabelValues) map[string]string {
	m := make(map[string]string, len(observedValues.values))
	for i, v := range observedValues.values {
		m[observed.labelNames[i]] = v
	}
	return m
}
// assertLabelNamesExist verifies that every provided label name was declared
// when the metric was initialized.
func (observed *observedLabels) assertLabelNamesExist(labels map[string]string) error {
	for name := range labels {
		if containsString(observed.labelNames, name) {
			continue
		}
		return fmt.Errorf("label '%v' is not defined for the metric.", name)
	}
	return nil
}
// assertLabelNamesComplete verifies that exactly as many labels were
// provided as were declared for the metric.
func (observed *observedLabels) assertLabelNamesComplete(labels map[string]string) error {
	if len(labels) == len(observed.labelNames) {
		return nil
	}
	return fmt.Errorf("got %v label(s), but the metric was initialized with %v label(s) %v", len(labels), len(observed.labelNames), observed.labelNames)
}
// If we want to support empty label values, we must refactor DeleteByLabels(),
// because currently empty label values represent wildcards for deleting.
// assertLabelValuesNotEmpty rejects any label whose value is the empty string.
func (observed *observedLabels) assertLabelValuesNotEmpty(labels map[string]string) error {
	for name, value := range labels {
		if value == "" {
			return fmt.Errorf("label %v is empty. empty values are not supported", name)
		}
	}
	return nil
}
// makeLabelValues builds the value tuple in declared label-name order;
// labels missing from the map become empty strings.
func (observed *observedLabels) makeLabelValues(labels map[string]string) []string {
	tuple := make([]string, 0, len(observed.labelNames))
	for _, name := range observed.labelNames {
		tuple = append(tuple, labels[name])
	}
	return tuple
}
// addOrUpdate records the tuple, refreshing its timestamp when already
// known; returns true when the tuple was seen for the first time.
func (observed *observedLabels) addOrUpdate(values []string) bool {
	now := time.Now()
	for _, existing := range observed.values {
		if equals(values, existing.values) {
			existing.lastUpdate = now
			return false
		}
	}
	observed.values = append(observed.values, &observedLabelValues{
		values: values,
		lastUpdate: now,
	})
	return true
}
// equals reports whether a and b hold the same strings in the same order.
func equals(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != b[i] {
			return false
		}
	}
	return true
}
// equalsIgnoreEmpty reports whether a and b match element-wise, treating an
// empty string on either side as a wildcard.
func equalsIgnoreEmpty(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != "" && b[i] != "" && s != b[i] {
			return false
		}
	}
	return true
}
// containsString reports whether s occurs in l.
func containsString(l []string, s string) bool {
	for _, candidate := range l {
		if candidate == s {
			return true
		}
	}
	return false
}
package color
import (
"image/color"
)
// ToNRGBA converts a color to RGBA values which are not premultiplied, unlike color.RGBA().
func ToNRGBA(c color.Color) (r, g, b, a int) {
// We use UnmultiplyAlpha with RGBA, RGBA64, and unrecognized implementations of Color.
// It works for all Colors whose RGBA() method is implemented according to spec, but is only necessary for those.
// Only RGBA and RGBA64 have components which are already premultiplied.
switch col := c.(type) {
// NRGBA and NRGBA64 are not premultiplied
case color.NRGBA:
r = int(col.R)
g = int(col.G)
b = int(col.B)
a = int(col.A)
case *color.NRGBA:
r = int(col.R)
g = int(col.G)
b = int(col.B)
a = int(col.A)
case color.NRGBA64:
r = int(col.R) >> 8
g = int(col.G) >> 8
b = int(col.B) >> 8
a = int(col.A) >> 8
case *color.NRGBA64:
r = int(col.R) >> 8
g = int(col.G) >> 8
b = int(col.B) >> 8
a = int(col.A) >> 8
// Gray and Gray16 have no alpha component
case *color.Gray:
r = int(col.Y)
g = int(col.Y)
b = int(col.Y)
a = 0xff
case color.Gray:
r = int(col.Y)
g = int(col.Y)
b = int(col.Y)
a = 0xff
case *color.Gray16:
r = int(col.Y) >> 8
g = int(col.Y) >> 8
b = int(col.Y) >> 8
a = 0xff
case color.Gray16:
r = int(col.Y) >> 8
g = int(col.Y) >> 8
b = int(col.Y) >> 8
a = 0xff
// Alpha and Alpha16 contain only an alpha component.
case color.Alpha:
r = 0xff
g = 0xff
b = 0xff
a = int(col.A)
case *color.Alpha:
r = 0xff
g = 0xff
b = 0xff
a = int(col.A)
case color.Alpha16:
r = 0xff
g = 0xff
b = 0xff
a = int(col.A) >> 8
case *color.Alpha16:
r = 0xff
g = 0xff
b = 0xff
a = int(col.A) >> 8
default: // RGBA, RGBA64, and unknown implementations of Color
r, g, b, a = unmultiplyAlpha(c)
}
return
}
// unmultiplyAlpha returns a color's RGBA components as 8-bit integers by calling c.RGBA() and then removing the alpha premultiplication.
// It is only used by ToRGBA.
func unmultiplyAlpha(c color.Color) (r, g, b, a int) {
red, green, blue, alpha := c.RGBA()
if alpha != 0 && alpha != 0xffff {
red = (red * 0xffff) / alpha
green = (green * 0xffff) / alpha
blue = (blue * 0xffff) / alpha
}
// Convert from range 0-65535 to range 0-255
r = int(red >> 8)
g = int(green >> 8)
b = int(blue >> 8)
a = int(alpha >> 8)
return
} | vendor/fyne.io/fyne/v2/internal/color/color.go | 0.699973 | 0.545528 | color.go | starcoder |
package spline
import (
"image/draw"
"image/color"
"math"
)
// Canvas draws onto an image, using the embedded Matrix to transform
// user-space coordinates into pixel coordinates.
type Canvas struct {
	draw.Image
	Matrix
}
// DrawLine draws a line from (x1,y1) to (x2,y2) in user-space coordinates,
// using Xiaolin Wu's algorithm when antialiased is true and Bresenham's
// otherwise, and returns the canvas for chaining.
// NOTE(review): col is type-asserted to color.NRGBA and will panic for any
// other color type — confirm all callers pass color.NRGBA.
func (canvas *Canvas) DrawLine(x1, y1, x2, y2 float64, col color.Color, antialiased bool) *Canvas {
	if antialiased {
		xiaolinWuLine(canvas, x1, y1, x2, y2, col.(color.NRGBA))
	} else {
		bresenhamLine(canvas, x1, y1, x2, y2, col.(color.NRGBA))
	}
	return canvas
}
// Bresenham's line algorithm
// http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
// bresenhamLine rasterizes an aliased line between the transformed endpoints.
// Cleanups: the hand-rolled abs helper is replaced by math.Abs, and the
// pointless field-by-field reconstruction of the color.NRGBA value is
// removed (col is passed through unchanged).
func bresenhamLine(canvas *Canvas, x1, y1, x2, y2 float64, col color.NRGBA) *Canvas {
	x1, y1 = canvas.TransformPoint(x1, y1)
	x2, y2 = canvas.TransformPoint(x2, y2)
	// A steep line is iterated along y instead of x: swap the axes.
	steep := math.Abs(y1-y2) > math.Abs(x1-x2)
	if steep {
		x1, y1 = y1, x1
		x2, y2 = y2, x2
	}
	// Always iterate left to right.
	if x1 > x2 {
		x1, x2 = x2, x1
		y1, y2 = y2, y1
	}
	dx := x2 - x1
	dy := math.Abs(y2 - y1)
	err := dx / 2
	y := y1
	ystep := -1.0
	if y1 < y2 {
		ystep = 1
	}
	for x := x1; x <= x2; x++ {
		if steep {
			canvas.Set(int(y), int(x), col)
		} else {
			canvas.Set(int(x), int(y), col)
		}
		// Accumulate the error term and step y when it overflows.
		err -= dy
		if err < 0 {
			y += ystep
			err += dx
		}
	}
	return canvas
}
// <NAME>'s antialiased line algorithm
// https://en.wikipedia.org/wiki/Xiaolin_Wu's_line_algorithm
func xiaolinWuLine(canvas *Canvas, x1, y1, x2, y2 float64, col color.NRGBA) *Canvas {
ipart := func(x float64) float64 {
return math.Floor(x)
}
round := func(x float64) float64 {
return ipart(x + 0.5)
}
fpart := func(x float64) float64 {
if x < 0 {
return 1 - (x - ipart(x))
}
return x - ipart(x)
}
rfpart := func(x float64) float64 {
return 1 - fpart(x)
}
x1, y1 = canvas.TransformPoint(x1, y1)
x2, y2 = canvas.TransformPoint(x2, y2)
dx := x2 - x1
dy := y2 - y1
ax := dx
if ax < 0 {
ax = -ax
}
ay := dy
if ay < 0 {
ay = -ay
}
var plot func(int, int, float64)
if ax < ay {
x1, y1 = y1, x1
x2, y2 = y2, x2
dx, dy = dy, dx
plot = func(x, y int, c float64) {
canvas.Set(y, x, color.NRGBA{col.R, col.G, col.B, uint8(255 * c)})
}
} else {
plot = func(x, y int, c float64) {
canvas.Set(x, y, color.NRGBA{col.R, col.G, col.B, uint8(255 * c)})
}
}
if x2 < x1 {
x1, x2 = x2, x1
y1, y2 = y2, y1
}
gradient := dy / dx
// handle first endpoint
xend := round(x1)
yend := y1 + gradient * (xend-x1)
xgap := rfpart(x1 + 0.5) * 1.5
xpxl1 := int(xend)
ypxl1 := int(ipart(yend))
plot(xpxl1, ypxl1, rfpart(yend) * xgap)
plot(xpxl1, ypxl1+1, fpart(yend) * xgap)
intery := yend + gradient
// handle second endpoint
xend = round(x2)
yend = y2 + gradient * (xend-x2)
xgap = fpart(x2 + 0.8)
xpxl2 := int(xend)
ypxl2 := int(ipart(yend))
plot(xpxl2, ypxl2, rfpart(yend) * xgap)
plot(xpxl2, ypxl2+1, fpart(yend) * xgap)
for x := xpxl1 + 1; x <= xpxl2-1; x++ {
plot(x, int(ipart(intery)), rfpart(intery))
plot(x, int(ipart(intery))+1, fpart(intery))
intery = intery + gradient
}
return canvas
} | line.go | 0.626238 | 0.638455 | line.go | starcoder |
package day18
import (
"adventofcode/utils"
"fmt"
"math"
"strconv"
)
// TNode is a node in a snailfish-number binary tree: interior nodes are
// pairs (left/right set, value == -1) and leaves carry a regular number.
type TNode struct {
	parent *TNode
	left *TNode
	right *TNode
	value int
}
// String renders the node in snailfish notation: a leaf prints its value,
// a pair prints as [left,right].
func (tn TNode) String() string {
	if tn.left == nil && tn.right == nil {
		return fmt.Sprint(tn.value)
	}
	return fmt.Sprintf("[%s,%s]", tn.left, tn.right)
}
// isLeaf reports whether the node has no children, i.e. holds a number.
func (tn TNode) isLeaf() bool {
	if tn.left != nil {
		return false
	}
	return tn.right == nil
}
// parseTNode parses a snailfish number such as "[[1,2],3]" into its tree
// form and returns the root pair. The walk keeps a cursor node: '[' descends
// into a fresh left child, ',' hops to a fresh right sibling, ']' ascends,
// and digits fill in the current leaf's value.
// NOTE(review): assumes well-formed input terminated by ']'; a number at the
// very end of the string would slice past len(input) — confirm inputs are
// always complete snailfish numbers.
func parseTNode(input string) *TNode {
	var currentNode *TNode
	// Sentinel root; the first '[' makes the actual number its left child.
	currentNode = &TNode{nil, nil, nil, -1}
	for i := 0; i < len(input); i++ {
		switch input[i] {
		case '[':
			{
				nextNode := TNode{currentNode, nil, nil, -1}
				currentNode.left = &nextNode
				currentNode = &nextNode
			}
		case ',':
			{
				// Sibling hangs off the shared parent's right side.
				nextNode := TNode{currentNode.parent, nil, nil, -1}
				currentNode.parent.right = &nextNode
				currentNode = &nextNode
			}
		case ']':
			currentNode = currentNode.parent
		default:
			// Greedily extend the digit run until ParseInt fails.
			numberLength := 1
			value := -1
			for {
				parsed, err := strconv.ParseInt(input[i:i+numberLength], 10, 64)
				if err != nil {
					break
				} else {
					value = int(parsed)
					numberLength++
				}
			}
			// Skip past the consumed digits (numberLength overshot by one).
			i += (numberLength - 2)
			currentNode.value = value
		}
	}
	return currentNode
}
// findNextLeft returns the leaf immediately to the left of t in the
// snailfish number's reading order, or nil when t is the leftmost leaf.
func findNextLeft(t *TNode) *TNode {
	for node := t; node.parent != nil; node = node.parent {
		if node.parent.right != node {
			continue
		}
		// node hangs off a right branch: the previous leaf is the
		// rightmost descendant of the sibling left subtree.
		cursor := node.parent.left
		for cursor.right != nil {
			cursor = cursor.right
		}
		return cursor
	}
	return nil
}
// findNextRight returns the leaf immediately to the right of t in reading
// order, or nil when t is the rightmost leaf.
func findNextRight(t *TNode) *TNode {
	for node := t; node.parent != nil; node = node.parent {
		if node.parent.left != node {
			continue
		}
		// node hangs off a left branch: the next leaf is the leftmost
		// descendant of the sibling right subtree.
		cursor := node.parent.right
		for cursor != nil && cursor.left != nil {
			cursor = cursor.left
		}
		return cursor
	}
	return nil
}
// findNodeAtDepth returns the leftmost inner (non-leaf) node exactly
// depthLeft levels below t, or nil if none exists.
func findNodeAtDepth(t *TNode, depthLeft int) *TNode {
	switch {
	case depthLeft == 0 && !t.isLeaf():
		return t
	case depthLeft <= 0:
		return nil
	}
	// Prefer the left subtree so the leftmost match wins.
	for _, child := range []*TNode{t.left, t.right} {
		if child == nil {
			continue
		}
		if found := findNodeAtDepth(child, depthLeft-1); found != nil {
			return found
		}
	}
	return nil
}
// findValueGreaterEqualThan returns the leftmost leaf under t whose
// value is at least v, or nil if there is none.
func findValueGreaterEqualThan(t *TNode, v int) *TNode {
	if t.isLeaf() && t.value >= v {
		return t
	}
	// Left subtree first so the leftmost qualifying leaf is found.
	for _, child := range []*TNode{t.left, t.right} {
		if child == nil {
			continue
		}
		if found := findValueGreaterEqualThan(child, v); found != nil {
			return found
		}
	}
	return nil
}
// explode performs one "explode" reduction step: the leftmost pair
// nested at depth 4 is replaced by the leaf 0, its left value is added
// to the nearest leaf on its left and its right value to the nearest
// leaf on its right. Reports whether an explosion happened.
func explode(t *TNode) bool {
	if nodeToExplode := findNodeAtDepth(t, 4); nodeToExplode != nil {
		leftValue := nodeToExplode.left.value
		rightValue := nodeToExplode.right.value
		// Turn the exploding pair into the leaf 0.
		nodeToExplode.left = nil
		nodeToExplode.right = nil
		nodeToExplode.value = 0
		// add left number to next left
		if nextLeft := findNextLeft(nodeToExplode); nextLeft != nil {
			nextLeft.value += leftValue
		}
		// add right number to next right
		if nextRight := findNextRight(nodeToExplode); nextRight != nil {
			nextRight.value += rightValue
		}
		return true
	}
	return false
}
// split performs one "split" reduction step: the leftmost leaf with a
// value of 10 or more is replaced by the pair [floor(v/2), ceil(v/2)].
// Reports whether a split happened.
func split(t *TNode) bool {
	target := findValueGreaterEqualThan(t, 10)
	if target == nil {
		return false
	}
	half := float64(target.value) / 2.0
	target.left = &TNode{target, nil, nil, int(math.Floor(half))}
	target.right = &TNode{target, nil, nil, int(math.Ceil(half))}
	target.value = -1 // the leaf is now an inner pair
	return true
}
// add joins two snailfish numbers under a fresh root and reduces the
// result: explode exhaustively, then alternate a single split with
// explosions until neither action applies. Both operands are mutated.
func add(l *TNode, r *TNode) *TNode {
	root := &TNode{nil, l, r, -1}
	l.parent = root
	r.parent = root
	didSplit, didExplode := true, true
	for didSplit || didExplode {
		// All pending explosions must happen before the next split.
		for didExplode {
			didExplode = explode(root)
		}
		didSplit = split(root)
		didExplode = explode(root)
	}
	return root
}
// calcMagnitude computes a number's magnitude: a leaf's magnitude is
// its value (leaves are marked by value != -1); a pair's magnitude is
// 3 times the left magnitude plus 2 times the right magnitude.
func calcMagnitude(t *TNode) int {
	if t.value != -1 {
		return t.value
	}
	return 3*calcMagnitude(t.left) + 2*calcMagnitude(t.right)
}
// addLines folds the listed snailfish numbers left-to-right into a
// single reduced sum. lines must be non-empty.
func addLines(lines []string) *TNode {
	sum := parseTNode(lines[0])
	for _, line := range lines[1:] {
		sum = add(sum, parseTNode(line))
	}
	return sum
}
func largestPossibleMagnitude(lines []string) int {
largest := 0
for i := 0; i < len(lines)-1; i++ {
for j := 0; j < len(lines); j++ {
if j != i {
iNode := parseTNode(lines[i])
jNode := parseTNode(lines[j])
if mag := calcMagnitude(add(iNode, jNode)); mag > largest {
largest = mag
}
}
}
}
return largest
}
// PrintSolution reads the day-18 puzzle input and prints both answers:
// the magnitude of the sum of all numbers (part 1) and the largest
// magnitude of any ordered two-number sum (part 2).
func PrintSolution() {
	lines := utils.ParseLines("./inputs/day18.txt")
	magnitude := calcMagnitude(addLines(lines))
	fmt.Println("Magnitude (Day 1)", magnitude)
	MaxMagnitude := largestPossibleMagnitude(lines)
	fmt.Println("MaxMagnitude (Day 2)", MaxMagnitude)
}
package metronome
import (
"time"
"gitlab.com/gomidi/midi/smf"
)
// absPosToMsec transforms the absolute ticks to microseconds based on the tempi.
// NOTE(review): despite the "msec" name the unit is microseconds — see the
// FractionalDuration(...).Microseconds() calls below; confirm with callers.
// A default tempo of 120 BPM applies before the first tempo event.
func absPosToMsec(metricTicks smf.MetricTicks, temps tempi, absPos uint64) (msec int64) {
	/*
		calculate the abstime in msec for every tempo position up to the last tempo position before absPos
		the abstime of a tempo position is calculated the following way:
		absTime = absTimePrevTempo + metricTicks.FractionalDuration(lastTempo, uint32(absPosCurrent - absPosPrevious)).Microseconds()
		the abstime of the ticks is
		absTime = absTimePrevTempo + metricTicks.FractionalDuration(lastTempo, uint32(absPos - absPosPrevious)).Microseconds()
	*/
	var absTimeLastTempo int64
	var absTicksLastTempo uint64
	var lastTempo float64 = 120.0
	// Accumulate elapsed time segment-by-segment over tempo changes at or
	// before absPos.
	for _, tm := range temps {
		if tm.absPos > absPos {
			break
		}
		if tm.absPos == 0 {
			// A tempo event at tick 0 only changes the starting tempo.
			lastTempo = tm.bpm
			continue
		}
		absTime := absTimeLastTempo + metricTicks.FractionalDuration(lastTempo, uint32(tm.absPos-absTicksLastTempo)).Microseconds()
		absTimeLastTempo = absTime
		absTicksLastTempo = tm.absPos
		lastTempo = tm.bpm
	}
	if absPos == absTicksLastTempo {
		return absTimeLastTempo
	}
	// Remaining ticks after the last tempo change before absPos.
	msec = absTimeLastTempo + metricTicks.FractionalDuration(lastTempo, uint32(absPos-absTicksLastTempo)).Microseconds()
	//fmt.Printf("converted tick at %v to microsecs %v\n", absPos, msec)
	return
}
// msecToAbsPos calculates the ticks based on the microseconds and the tempi
// (inverse of absPosToMsec; "msec" is likewise microseconds here).
// A default tempo of 120 BPM applies before the first tempo event.
func msecToAbsPos(metricTicks smf.MetricTicks, temps tempi, msec int64) (absPos uint64) {
	/*
		calculate the abstick for every tempo absTime up to the last tempo time before msec
		the abstick of a tempo time is calculated the following way:
		abstick = absTickPrevTempo + metricTicks.FractionalTicks(lastTempo, (absTimeCurrent-absTimePrevious) *time.Microsecond )
		the abstime of the ticks is
		abstick = absTickPrevTempo + metricTicks.FractionalTicks(lastTempo, (msec-absTimePrevious) *time.Microsecond )
	*/
	var absTickLastTempo uint64
	var absTimeLastTempo int64
	var lastTempo float64 = 120.0
	// Accumulate elapsed ticks segment-by-segment over tempo changes at or
	// before msec.
	for _, tm := range temps {
		if tm.msec > msec {
			break
		}
		if tm.msec == 0 {
			// A tempo event at time 0 only changes the starting tempo.
			lastTempo = tm.bpm
			continue
		}
		abstick := absTickLastTempo + uint64(metricTicks.FractionalTicks(lastTempo, time.Duration(tm.msec-absTimeLastTempo)*time.Microsecond))
		absTickLastTempo = abstick
		absTimeLastTempo = tm.msec
		// NOTE(review): if temps yields values (not pointers), this assigns
		// to the loop copy and has no effect outside this function — confirm
		// whether caching absPos here was intended.
		tm.absPos = abstick
		lastTempo = tm.bpm
	}
	if msec == absTimeLastTempo {
		return absTickLastTempo
	}
	// Remaining time after the last tempo change before msec.
	absPos = absTickLastTempo + uint64(metricTicks.FractionalTicks(lastTempo, time.Duration(msec-absTimeLastTempo)*time.Microsecond))
	//fmt.Printf("converted microsec %v to abstick %v\n", msec, absPos)
	return
}
// timeDistaneToTempo (sic — name typo kept for compatibility) derives
// the BPM implied by two beat timestamps given in microseconds:
// 60 seconds divided by the interval.
func timeDistaneToTempo(msecA, msecB int64) (bpm float64) {
	interval := msecB - msecA
	return 60000000.0 / float64(interval)
}
package hash
import "math/big"
// newGroup wraps the quadratic-form coefficients (a, b, c) in a group
// element; the cached discriminant starts out unset.
func newGroup(a, b, c *big.Int) *group {
	g := &group{}
	g.a, g.b, g.c = a, b, c
	return g
}
// cloneGroup makes a shallow copy of g: the copy shares g's *big.Int
// coefficients (the arithmetic helpers copy before mutating, so this is
// safe as long as that convention holds). The cached discriminant is
// not carried over and will be recomputed on demand.
func cloneGroup(g *group) *group {
	return newGroup(g.a, g.b, g.c)
}
// newGroupFromDiscriminant builds the form (a, b, c) with
// c = (b*b - d) / (4a), i.e. the unique c that gives discriminant d.
func newGroupFromDiscriminant(a, b, d *big.Int) *group {
	z := new(big.Int).Sub(new(big.Int).Mul(b, b), d)
	c := floorDivision(z, new(big.Int).Mul(a, big.NewInt(4)))
	return newGroup(a, b, c)
}
// identityForDiscriminant returns the principal (identity) form
// (1, 1, c) for discriminant d.
func identityForDiscriminant(d *big.Int) *group {
	return newGroupFromDiscriminant(big.NewInt(1), big.NewInt(1), d)
}
// identity returns the identity element of g's class group, i.e. the
// principal form for g's discriminant. Delegates to
// identityForDiscriminant instead of duplicating its body.
func (g *group) identity() *group {
	return identityForDiscriminant(g.discriminant())
}
// Serialize concatenates the big-endian magnitude bytes of a, b and c.
// NOTE(review): big.Int.Bytes() drops the sign and leading zeros and no
// lengths are encoded, so this encoding is not self-delimiting — two
// different forms can serialize identically. Confirm consumers use it
// only as opaque hash input.
func (g *group) Serialize() []byte {
	return append(append(g.a.Bytes(), g.b.Bytes()...), g.c.Bytes()...)
}
// pow raises g to the n-th power by binary (square-and-multiply)
// exponentiation. Returns nil if any intermediate composition fails,
// i.e. a modular solve inside multiply/square has no solution.
func (g *group) pow(n int64) *group {
	base := cloneGroup(g)
	result := g.identity()
	for ; n > 0; n >>= 1 {
		if n&1 == 1 {
			if result = result.multiply(base); result == nil {
				return nil
			}
		}
		if base = base.square(); base == nil {
			return nil
		}
	}
	return result
}
// reduced brings the form into its reduced representative: after
// normalization, while a > c (or a == c with b < 0) a reduction step is
// applied that swaps the roles of a and c and shifts b by a multiple of
// 2c, preserving the discriminant.
func (g *group) reduced() *group {
	g = g.normalized()
	// Work on copies so g's coefficients are never mutated.
	a := new(big.Int).Set(g.a)
	b := new(big.Int).Set(g.b)
	c := new(big.Int).Set(g.c)
	for (a.Cmp(c) == 1) || ((a.Cmp(c) == 0) && (b.Sign() == -1)) {
		// s = floor((c + b) / 2c)
		s := new(big.Int).Add(c, b)
		s = floorDivision(s, new(big.Int).Add(c, c))
		oldA := new(big.Int).Set(a)
		oldB := new(big.Int).Set(b)
		// (a, b, c) <- (c, -b + 2sc, c*s^2 - b*s + a)
		a = new(big.Int).Set(c)
		b.Neg(b)
		x := new(big.Int).Mul(big.NewInt(2), s)
		x.Mul(x, c)
		b.Add(b, x)
		c.Mul(c, s)
		c.Mul(c, s)
		oldB.Mul(oldB, s)
		c.Sub(c, oldB)
		c.Add(c, oldA)
	}
	return newGroup(a, b, c).normalized()
}
// normalized shifts b into the canonical interval (-a, a], adjusting c
// so the discriminant is unchanged. Returns g itself when b is already
// in range.
func (g *group) normalized() *group {
	a := new(big.Int).Set(g.a)
	b := new(big.Int).Set(g.b)
	c := new(big.Int).Set(g.c)
	if (b.Cmp(new(big.Int).Neg(a)) == 1) && (b.Cmp(a) < 1) {
		return g
	}
	// r = floor((a - b) / 2a); then b += 2ra and c += a*r^2 + b*r
	// (with the old b), which keeps b^2 - 4ac invariant.
	r := new(big.Int).Sub(a, b)
	r = floorDivision(r, new(big.Int).Mul(a, big.NewInt(2)))
	t := new(big.Int).Mul(big.NewInt(2), r)
	t.Mul(t, a)
	oldB := new(big.Int).Set(b)
	b.Add(b, t)
	x := new(big.Int).Mul(a, r)
	x.Mul(x, r)
	y := new(big.Int).Mul(oldB, r)
	c.Add(c, x)
	c.Add(c, y)
	return newGroup(a, b, c)
}
// discriminant returns b^2 - 4ac, computing it lazily and caching the
// result in g.d.
func (g *group) discriminant() *big.Int {
	if g.d != nil {
		return g.d
	}
	bSquared := new(big.Int).Mul(g.b, g.b)
	fourAC := new(big.Int).Mul(g.a, g.c)
	fourAC.Mul(fourAC, big.NewInt(4))
	g.d = bSquared.Sub(bSquared, fourAC)
	return g.d
}
// multiply composes two binary quadratic forms and returns the reduced
// product, following the classical Gauss composition formulas (see e.g.
// Cohen, "A Course in Computational Algebraic Number Theory"). Returns
// nil when one of the intermediate congruences has no solution.
func (g0 *group) multiply(g1 *group) *group {
	x := g0.reduced()
	y := g1.reduced()
	// g = (b1 + b2)/2, h = (b2 - b1)/2
	g := new(big.Int).Add(x.b, y.b)
	g = floorDivision(g, big.NewInt(2))
	h := new(big.Int).Sub(y.b, x.b)
	h = floorDivision(h, big.NewInt(2))
	// w = gcd(a1, a2, g); j, r, s, t, u are the standard composition
	// intermediates (r stays 0 in this variant).
	w1 := allInputValueGCD(y.a, g)
	w := allInputValueGCD(x.a, w1)
	j := new(big.Int).Set(w)
	r := big.NewInt(0)
	s := floorDivision(x.a, w)
	t := floorDivision(y.a, w)
	u := floorDivision(g, w)
	// Solve tu*k ≡ hu + s*c1 (mod st), then refine k modulo s.
	b := new(big.Int).Mul(h, u)
	sc := new(big.Int).Mul(s, x.c)
	b.Add(b, sc)
	k_temp, constant_factor, solvable := solveMod(new(big.Int).Mul(t, u), b, new(big.Int).Mul(s, t))
	if !solvable {
		return nil
	}
	n, _, solvable := solveMod(new(big.Int).Mul(t, constant_factor), new(big.Int).Sub(h, new(big.Int).Mul(t, k_temp)), s)
	if !solvable {
		return nil
	}
	k := new(big.Int).Add(k_temp, new(big.Int).Mul(constant_factor, n))
	l := floorDivision(new(big.Int).Sub(new(big.Int).Mul(t, k), h), s)
	// m = (tuk - hu - s*c1) / (st)
	tuk := new(big.Int).Mul(t, u)
	tuk.Mul(tuk, k)
	hu := new(big.Int).Mul(h, u)
	tuk.Sub(tuk, hu)
	tuk.Sub(tuk, sc)
	st := new(big.Int).Mul(s, t)
	m := floorDivision(tuk, st)
	// Assemble the composed form (a3, b3, c3); note st and the other
	// intermediates below are mutated in place.
	ru := new(big.Int).Mul(r, u)
	a3 := st.Sub(st, ru)
	ju := new(big.Int).Mul(j, u)
	mr := new(big.Int).Mul(m, r)
	ju = ju.Add(ju, mr)
	kt := new(big.Int).Mul(k, t)
	ls := new(big.Int).Mul(l, s)
	kt = kt.Add(kt, ls)
	b3 := ju.Sub(ju, kt)
	kl := new(big.Int).Mul(k, l)
	jm := new(big.Int).Mul(j, m)
	c3 := kl.Sub(kl, jm)
	return newGroup(a3, b3, c3).reduced()
}
func floorDivision(x, y *big.Int) *big.Int {
var r big.Int
q, _ := new(big.Int).QuoRem(x, y, &r)
if (r.Sign() == 1 && y.Sign() == -1) || (r.Sign() == -1 && y.Sign() == 1) {
q.Sub(q, big.NewInt(1))
}
return q
}
// encoding rewrites buf in place as the two's-complement negation of
// its trailing size bytes (bitwise NOT plus one, with carry propagated
// from the low end) and fills every byte before them with 0xff as a
// sign extension. The mutated buf is returned for convenience.
func encoding(buf []byte, size int) []byte {
	carry := uint8(1)
	for i := len(buf) - 1; i >= len(buf)-size; i-- {
		d := buf[i] ^ 0xff
		if d == 0xff {
			// 0xff + carry either overflows to 0 (carry kept) or stays 0xff.
			if carry == 1 {
				d = 0
			}
		} else {
			d += carry
			carry = 0
		}
		buf[i] = d
	}
	for i := len(buf) - size - 1; i >= 0; i-- {
		buf[i] = 0xff
	}
	return buf
}
func allInputValueGCD(a, b *big.Int) (r *big.Int) {
if a.Sign() == 0 {
return new(big.Int).Abs(b)
}
if b.Sign() == 0 {
return new(big.Int).Abs(a)
}
return new(big.Int).GCD(nil, nil, new(big.Int).Abs(a), new(big.Int).Abs(b))
}
// solveMod solves a*x ≡ b (mod m). On success it returns (s, t, true)
// where every solution has the form x = s + k*t for integer k; when
// gcd(a, m) does not divide b there is no solution and (nil, nil, false)
// is returned.
//
// Cleanup: the previous version allocated `bb := new(big.Int).Set(b)`
// only to have DivMod overwrite it immediately — the dead store is gone.
func solveMod(a, b, m *big.Int) (s, t *big.Int, solvable bool) {
	g, d, _ := extendedGCD(a, m)
	// b must be divisible by gcd(a, m) for a solution to exist.
	q, r := new(big.Int).DivMod(b, g, new(big.Int))
	if r.Sign() != 0 {
		return nil, nil, false
	}
	q.Mul(q, d)
	s = q.Mod(q, m)
	t = floorDivision(m, g)
	return s, t, true
}
func extendedGCD(a, b *big.Int) (r, s, t *big.Int) {
r0 := new(big.Int).Set(a)
r1 := new(big.Int).Set(b)
s0 := big.NewInt(1)
s1 := big.NewInt(0)
t0 := big.NewInt(0)
t1 := big.NewInt(1)
if r0.Cmp(r1) == 1 {
oldR0 := new(big.Int).Set(r0)
r0 = r1
r1 = oldR0
oldS0 := new(big.Int).Set(s0)
s0 = t0
oldS1 := new(big.Int).Set(s1)
s1 = t1
t0 = oldS0
t1 = oldS1
}
for r1.Sign() == 1 {
r := big.NewInt(1)
bb := new(big.Int).Set(b)
q, r := bb.DivMod(r0, r1, r)
r0 = r1
r1 = r
oldS0 := new(big.Int).Set(s0)
s0 = s1
s1 = new(big.Int).Sub(oldS0, new(big.Int).Mul(q, s1))
oldT0 := new(big.Int).Set(t0)
t0 = t1
t1 = new(big.Int).Sub(oldT0, new(big.Int).Mul(q, t1))
}
return r0, s0, t0
}
// square composes g with itself (doubling of a binary quadratic form)
// and returns the reduced result, or nil when the congruence
// b*u ≡ c (mod a) has no solution.
func (g *group) square() *group {
	u, _, solvable := solveMod(g.b, g.c, g.a)
	if !solvable {
		return nil
	}
	A := new(big.Int).Mul(g.a, g.a) // A = a^2
	au := new(big.Int).Mul(g.a, u)
	B := new(big.Int).Sub(g.b, new(big.Int).Mul(au, big.NewInt(2))) // B = b - 2au
	C := new(big.Int).Mul(u, u)
	m := new(big.Int).Mul(g.b, u)
	m = new(big.Int).Sub(m, g.c)
	m = floorDivision(m, g.a)
	C = new(big.Int).Sub(C, m) // C = u^2 - (b*u - c)/a
	return newGroup(A, B, C).reduced()
}
package copier
import (
"database/sql"
"reflect"
)
// Converter transforms a source value into the destination type; it is
// registered against a TypeMatcher and consulted while copying.
type Converter func(from reflect.Value, toType reflect.Type) (reflect.Value, error)

// Transformer maps field names to transformation values applied during
// copying. (Exact semantics are defined by the Mapper implementation.)
type Transformer map[string]interface{}

// Mapper is the configuration entry point of the copier: register
// converters, transformers and diff-field resets, then start a copy
// with From. All Register* methods return the Mapper for chaining.
type Mapper interface {
	From(fromValue interface{}) CopyCommand
	RegisterConverter(matcher TypeMatcher, converter Converter) Mapper
	RegisterConverterFunc(matcher TypeMatcherFunc, converter Converter) Mapper
	RegisterResetDiffField(diffFields []DiffFieldPair) Mapper
	RegisterTransformer(transformer Transformer) Mapper
	Install(Module) Mapper
}

// Module bundles a set of registrations so they can be installed on a
// Mapper in one call.
type Module func(Mapper)

// TypeMatcher decides whether a registered converter applies to a
// (source type, destination type) pair.
type TypeMatcher interface {
	Matches(Target) bool
}

// TypeMatcherFunc adapts a plain predicate function to TypeMatcher.
type TypeMatcherFunc func(Target) bool

// Matches implements TypeMatcher by invoking the function itself.
func (f TypeMatcherFunc) Matches(target Target) bool {
	return f(target)
}

// Target is a (source type, destination type) pair for matching.
type Target struct {
	From reflect.Type
	To reflect.Type
}

// Matches implements TypeMatcher by exact equality of both types.
func (t Target) Matches(target Target) bool {
	return t == target
}

// DiffFieldPair names a source field and the destination fields to
// reset when they differ. (Semantics live in the Mapper implementation.)
type DiffFieldPair struct {
	Origin string
	Targets []string
}

// CopyCommand executes the configured copy into toValue.
type CopyCommand interface {
	CopyTo(toValue interface{}) error
}
// set assigns `from` into `to`, handling pointer destinations, type
// conversions and sql.Scanner targets, in that order. It reports false
// when no assignment strategy applies or a Scan fails; an invalid
// `from` is treated as a no-op success.
func set(to, from reflect.Value) bool {
	if from.IsValid() {
		if to.Kind() == reflect.Ptr {
			// set `to` to nil if from is nil
			if from.Kind() == reflect.Ptr && from.IsNil() {
				to.Set(reflect.Zero(to.Type()))
				return true
			} else if to.IsNil() {
				// Allocate so we can assign through the pointer.
				to.Set(reflect.New(to.Type().Elem()))
			}
			to = to.Elem()
		}
		if from.Type().ConvertibleTo(to.Type()) {
			to.Set(from.Convert(to.Type()))
		} else if scanner, ok := to.Addr().Interface().(sql.Scanner); ok {
			err := scanner.Scan(from.Interface())
			if err != nil {
				return false
			}
		} else if from.Kind() == reflect.Ptr {
			// Retry with the pointed-to value.
			return set(to, from.Elem())
		} else {
			return false
		}
	}
	return true
}
func indirect(reflectValue reflect.Value) reflect.Value {
for reflectValue.Kind() == reflect.Ptr {
reflectValue = reflectValue.Elem()
}
return reflectValue
}
func indirectType(reflectType reflect.Type) reflect.Type {
for reflectType.Kind() == reflect.Ptr {
reflectType = reflectType.Elem()
}
return reflectType
}
// deepFields lists the struct's fields with anonymous (embedded)
// structs flattened into their constituent fields. Pointer types are
// dereferenced first; non-struct types yield nil.
func deepFields(reflectType reflect.Type) []reflect.StructField {
	underlying := indirectType(reflectType)
	if underlying.Kind() != reflect.Struct {
		return nil
	}
	var fields []reflect.StructField
	for i := 0; i < underlying.NumField(); i++ {
		field := underlying.Field(i)
		if field.Anonymous {
			fields = append(fields, deepFields(field.Type)...)
			continue
		}
		fields = append(fields, field)
	}
	return fields
}
func indirectAsNonNil(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirectAsNonNil(v.Elem())
}
return v
} | copier.go | 0.641647 | 0.417806 | copier.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.