package input
import (
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/lib/input/reader"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/util/aws/session"
)
//------------------------------------------------------------------------------
// init registers the kinesis input in the global Constructors table so that
// it can be referenced from Benthos configs under the TypeKinesis name.
func init() {
	Constructors[TypeKinesis] = TypeSpec{
		constructor: NewKinesis,
		Summary: `
Receive messages from a Kinesis stream.`,
		Description: `
It's possible to use DynamoDB for persisting shard iterators by setting the
table name. Offsets will then be tracked per ` + "`client_id`" + ` per
` + "`shard_id`" + `. When using this mode you should create a table with
` + "`namespace`" + ` as the primary key and ` + "`shard_id`" + ` as a sort key.
Use the ` + "`batching`" + ` fields to configure an optional
[batching policy](/docs/configuration/batching#batch-policy). Any other batching
mechanism will stall with this input due its sequential transaction model.`,
		// sanitiseConfigFunc folds the batching policy into the sanitised
		// config output shown to users.
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			return sanitiseWithBatch(conf.Kinesis, conf.Kinesis.Batching)
		},
		// Field docs: the common Kinesis fields, then the shared AWS session
		// fields, then advanced tuning knobs and the batching policy spec.
		FieldSpecs: append(
			append(docs.FieldSpecs{
				docs.FieldCommon("stream", "The Kinesis stream to consume from."),
				docs.FieldCommon("shard", "The shard to consume from."),
				docs.FieldCommon("client_id", "The client identifier to assume."),
				docs.FieldCommon("commit_period", "The rate at which offset commits should be sent."),
				docs.FieldCommon("dynamodb_table", "A DynamoDB table to use for offset storage."),
				docs.FieldCommon("start_from_oldest", "Whether to consume from the oldest message when an offset does not yet exist for the stream."),
			}, session.FieldSpecs()...),
			docs.FieldAdvanced("timeout", "The period of time to wait before abandoning a request and trying again."),
			docs.FieldAdvanced("limit", "The maximum number of messages to consume from each request."),
			batch.FieldSpec(),
		),
		Categories: []Category{
			CategoryServices,
			CategoryAWS,
		},
	}
}
//------------------------------------------------------------------------------
// NewKinesis creates a new AWS Kinesis input type.
//
// The raw Kinesis reader is optionally wrapped in a synchronous batcher
// (when a batching policy is configured) and always wrapped in a preserver
// so unacknowledged messages are retained and replayed.
func NewKinesis(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	k, err := reader.NewKinesis(conf.Kinesis, log, stats)
	if err != nil {
		return nil, err
	}
	var kb reader.Type = k
	if !conf.Kinesis.Batching.IsNoop() {
		if kb, err = reader.NewSyncBatcher(conf.Kinesis.Batching, k, mgr, log, stats); err != nil {
			return nil, err
		}
	}
	return NewReader(
		TypeKinesis,
		reader.NewPreserver(kb),
		log, stats,
	)
}
//------------------------------------------------------------------------------
package encoding
// writeBigEndian stores the n least-significant bytes of x into buf in
// big-endian byte order. If buf holds fewer than n bytes, a panic will
// ensue.
func writeBigEndian(buf []byte, x uint64, n int) {
	for i := 0; i < n; i++ {
		buf[i] = byte(x >> uint(8*(n-1-i)))
	}
}
// readBigEndian interprets the whole of buf as a big-endian unsigned
// integer and returns the decoded value. An empty buffer decodes to 0.
func readBigEndian(buf []byte) uint64 {
	var x uint64
	for _, b := range buf {
		x = x<<8 | uint64(b)
	}
	return x
}
// maxVarintSize is the largest number of bytes one encoded varint can
// occupy: a 255 tag byte followed by an 8-byte big-endian integer.
const maxVarintSize = 9
// putUvarint encodes a uint64 into buf and returns the number of
// bytes written. If the buffer is too small, a panic will ensue. Note
// that this varint encoding matches the sqlite4 definition but
// differs from the encoding/binary.{Put,Read}Uvarint() definitions.
func putUvarint(buf []byte, x uint64) int {
	// Treat each byte of the encoding as an unsigned integer
	// between 0 and 255.
	// Let the bytes of the encoding be called A0, A1, A2, ..., A8.
	// If x<=240 then output a single byte A0 equal to x.
	// If x<=2287 then output A0 as (x-240)/256 + 241 and A1 as (x-240)%256.
	// If x<=67823 then output A0 as 249, A1 as (x-2288)/256, and A2 as (x-2288)%256.
	// If x<=16777215 then output A0 as 250 and A1 through A3 as a big-endian 3-byte integer.
	// If x<=4294967295 then output A0 as 251 and A1..A4 as a big-endian 4-byte integer.
	// If x<=1099511627775 then output A0 as 252 and A1..A5 as a big-endian 5-byte integer.
	// If x<=281474976710655 then output A0 as 253 and A1..A6 as a big-endian 6-byte integer.
	// If x<=72057594037927935 then output A0 as 254 and A1..A7 as a big-endian 7-byte integer.
	// Otherwise then output A0 as 255 and A1..A8 as a big-endian 8-byte integer.
	switch {
	case x <= 240:
		buf[0] = byte(x)
		return 1
	case x <= 2287:
		buf[0] = byte((x-240)/256 + 241)
		buf[1] = byte((x - 240) % 256)
		return 2
	case x <= 67823:
		buf[0] = byte(249)
		buf[1] = byte((x - 2288) / 256)
		buf[2] = byte((x - 2288) % 256)
		return 3
	case x <= 16777215:
		buf[0] = byte(250)
		writeBigEndian(buf[1:], x, 3)
		return 4
	case x <= 4294967295:
		buf[0] = byte(251)
		writeBigEndian(buf[1:], x, 4)
		return 5
	case x <= 1099511627775:
		buf[0] = byte(252)
		writeBigEndian(buf[1:], x, 5)
		return 6
	case x <= 281474976710655:
		buf[0] = byte(253)
		writeBigEndian(buf[1:], x, 6)
		return 7
	case x <= 72057594037927935:
		buf[0] = byte(254)
		writeBigEndian(buf[1:], x, 7)
		return 8
	default:
		buf[0] = byte(255)
		writeBigEndian(buf[1:], x, 8)
		return 9
	}
}
// getUvarint decodes a varint-encoded byte slice and returns the result
// and the length of byte slice been used.
func getUvarint(b []byte) (uint64, int) {
// Treat each byte of the encoding as an unsigned integer
// between 0 and 255.
// Let the bytes of the encoding be called A0, A1, A2, ..., A8.
// If A0 is between 0 and 240 inclusive, then the result is the value of A0.
// If A0 is between 241 and 248 inclusive, then the result is 240+256*(A0-241)+A1.
// If A0 is 249 then the result is 2288+256*A1+A2.
// If A0 is 250 then the result is A1..A3 as a 3-byte big-ending integer.
// If A0 is 251 then the result is A1..A4 as a 4-byte big-ending integer.
// If A0 is 252 then the result is A1..A5 as a 5-byte big-ending integer.
// If A0 is 253 then the result is A1..A6 as a 6-byte big-ending integer.
// If A0 is 254 then the result is A1..A7 as a 7-byte big-ending integer.
// If A0 is 255 then the result is A1..A8 as a 8-byte big-ending integer.
switch {
case b[0] >= 0 && b[0] <= 240:
return uint64(b[0]), 1
case b[0] >= 241 && b[0] <= 248:
return 240 + 256*(uint64(b[0])-241) + uint64(b[1]), 2
case b[0] == 249:
return 2288 + 256*uint64(b[1]) + uint64(b[2]), 3
case b[0] == 250:
return readBigEndian(b[1:4]), 4
case b[0] == 251:
return readBigEndian(b[1:5]), 5
case b[0] == 252:
return readBigEndian(b[1:6]), 6
case b[0] == 253:
return readBigEndian(b[1:7]), 7
case b[0] == 254:
return readBigEndian(b[1:8]), 8
case b[0] == 255:
return readBigEndian(b[1:9]), 9
default:
panic("varint: invalid format given")
}
} | util/encoding/varint.go | 0.63023 | 0.514888 | varint.go | starcoder |
package plausible
// TimeseriesQuery represents an API query for time series information over
// a period of time. The Period field is mandatory; all other fields are
// optional.
type TimeseriesQuery struct {
	// Period to consider for the time series query.
	// The result will include results over this period of time.
	// This field is mandatory.
	Period TimePeriod
	// Filters is a filter over properties to narrow down the time series results.
	// This field is optional.
	Filters Filter
	// Metrics to be included in the time series information.
	// This field is optional.
	Metrics Metrics
	// Interval of time to consider for the time series result.
	// This field is optional.
	Interval TimeInterval
}
// Validate reports whether the query is valid. When it is not,
// invalidReason explains why.
func (tq *TimeseriesQuery) Validate() (ok bool, invalidReason string) {
	if !tq.Period.IsEmpty() {
		return true, ""
	}
	return false, "a period must be specified for a timeseries query"
}
// toQueryArgs flattens the query into the key/value arguments sent to the
// API, skipping every optional component that is unset.
func (tq *TimeseriesQuery) toQueryArgs() QueryArgs {
	args := QueryArgs{}
	args.Merge(tq.Period.toQueryArgs())
	if !tq.Filters.IsEmpty() {
		args.Merge(tq.Filters.toQueryArgs())
	}
	if !tq.Metrics.IsEmpty() {
		args.Merge(tq.Metrics.toQueryArgs())
	}
	if !tq.Interval.IsEmpty() {
		args.Merge(tq.Interval.toQueryArgs())
	}
	return args
}
// TimeseriesResult represents the result of a time series query: one data
// point per interval within the queried period.
type TimeseriesResult []TimeseriesDataPoint

// rawTimeseriesResponse mirrors the JSON envelope returned by the API.
type rawTimeseriesResponse struct {
	Results []TimeseriesDataPoint `json:"results"`
}
// MetricsResult contains the results for metrics data.
type MetricsResult struct {
	// BounceRateRaw contains information about the bounce rate.
	// This field must only be used if the query requested the bounce rate metric.
	// Even when the query requests information for the bounce rate metric, some data points can
	// have this field as nil.
	// If you don't care about the nil value, use the BounceRate method to get this value.
	BounceRateRaw *float64 `json:"bounce_rate"`
	// Pageviews contains information about the number of page views.
	// This field must only be used if the query requested the page views metric.
	Pageviews int `json:"pageviews"`
	// VisitDurationRaw contains information about the visit duration.
	// Only use this field if the query requested the visit duration metric.
	// Even when the query requests information for the visit duration metric, some data points can
	// have this field as nil.
	// If you don't care about the nil value, use the VisitDuration method to get this value.
	VisitDurationRaw *float64 `json:"visit_duration"`
	// Visitors contains information about the number of visitors.
	// This field must only be used if the query requested the visitors metric.
	Visitors int `json:"visitors"`
}
// BounceRate returns the bounce rate associated with this result, or
// 0 (zero) when the API reported no bounce rate information.
func (mr *MetricsResult) BounceRate() float64 {
	if raw := mr.BounceRateRaw; raw != nil {
		return *raw
	}
	return 0
}
// VisitDuration returns the visit duration associated with this result, or
// 0 (zero) when the API reported no visit duration information.
func (mr *MetricsResult) VisitDuration() float64 {
	if raw := mr.VisitDurationRaw; raw != nil {
		return *raw
	}
	return 0
}
// TimeseriesDataPoint represents a data point in a time series result.
type TimeseriesDataPoint struct {
	// Date is a string containing information about the date this result refers to in the format of "yyyy-mm-dd".
	// For some queries, this string will also include information about an hour of day, in the format "yyyy-mm-dd hh:mm:ss"
	Date string `json:"date"`
	// MetricsResult contains the metric results for the metrics included in the query.
	MetricsResult
}
package lm
import (
"errors"
"fmt"
"log"
"github.com/alldroll/rbtree"
"github.com/suggest-go/suggest/pkg/utils"
)
// NGramVectorBuilder is responsible for building an NGramVector
// incrementally from (nGram sequence, count) pairs.
type NGramVectorBuilder interface {
	// Put adds the given sequence of nGrams and count to model
	Put(nGrams []WordID, count WordCount) error
	// Build creates new instance of NGramVector
	Build() NGramVector
}

// NGramVectorFactory represents a factory method for creating a NGramVector instance.
type NGramVectorFactory func(ch <-chan NGramNode) NGramVector

// ErrNGramOrderIsOutOfRange informs that the number of nGrams passed to Put
// does not match the order the builder was configured with.
var ErrNGramOrderIsOutOfRange = errors.New("nGrams order is out of range")

// nGramVectorBuilder implements NGramVectorBuilder interface
type nGramVectorBuilder struct {
	// parents holds the lower-order NGramVectors used to resolve contexts.
	parents []NGramVector
	// factory builds the final NGramVector from the accumulated nodes.
	factory NGramVectorFactory
	// tree accumulates (key, count) nodes in sorted order.
	tree rbtree.Tree
}

// NGramNode represents tree node for the given nGram
type NGramNode struct {
	Key   Key
	Count WordCount
}

// Less tells is current elements is bigger than the other
func (n *NGramNode) Less(other rbtree.Item) bool {
	return n.Key < other.(*NGramNode).Key
}

// Key represents a NGramNode key as a composition of a NGram context and wordID
type Key uint64

// MakeKey creates uint64 key for the given pair (word, context).
// NOTE(review): an overflowing context terminates the whole process via
// log.Fatal; returning an error would be friendlier for library use.
func MakeKey(word WordID, context ContextOffset) Key {
	if context > maxContextOffset {
		log.Fatal(ErrContextOverflow)
	}
	return Key(utils.Pack(context, word))
}

// GetWordID returns the wordID for the given key
func (k Key) GetWordID() WordID {
	return utils.UnpackRight(uint64(k))
}

// GetContext returns the context for the given key
func (k Key) GetContext() ContextOffset {
	return utils.UnpackLeft(uint64(k))
}

// NewNGramVectorBuilder creates new instance of NGramVectorBuilder
func NewNGramVectorBuilder(parents []NGramVector, factory NGramVectorFactory) NGramVectorBuilder {
	return &nGramVectorBuilder{
		parents: parents,
		factory: factory,
		tree:    rbtree.New(),
	}
}
// Put adds the given sequence of nGrams and count to the model. The
// sequence length must be exactly one more than the number of parent
// vectors, otherwise ErrNGramOrderIsOutOfRange is returned.
func (m *nGramVectorBuilder) Put(nGrams []WordID, count WordCount) error {
	if len(nGrams) != len(m.parents)+1 {
		return ErrNGramOrderIsOutOfRange
	}

	last := len(nGrams) - 1
	context := InvalidContextOffset

	// Resolve the context chain through the lower-order vectors.
	for i, wordID := range nGrams[:last] {
		context = m.parents[i].GetContextOffset(wordID, context)
	}

	node := &NGramNode{
		Key:   MakeKey(nGrams[last], context),
		Count: count,
	}

	if existing := m.tree.Find(node); existing != nil {
		existing.(*NGramNode).Count += count
		return nil
	}

	if _, err := m.tree.Insert(node); err != nil {
		return fmt.Errorf("failed to insert the node: %w", err)
	}

	return nil
}
// Build creates new instance of NGramVector from the accumulated tree.
// Nodes are streamed to the factory over an unbuffered channel in sorted
// key order. The feeding goroutine terminates only once the channel is
// fully drained, so the factory must consume every node or the goroutine
// leaks.
func (m *nGramVectorBuilder) Build() NGramVector {
	ch := make(chan NGramNode)
	go func() {
		for iter := m.tree.NewIterator(); iter.Next() != nil; {
			node := iter.Get().(*NGramNode)
			ch <- *node
		}
		close(ch)
	}()
	return m.factory(ch)
}
package htest
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"sync"
"testing"
"github.com/fatih/color"
)
// lineWidth is the width, in characters, of the horizontal rule printed
// under the request line in failure output.
const lineWidth = 40

// ResponseAsserter is responsible for making assertions based on the expected and the actual value returned from httptest.ResponseRecorder
type ResponseAsserter interface {
	// ExpectHeader triggers an error if the actual value for the key is different from the expected value
	ExpectHeader(key, expected string) ResponseAsserter
	// ExpectCookie triggers an error if the actual value for the key is different from the expected value
	ExpectCookie(key, expected string) ResponseAsserter
	// ExpectStatus triggers an error if the actual status code is different from the expected value
	ExpectStatus(expected int) ResponseAsserter
	// ExpectBody triggers an error if actual body received is different from the expected one
	ExpectBody(expected string) ResponseAsserter
	// ExpectBodyContains triggers an error if actual body does not contain the passed string
	ExpectBodyContains(str string) ResponseAsserter
	// ExpectBodyBytes triggers an error if actual body received is different from the expected one
	ExpectBodyBytes(b []byte) ResponseAsserter
	// ExpectJSON triggers an error if actual body received is different from the expected one.
	// Before comparing it marshals the data passed a argument using json.Marshal
	ExpectJSON(data interface{}) ResponseAsserter
	// Recorder returns the underlying ResponseRecorder instance
	Recorder() *httptest.ResponseRecorder
}
// responseAsserter is the default ResponseAsserter implementation. It
// reports failures on t and lazily prints the originating request once.
type responseAsserter struct {
	t testing.TB
	w *httptest.ResponseRecorder
	r *http.Request
	// printRequest ensures the request header line is printed at most once
	// across all failures reported by this asserter.
	printRequest sync.Once
}
// NewResponseAsserter creates a response asserter bound to the given test,
// response recorder and originating request.
func NewResponseAsserter(t testing.TB, w *httptest.ResponseRecorder, r *http.Request) ResponseAsserter {
	return &responseAsserter{t: t, w: w, r: r}
}
// ExpectCookie asserts that a Set-Cookie header was recorded for the given
// cookie name with the expected value. A missing Set-Cookie header or a
// missing cookie name is reported as a failure.
func (ra *responseAsserter) ExpectCookie(key, expected string) ResponseAsserter {
	cookies, ok := ra.w.HeaderMap["Set-Cookie"]
	if !ok {
		ra.Errorf("No cookies set")
		return ra
	}
	found := false
	for _, cookiestr := range cookies {
		// Split on the first '=' only: cookie values and attributes (e.g.
		// "Path=/") may themselves contain '=' characters, which the old
		// strings.Split silently truncated; a header without '=' previously
		// caused an index-out-of-range panic.
		parts := strings.SplitN(cookiestr, "=", 2)
		if len(parts) != 2 {
			continue
		}
		k, actual := parts[0], parts[1]
		if k == key {
			found = true
			if actual != expected {
				ra.ErrorKV("cookie", key, "equal", expected, actual)
			}
		}
	}
	if !found {
		ra.Errorf("Cookie %s not found", key)
	}
	return ra
}
// ExpectHeader asserts that the recorded response header for key equals
// expected.
func (ra *responseAsserter) ExpectHeader(key, expected string) ResponseAsserter {
	if got := ra.w.Header().Get(key); got != expected {
		ra.ErrorKV("header", key, "equal", expected, got)
	}
	return ra
}
// ExpectBody asserts that the recorded response body equals expected.
func (ra *responseAsserter) ExpectBody(expected string) ResponseAsserter {
	if got := ra.w.Body.String(); got != expected {
		ra.Error("body", "equal", expected, got)
	}
	return ra
}
// ExpectBodyBytes asserts that the recorded response body equals the
// expected byte slice.
func (ra *responseAsserter) ExpectBodyBytes(expected []byte) ResponseAsserter {
	if got := ra.w.Body.Bytes(); !bytes.Equal(got, expected) {
		ra.Error("body", "equal", expected, got)
	}
	return ra
}
// ExpectBodyContains asserts that the recorded response body contains the
// expected substring.
func (ra *responseAsserter) ExpectBodyContains(expected string) ResponseAsserter {
	if got := ra.w.Body.String(); !strings.Contains(got, expected) {
		ra.Error("body", "contain", expected, got)
	}
	return ra
}
// ExpectJSON asserts that the recorded response body equals the JSON
// encoding of data (as produced by json.Marshal).
func (ra *responseAsserter) ExpectJSON(data interface{}) ResponseAsserter {
	expected, err := json.Marshal(data)
	if err != nil {
		ra.Errorf("ExpectJSON error marshalling data: %s", err.Error())
		// Without this early return the body was also compared against a nil
		// payload, producing a second, misleading failure.
		return ra
	}
	actual := ra.w.Body.Bytes()
	if !bytes.Equal(actual, expected) {
		ra.Error("JSON", "equal", string(expected), string(actual))
	}
	return ra
}
// ExpectStatus asserts that the recorded HTTP status code equals expected.
func (ra *responseAsserter) ExpectStatus(expected int) ResponseAsserter {
	if got := ra.w.Code; got != expected {
		ra.Error("status code", "equal", expected, got)
	}
	return ra
}
// Error reports a failed assertion of the given kind without a key.
func (ra *responseAsserter) Error(kind, verb string, expected, actual interface{}) {
	// The message is already fully formatted; pass it through "%s" so that a
	// literal '%' inside expected/actual is not reinterpreted as a fmt verb.
	ra.Errorf("%s", ra.errorFormatterKV(kind, "", verb, expected, actual))
}
// ErrorKV reports a failed assertion of the given kind for a specific key.
func (ra *responseAsserter) ErrorKV(kind, key, verb string, expected, actual interface{}) {
	// See Error: route the pre-formatted message through "%s" so '%' in the
	// compared values cannot corrupt the output.
	ra.Errorf("%s", ra.errorFormatterKV(kind, key, verb, expected, actual))
}
// errorFormatterKV renders a colorized "<Kind> <key> should <verb>
// <expected> but got <actual>" message; key may be empty.
func (ra *responseAsserter) errorFormatterKV(kind, key, verb string, expected, actual interface{}) string {
	expected = wrapWithQuotesForString(expected)
	actual = wrapWithQuotesForString(actual)
	label := ""
	if key != "" {
		label = key + " "
	}
	return fmt.Sprintf("%s %sshould %s %s but got %s", magenta(strings.Title(kind)), cyan(label), verb, green(expected), red(actual))
}
// Recorder returns the underlying ResponseRecorder instance for direct
// inspection when the fluent assertions are not enough.
func (ra *responseAsserter) Recorder() *httptest.ResponseRecorder {
	return ra.w
}
// wrapWithQuotesForString wraps string values in double quotes so they are
// clearly delimited (and empty strings stay visible) in failure messages;
// every other value is returned unchanged.
func wrapWithQuotesForString(i interface{}) interface{} {
	// Bind the switch variable instead of re-asserting i.(string) inside the
	// case (the old form performed a redundant second type assertion).
	switch v := i.(type) {
	case string:
		return fmt.Sprintf(`"%s"`, v)
	default:
		return i
	}
}
// Color helpers used to build the assertion failure messages.
var red = color.New(color.FgRed).SprintFunc()
var green = color.New(color.FgGreen).SprintFunc()
var magenta = color.New(color.FgHiMagenta).SprintFunc()
var cyan = color.New(color.FgHiCyan).SprintFunc()
var methodColor = color.New(color.FgMagenta).SprintFunc()
var pathColor = color.New(color.Bold, color.Italic, color.FgHiBlue).SprintFunc()
// Errorf reports a formatted assertion failure on the underlying test,
// prefixing the first failure with the request method/path header line.
// NOTE(review): the trace string is concatenated into the format string, so
// a '%' inside the trace would be misinterpreted by Sprintf — confirm that
// Trace() output can never contain format verbs.
func (ra *responseAsserter) Errorf(format string, args ...interface{}) {
	trace := Trace().OnlyTests().String()
	whitespaced := "\r" + getWhitespaceString() + "\r\t"
	var request string
	// Print the request line and rule only for the first failure.
	ra.printRequest.Do(func() {
		request = getWhitespaceString() + "\r\t" + methodColor(ra.r.Method) + " " + pathColor(ra.r.URL.Path) + "\n\r\t"
		request += red(strings.Repeat("\u2500", lineWidth))
		request += "\n\r\t"
	})
	errorStr := trace
	str := fmt.Sprintf(whitespaced+request+format+"\n\r\t"+errorStr, args...)
	ra.t.Error(str)
}
package main
import (
"bufio"
"fmt"
"log"
"math"
"os"
"sort"
"strconv"
"time"
)
// point is an integer grid coordinate (x to the right, y downwards).
type point struct {
	x, y int
}
// add returns the point translated by the offset d.
func (p point) add(d point) point {
	return point{x: p.x + d.x, y: p.y + d.y}
}
// isValid reports whether p lies inside a w-by-h grid.
func (p *point) isValid(w, h int) bool {
	if p.x < 0 || p.y < 0 {
		return false
	}
	return p.x < w && p.y < h
}
// distance returns the Euclidean distance between p and o.
func (p *point) distance(o point) float64 {
	// math.Hypot computes sqrt(dx*dx+dy*dy) robustly; the previous math.Abs
	// wrapper was redundant since Sqrt never returns a negative value.
	return math.Hypot(float64(o.x-p.x), float64(o.y-p.y))
}
// osets lists the offsets of the eight neighbouring cells. The original
// table listed {-1, 1} twice and omitted {1, -1}, so the up-right diagonal
// neighbour was never enqueued by the BFS.
var osets = []point{{-1, 0}, {-1, -1}, {0, -1}, {-1, 1}, {1, 0}, {1, 1}, {0, 1}, {1, -1}}
// enqueueNeighbors pushes every in-bounds, not-yet-visited neighbour of p
// onto the BFS queue, marking each as visited before sending it.
func enqueueNeighbors(p point, visited map[point]bool, w, h int, q chan point) {
	for _, d := range osets {
		n := p.add(d)
		if !n.isValid(w, h) || visited[n] {
			continue
		}
		visited[n] = true
		q <- n
	}
}
// colinear (sic: "collinear") reports whether a, b and c lie on one line
// AND b sits between a and c — i.e. b occludes c as seen from a.
func colinear(a, b, c point) bool {
	// Twice the signed area of triangle abc; non-zero means the points are
	// not collinear at all.
	val := a.x*(b.y-c.y) +
		b.x*(c.y-a.y) +
		c.x*(a.y-b.y)
	if val != 0 {
		return false
	}
	// One clause per quadrant direction: b must be no farther from a than c
	// is from b in both axes, so b lies between a and c along the ray.
	return (b.x <= a.x && b.y <= a.y && c.x <= b.x && c.y <= b.y) ||
		(b.x >= a.x && b.y <= a.y && c.x >= b.x && c.y <= b.y) ||
		(b.x >= a.x && b.y >= a.y && c.x >= b.x && c.y >= b.y) ||
		(b.x <= a.x && b.y >= a.y && c.x <= b.x && c.y >= b.y)
}
// search counts how many asteroids are visible from p by BFS-expanding over
// the whole field, writing the count back into f[p.y][p.x]. Cells holding 0
// contain no asteroid. The deferred decrement consumes the cell's initial
// "asteroid present" marker so the final value is exactly the visible count.
func search(f [][]int, p point, w, h int, done chan bool) {
	defer func() { done <- true }()
	if f[p.y][p.x] == 0 {
		return
	}
	// Runs before the done signal (defers execute in LIFO order).
	defer func() { f[p.y][p.x]-- }()
	// seen holds every asteroid already examined; any of them may occlude a
	// later candidate. visited guards the BFS frontier.
	seen := make(map[point]bool)
	visited := make(map[point]bool)
	visited[p] = true
	log.Printf("Starting search %v", p)
	q := make(chan point, w*h)
	enqueueNeighbors(p, visited, w, h, q)
	for {
		select {
		case candidate := <-q:
			enqueueNeighbors(candidate, visited, w, h, q)
			if f[candidate.y][candidate.x] == 0 {
				// no data here, move on.
				continue
			}
			// Is there a point in the seen set that is colinear with this
			// candidate and the origin? If so the candidate is occluded.
			visible := true
			for s := range seen {
				if colinear(p, s, candidate) {
					visible = false
					break
				}
			}
			// Blocked asteroids still join seen — they can occlude others.
			seen[candidate] = true
			if visible {
				f[p.y][p.x]++
			}
		case <-time.After(1 * time.Second):
			// NOTE(review): termination relies on the queue staying empty for
			// a full second rather than an explicit frontier-exhausted check,
			// and allocates a fresh timer each iteration. Adequate for puzzle
			// inputs, fragile in general — confirm before reuse.
			return
		}
	}
}
// findAngles computes, for every asteroid except p itself, its clockwise
// angle from "up" as seen from p, stored in angles as hundredths of a
// degree (int32).
func findAngles(f [][]int, p point, w, h int, angles map[point]int32) {
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			if f[y][x] != 0 && !(p.x == x && p.y == y) {
				// Atan2 measures from +x; adding 90 rotates so 0 is "up".
				// Note y grows downward, which makes the sweep clockwise.
				angle := math.Atan2(float64(y-p.y), float64(x-p.x))*(180.0/math.Pi) + 90.0
				if angle < 0 {
					angle += 360
				}
				// Round to two decimals by formatting and re-parsing; the
				// ParseFloat error can only fire on a malformed string, which
				// Sprintf cannot produce here.
				var err error
				angle, err = strconv.ParseFloat(fmt.Sprintf("%.2f", angle), 64)
				if err != nil {
					panic("failed float trimming")
				}
				angles[point{x, y}] = int32(angle * 100)
			}
		}
	}
}
// spoint pairs an asteroid location with its sweep angle (hundredths of a
// degree, clockwise from up) and its distance from the laser origin.
type spoint struct {
	p     point
	angle int32
	dist  float64
}
// buildSpoints fills res with one spoint per entry of angles, carrying the
// location, its angle and its distance from p. res must have len(angles)
// elements. Map iteration order is random; callers sort afterwards.
func buildSpoints(p point, angles map[point]int32, res []spoint) {
	idx := 0
	for k, v := range angles {
		res[idx] = spoint{k, v, p.distance(k)}
		idx++
	}
}
// byAngleDist sorts spoints by angle, breaking ties by distance, so that a
// laser sweep is simply "first undestroyed point per angle" (see destroy).
type byAngleDist []spoint

// Len implements sort.Interface.
func (a byAngleDist) Len() int {
	return len(a)
}

// Swap implements sort.Interface.
func (a byAngleDist) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

// Less orders primarily by angle and secondarily by distance.
func (a byAngleDist) Less(i, j int) bool {
	if a[i].angle < a[j].angle {
		return true
	} else if a[i].angle == a[j].angle {
		if a[i].dist < a[j].dist {
			return true
		}
	}
	return false
}
// destroy emits points to destC in laser-sweep order: each pass takes the
// first (closest) remaining point at every distinct angle, marking taken
// entries with sentinel values, then recurses for the occluded leftovers.
// When a full pass destroys nothing, destC is closed. pts must be sorted by
// (angle, distance) — see byAngleDist.
func destroy(pts []spoint, destC chan spoint) {
	lastAngle := int32(-1)
	sendAny := false
	for i, pt := range pts {
		// x >= 0 means "not yet destroyed"; destroyed slots are overwritten
		// with {-1,-1}/-1/-1 sentinels below.
		if pt.p.x >= 0 && pt.angle != lastAngle {
			lastAngle = pt.angle
			destC <- pt
			pts[i] = spoint{point{-1, -1}, -1, -1}
			sendAny = true
		}
	}
	if sendAny {
		destroy(pts, destC)
	} else {
		close(destC)
	}
}
// main solves Advent of Code 2019 day 10. Part 1: find the asteroid that
// sees the most other asteroids. Part 2: sweep a laser clockwise from that
// asteroid and log the destruction order.
func main() {
	// Build the field where 1 is asteroid, 0 is nothing.
	field := make([][]int, 0, 50)
	row := 0
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		text := scanner.Text()
		field = append(field, make([]int, len(text)))
		for i, ch := range text {
			if ch == '#' {
				field[row][i] = 1
			}
		}
		row++
	}
	if err := scanner.Err(); err != nil {
		log.Println(err)
	}
	width := len(field[0])
	height := len(field)
	log.Printf("w,h = %v,%v", width, height)
	log.Printf("Loaded %v", field)
	// In parallel, launch a BFS search for visible asteroids from each cell;
	// search is a no-op for cells that hold no asteroid.
	done := make(chan bool, 100)
	defer close(done)
	for x := 0; x < width; x++ {
		for y := 0; y < height; y++ {
			go search(field, point{x, y}, width, height, done)
		}
	}
	// Wait for all searches to be done (one signal per launched goroutine).
	for i := 0; i < width*height; i++ {
		<-done
	}
	// Calculate the answer to part 1: the most visible asteroids.
	max := 0
	var maxp point
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			fmt.Printf("%4d ", field[y][x])
			if field[y][x] > max {
				max = field[y][x]
				maxp = point{x, y}
			}
		}
		fmt.Printf("\n")
	}
	log.Printf("Max: %v @ %v", max, maxp)
	// Part 2: sweep the laser until nothing remains to destroy.
	angles := make(map[point]int32)
	findAngles(field, maxp, width, height, angles)
	/* // Debugging - print the angles
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			if ang, ok := angles[point{x, y}]; !ok {
				if maxp.x == x && maxp.y == y {
					fmt.Printf(" XX ")
				} else {
					fmt.Printf(" ")
				}
			} else {
				fmt.Printf("%6d ", ang)
			}
		}
		fmt.Printf("\n")
	}
	*/
	//angOrd = make(chan point, 500)
	spoints := make([]spoint, len(angles))
	buildSpoints(maxp, angles, spoints)
	sort.Sort(byAngleDist(spoints))
	//log.Printf("%v", spoints)
	destC := make(chan spoint)
	go destroy(spoints, destC)
	// Log each destruction; the puzzle answer for the Nth asteroid is x*100+y.
	order := 1
	for pt := range destC {
		log.Printf("O: %4d -- {%3d,%3d} @ %6d -- Ans: %5d", order, pt.p.x, pt.p.y, pt.angle, (pt.p.x*100 + pt.p.y))
		order++
	}
}
package missing_vault_isolation
import (
"github.com/threagile/threagile/model"
)
// Category returns the static metadata describing the "missing vault
// isolation" risk rule (identifiers, texts, STRIDE classification, CWE).
// NOTE(review): the descriptive strings mix Portuguese and English —
// confirm which language the risk catalog is supposed to use. The strings
// are runtime data and are preserved verbatim here.
func Category() model.RiskCategory {
	return model.RiskCategory{
		Id:          "missing-vault-isolation",
		Title:       "Missing Vault Isolation",
		Description: "Ativos de cofre altamente confidenciais e seus armazenamentos de dados devem ser isolados de outros ativos " +
			"por sua própria segmentação de rede trust-boundary (" + model.ExecutionEnvironment.String() + " boundaries não contam como isolamento de rede).",
		Impact: "Se este risco não for mitigado, os invasores que atacam com sucesso outros componentes do sistema podem ter um caminho fácil para " +
			"ativos de cofre altamente confidenciais e seus armazenamentos de dados, uma vez que não são separados por segmentação de rede",
		ASVS:       "V1 - Architecture, Design and Threat Modeling Requirements",
		CheatSheet: "https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html",
		Action:     "Network Segmentation",
		Mitigation: "Aplique um limite de confiança de segmentação de rede em torno dos ativos de cofre altamente confidenciais e seus armazenamentos de dados.",
		Check:      "As recomendações do cheat sheet e do ASVS/CSVS referenciado são aplicadas?",
		Function:   model.Operations,
		STRIDE:     model.ElevationOfPrivilege,
		DetectionLogic: "In-scope vault assets " +
			"quando cercado por outros ativos (não relacionados ao cofre) (sem um limite de confiança de rede no meio). " +
			"Este risco é especialmente prevalente quando outros ativos não relacionados ao cofre estão no mesmo ambiente de execução (ou seja, mesmo banco de dados ou mesmo servidor de aplicativos).",
		RiskAssessment: "O padrão é " + model.MediumImpact.String() + " impacto. O impacto é aumentado para " + model.HighImpact.String() + " quando o ativo está faltando " +
			"trust-boundary proteção é classificada como " + model.StrictlyConfidential.String() + " ou " + model.MissionCritical.String() + ".",
		FalsePositives: "Quando todos os ativos dentro do limite de confiança da segmentação de rede são reforçados e protegidos da mesma forma como se todos fossem " +
			"cofres com dados de maior sensibilidade.",
		ModelFailurePossibleReason: false,
		CWE:                        1008,
	}
}
// SupportedTags returns the model tags this rule reacts to; the rule is not
// tag-driven, so the list is empty.
func SupportedTags() []string {
	return []string{}
}
// GenerateRisks evaluates every in-scope vault asset and emits a risk when
// unrelated (non-vault, non-vault-storage) assets share its network trust
// boundary or its execution environment.
func GenerateRisks() []model.Risk {
	risks := make([]model.Risk, 0)
	for _, technicalAsset := range model.ParsedModelRoot.TechnicalAssets {
		if technicalAsset.OutOfScope || technicalAsset.Technology != model.Vault {
			continue
		}
		// Impact is raised when the vault itself guards highly sensitive data.
		moreImpact := technicalAsset.Confidentiality == model.StrictlyConfidential ||
			technicalAsset.Integrity == model.MissionCritical ||
			technicalAsset.Availability == model.MissionCritical
		sameExecutionEnv := false
		createRiskEntry := false
		// Now check for any other same-network assets of non-vault-related
		// types. (The redundant ", _" in the range clause was dropped.)
		for sparringAssetCandidateId := range model.ParsedModelRoot.TechnicalAssets {
			if technicalAsset.Id == sparringAssetCandidateId {
				continue
			}
			sparringAssetCandidate := model.ParsedModelRoot.TechnicalAssets[sparringAssetCandidateId]
			if sparringAssetCandidate.Technology == model.Vault || isVaultStorage(technicalAsset, sparringAssetCandidate) {
				continue
			}
			if technicalAsset.IsSameExecutionEnvironment(sparringAssetCandidateId) {
				createRiskEntry = true
				sameExecutionEnv = true
			} else if technicalAsset.IsSameTrustBoundaryNetworkOnly(sparringAssetCandidateId) {
				createRiskEntry = true
			}
		}
		if createRiskEntry {
			risks = append(risks, createRisk(technicalAsset, moreImpact, sameExecutionEnv))
		}
	}
	return risks
}
// isVaultStorage reports whether storage is a data store that the given
// vault is directly connected to.
func isVaultStorage(vault model.TechnicalAsset, storage model.TechnicalAsset) bool {
	if storage.Type != model.Datastore {
		return false
	}
	return vault.HasDirectConnection(storage.Id)
}
// createRisk assembles the risk entry for one vault asset. moreImpact
// raises the impact from medium to high; sameExecutionEnv raises the
// likelihood from unlikely to likely and adjusts the message wording.
func createRisk(techAsset model.TechnicalAsset, moreImpact bool, sameExecutionEnv bool) model.Risk {
	impact := model.MediumImpact
	likelihood := model.Unlikely
	others := "<b>in the same network segment</b>"
	if moreImpact {
		impact = model.HighImpact
	}
	if sameExecutionEnv {
		likelihood = model.Likely
		others = "<b>in the same execution environment</b>"
	}
	risk := model.Risk{
		Category:               Category(),
		Severity:               model.CalculateSeverity(likelihood, impact),
		ExploitationLikelihood: likelihood,
		ExploitationImpact:     impact,
		Title: "<b>Missing Vault Isolation</b> to further encapsulate and protect vault-related asset <b>" + techAsset.Title + "</b> against unrelated " +
			"lower protected assets " + others + ", which might be easier to compromise by attackers",
		MostRelevantTechnicalAssetId: techAsset.Id,
		DataBreachProbability:        model.Improbable,
		DataBreachTechnicalAssetIDs:  []string{techAsset.Id},
	}
	// SyntheticId uniquely identifies this risk instance per asset.
	risk.SyntheticId = risk.Category.Id + "@" + techAsset.Id
	return risk
}
package kv
import (
"fmt"
"reflect"
"github.com/Ch1f/otel/api/kv/value"
)
// KeyValue holds a key and value pair.
type KeyValue struct {
	Key   Key
	Value value.Value
}

// Bool creates a new key-value pair with a passed name and a bool
// value.
func Bool(k string, v bool) KeyValue {
	return Key(k).Bool(v)
}

// Int64 creates a new key-value pair with a passed name and an int64
// value.
func Int64(k string, v int64) KeyValue {
	return Key(k).Int64(v)
}

// Uint64 creates a new key-value pair with a passed name and a uint64
// value.
func Uint64(k string, v uint64) KeyValue {
	return Key(k).Uint64(v)
}

// Float64 creates a new key-value pair with a passed name and a float64
// value.
func Float64(k string, v float64) KeyValue {
	return Key(k).Float64(v)
}

// Int32 creates a new key-value pair with a passed name and an int32
// value.
func Int32(k string, v int32) KeyValue {
	return Key(k).Int32(v)
}

// Uint32 creates a new key-value pair with a passed name and a uint32
// value.
func Uint32(k string, v uint32) KeyValue {
	return Key(k).Uint32(v)
}

// Float32 creates a new key-value pair with a passed name and a float32
// value.
func Float32(k string, v float32) KeyValue {
	return Key(k).Float32(v)
}

// String creates a new key-value pair with a passed name and a string
// value.
func String(k, v string) KeyValue {
	return Key(k).String(v)
}

// Stringer creates a new key-value pair with a passed name and a string
// value generated by the passed Stringer interface.
func Stringer(k string, v fmt.Stringer) KeyValue {
	return Key(k).String(v.String())
}

// Int creates a new key-value pair instance with a passed name and
// either an int32 or an int64 value, depending on whether the int
// type is 32 or 64 bits wide.
func Int(k string, v int) KeyValue {
	return Key(k).Int(v)
}

// Uint creates a new key-value pair instance with a passed name and
// either an uint32 or an uint64 value, depending on whether the uint
// type is 32 or 64 bits wide.
func Uint(k string, v uint) KeyValue {
	return Key(k).Uint(v)
}

// Array creates a new key-value pair with a passed name and an array.
// Only arrays of primitive type are supported.
func Array(k string, v interface{}) KeyValue {
	return Key(k).Array(v)
}
// Infer creates a new key-value pair instance with a passed name and
// automatic type inference. This is slower, and not type-safe.
func Infer(k string, value interface{}) KeyValue {
if value == nil {
return String(k, "<nil>")
}
if stringer, ok := value.(fmt.Stringer); ok {
return String(k, stringer.String())
}
rv := reflect.ValueOf(value)
switch rv.Kind() {
case reflect.Array, reflect.Slice:
return Array(k, value)
case reflect.Bool:
return Bool(k, rv.Bool())
case reflect.Int, reflect.Int8, reflect.Int16:
return Int(k, int(rv.Int()))
case reflect.Int32:
return Int32(k, int32(rv.Int()))
case reflect.Int64:
return Int64(k, int64(rv.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16:
return Uint(k, uint(rv.Uint()))
case reflect.Uint32:
return Uint32(k, uint32(rv.Uint()))
case reflect.Uint64, reflect.Uintptr:
return Uint64(k, rv.Uint())
case reflect.Float32:
return Float32(k, float32(rv.Float()))
case reflect.Float64:
return Float64(k, rv.Float())
case reflect.String:
return String(k, rv.String())
}
return String(k, fmt.Sprint(value))
} | api/kv/kv.go | 0.770378 | 0.547585 | kv.go | starcoder |
// Package formats declares named string and numeric types whose names
// encode a format constraint for the AMADEUS message structures:
// "a" = alphabetic, "n" = numeric, "an" = alphanumeric, followed by the
// allowed length range (e.g. "an..18" is alphanumeric, up to 18
// characters).
package formats
// Used for codes in the AMADEUS code tables. Code Length is one alphanumeric character.
// pattern = "[0-9A-Z]"
type AMA_EDICodesetType_Length1 string
// Used for codes in the AMADEUS code tables. Code Length is three alphanumeric characters.
// pattern = "[0-9A-Z]{1,3}"
type AMA_EDICodesetType_Length1to3 string
// Format limitations: an..1
type AlphaNumericString_Length0To1 string
// Format limitations: an..18
type AlphaNumericString_Length0To18 string
// Format limitations: an..3
type AlphaNumericString_Length0To3 string
// Format limitations: an1
type AlphaNumericString_Length1To1 string
// Format limitations: an..10
type AlphaNumericString_Length1To10 string
// Format limitations: an..109
type AlphaNumericString_Length1To109 string
// Format limitations: an..11
type AlphaNumericString_Length1To11 string
// Format limitations: an..12
type AlphaNumericString_Length1To12 string
// Format limitations: an..126
type AlphaNumericString_Length1To126 string
// Format limitations: an..127
type AlphaNumericString_Length1To127 string
// Format limitations: an..13
type AlphaNumericString_Length1To13 string
// Format limitations: an..14
type AlphaNumericString_Length1To14 string
// Format limitations: an..15
type AlphaNumericString_Length1To15 string
// Format limitations: an..17
type AlphaNumericString_Length1To17 string
// Format limitations: an..18
type AlphaNumericString_Length1To18 string
// Format limitations: an..19
type AlphaNumericString_Length1To19 string
// Format limitations: an..199
type AlphaNumericString_Length1To199 string
// Format limitations: an..2
type AlphaNumericString_Length1To2 string
// Format limitations: an..20
type AlphaNumericString_Length1To20 string
// Format limitations: an..200
type AlphaNumericString_Length1To200 string
// Format limitations: an..25
type AlphaNumericString_Length1To25 string
// Format limitations: an..250
type AlphaNumericString_Length1To250 string
// Format limitations: an..256
type AlphaNumericString_Length1To256 string
// Format limitations: an..27
type AlphaNumericString_Length1To27 string
// Format limitations: an..28
type AlphaNumericString_Length1To28 string
// Format limitations: an..3
type AlphaNumericString_Length1To3 string
// Format limitations: an..30
type AlphaNumericString_Length1To30 string
// Format limitations: an..35
type AlphaNumericString_Length1To35 string
// Format limitations: an..4
type AlphaNumericString_Length1To4 string
// Format limitations: an..40
type AlphaNumericString_Length1To40 string
// Format limitations: an..49
type AlphaNumericString_Length1To49 string
// Format limitations: an..5
type AlphaNumericString_Length1To5 string
// Format limitations: an..50
type AlphaNumericString_Length1To50 string
// Format limitations: an..56
type AlphaNumericString_Length1To56 string
// Format limitations: an..57
type AlphaNumericString_Length1To57 string
// Format limitations: an..6
type AlphaNumericString_Length1To6 string
// Format limitations: an..60
type AlphaNumericString_Length1To60 string
// Format limitations: an..7
type AlphaNumericString_Length1To7 string
// Format limitations: an..70
type AlphaNumericString_Length1To70 string
// Format limitations: an..8
type AlphaNumericString_Length1To8 string
// Format limitations: an..9
type AlphaNumericString_Length1To9 string
// Format limitations: an..99999
type AlphaNumericString_Length1To99999 string
// Format limitations: an2
type AlphaNumericString_Length2To2 string
// Format limitations: an2..3
type AlphaNumericString_Length2To3 string
// Format limitations: an3
type AlphaNumericString_Length3To3 string
// Format limitations: an3..5
type AlphaNumericString_Length3To5 string
// Format limitations: an4
type AlphaNumericString_Length4To4 string
// Format limitations: an5..6
type AlphaNumericString_Length5To6 string
// Format limitations: an6
type AlphaNumericString_Length6To6 string
// Format limitations: an7
type AlphaNumericString_Length7To7 string
// Format limitations: an9
type AlphaNumericString_Length9To9 string
// Format limitations: a..1
type AlphaString_Length0To1 string
// Format limitations: a1
type AlphaString_Length1To1 string
// Format limitations: a..2
type AlphaString_Length1To2 string
// Format limitations: a..3
type AlphaString_Length1To3 string
// Format limitations: a..30
type AlphaString_Length1To30 string
// Format limitations: a..56
type AlphaString_Length1To56 string
// Format limitations: a..57
type AlphaString_Length1To57 string
// Format limitations: a2
type AlphaString_Length2To2 string
// Format limitations: a3
type AlphaString_Length3To3 string
// Format limitations: a3..5
type AlphaString_Length3To5 string
// Date format: DDMMYY
// pattern = "(0[1-9]|[1-2][0-9]|3[0-1])(0[1-9]|1[0-2])[0-9]{2}"
type Date_DDMMYY string
// Date format: MMYY
// pattern = "(0[1-9]|1[0-2])([0-9][0-9])"
type Date_MMYY string
// Date format: YYYYMMDD
// pattern = "[0-9]{4}(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])"
type Date_YYYYMMDD string
// Value of the day in the month. Only significant digits are mandatory. Example: 7
// pattern = "(0?[1-9]|[1-2][0-9]|3[0-1])"
type Day_nN string
// Format limitations: n..10
type DecimalLengthNTo10 float64
// Format limitations: n..15
type DecimalLengthNTo15 float64
// Format limitations: n..18
type DecimalLengthNTo18 float64
// Format limitations: n..3
type DecimalLengthNTo3 float64
// Format limitations: n..4
type DecimalLengthNTo4 float64
// Format limitations: n..6
type DecimalLengthNTo6 float64
// Format limitations: n..8
type DecimalLengthNTo8 float64
// Format limitations: n..9
type DecimalLengthNTo9 float64
// Value of the hours in the time. Only significant digits are mandatory. Example: 7
// pattern = "[0-1]?[0-9]|2[0-3]"
type Hour_hH string
// Value of the month. Only significant digits are mandatory. Example: 7
// pattern = "(0?[1-9]|1[0-2])"
type Month_mM string
// Format limitations: n..10
type NumericDecimal_Length1To10 float64
// Format limitations: n..11
type NumericDecimal_Length1To11 float64
// Format limitations: n..12
type NumericDecimal_Length1To12 float64
// Format limitations: n..18
type NumericDecimal_Length1To18 float64
// Format limitations: n..35
type NumericDecimal_Length1To35 float64
// Format limitations: n..5
type NumericDecimal_Length1To5 float64
// Format limitations: n..8
type NumericDecimal_Length1To8 float64
// Format limitations: n..9
type NumericDecimal_Length1To9 float64
// Format limitations: n..3
// pattern = "-?[0-9]{0,3}"
type NumericInteger_Length0To3 int32
// NOTE(review): several NumericInteger_* types below permit more digits
// than int32 can represent (int32 max is 2,147,483,647 — 10 digits), so
// e.g. n..11 through n..35 cannot round-trip through int32 without
// overflow; similarly, n..16+ decimals exceed float64's 53-bit integer
// precision. Changing the underlying types to int64 would break existing
// callers, so this is flagged rather than fixed here.
// Format limitations: n..35
// pattern = "-?[0-9]{0,35}"
type NumericInteger_Length0To35 int32
// Format limitations: n10
// pattern = "-?[0-9]{10,10}"
type NumericInteger_Length10To10 int32
// Format limitations: n1
// pattern = "-?[0-9]{1,1}"
type NumericInteger_Length1To1 int32
// Format limitations: n..10
// pattern = "-?[0-9]{1,10}"
type NumericInteger_Length1To10 int32
// Format limitations: n..11
// pattern = "-?[0-9]{1,11}"
type NumericInteger_Length1To11 int32
// Format limitations: n..12
// pattern = "-?[0-9]{1,12}"
type NumericInteger_Length1To12 int32
// Format limitations: n..13
// pattern = "-?[0-9]{1,13}"
type NumericInteger_Length1To13 int32
// Format limitations: n..15
// pattern = "-?[0-9]{1,15}"
type NumericInteger_Length1To15 int32
// Format limitations: n..18
// pattern = "-?[0-9]{1,18}"
type NumericInteger_Length1To18 int32
// Format limitations: n..2
// pattern = "-?[0-9]{1,2}"
type NumericInteger_Length1To2 int32
// Format limitations: n..3
// pattern = "-?[0-9]{1,3}"
type NumericInteger_Length1To3 int32
// Format limitations: n..4
// pattern = "-?[0-9]{1,4}"
type NumericInteger_Length1To4 int32
// Format limitations: n..5
// pattern = "-?[0-9]{1,5}"
type NumericInteger_Length1To5 int32
// Format limitations: n..6
// pattern = "-?[0-9]{1,6}"
type NumericInteger_Length1To6 int32
// Format limitations: n..8
// pattern = "-?[0-9]{1,8}"
type NumericInteger_Length1To8 int32
// Format limitations: n..9
// pattern = "-?[0-9]{1,9}"
type NumericInteger_Length1To9 int32
// Format limitations: n2
// pattern = "-?[0-9]{2,2}"
type NumericInteger_Length2To2 int32
// Format limitations: n3
// pattern = "-?[0-9]{3,3}"
type NumericInteger_Length3To3 int32
// Format limitations: n4
// pattern = "-?[0-9]{4,4}"
type NumericInteger_Length4To4 int32
// Format limitations: an10
type NumericString_Length10To10 string
// Format limitations: an3
type NumericString_Length3To3 string
// Format limitations: n1
type StringLength1To1 string
// Format limitations: an..12
type StringLength1To12 string
// Format limitations: an..18
type StringLength1To18 string
// Format limitations: an..2
type StringLength1To2 string
// Format limitations: an..20
type StringLength1To20 string
// Format limitations: an..3
type StringLength1To3 string
// Format limitations: an..35
type StringLength1To35 string
// Format limitations: an..4
type StringLength1To4 string
// Format limitations: an..5
type StringLength1To5 string
// Format limitations: an..6
type StringLength1To6 string
// Format limitations: an..7
type StringLength1To7 string
// Format limitations: an..9
type StringLength1To9 string
// Format limitations: an..9999
type StringLength1To9999 string
// Format limitations: an3..5
type StringLength3To5 string
// Format limitations: n6
type StringLength6To6 string
// Time format: 24H. All digits are mandatory . Example: from 0000 to 2359
// pattern = "([0-1][0-9]|2[0-3])[0-5][0-9]"
type Time24_HHMM string
// Time format: 24H.Only significant digits are mandatory. Example: from 0 to 2359
// pattern = "([0-1]?[0-9]|2[0-3])?[0-5]?[0-9]"
type Time24_hhmM string
// Value of the year. Example: 2003
// pattern = "[0-9]{4}"
type Year_YYYY string
// **********************************
// Formats for backward compatibility
// **********************************
// Format limitations: an..35
type AlphaNumericStringLength1To35 string
// Format limitations: an..14
type AlphaNumericStringLength1To14 string
// Format limitations: an1
type AlphaNumericStringLength1To1 string
// Format limitations: an..30
type AlphaNumericStringLength1To30 string
// Format limitations: n..15
type NumericIntegerLength1To15 int32
// Format limitations: an..99999
type AlphaNumericStringLength1To99999 string
// Format limitations: an..3
type AlphaNumericStringLength1To3 string
// Format limitations: an..9
type AlphaNumericStringLength1To9 string
// Format limitations: an..17
type AlphaNumericStringLength1To17 string
// Format limitations: an..25
type AlphaNumericStringLength1To25 string
// Format limitations: an..5
type AlphaNumericStringLength1To5 string
// Format limitations: an..6
type AlphaNumericStringLength1To6 string
// Format limitations: an..70
type AlphaNumericStringLength1To70 string
// Format limitations: a..6
type AlphaStringLength1To6 string
// Format limitations: an..10
type AlphaNumericStringLength1To10 string
// Format limitations: an..4
type AlphaNumericStringLength1To4 string
// **************************
// END backward compatibility
// **************************
// Value of the minutes in the time. Only significant digits are mandatory. Example: 7
type Minute_mM string
// Format limitations: n..2
type NumericDecimal_Length1To2 float64
// Format limitations: an..547
type AlphaNumericString_Length1To547 string
// Format limitations: a..6
type AlphaString_Length1To6 string
// Format limitations: an..100
type AlphaNumericString_Length1To100 string
// Format limitations: an..500
type AlphaNumericString_Length1To500 string
// Format limitations: an..999
type AlphaNumericString_Length1To999 string
// Format limitations: an..255
type AlphaNumericString_Length1To255 string
// Format limitations: an..320
type AlphaNumericString_Length1To320 string
// Format limitations: an..64
type AlphaNumericString_Length1To64 string
// Format limitations: an..90
type AlphaNumericString_Length1To90 string
// Format limitations: an..2
type AlphaNumericString_Length0To2 string
// Format limitations: an..55
type AlphaNumericString_Length1To55 string
// Format limitations: n2..4
type NumericInteger_Length2To4 int32
// Format limitations: an..63
type AlphaNumericString_Length1To63 string
// Format limitations: an..32
type AlphaNumericString_Length1To32 string
// Format limitations: an..16
type AlphaNumericString_Length1To16 string
// Format limitations: an8
type AlphaNumericString_Length8To8 string
// Format limitations: n8
type NumericInteger_Length8To8 int32
// Format limitations: an..24
type AlphaNumericString_Length1To24 string
// Format limitations: n6
type NumericInteger_Length6To6 int32
// Format limitations: n..4
type NumericInteger_Length0To4 int32
// Format limitations: n..3
type NumericDecimal_Length1To3 float64
// Format limitations: a..60
type AlphaString_Length1To60 string
// Format limitations: an..22
type AlphaNumericString_Length1To22 string
// Format limitations: n5..8
type NumericInteger_Length5To8 int32
// Format limitations: an..31
type AlphaNumericString_Length1To31 string
// Format limitations: an..120
type AlphaNumericString_Length1To120 string
// Format limitations: n..35
type NumericInteger_Length1To35 int32
// Format limitations: an..188
type AlphaNumericString_Length1To188 string
// Format limitations: n..30
type NumericDecimal_Length1To30 float64
// Format limitations: n..20
type NumericDecimal_Length1To20 float64
// Format limitations: n..20
type NumericInteger_Length1To20 int32
// Format limitations: an..61
type AlphaNumericString_Length1To61 string
// Format limitations: an..512
type AlphaNumericString_Length1To512 string
// Format limitations: an..99
type AlphaNumericString_Length1To99 string
// Format limitations: an..108
type AlphaNumericString_Length0To108 string
// Format limitations: an..56
type AlphaNumericString_Length0To56 string
// Format limitations: an..144
type AlphaNumericString_Length0To144 string
// Format limitations: an..400
type AlphaNumericString_Length1To400 string
// Format limitations: an6..9
type AlphaNumericString_Length6To9 string
// Format limitations: an..70
type AlphaNumericString_Length0To70 string
// Format limitations: an..6
type AlphaNumericString_Length0To6 string
// Format limitations: a..3
type AlphaString_Length0To3 string
// Format limitations: an..12
type AlphaNumericString_Length0To12 string
// Format limitations: a..9
type AlphaString_Length0To9 string
// Format limitations: an..5
type AlphaNumericString_Length0To5 string
// Format limitations: n..6
type NumericInteger_Length0To6 int32
package types
import (
"fmt"
"reflect"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
)
// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
type baseMap struct {
	// value is the original Go map supplied by the caller.
	value interface{}
	// refValue is the reflect handle over value, used for generic key
	// lookup and iteration.
	refValue reflect.Value
}
// stringMap is a specialization to improve the performance of simple key, value pair lookups by
// string as this is the most common usage of maps.
type stringMap struct {
	*baseMap
	// mapStrStr is the same map as baseMap.value, kept with its concrete
	// type so Get/Size avoid reflection.
	mapStrStr map[string]string
}
// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
// The value is expected to be a Go map; its entries are accessed via
// reflection by the returned value's methods.
func NewDynamicMap(value interface{}) traits.Mapper {
	return &baseMap{value, reflect.ValueOf(value)}
}
// NewStringStringMap returns a specialized traits.Mapper with string keys and values.
// Note the embedded baseMap's refValue is left zero here and is
// initialized lazily by the stringMap methods that need it.
func NewStringStringMap(value map[string]string) traits.Mapper {
	return &stringMap{
		baseMap:   &baseMap{value: value},
		mapStrStr: value,
	}
}
var (
	// MapType singleton.
	MapType = NewTypeValue("map",
		traits.ContainerType,
		traits.IndexerType,
		traits.IterableType,
		traits.SizerType)
)
// Contains reports whether the map has an entry for the given index;
// an entry is present exactly when Get does not yield an error value.
func (m *baseMap) Contains(index ref.Value) ref.Value {
	entry := m.Get(index)
	return Bool(entry.Type() != ErrType)
}
// ConvertToNative converts the map to the requested native representation:
// either a protobuf JSON struct/value, the backing map itself, or a newly
// built map of the requested key/element types.
func (m *baseMap) ConvertToNative(refType reflect.Type) (interface{}, error) {
	// JSON conversion.
	if refType == jsonValueType || refType == jsonStructType {
		// Convert entries to map[string]*structpb.Value first, then wrap
		// them in a structpb.Struct (and a structpb.Value if requested).
		jsonEntries, err :=
			m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
		if err != nil {
			return nil, err
		}
		jsonMap := &structpb.Struct{
			Fields: jsonEntries.(map[string]*structpb.Value)}
		if refType == jsonStructType {
			return jsonMap, nil
		}
		return &structpb.Value{
			Kind: &structpb.Value_StructValue{
				StructValue: jsonMap}}, nil
	}
	// Non-map conversion.
	if refType.Kind() != reflect.Map {
		return nil, fmt.Errorf("type conversion error from map to '%v'", refType)
	}
	// Map conversion.
	thisType := m.refValue.Type()
	thisKey := thisType.Key()
	thisKeyKind := thisKey.Kind()
	thisElem := thisType.Elem()
	thisElemKind := thisElem.Kind()
	otherKey := refType.Key()
	otherKeyKind := otherKey.Kind()
	otherElem := refType.Elem()
	otherElemKind := otherElem.Kind()
	// NOTE(review): this fast path compares reflect.Kind only, so two
	// distinct named types with the same kind are treated as identical
	// and the backing map is returned as-is — confirm callers tolerate
	// that shortcut.
	if otherKeyKind == thisKeyKind && otherElemKind == thisElemKind {
		return m.value, nil
	}
	// Kinds differ: build a fresh map, converting every key and element
	// individually.
	elemCount := m.Size().(Int)
	nativeMap := reflect.MakeMapWithSize(refType, int(elemCount))
	it := m.Iterator()
	for it.HasNext() == True {
		key := it.Next()
		refKeyValue, err := key.ConvertToNative(otherKey)
		if err != nil {
			return nil, err
		}
		refElemValue, err := m.Get(key).ConvertToNative(otherElem)
		if err != nil {
			return nil, err
		}
		nativeMap.SetMapIndex(
			reflect.ValueOf(refKeyValue),
			reflect.ValueOf(refElemValue))
	}
	return nativeMap.Interface(), nil
}
// ConvertToNative lazily materializes the reflect handle for the backing
// map (NewStringStringMap leaves it zero) before delegating to baseMap.
func (m *stringMap) ConvertToNative(refType reflect.Type) (interface{}, error) {
	if !m.baseMap.refValue.IsValid() {
		m.baseMap.refValue = reflect.ValueOf(m.value)
	}
	return m.baseMap.ConvertToNative(refType)
}
// ConvertToType converts the map to the requested CEL type: to itself
// for MapType, to the MapType singleton for TypeType, and to an error
// for everything else.
func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Value {
	if typeVal == MapType {
		return m
	}
	if typeVal == TypeType {
		return MapType
	}
	return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
}
// Equal reports whether the other value is a map of the same size whose
// entries all compare equal to this map's entries. A lookup error on
// either side is treated as inequality rather than propagated.
func (m *baseMap) Equal(other ref.Value) ref.Value {
	if MapType != other.Type() {
		return ValOrErr(other, "no such overload")
	}
	otherMap := other.(traits.Mapper)
	if m.Size() != otherMap.Size() {
		return False
	}
	it := m.Iterator()
	for it.HasNext() == True {
		key := it.Next()
		// Idiomatic early returns replace the prior if/else-if/else chain.
		otherVal := otherMap.Get(key)
		if IsError(otherVal) {
			return False
		}
		thisVal := m.Get(key)
		if IsError(thisVal) {
			return False
		}
		valEq := thisVal.Equal(otherVal)
		if valEq == False || IsUnknownOrError(valEq) {
			return valEq
		}
	}
	return True
}
// Equal lazily materializes the reflect handle for the backing map
// before delegating equality to the generic baseMap implementation.
func (m *stringMap) Equal(other ref.Value) ref.Value {
	if !m.baseMap.refValue.IsValid() {
		m.baseMap.refValue = reflect.ValueOf(m.value)
	}
	return m.baseMap.Equal(other)
}
// Get looks up the key in the backing map via reflection, converting the
// CEL key to the map's native key type first. Any failure is reported as
// a "no such key" style error value.
func (m *baseMap) Get(key ref.Value) ref.Value {
	// TODO: There are multiple reasons why a Get could fail. Typically, this is because the key
	// does not exist in the map; however, it's possible that the value cannot be converted to
	// the desired type. Refine this strategy to disambiguate these cases.
	thisKeyType := m.refValue.Type().Key()
	nativeKey, err := key.ConvertToNative(thisKeyType)
	if err != nil {
		return &Err{err}
	}
	nativeKeyVal := reflect.ValueOf(nativeKey)
	if !nativeKeyVal.Type().AssignableTo(thisKeyType) {
		return NewErr("no such key: '%v'", nativeKey)
	}
	// MapIndex yields the zero reflect.Value when the key is absent.
	value := m.refValue.MapIndex(nativeKeyVal)
	if !value.IsValid() {
		return NewErr("no such key: '%v'", nativeKey)
	}
	return NativeToValue(value.Interface())
}
// Get looks up a String key directly in the typed map, bypassing
// reflection; non-String keys and absent keys yield error values.
func (m *stringMap) Get(key ref.Value) ref.Value {
	k, ok := key.(String)
	if !ok {
		return ValOrErr(key, "no such key: %v", key)
	}
	if v, found := m.mapStrStr[string(k)]; found {
		return String(v)
	}
	return NewErr("no such key: %s", key)
}
// Iterator returns an iterator over the map's keys, in reflect.MapKeys
// order (Go map iteration order, i.e. not deterministic).
func (m *baseMap) Iterator() traits.Iterator {
	mapKeys := m.refValue.MapKeys()
	return &mapIterator{
		baseIterator: &baseIterator{},
		mapValue:     m,
		mapKeys:      mapKeys,
		cursor:       0,
		len:          int(m.Size().(Int))}
}
// Iterator lazily materializes the reflect handle for the backing map
// before delegating to the generic baseMap iterator.
func (m *stringMap) Iterator() traits.Iterator {
	if !m.baseMap.refValue.IsValid() {
		m.baseMap.refValue = reflect.ValueOf(m.value)
	}
	return m.baseMap.Iterator()
}
// Size returns the number of entries, computed via reflection.
func (m *baseMap) Size() ref.Value {
	return Int(m.refValue.Len())
}
// Size returns the number of entries using the typed map directly.
func (m *stringMap) Size() ref.Value {
	return Int(len(m.mapStrStr))
}
// Type returns the MapType singleton.
func (m *baseMap) Type() ref.Type {
	return MapType
}
// Value returns the original Go map supplied at construction.
func (m *baseMap) Value() interface{} {
	return m.value
}
type mapIterator struct {
*baseIterator
mapValue traits.Mapper
mapKeys []reflect.Value
cursor int
len int
}
func (it *mapIterator) HasNext() ref.Value {
return Bool(it.cursor < it.len)
}
func (it *mapIterator) Next() ref.Value {
if it.HasNext() == True {
index := it.cursor
it.cursor++
refKey := it.mapKeys[index]
return NativeToValue(refKey.Interface())
}
return nil
} | common/types/map.go | 0.686685 | 0.42656 | map.go | starcoder |
package hole
import (
"math/rand"
"strings"
)
var (
	// notes lists the 12 pitch classes in semitone order starting at C.
	// Each entry holds two spellings of the same pitch (e.g. C♯/D♭);
	// natural notes without an enharmonic alternative repeat their name.
	notes = [12][2]string{
		{"C", "B♯"},
		{"C♯", "D♭"},
		{"D", "D"},
		{"D♯", "E♭"},
		{"E", "F♭"},
		{"F", "E♯"},
		{"F♯", "G♭"},
		{"G", "G"},
		{"G♯", "A♭"},
		{"A", "A"},
		{"A♯", "B♭"},
		{"B", "C♭"},
	}
	// triadTypes are the chord-quality suffixes, index-aligned with
	// triadSteps: diminished, minor, major, augmented.
	triadTypes = [4]string{
		"°", "m", "", "+",
	}
	// triadSteps gives the semitone intervals (root→third, third→fifth)
	// for each triad quality.
	triadSteps = [4][2]int{
		{3, 3},
		{3, 4},
		{4, 3},
		{4, 4},
	}
	// orderings enumerates all 3! permutations of the three chord tones.
	orderings = [6][3]int{
		{0, 1, 2},
		{0, 2, 1},
		{1, 0, 2},
		{1, 2, 0},
		{2, 0, 1},
		{2, 1, 0},
	}
)
// letterVal maps a note's letter name (the first byte, 'A'..'G') to its
// index 0..6.
func letterVal(note string) int {
	return int(note[0]) - 'A'
}
// genNotes spells the three notes of a triad rooted at rootNote (pitch
// class rootIdx), whose third and fifth lie steps[0] and steps[0]+steps[1]
// semitones above the root. It returns an empty slice when no strictly
// spelled triad exists for this root spelling.
func genNotes(rootIdx int, rootNote string, steps [2]int) []string {
	thirdIdx := (rootIdx + steps[0]) % 12
	fifthIdx := (rootIdx + steps[0] + steps[1]) % 12
	// Start with each pitch class's primary spelling.
	thirdNote := notes[thirdIdx][0]
	fifthNote := notes[fifthIdx][0]
	// Enforce strict spelling. The third should be 2 letters
	// above the root, and the fifth should be 4 letters above,
	// wrapping at G
	if (letterVal(rootNote)+2)%7 != letterVal(thirdNote) {
		thirdNote = notes[thirdIdx][1]
	}
	if (letterVal(rootNote)+4)%7 != letterVal(fifthNote) {
		fifthNote = notes[fifthIdx][1]
	}
	// Return empty if strict spelling is impossible
	// (the alternate spellings chosen above also fail the letter rule).
	if (letterVal(rootNote)+2)%7 != letterVal(thirdNote) || (letterVal(rootNote)+4)%7 != letterVal(fifthNote) {
		return []string{}
	}
	return []string{rootNote, thirdNote, fifthNote}
}
func musicalChords() (args []string, out string) {
var outs []string
for rootIdx, rootNames := range notes {
// Loop once for each unique name the note has
uniqueNames := 2
if rootNames[0] == rootNames[1] {
uniqueNames = 1
}
for _, rootNote := range rootNames[:uniqueNames] {
for triadIdx := 0; triadIdx < 4; triadIdx++ {
triad := triadTypes[triadIdx]
steps := triadSteps[triadIdx]
chordNotes := genNotes(rootIdx, rootNote, steps)
if len(chordNotes) > 0 {
chord := rootNote + triad
for _, ordering := range orderings {
rearrangedNotes := []string{chordNotes[ordering[0]], chordNotes[ordering[1]], chordNotes[ordering[2]]}
args = append(args, strings.Join(rearrangedNotes, " "))
outs = append(outs, chord)
}
}
}
}
}
// shuffle args and outputs in the same way
rand.Shuffle(len(args), func(i, j int) {
args[i], args[j] = args[j], args[i]
outs[i], outs[j] = outs[j], outs[i]
})
out = strings.Join(outs, "\n")
return
} | hole/musical-chords.go | 0.526343 | 0.453201 | musical-chords.go | starcoder |
package ckks
import (
"math"
)
// ChebyshevInterpolation is a struct storing the coefficients, degree and range of a Chebyshev interpolation polynomial.
type ChebyshevInterpolation struct {
	// coeffs maps the basis index j to the coefficient of T_j.
	coeffs map[uint64]complex128
	// degree is the degree of the interpolation polynomial.
	degree uint64
	// a and b are the endpoints of the approximation interval [a, b].
	a complex128
	b complex128
}
// Approximate computes a Chebyshev approximation of the input function, for the range [-a, b] of degree degree.
// To be used in conjunction with the function EvaluateCheby.
func Approximate(function func(complex128) complex128, a, b complex128, degree int) (cheby *ChebyshevInterpolation) {
	// Sample the function at the degree+1 Chebyshev nodes of [a, b].
	nodes := chebyshevNodes(degree+1, a, b)
	samples := make([]complex128, len(nodes))
	for i, node := range nodes {
		samples[i] = function(node)
	}
	coeffs := chebyCoeffs(nodes, samples, a, b)
	cheby = &ChebyshevInterpolation{
		coeffs: make(map[uint64]complex128, len(coeffs)),
		degree: uint64(degree),
		a:      a,
		b:      b,
	}
	for j, c := range coeffs {
		cheby.coeffs[uint64(j)] = c
	}
	return
}
func chebyshevNodes(n int, a, b complex128) (u []complex128) {
u = make([]complex128, n)
var x, y complex128
for k := 1; k < n+1; k++ {
x = 0.5 * (a + b)
y = 0.5 * (b - a)
u[k-1] = x + y*complex(math.Cos((float64(k)-0.5)*(3.141592653589793/float64(n))), 0)
}
return
}
// evaluateChebyshevPolynomial evaluates sum_j coeffs[j]*T_j(u) where u is
// x mapped from [a, b] onto [-1, 1].
func evaluateChebyshevPolynomial(coeffs []complex128, x complex128, a, b complex128) (y complex128) {
	u := (2*x - a - b) / (b - a)
	// Chebyshev recurrence: T_0 = 1, T_1 = u, T_{j+1} = 2u*T_j - T_{j-1}.
	prev, cur := complex128(1), u
	y = coeffs[0]
	for _, c := range coeffs[1:] {
		y = y + cur*c
		prev, cur = cur, 2*u*cur-prev
	}
	return
}
// chebyCoeffs computes the Chebyshev coefficients for the samples fi
// taken at the given nodes over the interval [a, b].
func chebyCoeffs(nodes, fi []complex128, a, b complex128) (coeffs []complex128) {
	n := len(nodes)
	coeffs = make([]complex128, n)
	for i := 0; i < n; i++ {
		// Map the node onto [-1, 1] and accumulate fi[i]*T_j(u) into
		// each coefficient slot using the Chebyshev recurrence.
		u := (2*nodes[i] - a - b) / (b - a)
		prev, cur := complex128(1), u
		for j := 0; j < n; j++ {
			coeffs[j] += fi[i] * prev
			prev, cur = cur, 2*u*cur-prev
		}
	}
	// Normalize: the leading coefficient by 1/n, the rest by 2/n.
	coeffs[0] /= complex(float64(n), 0)
	for i := 1; i < n; i++ {
		coeffs[i] *= (2.0 / complex(float64(n), 0))
	}
	return
}
package dtls
import (
"encoding/binary"
)
/*
The TLS Record Layer which handles all data transport.
The record layer is assumed to sit directly on top of some
reliable transport such as TCP. The record layer can carry four types of content:
1. Handshake messages—used for algorithm negotiation and key establishment.
2. ChangeCipherSpec messages—really part of the handshake but technically a separate kind of message.
3. Alert messages—used to signal that errors have occurred
4. Application layer data
The DTLS record layer is extremely similar to that of TLS 1.1. The
only change is the inclusion of an explicit sequence number in the
record. This sequence number allows the recipient to correctly
verify the TLS MAC.
https://tools.ietf.org/html/rfc4347#section-4.1
*/
type recordLayer struct {
	// recordLayerHeader precedes the content on the wire; its
	// contentType and contentLen fields are derived from content during
	// Marshal.
	recordLayerHeader recordLayerHeader
	// content is the record payload: handshake, alert, changeCipherSpec,
	// application data, or (with a connection id) tls12cid.
	content content
}
// Marshal serializes the record: the content is encoded first so its
// length and type can be stamped into the header, then the header bytes
// are prepended.
func (r *recordLayer) Marshal() ([]byte, error) {
	body, err := r.content.Marshal()
	if err != nil {
		return nil, err
	}
	r.recordLayerHeader.contentLen = uint16(len(body))
	r.recordLayerHeader.contentType = r.content.contentType()
	header, err := r.recordLayerHeader.Marshal()
	if err != nil {
		return nil, err
	}
	return append(header, body...), nil
}
// Unmarshal parses a single record: the header first, then the content
// body selected by the record's content-type byte (data[0]).
func (r *recordLayer) Unmarshal(data []byte) error {
	if err := r.recordLayerHeader.Unmarshal(data); err != nil {
		return err
	}
	hlen := recordLayerHeaderSize
	switch contentType(data[0]) {
	case contentTypeChangeCipherSpec:
		r.content = &changeCipherSpec{}
	case contentTypeAlert:
		r.content = &alert{}
	case contentTypeHandshake:
		r.content = &handshake{}
	case contentTypeApplicationData:
		r.content = &applicationData{}
	case contentTypeTLS12Cid:
		// A connection id extends the header, shifting where the
		// content body begins.
		r.content = &tls12cid{}
		hlen += extensionConnectionIdSize
	default:
		return errInvalidContentType
	}
	return r.content.Unmarshal(data[hlen:])
}
// Note that as with TLS, multiple handshake messages may be placed in
// the same DTLS record, provided that there is room and that they are
// part of the same flight. Thus, there are two acceptable ways to pack
// two DTLS messages into the same datagram: in the same record or in
// separate records.
// https://tools.ietf.org/html/rfc6347#section-4.2.3
func unpackDatagram(buf []byte) ([][]byte, error) {
out := [][]byte{}
for offset := 0; len(buf) != offset; {
if len(buf)-offset <= recordLayerHeaderSize {
return nil, errDTLSPacketInvalidLength
}
plenOffset := 11
hlen := recordLayerHeaderSize
// take care of optional cid which shifts the length field
// right while extending the total header size
if contentType(buf[offset]) == contentTypeTLS12Cid {
plenOffset += extensionConnectionIdSize
hlen += extensionConnectionIdSize
}
pktLen := (hlen + int(binary.BigEndian.Uint16(buf[offset+plenOffset:])))
out = append(out, buf[offset:offset+pktLen])
offset += pktLen
}
return out, nil
} | pkg/dtls/record_layer.go | 0.680879 | 0.44354 | record_layer.go | starcoder |
package particles
import (
"image/color"
"github.com/gremour/grue"
)
// ParticleData contains set of particle parameters.
type ParticleData struct {
	Pos   grue.Vec
	Size  grue.Vec
	Color color.Color
}
// Particle describes particle.
type Particle struct {
	// Initial holds the parameters at spawn time; Current is what gets
	// drawn and is updated by Processor.
	Initial Particle
	Current ParticleData
	// Image is the sprite name used to draw the particle (and the key
	// particles are grouped under).
	Image string
	// Times of spawn and expiration.
	Spawned float64
	Expires float64
	// Processor updates current particle data based on
	// time passed since from Born.
	Processor func(p *Particle, time float64)
}
// Group of particles with one generator.
type Group struct {
	// Particles maps an image name to the live particles drawn with it.
	Particles map[string][]Particle
	// ParticlesHardCap limits the total particle count (0 means the
	// default cap used by Process).
	ParticlesHardCap int
	Generator        Generator
}
// Processor modifies current particle state.
type Processor interface {
	Process(p *Particle)
}
// Generator creates new particles.
type Generator interface {
	Generate(time float64, curNum int) Particle
}
// Placer generates position for new particle.
type Placer interface {
	Place(r grue.Rect, time float64) grue.Vec
}
// Process creates new particles, updates existing and removes expired.
//
// Fix: the previous removal loop shrank the slice while ranging over it,
// which skipped entries after a removal and could wrongly drop live
// particles; expired particles are now filtered in place.
func (g *Group) Process(time float64) {
	if g.Particles == nil {
		g.Clear()
	}
	num := 0
	// Remove expired particles, reusing each slice's backing array.
	for k, v := range g.Particles {
		kept := v[:0]
		for _, p := range v {
			if p.Expires > time {
				kept = append(kept, p)
			}
		}
		num += len(kept)
		g.Particles[k] = kept
	}
	// hardCap (renamed from cap, which shadowed the builtin).
	hardCap := g.ParticlesHardCap
	if hardCap == 0 {
		hardCap = 128
	}
	// Generate new particles until the generator returns an empty image
	// or the cap is reached.
	for {
		p := g.Generator.Generate(time, num)
		if p.Image == "" || num >= hardCap {
			break
		}
		num++
		g.Particles[p.Image] = append(g.Particles[p.Image], p)
	}
	// Advance existing particles.
	for _, v := range g.Particles {
		for i := range v {
			if v[i].Processor != nil {
				v[i].Processor(&v[i], time)
			}
		}
	}
}
// Draw particle on surface.
func (p Particle) Draw(s grue.Surface) {
r := grue.Rect{Max: p.Current.Size}
r = r.SetCenter(p.Current.Pos)
s.DrawImageStretched(p.Image, r, p.Current.Color)
}
// Draw group of particles on surface.
func (g *Group) Draw(s grue.Surface) {
for _, v := range g.Particles {
for _, p := range v {
p.Draw(s)
}
}
}
// Clear removes all particles. This is equivalent
// of creating new group with same generator.
func (g *Group) Clear() {
g.Particles = make(map[string][]Particle, 32)
} | particles/particles.go | 0.643329 | 0.417331 | particles.go | starcoder |
package expect
import (
"fmt"
"path"
"reflect"
"runtime"
"strings"
"testing"
)
// New returns an expect function bound to t: wrapping a target value in
// the returned function yields an *expectation whose ToXxx matchers
// report failures through t.
func New(t *testing.T) func(target interface{}) *expectation {
	return (&expector{T: t}).expect
}
// expector carries the testing handle used by expectations it creates.
type expector struct {
	T *testing.T
}
// expect wraps a target value in an expectation bound to e's test.
func (e *expector) expect(target interface{}) *expectation {
	return &expectation{T: e.T, target: target}
}
// expectation pairs a target value with the test it should fail.
type expectation struct {
	T      *testing.T
	target interface{}
}
// ToHaveLength asserts that the target (a string or slice) has length l.
func (e *expectation) ToHaveLength(l int) {
	realLen, ok := haveLength(e.target, l)
	e.expect(ok, "expected %#v to have length %d but was %d", e.target, l, realLen)
}
// ToContain asserts that the target string contains the given substring,
// or that the target slice contains the given element.
func (e *expectation) ToContain(element interface{}) {
	e.expect(contains(e.target, element), "expected %#v to contain %#v", e.target, element)
}
// ToHavePrefix asserts that the target is a string starting with prefix.
func (e *expectation) ToHavePrefix(prefix string) {
	s := e.expectString()
	e.expect(strings.HasPrefix(s, prefix), "expected %q to have prefix %q", s, prefix)
}
// ToHaveSuffix asserts that the target is a string ending with suffix.
// (The parameter was previously misnamed prefix.)
func (e *expectation) ToHaveSuffix(suffix string) {
	s := e.expectString()
	e.expect(strings.HasSuffix(s, suffix), "expected %q to have suffix %q", s, suffix)
}
// ToNotEqual asserts that the target differs from i under Go's ==.
// NOTE(review): this is not the negation of ToEqual, which compares
// fmt.Sprint renderings via equal; confirm the asymmetry is intended.
func (e *expectation) ToNotEqual(i interface{}) {
	e.expect(e.target != i, "expected %#v to not equal %#v", e.target, i)
}
// ToEqual asserts that the target and i render identically via
// fmt.Sprint (loose, string-based equality).
func (e *expectation) ToEqual(i interface{}) {
	e.expect(equal(e.target, i), "expected %#v to equal %#v", e.target, i)
}
// ToNotBeNil asserts that the target is not nil.
// %#v is used instead of the previous %q: the target is an arbitrary
// interface value, and %q on a non-string renders as a %!q verb error
// in the failure message.
func (e *expectation) ToNotBeNil() {
	e.expect(e.target != nil, "expected %#v to not be nil", e.target)
}

// ToBeNil asserts that the target is nil.
func (e *expectation) ToBeNil() {
	e.expect(e.target == nil, "expected %#v to be nil", e.target)
}
// helpers
// equal reports whether target and other have identical fmt.Sprint
// representations. Note this makes e.g. 1 and "1" compare as equal.
func equal(target interface{}, other interface{}) bool {
	lhs := fmt.Sprint(target)
	rhs := fmt.Sprint(other)
	return lhs == rhs
}
// contains reports whether in (a string or slice, normalized by valueOf)
// contains target: substring match for strings, == match for slices.
func contains(in interface{}, target interface{}) bool {
	switch v := valueOf(in).(type) {
	case string:
		s, ok := target.(string)
		return ok && strings.Contains(v, s)
	case []interface{}:
		for _, el := range v {
			if el == target {
				return true
			}
		}
	}
	return false
}
// haveLength returns the length of in (a string or slice, normalized by
// valueOf) and whether it equals l. Unsupported types yield (0, false).
func haveLength(in interface{}, l int) (int, bool) {
	var n int
	switch v := valueOf(in).(type) {
	case string:
		n = len(v)
	case []interface{}:
		n = len(v)
	default:
		return 0, false
	}
	return n, n == l
}
// valueOf normalizes list for the helpers above: strings come back as
// string, slices of any element type as []interface{}, and anything
// else as nil.
func valueOf(list interface{}) interface{} {
	v := reflect.ValueOf(list)
	switch v.Kind() {
	case reflect.String:
		return v.String()
	case reflect.Slice:
		elems := make([]interface{}, 0, v.Len())
		for i := 0; i < v.Len(); i++ {
			elems = append(elems, v.Index(i).Interface())
		}
		return elems
	default:
		return nil
	}
}
// expect records a passing assertion or fails the test with the
// formatted message, depending on check.
func (e *expectation) expect(check bool, format string, args ...interface{}) {
	if check {
		e.pass()
	} else {
		e.fail(format, args...)
	}
}

// pass prints a progress dot for a successful assertion.
func (e *expectation) pass() {
	fmt.Print(".")
}

// fail prints the failure message prefixed with the caller's file and
// line, then aborts the test via FailNow.
func (e *expectation) fail(format string, i ...interface{}) {
	// NOTE(review): the fixed Caller depth of 3 assumes fail is reached
	// via assertion method -> expect -> fail; expectString calls fail
	// directly, so the reported location may be off there — confirm.
	_, file, line, _ := runtime.Caller(3)
	fmt.Printf("\n\t%s:%d: %s\n", path.Base(file), line, fmt.Sprintf(format, i...))
	e.T.FailNow()
}
func (e *expectation) expectString() string {
s, ok := e.target.(string)
if !ok {
e.fail("expected target to be string but was %T", e.target)
}
return s
} | expect/expect.go | 0.673299 | 0.567577 | expect.go | starcoder |
package list
// node is one chunk of the unrolled list: a short value slice plus a
// link to the next chunk.
type node struct {
	next *node
	values []interface{}
}

// maxChunkSize is the fixed capacity of each chunk's value slice.
var maxChunkSize int = 8

// midOfChunk is the split point used when a full chunk must be divided.
var midOfChunk int = maxChunkSize / 2

// newNode allocates an empty chunk linked in front of next.
func newNode(next *node) *node {
	return &node{next, make([]interface{}, 0, maxChunkSize)}
}

// isFull reports whether the chunk has no spare capacity left.
func (n *node) isFull() bool {
	return len(n.values) >= maxChunkSize
}
// Iterator points at a single value in the list: the chunk holding it
// and the value's index within that chunk.
type Iterator struct {
	node *node
	currentIdx int
}
// MoveNext advances the iterator by one position and returns it, or nil
// when the end of the list has been reached.
func (it *Iterator) MoveNext() *Iterator {
	// Step within the current chunk when possible.
	if it.currentIdx < len(it.node.values)-1 {
		it.currentIdx++
		return it
	}
	// Otherwise hop to the start of the next chunk.
	next := it.node.next
	if next == nil {
		return nil
	}
	it.node = next
	it.currentIdx = 0
	return it
}
// MoveTo advances the iterator by shift positions and returns it, or nil
// when the list ends before shift positions are consumed. MoveTo(0)
// leaves the iterator unchanged.
func (it *Iterator) MoveTo(shift int) *Iterator {
	// Fast path: the target position lies within the current chunk.
	if shift < len(it.node.values)-it.currentIdx {
		it.currentIdx += shift
		return it
	}
	// Walk chunk by chunk: first consume the remainder of the current
	// chunk, then whole chunks, until the target chunk is reached.
	for step := len(it.node.values) - it.currentIdx; shift >= step; step = len(it.node.values) {
		shift -= step
		if it.node.next == nil {
			return nil
		}
		it.node = it.node.next
	}
	it.currentIdx = shift
	return it
}
// GetValue returns the value the iterator currently points at.
func (it *Iterator) GetValue() interface{} {
	return it.node.values[it.currentIdx]
}

// isLastValue reports whether the iterator is at the last value of its
// current chunk.
func (it *Iterator) isLastValue() bool {
	return it.currentIdx == len(it.node.values)-1
}

// UnrolledForwardList is a singly linked list of fixed-capacity chunks,
// trading per-element pointers for cache-friendly short arrays.
type UnrolledForwardList struct {
	head *node
	length int
}
// NewUnrolledForwardList returns an empty list.
func NewUnrolledForwardList() *UnrolledForwardList {
	return new(UnrolledForwardList)
}

// GetBegin returns an iterator at the first element, or nil when the
// list is empty.
func (l *UnrolledForwardList) GetBegin() *Iterator {
	head := l.head
	if head == nil || len(head.values) == 0 {
		return nil
	}
	return &Iterator{node: head, currentIdx: 0}
}

// GetLength returns the number of elements in the list.
func (l UnrolledForwardList) GetLength() int {
	return l.length
}
// insertValue places value at index pos, shifting later elements right.
// The slice must have spare capacity; it is extended in place by one.
func insertValue(values []interface{}, value interface{}, pos int) []interface{} {
	n := len(values)
	out := values[:n+1]
	for i := n; i > pos; i-- {
		out[i] = out[i-1]
	}
	out[pos] = value
	return out
}
// InsertAfter inserts v immediately after the iterator's position. When
// the current chunk is full it is split around midOfChunk, and the
// iterator is adjusted so that it still refers to the element it did
// before the call. Panics when it is nil.
func (l *UnrolledForwardList) InsertAfter(it *Iterator, v interface{}) {
	if it == nil {
		panic("insert after nil iterator")
	}
	l.length++
	// Easy case: the chunk has spare capacity, shift in place.
	if !it.node.isFull() {
		it.node.values = insertValue(it.node.values, v, it.currentIdx+1)
		return
	}
	// Full chunk: split it by moving the upper half into a new node
	// linked right after the current one.
	it.node.next = newNode(it.node.next)
	if it.currentIdx < midOfChunk {
		// Insertion point is in the lower half: move the upper half out
		// wholesale and insert into the now half-empty current chunk.
		it.node.next.values = append(it.node.next.values, it.node.values[midOfChunk:]...)
		it.node.values = insertValue(it.node.values[0:midOfChunk], v, it.currentIdx+1)
		return
	}
	// Insertion point is in the upper half: rebuild the new node as
	// (values up to and including the iterator) + v + (remaining values).
	it.node.next.values = append(it.node.next.values, it.node.values[midOfChunk:it.currentIdx+1]...)
	insertPos := it.currentIdx - midOfChunk + 1
	if insertPos < midOfChunk {
		it.node.next.values = append(it.node.next.values, v)
		it.node.next.values = append(it.node.next.values, it.node.values[it.currentIdx+1:]...)
	} else {
		it.node.next.values = append(it.node.next.values, v)
	}
	it.node.values = it.node.values[0:midOfChunk]
	// Keep the iterator pointing at the same element, now in the new node.
	it.node = it.node.next
	it.currentIdx -= midOfChunk
}
// PushFront inserts v at the head of the list.
func (l *UnrolledForwardList) PushFront(v interface{}) {
	l.length++
	// Empty list: create the first chunk.
	if l.head == nil {
		l.head = newNode(nil)
		l.head.values = append(l.head.values, v)
		return
	}
	// Full head chunk: link a new head holding v plus the lower half of
	// the old head, then shift the old head's upper half down.
	if l.head.isFull() {
		l.head = newNode(l.head)
		l.head.values = append(l.head.values, v)
		l.head.values = append(l.head.values, l.head.next.values[0:midOfChunk]...)
		copy(l.head.next.values, l.head.next.values[midOfChunk:])
		l.head.next.values = l.head.next.values[:midOfChunk]
		return
	}
	// Head chunk has room: grow by one, shift its contents right (copy
	// handles the overlapping ranges like memmove) and place v in front.
	l.head.values = append(l.head.values, 0)
	copy(l.head.values[1:], l.head.values)
	l.head.values[0] = v
	return
}
// canMergeNodes reports whether n (after one pending removal) and its
// successor would fit together in a single chunk.
func canMergeNodes(n *node) bool {
	return n.next != nil &&
		//NOTE: -1 accounts for the element about to be removed, so the
		// merged node holds at most maxChunkSize-1 elements
		(len(n.values)-1+len(n.next.values)) < maxChunkSize
}
// removeValueFromNode deletes the value at idx from node, merging the
// node with its successor when the combined contents would still fit in
// one chunk.
func removeValueFromNode(node *node, idx int) {
	if !canMergeNodes(node) {
		// Plain removal: shift the tail left by one.
		nodeLen := len(node.values)
		copy(node.values[idx:], node.values[idx+1:])
		node.values = node.values[:nodeLen-1]
		return
	}
	// Removal plus merge: shift left, append the successor's values into
	// the freed space, then unlink the successor.
	copy(node.values[idx:], node.values[idx+1:])
	currentNodeLen := len(node.values)
	node.values = node.values[:currentNodeLen-1+len(node.next.values)]
	copy(node.values[currentNodeLen-1:], node.next.values)
	node.next = node.next.next
}
// PopFront removes and returns the first element of the list.
// Panics when the list is empty.
func (l *UnrolledForwardList) PopFront() interface{} {
	head := l.head
	if head == nil {
		panic("pop on empty list")
	}
	front := head.values[0]
	removeValueFromNode(head, 0)
	// Drop the head chunk entirely once it is empty.
	if len(head.values) == 0 {
		l.head = head.next
	}
	l.length--
	return front
}
// RemoveAfter deletes the element immediately following the iterator's
// position. Panics when the iterator is at the last element of the list.
func (l *UnrolledForwardList) RemoveAfter(it *Iterator) {
	// The successor lives in the same chunk.
	if !it.isLastValue() {
		removeValueFromNode(it.node, it.currentIdx+1)
		l.length--
		return
	}
	if it.node.next == nil {
		panic("attempt to remove after the last item")
	}
	// The successor is the first element of the next chunk; drop that
	// chunk entirely when it becomes empty.
	removeValueFromNode(it.node.next, 0)
	if len(it.node.next.values) == 0 {
		it.node.next = it.node.next.next
	}
	l.length--
}
package ui
import (
"encoding/binary"
"math"
"time"
"gioui.org/ui/f32"
"gioui.org/ui/internal/ops"
)
// Config represents the essential configuration for
// updating and drawing a user interface.
type Config interface {
	// Now returns the current animation time.
	Now() time.Time
	// Px converts a Value to a whole number of pixels.
	Px(v Value) int
}
// InvalidateOp requests a redraw at the given time. Use
// the zero value to request an immediate redraw.
type InvalidateOp struct {
	At time.Time
}

// TransformOp applies a transform to later ops.
type TransformOp struct {
	Transform Transform
}

// Transform carries a coordinate transformation. Only pure offsets are
// supported at the moment.
type Transform struct {
	// TODO: general transforms.
	offset f32.Point
}

// Inf is the int value that represents an unbounded maximum constraint.
const Inf = int(^uint(0) >> 1)
// Add encodes the invalidate request and appends it to the op list.
// A zero nanosecond field means "redraw immediately".
func (r InvalidateOp) Add(o *Ops) {
	// NOTE(review): the buffer is sized with ops.TypeRedrawLen although
	// the opcode written is TypeInvalidate — presumably the two lengths
	// agree (the op may have been renamed); confirm against the ops package.
	data := make([]byte, ops.TypeRedrawLen)
	data[0] = byte(ops.TypeInvalidate)
	bo := binary.LittleEndian
	// UnixNano cannot represent the zero time.
	if t := r.At; !t.IsZero() {
		nanos := t.UnixNano()
		if nanos > 0 {
			bo.PutUint64(data[1:], uint64(nanos))
		}
	}
	o.Write(data)
}
// Decode reads an encoded invalidate op back into r. A zero nanosecond
// field leaves At as the zero time (immediate redraw).
// Panics when d does not start with the invalidate opcode.
func (r *InvalidateOp) Decode(d []byte) {
	if ops.OpType(d[0]) != ops.TypeInvalidate {
		panic("invalid op")
	}
	nanos := binary.LittleEndian.Uint64(d[1:])
	if nanos > 0 {
		r.At = time.Unix(0, int64(nanos))
	}
}
// InvTransform applies the inverse of the transform to p
// (subtracts the offset).
func (t Transform) InvTransform(p f32.Point) f32.Point {
	return p.Sub(t.offset)
}

// Transform applies the transform to p (adds the offset).
func (t Transform) Transform(p f32.Point) f32.Point {
	return p.Add(t.offset)
}

// Mul composes the two transforms; for pure offsets the offsets add.
func (t Transform) Mul(t2 Transform) Transform {
	return Transform{
		offset: t.offset.Add(t2.offset),
	}
}
// Add encodes the transform op (opcode byte followed by the offset's X
// and Y as little-endian float32 bits) and appends it to the op list.
func (t TransformOp) Add(o *Ops) {
	buf := make([]byte, ops.TypeTransformLen)
	buf[0] = byte(ops.TypeTransform)
	binary.LittleEndian.PutUint32(buf[1:], math.Float32bits(t.Transform.offset.X))
	binary.LittleEndian.PutUint32(buf[5:], math.Float32bits(t.Transform.offset.Y))
	o.Write(buf)
}
// Decode reads an encoded transform op back into t.
// Panics when d does not start with the transform opcode.
func (t *TransformOp) Decode(d []byte) {
	if ops.OpType(d[0]) != ops.TypeTransform {
		panic("invalid op")
	}
	bo := binary.LittleEndian
	off := f32.Point{
		X: math.Float32frombits(bo.Uint32(d[1:])),
		Y: math.Float32frombits(bo.Uint32(d[5:])),
	}
	*t = TransformOp{Transform: Offset(off)}
}
// Offset returns a Transform that translates points by o.
func Offset(o f32.Point) Transform {
	return Transform{o}
}
package render
import (
"blockexchange/core"
"sort"
"github.com/fogleman/gg"
)
// Block is a single visible node resolved to schema coordinates, a
// color, and a draw-order key (see ProbePosition).
type Block struct {
	X int
	Y int
	Z int
	Color *Color
	Order int
}

// PartRenderer renders one parsed schema part as an isometric image.
type PartRenderer struct {
	Mapblock *core.ParsedSchemaPart
	Colormapping map[string]*Color
	// NodeIDStringMapping is the reverse of the schema's name -> id map.
	NodeIDStringMapping map[int]string
	// Blocks collects the visible blocks found by ProbePosition.
	Blocks []*Block
	// MaxX/MaxY/MaxZ are the largest valid coordinates (size - 1).
	MaxX int
	MaxY int
	MaxZ int
	// Size is the half-width of a rendered block; OffsetX/OffsetY shift
	// the projection within the image.
	Size float64
	OffsetX float64
	OffsetY float64
}
// NewPartRenderer builds a renderer for one schema part, precomputing
// the id -> node-name reverse index from the schema's name -> id map.
func NewPartRenderer(mapblock *core.ParsedSchemaPart, cm map[string]*Color, size, offset_x, offset_y float64) *PartRenderer {
	idToName := make(map[int]string, len(mapblock.Meta.NodeMapping))
	for name, id := range mapblock.Meta.NodeMapping {
		idToName[id] = name
	}
	dims := mapblock.Meta.Size
	return &PartRenderer{
		Mapblock:            mapblock,
		Blocks:              []*Block{},
		Colormapping:        cm,
		NodeIDStringMapping: idToName,
		MaxX:                dims.X - 1,
		MaxY:                dims.Y - 1,
		MaxZ:                dims.Z - 1,
		Size:                size,
		OffsetX:             offset_x,
		OffsetY:             offset_y,
	}
}
// GetImagePos projects schema coordinates (x, y, z) onto 2D image
// coordinates using an isometric projection; tan30 and sqrt3div2 are
// package-level projection constants.
func (r *PartRenderer) GetImagePos(x, y, z float64) (float64, float64) {
	xpos := r.OffsetX + (r.Size * x) - (r.Size * z)
	ypos := r.OffsetY - (r.Size * tan30 * x) - (r.Size * tan30 * z) - (r.Size * sqrt3div2 * y)
	return xpos, ypos
}
// GetColorAtPos resolves the node at (x, y, z) to its mapped color.
// Returns nil for out-of-bounds coordinates or nodes with no color entry.
func (r *PartRenderer) GetColorAtPos(x, y, z int) *Color {
	if x < 0 || y < 0 || z < 0 || x > r.MaxX || y > r.MaxY || z > r.MaxZ {
		return nil
	}
	nodeID := int(r.Mapblock.NodeIDS[r.Mapblock.GetIndex(x, y, z)])
	return r.Colormapping[r.NodeIDStringMapping[nodeID]]
}
// ProbePosition searches for the first colored node along the viewing
// diagonal starting at (x, y, z), stepping one unit along +x, -y, +z
// per recursion until a color is found or the part's bounds are left,
// and records the hit as a Block for later drawing.
func (r *PartRenderer) ProbePosition(x, y, z int) {
	color := r.GetColorAtPos(x, y, z)
	if color != nil {
		block := Block{
			X:     x,
			Y:     y,
			Z:     z,
			Color: color,
			// Draw-order key for the back-to-front sort in
			// RenderSchemaPart.
			// NOTE(review): the x term scales by MaxX but the z term
			// only adds MaxZ — possibly "* r.MaxZ" was intended for
			// symmetry; confirm against rendering output.
			Order: y + ((r.MaxX - x) * r.MaxX) + ((r.MaxZ - z) + r.MaxZ),
		}
		r.Blocks = append(r.Blocks, &block)
		return
	}
	next_x := x + 1
	next_y := y - 1
	next_z := z + 1
	if next_x > r.MaxX || next_z > r.MaxZ || next_y < 0 {
		// mapblock ends
		return
	}
	r.ProbePosition(next_x, next_y, next_z)
}
// DrawBlock draws a single block as an isometric cube: three rhombi for
// the right, left and top faces, with the left face darkened and the
// top face lightened relative to the block's base color.
func (r *PartRenderer) DrawBlock(dc *gg.Context, block *Block) {
	x, y := r.GetImagePos(float64(block.X), float64(block.Y), float64(block.Z))
	radius := r.Size
	// right side: base color
	dc.MoveTo(radius+x, (radius*tan30)+y)
	dc.LineTo(x, (radius*sqrt3div2)+y)
	dc.LineTo(x, y)
	dc.LineTo(radius+x, -(radius*tan30)+y)
	dc.ClosePath()
	dc.SetRGB255(block.Color.Red, block.Color.Green, block.Color.Blue)
	dc.Fill()
	// left side: darkened by 20
	dc.MoveTo(x, (radius*sqrt3div2)+y)
	dc.LineTo(-radius+x, (radius*tan30)+y)
	dc.LineTo(-radius+x, -(radius*tan30)+y)
	dc.LineTo(x, y)
	dc.ClosePath()
	AdjustAndFill(dc, block.Color.Red, block.Color.Green, block.Color.Blue, -20)
	dc.Fill()
	// top side: lightened by 20
	dc.MoveTo(-radius+x, -(radius*tan30)+y)
	dc.LineTo(x, -(radius*sqrt3div2)+y)
	dc.LineTo(radius+x, -(radius*tan30)+y)
	dc.LineTo(x, y)
	dc.ClosePath()
	AdjustAndFill(dc, block.Color.Red, block.Color.Green, block.Color.Blue, 20)
	dc.Fill()
}
// RenderSchemaPart probes the three visible faces of the schema part
// (right, left and top), then draws the collected blocks sorted by
// their Order key so nearer blocks paint over farther ones.
func (r *PartRenderer) RenderSchemaPart(dc *gg.Context) error {
	for y := 0; y < r.MaxY; y++ {
		// right side
		for x := r.MaxX; x >= 1; x-- {
			r.ProbePosition(x, y, 0)
		}
		// left side
		for z := r.MaxZ; z >= 0; z-- {
			r.ProbePosition(0, y, z)
		}
	}
	// top side (the y == MaxY layer, covering the whole x/z plane)
	for z := r.MaxZ; z >= 0; z-- {
		for x := r.MaxX; x >= 0; x-- {
			r.ProbePosition(x, r.MaxY, z)
		}
	}
	sort.Slice(r.Blocks, func(i int, j int) bool {
		return r.Blocks[i].Order < r.Blocks[j].Order
	})
	for _, block := range r.Blocks {
		r.DrawBlock(dc, block)
	}
	return nil
}
package backoff
import (
"math"
"time"
)
// Algorithm defines a function that calculates a time.Duration based on
// the given retry attempt number.
type Algorithm func(attempt uint) time.Duration
// Incremental creates an Algorithm that adds the given increment to the
// initial duration once per attempt: initial + attempt*increment.
func Incremental(initial, increment time.Duration) Algorithm {
	return func(attempt uint) time.Duration {
		extra := increment * time.Duration(attempt)
		return initial + extra
	}
}
// Linear creates an Algorithm that scales the factor duration linearly
// with the attempt number: attempt*factor.
func Linear(factor time.Duration) Algorithm {
	return func(attempt uint) time.Duration {
		n := time.Duration(attempt)
		return n * factor
	}
}
// Exponential creates an Algorithm that grows the delay geometrically:
// factor * base^attempt (the power is truncated to an integer before
// the multiply, matching the original behavior).
func Exponential(factor time.Duration, base float64) Algorithm {
	return func(attempt uint) time.Duration {
		scale := math.Pow(base, float64(attempt))
		return factor * time.Duration(scale)
	}
}
// BinaryExponential creates a Algorithm that multiplies the factor
// duration by an exponentially increasing factor for each attempt, where
// the factor is calculated as `2` raised to the attempt number
// (2^attempt). Equivalent to Exponential(factor, 2).
func BinaryExponential(factor time.Duration) Algorithm {
	return Exponential(factor, 2)
}
// Fibonacci creates a Algorithm that multiplies the factor duration by
// an increasing factor for each attempt, where the factor is the Nth
// number in the Fibonacci sequence.
func Fibonacci(factor time.Duration) Algorithm {
	return func(attempt uint) time.Duration {
		return factor * time.Duration(fibonacciNumber(attempt))
	}
}
// fibonacciNumber calculates the Fibonacci sequence number for the given
// sequence position (fib(0) = 0, fib(1) = 1).
//
// The previous naive double recursion ran in exponential time, which is
// noticeable once attempt counts grow; this iterative version runs in
// O(n) time and constant space while returning identical values.
func fibonacciNumber(n uint) uint {
	var prev, cur uint = 0, 1
	for i := uint(0); i < n; i++ {
		prev, cur = cur, prev+cur
	}
	return prev
}
package osc
import (
"encoding/json"
)
// HealthCheck Information about the health check configuration.
// Path is the only optional field (pointer, omitted from JSON when nil);
// the remaining fields are required by the NewHealthCheck constructor.
type HealthCheck struct {
	// The number of seconds between two pings (between `5` and `600` both included).
	CheckInterval int32 `json:"CheckInterval"`
	// The number of consecutive successful pings before considering the VM as healthy (between `2` and `10` both included).
	HealthyThreshold int32 `json:"HealthyThreshold"`
	// If you use the HTTP or HTTPS protocols, the ping path.
	Path *string `json:"Path,omitempty"`
	// The port number (between `1` and `65535`, both included).
	Port int32 `json:"Port"`
	// The protocol for the URL of the VM (`HTTP` \\| `HTTPS` \\| `TCP` \\| `SSL`).
	Protocol string `json:"Protocol"`
	// The maximum waiting time for a response before considering the VM as unhealthy, in seconds (between `2` and `60` both included).
	Timeout int32 `json:"Timeout"`
	// The number of consecutive failed pings before considering the VM as unhealthy (between `2` and `10` both included).
	UnhealthyThreshold int32 `json:"UnhealthyThreshold"`
}
// NewHealthCheck instantiates a new HealthCheck object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewHealthCheck(checkInterval int32, healthyThreshold int32, port int32, protocol string, timeout int32, unhealthyThreshold int32) *HealthCheck {
	return &HealthCheck{
		CheckInterval:      checkInterval,
		HealthyThreshold:   healthyThreshold,
		Port:               port,
		Protocol:           protocol,
		Timeout:            timeout,
		UnhealthyThreshold: unhealthyThreshold,
	}
}
// NewHealthCheckWithDefaults instantiates a new HealthCheck object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewHealthCheckWithDefaults() *HealthCheck {
	return &HealthCheck{}
}
// GetCheckInterval returns the CheckInterval field value, or the zero
// value when the receiver is nil.
func (o *HealthCheck) GetCheckInterval() int32 {
	if o == nil {
		return 0
	}
	return o.CheckInterval
}

// GetCheckIntervalOk returns a pointer to the CheckInterval field value
// and true; on a nil receiver it returns nil and false.
func (o *HealthCheck) GetCheckIntervalOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.CheckInterval, true
}

// SetCheckInterval sets the CheckInterval field value.
func (o *HealthCheck) SetCheckInterval(v int32) {
	o.CheckInterval = v
}
// GetHealthyThreshold returns the HealthyThreshold field value, or the
// zero value when the receiver is nil.
func (o *HealthCheck) GetHealthyThreshold() int32 {
	if o == nil {
		return 0
	}
	return o.HealthyThreshold
}

// GetHealthyThresholdOk returns a pointer to the HealthyThreshold field
// value and true; on a nil receiver it returns nil and false.
func (o *HealthCheck) GetHealthyThresholdOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.HealthyThreshold, true
}

// SetHealthyThreshold sets the HealthyThreshold field value.
func (o *HealthCheck) SetHealthyThreshold(v int32) {
	o.HealthyThreshold = v
}
// GetPath returns the Path field value if set, zero value otherwise.
func (o *HealthCheck) GetPath() string {
	if o == nil || o.Path == nil {
		return ""
	}
	return *o.Path
}

// GetPathOk returns the Path pointer and true when set; nil and false
// otherwise.
func (o *HealthCheck) GetPathOk() (*string, bool) {
	if o == nil || o.Path == nil {
		return nil, false
	}
	return o.Path, true
}

// HasPath reports whether the optional Path field has been set.
func (o *HealthCheck) HasPath() bool {
	return o != nil && o.Path != nil
}

// SetPath stores a copy of v and points the Path field at it.
func (o *HealthCheck) SetPath(v string) {
	o.Path = &v
}
// GetPort returns the Port field value, or the zero value when the
// receiver is nil.
func (o *HealthCheck) GetPort() int32 {
	if o == nil {
		return 0
	}
	return o.Port
}

// GetPortOk returns a pointer to the Port field value and true; on a
// nil receiver it returns nil and false.
func (o *HealthCheck) GetPortOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Port, true
}

// SetPort sets the Port field value.
func (o *HealthCheck) SetPort(v int32) {
	o.Port = v
}
// GetProtocol returns the Protocol field value, or the empty string
// when the receiver is nil.
func (o *HealthCheck) GetProtocol() string {
	if o == nil {
		return ""
	}
	return o.Protocol
}

// GetProtocolOk returns a pointer to the Protocol field value and true;
// on a nil receiver it returns nil and false.
func (o *HealthCheck) GetProtocolOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Protocol, true
}

// SetProtocol sets the Protocol field value.
func (o *HealthCheck) SetProtocol(v string) {
	o.Protocol = v
}
// GetTimeout returns the Timeout field value, or the zero value when
// the receiver is nil.
func (o *HealthCheck) GetTimeout() int32 {
	if o == nil {
		return 0
	}
	return o.Timeout
}

// GetTimeoutOk returns a pointer to the Timeout field value and true;
// on a nil receiver it returns nil and false.
func (o *HealthCheck) GetTimeoutOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Timeout, true
}

// SetTimeout sets the Timeout field value.
func (o *HealthCheck) SetTimeout(v int32) {
	o.Timeout = v
}
// GetUnhealthyThreshold returns the UnhealthyThreshold field value, or
// the zero value when the receiver is nil.
func (o *HealthCheck) GetUnhealthyThreshold() int32 {
	if o == nil {
		return 0
	}
	return o.UnhealthyThreshold
}

// GetUnhealthyThresholdOk returns a pointer to the UnhealthyThreshold
// field value and true; on a nil receiver it returns nil and false.
func (o *HealthCheck) GetUnhealthyThresholdOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.UnhealthyThreshold, true
}

// SetUnhealthyThreshold sets the UnhealthyThreshold field value.
func (o *HealthCheck) SetUnhealthyThreshold(v int32) {
	o.UnhealthyThreshold = v
}
// MarshalJSON serializes the HealthCheck to JSON. The required fields
// are always emitted; the optional Path is included only when set.
// (The generated `if true { ... }` wrappers were dead conditionals and
// have been folded into a single map literal — same output.)
func (o HealthCheck) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"CheckInterval":      o.CheckInterval,
		"HealthyThreshold":   o.HealthyThreshold,
		"Port":               o.Port,
		"Protocol":           o.Protocol,
		"Timeout":            o.Timeout,
		"UnhealthyThreshold": o.UnhealthyThreshold,
	}
	if o.Path != nil {
		toSerialize["Path"] = o.Path
	}
	return json.Marshal(toSerialize)
}
// NullableHealthCheck wraps a HealthCheck pointer together with a flag
// recording whether the value was explicitly set, so an explicit JSON
// null can be told apart from an absent field.
type NullableHealthCheck struct {
	value *HealthCheck
	isSet bool
}

// Get returns the wrapped value (possibly nil).
func (v NullableHealthCheck) Get() *HealthCheck {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableHealthCheck) Set(val *HealthCheck) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableHealthCheck) IsSet() bool {
	return v.isSet
}

// Unset clears the wrapped value and the set flag.
func (v *NullableHealthCheck) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableHealthCheck wraps val and marks it as set.
func NewNullableHealthCheck(val *HealthCheck) *NullableHealthCheck {
	return &NullableHealthCheck{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value (nil encodes as JSON null).
func (v NullableHealthCheck) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks it as set.
func (v *NullableHealthCheck) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package plan
import (
"github.com/liquidata-inc/go-mysql-server/sql"
"github.com/liquidata-inc/go-mysql-server/sql/expression"
)
// TransformUp applies a transformation function to the given tree from the
// bottom up.
func TransformUp(node sql.Node, f sql.TransformNodeFunc) (sql.Node, error) {
if o, ok := node.(sql.OpaqueNode); ok && o.Opaque() {
return f(node)
}
children := node.Children()
if len(children) == 0 {
return f(node)
}
newChildren := make([]sql.Node, len(children))
for i, c := range children {
c, err := TransformUp(c, f)
if err != nil {
return nil, err
}
newChildren[i] = c
}
node, err := node.WithChildren(newChildren...)
if err != nil {
return nil, err
}
return f(node)
}
// TransformExpressionsUp applies a transformation function to all expressions
// on the given tree from the bottom up.
func TransformExpressionsUpWithNode(node sql.Node, f expression.TransformExprWithNodeFunc) (sql.Node, error) {
if o, ok := node.(sql.OpaqueNode); ok && o.Opaque() {
return TransformExpressionsWithNode(node, f)
}
children := node.Children()
if len(children) == 0 {
return TransformExpressionsWithNode(node, f)
}
newChildren := make([]sql.Node, len(children))
for i, c := range children {
c, err := TransformExpressionsUpWithNode(c, f)
if err != nil {
return nil, err
}
newChildren[i] = c
}
node, err := node.WithChildren(newChildren...)
if err != nil {
return nil, err
}
return TransformExpressionsWithNode(node, f)
}
// TransformExpressionsUp applies a transformation function to all expressions
// on the given tree from the bottom up.
func TransformExpressionsUp(node sql.Node, f sql.TransformExprFunc) (sql.Node, error) {
if o, ok := node.(sql.OpaqueNode); ok && o.Opaque() {
return TransformExpressions(node, f)
}
children := node.Children()
if len(children) == 0 {
return TransformExpressions(node, f)
}
newChildren := make([]sql.Node, len(children))
for i, c := range children {
c, err := TransformExpressionsUp(c, f)
if err != nil {
return nil, err
}
newChildren[i] = c
}
node, err := node.WithChildren(newChildren...)
if err != nil {
return nil, err
}
return TransformExpressions(node, f)
}
// TransformExpressions applies a transformation function to all expressions
// on the given node.
func TransformExpressions(node sql.Node, f sql.TransformExprFunc) (sql.Node, error) {
e, ok := node.(sql.Expressioner)
if !ok {
return node, nil
}
exprs := e.Expressions()
if len(exprs) == 0 {
return node, nil
}
newExprs := make([]sql.Expression, len(exprs))
for i, e := range exprs {
e, err := expression.TransformUp(e, f)
if err != nil {
return nil, err
}
newExprs[i] = e
}
return e.WithExpressions(newExprs...)
}
// TransformExpressions applies a transformation function to all expressions
// on the given node.
func TransformExpressionsWithNode(n sql.Node, f expression.TransformExprWithNodeFunc) (sql.Node, error) {
e, ok := n.(sql.Expressioner)
if !ok {
return n, nil
}
exprs := e.Expressions()
if len(exprs) == 0 {
return n, nil
}
newExprs := make([]sql.Expression, len(exprs))
for i, e := range exprs {
e, err := expression.TransformUpWithNode(n, e, f)
if err != nil {
return nil, err
}
newExprs[i] = e
}
return e.WithExpressions(newExprs...)
} | sql/plan/transform.go | 0.628635 | 0.490785 | transform.go | starcoder |
package matchers
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/types"
)
func ContainLines(expected ...interface{}) types.GomegaMatcher {
return &containLinesMatcher{
expected: expected,
}
}
type containLinesMatcher struct {
expected []interface{}
}
func (matcher *containLinesMatcher) Match(actual interface{}) (success bool, err error) {
_, ok := actual.(string)
if !ok {
_, ok := actual.(fmt.Stringer)
if !ok {
return false, fmt.Errorf("ContainLinesMatcher requires a string or fmt.Stringer. Got actual: %s", format.Object(actual, 1))
}
}
actualLines := matcher.lines(actual)
for currentActualLineIndex := 0; currentActualLineIndex < len(actualLines); currentActualLineIndex++ {
currentActualLine := actualLines[currentActualLineIndex]
currentExpectedLine := matcher.expected[currentActualLineIndex]
match, err := matcher.compare(currentActualLine, currentExpectedLine)
if err != nil {
return false, err
}
if match {
if currentActualLineIndex+1 == len(matcher.expected) {
return true, nil
}
} else {
if len(actualLines) > 1 {
actualLines = actualLines[1:]
currentActualLineIndex = -1
}
}
}
return false, nil
}
func (matcher *containLinesMatcher) compare(actual string, expected interface{}) (bool, error) {
if m, ok := expected.(types.GomegaMatcher); ok {
match, err := m.Match(actual)
if err != nil {
return false, err
}
return match, nil
}
return reflect.DeepEqual(actual, expected), nil
}
func (matcher *containLinesMatcher) lines(actual interface{}) []string {
raw, ok := actual.(string)
if !ok {
raw = actual.(fmt.Stringer).String()
}
re := regexp.MustCompile(`^\[[a-z]+\]\s`)
var lines []string
for _, line := range strings.Split(raw, "\n") {
lines = append(lines, re.ReplaceAllString(line, ""))
}
return lines
}
func (matcher *containLinesMatcher) FailureMessage(actual interface{}) (message string) {
actualLines := "\n" + strings.Join(matcher.lines(actual), "\n")
missing := matcher.linesMatching(actual, false)
if len(missing) > 0 {
return fmt.Sprintf("Expected\n%s\nto contain lines\n%s\nbut missing\n%s", format.Object(actualLines, 1), format.Object(matcher.expected, 1), format.Object(missing, 1))
}
return fmt.Sprintf("Expected\n%s\nto contain lines\n%s\nall lines appear, but may be misordered", format.Object(actualLines, 1), format.Object(matcher.expected, 1))
}
func (matcher *containLinesMatcher) NegatedFailureMessage(actual interface{}) (message string) {
actualLines := "\n" + strings.Join(matcher.lines(actual), "\n")
missing := matcher.linesMatching(actual, true)
return fmt.Sprintf("Expected\n%s\nnot to contain lines\n%s\nbut includes\n%s", format.Object(actualLines, 1), format.Object(matcher.expected, 1), format.Object(missing, 1))
}
func (matcher *containLinesMatcher) linesMatching(actual interface{}, matching bool) []interface{} {
var set []interface{}
for _, expected := range matcher.expected {
var match bool
for _, line := range matcher.lines(actual) {
if ok, _ := matcher.compare(line, expected); ok {
match = true
}
}
if match == matching {
set = append(set, expected)
}
}
return set
} | vendor/github.com/cloudfoundry/switchblade/matchers/contain_lines.go | 0.705988 | 0.407304 | contain_lines.go | starcoder |
package main
import (
"math"
"math/rand"
)
type (
Individual struct {
genome Genome
location Coord
birthPlace Coord
age uint16
wasBlocked bool // will be true if this individual was not able to do an action last step because it was blocked
brain *NeuralNet
}
// Actions encodes the actions taken by an individual. The offset corresponds to the Action value,
// and the float value at the index says how much an action is taken
Actions = []float64
)
func createIndividual(world *World) *Individual {
genome := makeRandomGenome(rand.Intn(20) + 2)
brain, err := genome.buildNet()
if err == TooSimple {
return createIndividual(world)
}
if err != nil {
panic(err)
}
place := world.randomCoord()
peep := &Individual{
genome: genome,
location: place,
birthPlace: place,
age: 0,
brain: brain,
}
return peep
}
func (i *Individual) step(world *World) Actions {
// First we build the sensor inputs that the brains uses into a slice
inputs := make([]float64, 0, len(i.brain.Sensors))
for _, sensor := range i.brain.Sensors {
value := getSensorValue(i, world, sensor)
inputs = append(inputs, value)
}
actions := make(Actions, NUM_ACTIONS)
var neuronFirings []*Neuron
// this is the function that will be called whenever there is a signal.
// The recipient of the signal can be a neuron, or it can be an action sink
handleFiring := func(to Sink, v float64) {
switch dst := to.(type) {
case ActionSink:
actions[dst.action] += v
case *Neuron:
dst.value += v
for dst.value > 1 {
// a neuron will keep firing until it gets it's internal state under 1
neuronFirings = append(neuronFirings, dst)
dst.value -= 1
}
}
}
// Next step is to fire the connections to the sensor inputs
for _, conn := range i.brain.Connections {
sensor, ok := conn.From.(SensorInput)
if !ok {
continue
}
srcValue := inputs[sensor.idx]
handleFiring(conn.To, conn.multiplier*srcValue)
}
// If neurons received signals in the last step, we could now have new signals that we need to handle
// Since the neural net is not an acyclic graph, we limit the number of signals we allow per step and individual
// We could deal with this in other ways, this method was chosen mostly because it is simple
iterLeft := 10
for len(neuronFirings) > 0 && iterLeft > 0 {
iterLeft--
current := neuronFirings[0]
neuronFirings = neuronFirings[1:]
for _, conn := range i.brain.Connections {
if conn.From != current {
continue
}
handleFiring(conn.To, conn.multiplier*1)
}
}
i.age++
for idx, action := range actions {
actions[idx] = math.Tanh(action)
}
return actions
}
func plusMinusOne() int {
if rand.Intn(2) == 0 {
return -1
}
return 1
}
func (i *Individual) clone() *Individual {
clone := *i
clone.age = 0
var mutant bool
ready := false
for !ready {
clone.genome, mutant = clone.genome.clone()
if mutant {
net, err := clone.genome.buildNet()
if err != nil {
if err != TooSimple {
panic(err)
}
} else {
ready = true
}
clone.brain = net
}
}
return &clone
}
func getSensorValue(i *Individual, w *World, s Sensor) float64 {
switch s {
case LOC_X:
// map current X location to value between 0.0..1.0
return float64(i.location.X) / float64(w.XSize)
case LOC_Y:
// map current Y location to value between 0.0..1.0
return float64(i.location.Y) / float64(w.YSize)
case BOUNDARY_DIST:
// Finds the closest boundary, compares that to the max possible dist
// to a boundary from the center, and converts that linearly to the
// sensor range 0.0..1.0
x := getSensorValue(i, w, BOUNDARY_DIST_X)
y := getSensorValue(i, w, BOUNDARY_DIST_Y)
return math.Min(x, y)
case BOUNDARY_DIST_X:
maxDist := float64(w.XSize / 2)
return float64(min(i.location.X, w.XSize-i.location.X-1)) / maxDist
case BOUNDARY_DIST_Y:
maxDist := float64(w.YSize / 2)
return float64(min(i.location.Y, w.YSize-i.location.Y-1)) / maxDist
case AGE:
// sets the age to a normalized value between 0 and 1
return float64(i.age) / float64(w.StepsPerGeneration)
case BLOCK:
if i.wasBlocked {
return 1
}
return 0
}
panic("oh noes")
}
func min(a, b int) int {
if a < b {
return a
}
return b
} | src/individual.go | 0.753013 | 0.439687 | individual.go | starcoder |
package validator
// MessageMap maps a validation rule name (for example "required" or
// "between.numeric") to its default error message template, for use as the
// error message source of the ValidateStruct function. Tokens such as
// :attribute, :other, :min, :max, :value and :values are placeholders in
// the templates.
var MessageMap = map[string]string{
	"accepted": "The :attribute must be accepted.",
	"activeUrl": "The :attribute is not a valid URL.",
	"after": "The :attribute must be a date after :date.",
	"afterOrEqual": "The :attribute must be a date after or equal to :date.",
	"alpha": "The :attribute may only contain letters.",
	"alphaDash": "The :attribute may only contain letters, numbers, dashes and underscores.",
	"alphaNum": "The :attribute may only contain letters and numbers.",
	"array": "The :attribute must be an array.",
	"before": "The :attribute must be a date before :date.",
	"beforeOrEqual": "The :attribute must be a date before or equal to :date.",
	"between.numeric": "The :attribute must be between :min and :max.",
	"between.file": "The :attribute must be between :min and :max kilobytes.",
	"between.string": "The :attribute must be between :min and :max characters.",
	"between.array": "The :attribute must have between :min and :max items.",
	"boolean": "The :attribute field must be true or false.",
	"confirmed": "The :attribute confirmation does not match.",
	"date": "The :attribute is not a valid date.",
	"dateFormat": "The :attribute does not match the format :format.",
	"different": "The :attribute and :other must be different.",
	"digits": "The :attribute must be :digits digits.",
	"digitsBetween": "The :attribute must be between :min and :max digits.",
	"dimensions": "The :attribute has invalid image dimensions.",
	"distinct": "The :attribute field has a duplicate value.",
	"email": "The :attribute must be a valid email address.",
	"exists": "The selected :attribute is invalid.",
	"file": "The :attribute must be a file.",
	"filled": "The :attribute field must have a value.",
	"gt.numeric": "The :attribute must be greater than :value.",
	"gt.file": "The :attribute must be greater than :value kilobytes.",
	"gt.string": "The :attribute must be greater than :value characters.",
	"gt.array": "The :attribute must have greater than :value items.",
	"gte.numeric": "The :attribute must be greater than or equal :value.",
	"gte.file": "The :attribute must be greater than or equal :value kilobytes.",
	"gte.string": "The :attribute must be greater than or equal :value characters.",
	"gte.array": "The :attribute must have :value items or more.",
	"image": "The :attribute must be an image.",
	"in": "The selected :attribute is invalid.",
	"inArray": "The :attribute field does not exist in :other.",
	"integer": "The :attribute must be an integer.",
	"ip": "The :attribute must be a valid IP address.",
	"ipv4": "The :attribute must be a valid IPv4 address.",
	"ipv6": "The :attribute must be a valid IPv6 address.",
	"json": "The :attribute must be a valid JSON string.",
	"lt.numeric": "The :attribute must be less than :value.",
	"lt.file": "The :attribute must be less than :value kilobytes.",
	"lt.string": "The :attribute must be less than :value characters.",
	"lt.array": "The :attribute must have less than :value items.",
	"lte.numeric": "The :attribute must be less than or equal :value.",
	"lte.file": "The :attribute must be less than or equal :value kilobytes.",
	"lte.string": "The :attribute must be less than or equal :value characters.",
	"lte.array": "The :attribute must not have more than :value items.",
	"max.numeric": "The :attribute may not be greater than :max.",
	"max.file": "The :attribute may not be greater than :max kilobytes.",
	"max.string": "The :attribute may not be greater than :max characters.",
	"max.array": "The :attribute may not have more than :max items.",
	"mimes": "The :attribute must be a file of type: :values.",
	"mimetypes": "The :attribute must be a file of type: :values.",
	"min.numeric": "The :attribute must be at least :min.",
	"min.file": "The :attribute must be at least :min kilobytes.",
	"min.string": "The :attribute must be at least :min characters.",
	"min.array": "The :attribute must have at least :min items.",
	"notIn": "The selected :attribute is invalid.",
	"notRegex": "The :attribute format is invalid.",
	"numeric": "The :attribute must be a number.",
	"present": "The :attribute field must be present.",
	"regex": "The :attribute format is invalid.",
	"required": "The :attribute field is required.",
	"requiredIf": "The :attribute field is required when :other is :value.",
	"requiredUnless": "The :attribute field is required unless :other is in :values.",
	"requiredWith": "The :attribute field is required when :values is present.",
	"requiredWithAll": "The :attribute field is required when :values is present.",
	"requiredWithout": "The :attribute field is required when :values is not present.",
	"requiredWithoutAll": "The :attribute field is required when none of :values are present.",
	"same": "The :attribute and :other must match.",
	"size.numeric": "The :attribute must be :size.",
	"size.file": "The :attribute must be :size kilobytes.",
	"size.string": "The :attribute must be :size characters.",
	"size.array": "The :attribute must contain :size items.",
	"string": "The :attribute must be a string.",
	"timezone": "The :attribute must be a valid zone.",
	"unique": "The :attribute has already been taken.",
	"uploaded": "The :attribute failed to upload.",
	"url": "The :attribute format is invalid.",
}
package mbr
import (
"bytes"
"encoding/binary"
"io"
"strconv"
"github.com/masahiro331/go-vmdk-parser/pkg/disk/types"
"golang.org/x/xerrors"
)
const (
	// SIGNATURE is the 2-byte boot signature (0xAA55) that terminates a
	// valid master boot record.
	SIGNATURE = 0xAA55
	// Sector is the sector size in bytes; the MBR always occupies exactly
	// one 512-byte sector.
	Sector = 512
)
/*
# Master Boot Record Spec
https://uefi.org/sites/default/files/resources/UEFI%20Spec%202.8B%20May%202020.pdf
p. 112
The Master Boot Record is always 512 bytes.
+------------------------+------+
| Name                   | Byte |
+------------------------+------+
| Bootstrap Code Area    | 440  |
| UniqueMBRDiskSignature | 4    |
| Unknown                | 2    |
| Partition 1            | 16   |
| Partition 2            | 16   |
| Partition 3            | 16   |
| Partition 4            | 16   |
| Boot Record Signature  | 2    |
+------------------------+------+
# Partition Spec
+--------------------+------+----------------------------------------------------------+
| Name               | Byte | Description                                              |
+--------------------+------+----------------------------------------------------------+
| Boot Indicator     | 1    | Boot partition flag                                      |
| Starting CHS value | 3    | Starting sector of the partition in Cylinder Head Sector |
| Partition type     | 1    | File system used by the partition                        |
| Ending CHS values  | 3    | Ending sector of the partition in Cylinder Head Sector   |
| Starting Sector    | 4    | Starting sector of the active partition                  |
| Partition Size     | 4    | Represents partition size in sectors                     |
+--------------------+------+----------------------------------------------------------+
ref: https://www.ijais.org/research/volume10/number8/sadi-2016-ijais-451541.pdf
*/
// InvalidSignature is returned by NewMasterBootRecord when the sector does
// not end with the mandatory 0xAA55 boot signature. (By Go convention this
// sentinel would be named ErrInvalidSignature; renaming would break callers.)
var InvalidSignature = xerrors.New("Invalid master boot record signature")
// MasterBootRecord is the in-memory representation of a 512-byte MBR
// sector, with fields declared in on-disk order (see layout table above).
type MasterBootRecord struct {
	BootCodeArea           [440]byte    // bootstrap machine code
	UniqueMBRDiskSignature [4]byte      // 4-byte disk signature
	Unknown                [2]byte      // purpose unknown / reserved
	Partitions             [4]Partition // the four primary partition entries
	Signature              uint16       // must equal SIGNATURE (0xAA55)
}
// Partition is a single 16-byte MBR partition table entry; it satisfies the
// types.Partition interface.
type Partition struct {
	Boot        bool    // boot indicator: true when marked bootable
	StartCHS    [3]byte // start address in Cylinder/Head/Sector form
	Type        byte    // file-system / partition type code
	EndCHS      [3]byte // end address in Cylinder/Head/Sector form
	StartSector uint32  // starting sector of the partition
	Size        uint32  // partition size in sectors
	index       int     // position (0-3) of this entry in the partition table
}
// GetPartitions returns the four primary partition entries wrapped in the
// generic types.Partition interface.
func (m *MasterBootRecord) GetPartitions() []types.Partition {
	partitions := make([]types.Partition, 0, len(m.Partitions))
	for _, p := range m.Partitions {
		partitions = append(partitions, p)
	}
	return partitions
}
// Index returns the zero-based position of this entry in the partition table.
func (p Partition) Index() int {
	return p.index
}

// Name returns a printable name for the partition (currently its index).
func (p Partition) Name() string {
	// TODO: add extension with type
	return strconv.Itoa(int(p.index))
}

// GetType returns the one-byte partition type code.
func (p Partition) GetType() []byte {
	return []byte{p.Type}
}

// GetStartSector returns the starting sector of the partition.
func (p Partition) GetStartSector() uint64 {
	return uint64(p.StartSector)
}

// Bootable reports whether the boot indicator flag is set.
func (p Partition) Bootable() bool {
	return p.Boot
}

// GetSize returns the partition size in sectors.
func (p Partition) GetSize() uint64 {
	return uint64(p.Size)
}
// NewMasterBootRecord reads exactly one 512-byte sector from reader and
// parses it as a master boot record. It returns InvalidSignature when the
// sector does not end with the mandatory 0xAA55 boot signature.
func NewMasterBootRecord(reader io.Reader) (*MasterBootRecord, error) {
	buf := make([]byte, Sector)
	// io.ReadFull keeps reading until the whole sector is buffered; a single
	// reader.Read call may legally return fewer than 512 bytes.
	size, err := io.ReadFull(reader, buf)
	if err != nil {
		return nil, xerrors.Errorf("failed to read mbr error: %w", err)
	}
	if size != Sector {
		return nil, xerrors.New("binary size error")
	}
	r := bytes.NewReader(buf)
	var mbr MasterBootRecord
	// Read the fixed header in on-disk order (see the layout table above):
	// 440 bytes of boot code, then the 4-byte disk signature, then 2 unknown
	// bytes. The previous code read the signature from offset 0, which does
	// not match the MBR layout (partition offsets were coincidentally right).
	if err := binary.Read(r, binary.LittleEndian, &mbr.BootCodeArea); err != nil {
		return nil, xerrors.Errorf("failed to parse boot code: %w", err)
	}
	if err := binary.Read(r, binary.LittleEndian, &mbr.UniqueMBRDiskSignature); err != nil {
		return nil, xerrors.Errorf("failed to parse unique MBR disk signature: %w", err)
	}
	if err := binary.Read(r, binary.LittleEndian, &mbr.Unknown); err != nil {
		return nil, xerrors.Errorf("failed to parse unknown: %w", err)
	}
	// The four 16-byte partition entries start at offset 446.
	for i := 0; i < len(mbr.Partitions); i++ {
		if err := binary.Read(r, binary.LittleEndian, &mbr.Partitions[i].Boot); err != nil {
			return nil, xerrors.Errorf("failed to parse partition[%d] Boot: %w", i, err)
		}
		if err := binary.Read(r, binary.LittleEndian, &mbr.Partitions[i].StartCHS); err != nil {
			return nil, xerrors.Errorf("failed to parse partition[%d] StartCHS: %w", i, err)
		}
		if err := binary.Read(r, binary.LittleEndian, &mbr.Partitions[i].Type); err != nil {
			return nil, xerrors.Errorf("failed to parse partition[%d] Type: %w", i, err)
		}
		if err := binary.Read(r, binary.LittleEndian, &mbr.Partitions[i].EndCHS); err != nil {
			return nil, xerrors.Errorf("failed to parse partition[%d] EndCHS: %w", i, err)
		}
		if err := binary.Read(r, binary.LittleEndian, &mbr.Partitions[i].StartSector); err != nil {
			return nil, xerrors.Errorf("failed to parse partition[%d] StartSector: %w", i, err)
		}
		if err := binary.Read(r, binary.LittleEndian, &mbr.Partitions[i].Size); err != nil {
			return nil, xerrors.Errorf("failed to parse partition[%d] Size: %w", i, err)
		}
		mbr.Partitions[i].index = i
	}
	if err := binary.Read(r, binary.LittleEndian, &mbr.Signature); err != nil {
		return nil, xerrors.Errorf("failed to parse signature: %w", err)
	}
	if mbr.Signature != SIGNATURE {
		return nil, InvalidSignature
	}
	return &mbr, nil
}
// IsSupported reports whether the parser can handle this partition.
// MBR partition entries are currently always considered supported.
func (p Partition) IsSupported() bool {
	return true
}
package dep
import (
"fmt"
"io/ioutil"
"github.com/chewxy/lingo"
"github.com/chewxy/lingo/treebank"
)
// Performance is a tuple that holds performance information from a training session
type Performance struct {
	Iter int     // which training iteration is this?
	UAS  float64 // Unlabelled Attachment Score
	LAS  float64 // Labeled Attachment Score
	UEM  float64 // Unlabelled Exact Match
	Root float64 // Correct Roots Ratio
}

// String renders the scores as a multi-line, human-readable report.
func (p Performance) String() string {
	return fmt.Sprintf(
		"EPO: %d\nUAS: %.5f\nLAS: %.5f\nUEM: %.5f\nROO: %.5f",
		p.Iter, p.UAS, p.LAS, p.UEM, p.Root,
	)
}
// performance evaluation related code goes here
// Evaluate compares predicted trees with the gold standard trees and returns a Performance. It panics if the number of predicted trees and the number of gold trees aren't the same
//
// A predicted/gold pair whose sentences differ in length contributes all of
// its gold arcs to the denominator (i.e. scores as entirely wrong) and is
// otherwise skipped. Index 0 of each AnnotatedSentence is excluded from arc
// scoring — presumably a ROOT placeholder; verify against lingo.
// NOTE(review): the returned Performance leaves Iter at its zero value;
// callers are expected to fill it in.
func Evaluate(predictedTrees, goldTrees []*lingo.Dependency) Performance {
	if len(predictedTrees) != len(goldTrees) {
		panic(fmt.Sprintf("%d predicted trees; %d gold trees. Unable to compare", len(predictedTrees), len(goldTrees)))
	}
	var correctLabels, correctHeads, correctTrees, correctRoot, sumArcs float64
	var check int
	for i, tr := range predictedTrees {
		gTr := goldTrees[i]
		if len(tr.AnnotatedSentence) != len(gTr.AnnotatedSentence) {
			// Length mismatch: count the gold arcs as attempted-and-missed.
			sumArcs += float64(gTr.N())
			// log.Printf("WARNING: %q and %q do not have the same length", tr, gTr)
			continue
		}
		var nCorrectHead int
		for j, a := range tr.AnnotatedSentence[1:] {
			b := gTr.AnnotatedSentence[j+1]
			if a.HeadID() == b.HeadID() {
				correctHeads++
				nCorrectHead++
			}
			if a.DependencyType == b.DependencyType {
				correctLabels++
			}
			sumArcs++
		}
		// Exact match: every head in the tree was predicted correctly.
		if nCorrectHead == gTr.N() {
			correctTrees++
		}
		if tr.Root() == gTr.Root() {
			correctRoot++
		}
		// check 5 per iteration — debug logging limited to the first 5 trees.
		if check < 5 {
			logf("predictedHeads: \n%v\n%v\n", tr.Heads(), gTr.Heads())
			logf("Ns: %v | %v || Correct: %v", tr.N(), gTr.N(), nCorrectHead)
			check++
		}
	}
	uas := correctHeads / sumArcs
	las := correctLabels / sumArcs
	uem := correctTrees / float64(len(predictedTrees))
	roo := correctRoot / float64(len(predictedTrees))
	return Performance{UAS: uas, LAS: las, UEM: uem, Root: roo}
}
// crossValidate parses the given sentences with the trainer's current model
// and scores the parses against gold trees derived from the same data.
func (t *Trainer) crossValidate(st []treebank.SentenceTag) Performance {
	preds := t.predMany(st)
	golds := make([]*lingo.Dependency, len(st))
	for i, s := range st {
		golds[i] = s.Dependency(t)
	}
	return Evaluate(preds, golds)
}
// predMany parses every sentence with the current model. A parse failure
// dumps the computation graph to fullGraph.dot for debugging and panics.
func (t *Trainer) predMany(sentenceTags []treebank.SentenceTag) []*lingo.Dependency {
	retVal := make([]*lingo.Dependency, len(sentenceTags))
	for i, st := range sentenceTags {
		dep, err := t.pred(st.AnnotatedSentence(t))
		if err != nil {
			// Best-effort debug dump; the write error is deliberately ignored
			// because we are about to panic anyway.
			ioutil.WriteFile("fullGraph.dot", []byte(t.nn.g.ToDot()), 0644)
			panic(fmt.Sprintf("%+v", err))
		}
		retVal[i] = dep
	}
	return retVal
}
// pred runs a single annotated sentence through a fresh Parser backed by
// the trainer's current model.
func (t *Trainer) pred(as lingo.AnnotatedSentence) (*lingo.Dependency, error) {
	d := new(Parser)
	d.Model = t.Model
	return d.predict(as)
}
package bincode
import (
"encoding/binary"
"math"
"reflect"
)
// Decoder deserializes bincode-encoded bytes into a Go value.
type Decoder interface {
	// Decode reads bz into data, which should be a pointer to the target
	// value. Truncated input panics (slice index out of range) rather than
	// returning an error.
	Decode(bz []byte, data interface{})
}
// decoder holds the byte order, the input buffer and a read cursor while a
// single Decode call walks the target value.
type decoder struct {
	order  binary.ByteOrder
	buf    []byte
	offset int // next read offset in data
}
// NewDecoder returns a ready-to-use bincode Decoder. The returned value is
// stateful (buffer + offset) and therefore not safe for concurrent use.
func NewDecoder() Decoder {
	return &decoder{}
}
// Decode deserializes bz into data. A pointer target is dereferenced first;
// the decoder state is reset on every call, so the same Decoder can be
// reused sequentially.
func (d *decoder) Decode(bz []byte, data interface{}) {
	v := reflect.ValueOf(data)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	d.init(bz)
	d.value(v)
}
// init resets the decoder to read data from the beginning in little-endian
// order (bincode's default) and returns d for chaining.
func (d *decoder) init(data []byte) *decoder {
	d.order = binary.LittleEndian
	d.buf = data
	d.offset = 0
	return d
}
// value decodes the next bytes of d.buf into v, advancing d.offset.
// Layout follows bincode: fixed-width little-endian numbers, uint64
// length-prefixed strings and slices, a one-byte presence flag before
// pointer (Option) payloads, and struct fields in declaration order.
// Kinds not listed here are silently ignored.
func (d *decoder) value(v reflect.Value) {
	switch v.Kind() {
	case reflect.Bool:
		v.SetBool(d.bool())
	case reflect.Int8:
		v.SetInt(int64(d.int8()))
	case reflect.Int16:
		v.SetInt(int64(d.int16()))
	case reflect.Int32:
		v.SetInt(int64(d.int32()))
	case reflect.Int64:
		v.SetInt(d.int64())
	case reflect.Uint8:
		v.SetUint(uint64(d.uint8()))
	case reflect.Uint16:
		v.SetUint(uint64(d.uint16()))
	case reflect.Uint32:
		v.SetUint(uint64(d.uint32()))
	case reflect.Uint64:
		v.SetUint(d.uint64())
	case reflect.Float32:
		v.SetFloat(float64(math.Float32frombits(d.uint32())))
	case reflect.Float64:
		v.SetFloat(math.Float64frombits(d.uint64()))
	case reflect.Array:
		// Array length is fixed by the type; there is no prefix on the wire.
		n := v.Len()
		for i := 0; i < n; i++ {
			d.value(v.Index(i))
		}
	case reflect.String:
		// Fast path: copy the bytes straight out of the buffer instead of
		// decoding one reflect.Value per byte. SetString also handles named
		// string types, matching the previous Convert-based behavior.
		n := int(d.uint64())
		v.SetString(string(d.buf[d.offset : d.offset+n]))
		d.offset += n
	case reflect.Slice:
		n := int(d.uint64())
		v.Set(reflect.MakeSlice(v.Type(), n, n))
		for i := 0; i < n; i++ {
			d.value(v.Index(i))
		}
	case reflect.Struct:
		t := v.Type()
		for i := 0; i < v.NumField(); i++ {
			// Mirrors encoding/binary: decode settable fields and any field
			// not named "_".
			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				d.value(v)
			}
		}
	case reflect.Ptr:
		// Option encoding: a leading true byte means a value follows.
		if d.bool() {
			ptrValue := reflect.New(v.Type().Elem())
			d.value(ptrValue.Elem())
			v.Set(ptrValue)
		}
	}
}
// bool reads one byte; any non-zero value is true.
func (d *decoder) bool() bool {
	x := d.buf[d.offset]
	d.offset++
	return x != 0
}

// uint8 reads a single byte.
func (d *decoder) uint8() uint8 {
	x := d.buf[d.offset]
	d.offset++
	return x
}

// uint16 reads two bytes in the configured byte order.
func (d *decoder) uint16() uint16 {
	x := d.order.Uint16(d.buf[d.offset : d.offset+2])
	d.offset += 2
	return x
}

// uint32 reads four bytes in the configured byte order.
func (d *decoder) uint32() uint32 {
	x := d.order.Uint32(d.buf[d.offset : d.offset+4])
	d.offset += 4
	return x
}

// uint64 reads eight bytes in the configured byte order.
func (d *decoder) uint64() uint64 {
	x := d.order.Uint64(d.buf[d.offset : d.offset+8])
	d.offset += 8
	return x
}

// The signed readers reinterpret the unsigned little-endian bit patterns.
func (d *decoder) int8() int8 { return int8(d.uint8()) }
func (d *decoder) int16() int16 { return int16(d.uint16()) }
func (d *decoder) int32() int32 { return int32(d.uint32()) }
func (d *decoder) int64() int64 { return int64(d.uint64()) }
package main
import (
"fmt"
"github.com/codingbeard/cberrors"
"github.com/codingbeard/cberrors/iowriterprovider"
"github.com/codingbeard/cblog"
"github.com/codingbeard/tfkg/callback"
"github.com/codingbeard/tfkg/data"
"github.com/codingbeard/tfkg/layer"
"github.com/codingbeard/tfkg/metric"
"github.com/codingbeard/tfkg/model"
"github.com/codingbeard/tfkg/optimizer"
"github.com/codingbeard/tfkg/preprocessor"
tf "github.com/galeone/tensorflow/tensorflow/go"
"os"
"path/filepath"
"time"
)
// main builds, trains, saves, reloads and runs inference with a TFKG model
// that feeds the four iris features through four separate input branches
// before concatenating them. Every step is annotated inline below.
func main() {
	// This is where the trained model will be saved
	saveDir := filepath.Join("../../logs", fmt.Sprintf("multiple-inputs-%d", time.Now().Unix()))
	e := os.MkdirAll(saveDir, os.ModePerm)
	if e != nil {
		panic(e)
	}
	// Create a logger pointed at the save dir
	logger, e := cblog.NewLogger(cblog.LoggerConfig{
		LogLevel:           cblog.DebugLevel,
		Format:             "%{time:2006-01-02 15:04:05.000} : %{file}:%{line} : %{message}",
		LogToFile:          true,
		FilePath:           filepath.Join(saveDir, "training.log"),
		FilePerm:           os.ModePerm,
		LogToStdOut:        true,
		SetAsDefaultLogger: true,
	})
	if e != nil {
		panic(e)
	}
	// Error handler with stack traces
	errorHandler := cberrors.NewErrorContainer(iowriterprovider.New(logger))
	// Define four inputs, each going into one of four dense layers. One set for each of the data points in the iris dataset
	input1 := layer.Input().
		SetInputShape(tf.MakeShape(-1, 1)).
		SetDtype(layer.Float32)
	dense1 := layer.Dense(10).
		SetDtype(layer.Float32).
		SetName("dense_1").
		SetActivation("swish").
		SetInputs(input1)
	input2 := layer.Input().
		SetInputShape(tf.MakeShape(-1, 1)).
		SetDtype(layer.Float32)
	dense2 := layer.Dense(10).
		SetDtype(layer.Float32).
		SetName("dense_2").
		SetActivation("swish").
		SetInputs(input2)
	input3 := layer.Input().
		SetInputShape(tf.MakeShape(-1, 1)).
		SetDtype(layer.Float32)
	dense3 := layer.Dense(10).
		SetDtype(layer.Float32).
		SetName("dense_3").
		SetActivation("swish").
		SetInputs(input3)
	input4 := layer.Input().
		SetInputShape(tf.MakeShape(-1, 1)).
		SetDtype(layer.Float32)
	dense4 := layer.Dense(10).
		SetDtype(layer.Float32).
		SetName("dense_4").
		SetActivation("swish").
		SetInputs(input4)
	// Concatenate all the dense layers into a single layer
	concat := layer.Concatenate().SetInputs(dense1, dense2, dense3, dense4)
	// Pass the concatenated into a simple dense network
	denseMerged := layer.Dense(100).
		SetDtype(layer.Float32).
		SetName("dense_merged").
		SetActivation("swish").
		SetInputs(concat)
	denseMerged2 := layer.Dense(100).
		SetDtype(layer.Float32).
		SetName("dense_merged_2").
		SetActivation("swish").
		SetInputs(denseMerged)
	// Define the output as having three units, as there are three classes to predict
	output := layer.Dense(3).
		SetDtype(layer.Float32).
		SetName("output").
		SetActivation("softmax").
		SetInputs(denseMerged2)
	// Define a simple keras style Functional model
	// Note that you don't need to pass in the inputs, the output variable contains all the other nodes as long as you use the same syntax of layer.New()(input)
	m := model.NewModel(
		logger,
		errorHandler,
		output,
	)
	// This part is pretty nasty under the hood. Effectively it will generate some python code for our model and execute it to save the model in a format we can load and train
	// A python binary must be available to use for this to work
	// The batchSize used in CompileAndLoad must match the BatchSize used in Fit
	batchSize := 3
	e = m.CompileAndLoad(model.CompileConfig{
		Loss:             model.LossSparseCategoricalCrossentropy,
		Optimizer:        optimizer.Adam(),
		ModelInfoSaveDir: saveDir,
		BatchSize:        batchSize,
	})
	// NOTE(review): several error paths below return silently without
	// logging; consider errorHandler.Error(e) before returning.
	if e != nil {
		return
	}
	// Where the cached tokenizers and divisors will go, if you change your data you'll need to clear this
	cacheDir := "training-cache"
	// Create a dataset for training and evaluation. iris.data is in the format: float32, float32, float32, float32, className
	// This means our categoryOffset is 4. The dataset will automatically pass this value in as the label Tensor when training and evaluating
	// If the category is not an int, a tokenizer will be created to automatically convert string categories to ints in a sparse categorical format
	// We allocate 80% of the data to training (TrainPercent: 0.8)
	// We allocate 10% of the data to validation (ValPercent: 0.1)
	// We allocate 10% of the data to testing (TestPercent: 0.1)
	// We define four data processors for the four float32 data points. The name will be used for the tokenizer or divisor cache file
	// The lineOffset is the offset in the data file
	// The preprocessor.NewDivisor(errorHandler) will scale the floats to between 0 and 1
	// We use a preprocessor.ReadCsvFloat32s because under the hood a lineOffset: 0 dataLength: 4 will grab the first four elements of the csv row and return them as a csv string. It will convert the string to a slice of float32 values
	// We use a preprocessor.ConvertDivisorToFloat32SliceTensor to convert that slice of floats into a tensorflow Tensor. The output of this function will be passed to the model for training and evaluating
	dataset, e := data.NewSingleFileDataset(
		logger,
		errorHandler,
		data.SingleFileDatasetConfig{
			FilePath:          "data/iris.data",
			CacheDir:          cacheDir,
			TrainPercent:      0.8,
			ValPercent:        0.1,
			TestPercent:       0.1,
			IgnoreParseErrors: true,
		},
		preprocessor.NewSparseCategoricalTokenizingYProcessor(
			errorHandler,
			cacheDir,
			4,
		),
		preprocessor.NewProcessor(
			errorHandler,
			"sepal_length",
			preprocessor.ProcessorConfig{
				CacheDir:    cacheDir,
				LineOffset:  0,
				RequiresFit: true,
				Divisor:     preprocessor.NewDivisor(errorHandler),
				Reader:      preprocessor.ReadCsvFloat32s,
				Converter:   preprocessor.ConvertDivisorToFloat32SliceTensor,
			},
		),
		preprocessor.NewProcessor(
			errorHandler,
			"sepal_width",
			preprocessor.ProcessorConfig{
				CacheDir:    cacheDir,
				LineOffset:  1,
				RequiresFit: true,
				Divisor:     preprocessor.NewDivisor(errorHandler),
				Reader:      preprocessor.ReadCsvFloat32s,
				Converter:   preprocessor.ConvertDivisorToFloat32SliceTensor,
			},
		),
		preprocessor.NewProcessor(
			errorHandler,
			"petal_length",
			preprocessor.ProcessorConfig{
				CacheDir:    cacheDir,
				LineOffset:  2,
				RequiresFit: true,
				Divisor:     preprocessor.NewDivisor(errorHandler),
				Reader:      preprocessor.ReadCsvFloat32s,
				Converter:   preprocessor.ConvertDivisorToFloat32SliceTensor,
			},
		),
		preprocessor.NewProcessor(
			errorHandler,
			"petal_width",
			preprocessor.ProcessorConfig{
				CacheDir:    cacheDir,
				LineOffset:  3,
				RequiresFit: true,
				Divisor:     preprocessor.NewDivisor(errorHandler),
				Reader:      preprocessor.ReadCsvFloat32s,
				Converter:   preprocessor.ConvertDivisorToFloat32SliceTensor,
			},
		),
	)
	if e != nil {
		errorHandler.Error(e)
		return
	}
	// This will save our divisor under savePath
	e = dataset.SaveProcessors(saveDir)
	if e != nil {
		return
	}
	logger.InfoF("main", "Shuffling dataset")
	// This will shuffle the data in a deterministic fashion, change 1 to time.Now().UnixNano() for a different shuffle each training session
	dataset.Shuffle(1)
	logger.InfoF("main", "Training model: %s", saveDir)
	// Train the model.
	// Most of this should look familiar to anyone who has used tensorflow/keras
	// The key points are:
	// The batchSize MUST match the batch size in the call to CompileAndLoad
	// We pass the data through 10 times (Epochs: 10)
	// We enable validation, which will evaluate the model on the validation portion of the dataset above (Validation: true)
	// We continuously (and concurrently) pre-fetch 10 batches to speed up training, though with 150 samples this has almost no effect
	// We calculate the accuracy of the model on training and validation datasets (metric.SparseCategoricalAccuracy)
	// We log the training results to stdout (Verbose:1, callback.Logger)
	// We save the best model based on the accuracy metric at the end of the validation stage of each epoch (callback.Checkpoint)
	m.Fit(
		dataset,
		model.FitConfig{
			Epochs:     10,
			Validation: true,
			BatchSize:  batchSize,
			PreFetch:   10,
			Verbose:    1,
			Metrics: []metric.Metric{
				&metric.SparseCategoricalAccuracy{
					Name:       "acc",
					Confidence: 0.5,
					Average:    true,
				},
			},
			Callbacks: []callback.Callback{
				&callback.Logger{
					FileLogger: logger,
				},
				&callback.Checkpoint{
					OnEvent:    callback.EventEnd,
					OnMode:     callback.ModeVal,
					MetricName: "val_acc",
					Compare:    callback.CheckpointCompareMax,
					SaveDir:    saveDir,
				},
			},
		},
	)
	logger.InfoF("main", "Finished training")
	// You do not need to load the model right after training, but this shows the weights were saved
	m, e = model.LoadModel(errorHandler, logger, saveDir)
	if e != nil {
		errorHandler.Error(e)
		return
	}
	// Create an inference provider, with four processors which will accept our inputs of [][]float32 and turn it into a tensor
	// We pass in the location of the processors we saved above in dataset.SaveProcessors
	// Note that the name of the processor must match the name used in the dataset above, as that will load the correct divisor config
	inference, e := data.NewInference(
		logger,
		errorHandler,
		saveDir,
		preprocessor.NewProcessor(
			errorHandler,
			"sepal_length",
			preprocessor.ProcessorConfig{
				Converter: preprocessor.ConvertDivisorToFloat32SliceTensor,
			},
		),
		preprocessor.NewProcessor(
			errorHandler,
			"sepal_width",
			preprocessor.ProcessorConfig{
				Converter: preprocessor.ConvertDivisorToFloat32SliceTensor,
			},
		),
		preprocessor.NewProcessor(
			errorHandler,
			"petal_length",
			preprocessor.ProcessorConfig{
				Converter: preprocessor.ConvertDivisorToFloat32SliceTensor,
			},
		),
		preprocessor.NewProcessor(
			errorHandler,
			"petal_width",
			preprocessor.ProcessorConfig{
				Converter: preprocessor.ConvertDivisorToFloat32SliceTensor,
			},
		),
	)
	if e != nil {
		return
	}
	// This will take our inputs and pass it through the processors defined above to create tensors
	// Note that we are passing in [][]float32 values as m.Predict is designed to be able to predict on multiple samples
	inputTensors, e := inference.GenerateInputs(
		[][]float32{{6.0}},
		[][]float32{{3.0}},
		[][]float32{{4.8}},
		[][]float32{{1.8}},
	)
	if e != nil {
		return
	}
	// Predict the class of the input (should be Iris-virginica / 2)
	// Note that due to the automatic conversion of the labels in the dataset the classes are: Iris-setosa: 0, Iris-versicolor: 1, Iris-virginica: 2
	// These are the order of the classes in the unshuffled csv dataset
	outputTensor, e := m.Predict(inputTensors...)
	if e != nil {
		return
	}
	// Cast the tensor to [][]float32
	outputValues := outputTensor.Value().([][]float32)
	logger.InfoF(
		"main",
		"Predicted classes: %s: %f, %s: %f, %s: %f",
		"Iris-setosa",
		outputValues[0][0],
		"Iris-versicolor",
		outputValues[0][1],
		"Iris-virginica",
		outputValues[0][2],
	)
	/*
	   Example output:
	   2021-12-08 18:01:29.880 : log.go:147 : Logger initialised
	   2021-12-08 18:01:29.885 : model.go:715 : Compiling and loading model. If anything goes wrong python error messages will be printed out.
	   Initialising model
	   Tracing learn
	   Tracing evaluate
	   Tracing predict
	   Saving model
	   Completed model base
	   2021-12-08 18:01:51.506 : single_file_dataset.go:66 : Initialising single file dataset at: examples/iris/data/iris.data
	   2021-12-08 18:01:51.515 : single_file_dataset.go:140 : Loading line offsets and stats from cache file
	   2021-12-08 18:01:51.517 : single_file_dataset.go:146 : Found 151 rows. Got class counts: map[int]int{0:50, 1:50, 2:50}
	   2021-12-08 18:01:51.520 : single_file_dataset.go:253 : Loaded Pre-Processor: sepal_length
	   2021-12-08 18:01:51.522 : single_file_dataset.go:253 : Loaded Pre-Processor: sepal_width
	   2021-12-08 18:01:51.524 : single_file_dataset.go:253 : Loaded Pre-Processor: petal_length
	   2021-12-08 18:01:51.527 : single_file_dataset.go:253 : Loaded Pre-Processor: petal_width
	   2021-12-08 18:01:51.528 : single_file_dataset.go:261 : Loaded All Pre-Processors
	   2021-12-08 18:01:51.530 : main.go:187 : Shuffling dataset
	   2021-12-08 18:01:51.532 : main.go:191 : Training model: examples/multiple_inputs/saved_models/trained_model
	   2021-12-08 18:01:53.134 : logger.go:102 : End 1 5/5 (1s/1s) loss: 1.0580 acc: 0.0000 val_loss: 1.0550 val_acc: 0.0000
	   2021-12-08 18:01:53.342 : logger.go:102 : End 2 5/5 (0s/0s) loss: 0.9135 acc: 0.0682 val_loss: 0.8033 val_acc: 0.2000
	   2021-12-08 18:01:53.763 : logger.go:79 : Saved
	   2021-12-08 18:01:53.974 : logger.go:102 : End 3 5/5 (0s/0s) loss: 0.6254 acc: 0.5682 val_loss: 0.4964 val_acc: 0.6667
	   2021-12-08 18:01:54.023 : logger.go:79 : Saved
	   2021-12-08 18:01:54.237 : logger.go:102 : End 4 5/5 (0s/0s) loss: 0.4571 acc: 0.6591 val_loss: 0.3813 val_acc: 0.6667
	   2021-12-08 18:01:54.447 : logger.go:102 : End 5 5/5 (0s/0s) loss: 0.3710 acc: 0.8258 val_loss: 0.2941 val_acc: 0.8667
	   2021-12-08 18:01:54.499 : logger.go:79 : Saved
	   2021-12-08 18:01:54.709 : logger.go:102 : End 6 5/5 (0s/0s) loss: 0.2864 acc: 0.9091 val_loss: 0.1828 val_acc: 1.0000
	   2021-12-08 18:01:54.761 : logger.go:79 : Saved
	   2021-12-08 18:01:54.971 : logger.go:102 : End 7 5/5 (0s/0s) loss: 0.2162 acc: 0.9470 val_loss: 0.1189 val_acc: 1.0000
	   2021-12-08 18:01:55.182 : logger.go:102 : End 8 5/5 (1s/1s) loss: 0.1735 acc: 0.9545 val_loss: 0.0837 val_acc: 1.0000
	   2021-12-08 18:01:55.389 : logger.go:102 : End 9 5/5 (0s/0s) loss: 0.1454 acc: 0.9621 val_loss: 0.0662 val_acc: 1.0000
	   2021-12-08 18:01:55.599 : logger.go:102 : End 10 5/5 (0s/0s) loss: 0.1281 acc: 0.9621 val_loss: 0.0579 val_acc: 1.0000
	   2021-12-08 18:01:55.601 : main.go:233 : Finished training
	   2021-12-08 18:01:56.864 : main.go:319 : Predicted classes: Iris-setosa: 0.000334, Iris-versicolor: 0.318527, Iris-virginica: 0.681140
	*/
}
package measurements
import "fmt"
const DegreeSign = "°"
type TemperatureUnit int32
const (
Celsius TemperatureUnit = iota
Fahrenheit
Kelvin
)
var TemperatureUnitTypeName = map[TemperatureUnit]string{
Celsius: "C",
Fahrenheit: "F",
Kelvin: "K",
}
var TemperatureUnitTypeValue = map[string]TemperatureUnit{
"C": Celsius,
"F": Fahrenheit,
"K": Kelvin,
}
func (s TemperatureUnit) String() string {
return TemperatureUnitTypeName[s]
}
type Temperature interface {
Unit() TemperatureUnit
Value() float64
To(unit TemperatureUnit) Temperature
ToCelsius() Temperature
ToFahrenheit() Temperature
ToKelvin() Temperature
String() string
}
func NewTemperature(unit TemperatureUnit, value float64) Temperature {
return &temperature{
unit: unit,
value: value,
}
}
type temperature struct {
unit TemperatureUnit
value float64
}
func (s *temperature) Unit() TemperatureUnit {
return s.unit
}
func (s *temperature) Value() float64 {
return s.value
}
func FromCelsius(value float64) Temperature {
return &temperature{value: value, unit: Celsius}
}
func FromFahrenheit(value float64) Temperature {
return &temperature{value: value, unit: Fahrenheit}
}
func FromKelvin(value float64) Temperature {
return &temperature{value: value, unit: Kelvin}
}
func (s *temperature) To(unit TemperatureUnit) Temperature {
switch unit {
case Fahrenheit:
return s.ToFahrenheit()
case Kelvin:
return s.ToKelvin()
default:
return s.ToCelsius()
}
}
func (s *temperature) ToCelsius() Temperature {
switch s.Unit() {
case Fahrenheit:
return FromCelsius((s.Value() - 32) * 5 / 9)
case Kelvin:
return FromCelsius(s.Value() - 273.15)
default:
return FromCelsius(s.Value())
}
}
func (s *temperature) ToFahrenheit() Temperature {
switch s.Unit() {
case Fahrenheit:
return FromFahrenheit(s.Value())
case Kelvin:
return FromFahrenheit((s.Value()-273.15)*9/5 + 32)
default:
return FromFahrenheit((s.Value() * 9 / 5) + 32)
}
}
func (s *temperature) ToKelvin() Temperature {
switch s.Unit() {
case Fahrenheit:
return FromKelvin((s.Value()-32)*5/9 + 273.15)
case Kelvin:
return FromKelvin(s.Value())
default:
return FromKelvin(s.Value() + 273.15)
}
}
func (s temperature) String() string {
if s.unit == Kelvin {
return fmt.Sprintf("%.2f %s", s.value, s.unit)
}
return fmt.Sprintf("%.2f %s%s", s.value, DegreeSign, s.unit)
} | temperature.go | 0.843348 | 0.441071 | temperature.go | starcoder |
package callrecords
import (
i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e "time"
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// MediaStream models the network and quality metrics collected for a single
// media stream (audio or video) in a call record — presumably generated from
// the Microsoft Graph callRecords.mediaStream resource (Kiota codegen).
type MediaStream struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Average Network Mean Opinion Score degradation for stream. Represents how much the network loss and jitter has impacted the quality of received audio.
    averageAudioDegradation *float32
    // Average audio jitter computed as specified in RFC 3550, expressed as an ISO 8601 duration (for example, 'PT1S' is one second).
    averageAudioNetworkJitter *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration
    // Average estimated bandwidth available between two endpoints in bits per second.
    averageBandwidthEstimate *int64
    // Average jitter for the stream computed as specified in RFC 3550, expressed as an ISO 8601 duration.
    averageJitter *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration
    // Average packet loss rate for stream.
    averagePacketLossRate *float32
    // Ratio of the number of audio frames with samples generated by packet loss concealment to the total number of audio frames.
    averageRatioOfConcealedSamples *float32
    // Average frames per second received for all video streams computed over the duration of the session.
    averageReceivedFrameRate *float32
    // Average network propagation round-trip time (RFC 3550), expressed as an ISO 8601 duration.
    averageRoundTripTime *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration
    // Average percentage of video frames lost as displayed to the user.
    averageVideoFrameLossPercentage *float32
    // Average frames per second received for a video stream, computed over the duration of the session.
    averageVideoFrameRate *float32
    // Average fraction of packets lost (RFC 3550), computed over the duration of the session.
    averageVideoPacketLossRate *float32
    // UTC time when the stream ended, ISO 8601 format, always UTC (e.g. 2014-01-01T00:00:00Z).
    endDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Fraction of the call where frame rate is less than 7.5 frames per second.
    lowFrameRateRatio *float32
    // Fraction of the call that the client is running less than 70% expected video processing capability.
    lowVideoProcessingCapabilityRatio *float32
    // Maximum audio network jitter computed over each of the 20 second windows during the session, expressed as an ISO 8601 duration.
    maxAudioNetworkJitter *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration
    // Maximum jitter for the stream (RFC 3550), expressed as an ISO 8601 duration.
    maxJitter *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration
    // Maximum packet loss rate for the stream.
    maxPacketLossRate *float32
    // Maximum ratio of packets concealed by the healer.
    maxRatioOfConcealedSamples *float32
    // Maximum network propagation round-trip time (RFC 3550), expressed as an ISO 8601 duration.
    maxRoundTripTime *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration
    // Packet count for the stream.
    packetUtilization *int64
    // Packet loss rate after FEC has been applied aggregated across all video streams and codecs.
    postForwardErrorCorrectionPacketLossRate *float32
    // UTC time when the stream started, ISO 8601 format, always UTC.
    startDateTime *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time
    // Indicates the direction of the media stream. Possible values are: callerToCallee, calleeToCaller.
    streamDirection *MediaStreamDirection
    // Unique identifier for the stream.
    streamId *string
    // True if the media stream bypassed the Mediation Server and went straight between client and PSTN Gateway/PBX, false otherwise.
    wasMediaBypassed *bool
}
// NewMediaStream instantiates a new mediaStream and sets the default values.
func NewMediaStream()(*MediaStream) {
    m := &MediaStream{}
    // Initialize the overflow map so undeclared properties can be captured.
    m.SetAdditionalData(make(map[string]interface{}))
    return m
}
// CreateMediaStreamFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateMediaStreamFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    // parseNode is intentionally unused: MediaStream has no derived types in
    // this model, so no discriminator lookup is needed.
    return NewMediaStream(), nil
}
// The accessors below are nil-receiver safe: a nil *MediaStream yields the
// zero value rather than panicking (early-return form instead of if/else).
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *MediaStream) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetAverageAudioDegradation gets the averageAudioDegradation property value. Average Network Mean Opinion Score degradation for stream.
func (m *MediaStream) GetAverageAudioDegradation()(*float32) {
    if m == nil {
        return nil
    }
    return m.averageAudioDegradation
}
// GetAverageAudioNetworkJitter gets the averageAudioNetworkJitter property value. Average audio jitter (RFC 3550) as an ISO 8601 duration.
func (m *MediaStream) GetAverageAudioNetworkJitter()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {
    if m == nil {
        return nil
    }
    return m.averageAudioNetworkJitter
}
// GetAverageBandwidthEstimate gets the averageBandwidthEstimate property value. Average estimated bandwidth in bits per second.
func (m *MediaStream) GetAverageBandwidthEstimate()(*int64) {
    if m == nil {
        return nil
    }
    return m.averageBandwidthEstimate
}
// GetAverageJitter gets the averageJitter property value. Average jitter (RFC 3550) as an ISO 8601 duration.
func (m *MediaStream) GetAverageJitter()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {
    if m == nil {
        return nil
    }
    return m.averageJitter
}
// GetAveragePacketLossRate gets the averagePacketLossRate property value. Average packet loss rate for stream.
func (m *MediaStream) GetAveragePacketLossRate()(*float32) {
    if m == nil {
        return nil
    }
    return m.averagePacketLossRate
}
// GetAverageRatioOfConcealedSamples gets the averageRatioOfConcealedSamples property value. Ratio of concealed audio frames to total audio frames.
func (m *MediaStream) GetAverageRatioOfConcealedSamples()(*float32) {
    if m == nil {
        return nil
    }
    return m.averageRatioOfConcealedSamples
}
// GetAverageReceivedFrameRate gets the averageReceivedFrameRate property value. Average frames per second received for all video streams.
func (m *MediaStream) GetAverageReceivedFrameRate()(*float32) {
    if m == nil {
        return nil
    }
    return m.averageReceivedFrameRate
}
// GetAverageRoundTripTime gets the averageRoundTripTime property value. Average network round-trip time (RFC 3550) as an ISO 8601 duration.
func (m *MediaStream) GetAverageRoundTripTime()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {
    if m == nil {
        return nil
    }
    return m.averageRoundTripTime
}
// GetAverageVideoFrameLossPercentage gets the averageVideoFrameLossPercentage property value. Average percentage of video frames lost as displayed to the user.
func (m *MediaStream) GetAverageVideoFrameLossPercentage()(*float32) {
    if m == nil {
        return nil
    }
    return m.averageVideoFrameLossPercentage
}
// GetAverageVideoFrameRate gets the averageVideoFrameRate property value. Average frames per second received for a video stream.
func (m *MediaStream) GetAverageVideoFrameRate()(*float32) {
    if m == nil {
        return nil
    }
    return m.averageVideoFrameRate
}
// GetAverageVideoPacketLossRate gets the averageVideoPacketLossRate property value. Average fraction of packets lost (RFC 3550).
func (m *MediaStream) GetAverageVideoPacketLossRate()(*float32) {
    if m == nil {
        return nil
    }
    return m.averageVideoPacketLossRate
}
// GetEndDateTime gets the endDateTime property value. UTC time when the stream ended, ISO 8601 format.
func (m *MediaStream) GetEndDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.endDateTime
}
// GetFieldDeserializers the deserialization information for the current model
func (m *MediaStream) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    // Every field deserializer has the same shape: read one typed value from
    // the node, propagate parse errors, and invoke the setter only when the
    // value is present. The typed factories below remove that boilerplate.
    f32 := func (set func(*float32)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error) {
        return func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetFloat32Value()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    i64 := func (set func(*int64)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error) {
        return func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetInt64Value()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    dur := func (set func(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error) {
        return func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetISODurationValue()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    tim := func (set func(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error) {
        return func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetTimeValue()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error), 26)
    res["averageAudioDegradation"] = f32(m.SetAverageAudioDegradation)
    res["averageAudioNetworkJitter"] = dur(m.SetAverageAudioNetworkJitter)
    res["averageBandwidthEstimate"] = i64(m.SetAverageBandwidthEstimate)
    res["averageJitter"] = dur(m.SetAverageJitter)
    res["averagePacketLossRate"] = f32(m.SetAveragePacketLossRate)
    res["averageRatioOfConcealedSamples"] = f32(m.SetAverageRatioOfConcealedSamples)
    res["averageReceivedFrameRate"] = f32(m.SetAverageReceivedFrameRate)
    res["averageRoundTripTime"] = dur(m.SetAverageRoundTripTime)
    res["averageVideoFrameLossPercentage"] = f32(m.SetAverageVideoFrameLossPercentage)
    res["averageVideoFrameRate"] = f32(m.SetAverageVideoFrameRate)
    res["averageVideoPacketLossRate"] = f32(m.SetAverageVideoPacketLossRate)
    res["endDateTime"] = tim(m.SetEndDateTime)
    res["lowFrameRateRatio"] = f32(m.SetLowFrameRateRatio)
    res["lowVideoProcessingCapabilityRatio"] = f32(m.SetLowVideoProcessingCapabilityRatio)
    res["maxAudioNetworkJitter"] = dur(m.SetMaxAudioNetworkJitter)
    res["maxJitter"] = dur(m.SetMaxJitter)
    res["maxPacketLossRate"] = f32(m.SetMaxPacketLossRate)
    res["maxRatioOfConcealedSamples"] = f32(m.SetMaxRatioOfConcealedSamples)
    res["maxRoundTripTime"] = dur(m.SetMaxRoundTripTime)
    res["packetUtilization"] = i64(m.SetPacketUtilization)
    res["postForwardErrorCorrectionPacketLossRate"] = f32(m.SetPostForwardErrorCorrectionPacketLossRate)
    res["startDateTime"] = tim(m.SetStartDateTime)
    // The remaining fields need one-off handling (enum cast, string, bool).
    res["streamDirection"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetEnumValue(ParseMediaStreamDirection)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetStreamDirection(val.(*MediaStreamDirection))
        }
        return nil
    }
    res["streamId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetStreamId(val)
        }
        return nil
    }
    res["wasMediaBypassed"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetWasMediaBypassed(val)
        }
        return nil
    }
    return res
}
// GetLowFrameRateRatio gets the lowFrameRateRatio property value. Fraction of the call where frame rate is less than 7.5 frames per second.
func (m *MediaStream) GetLowFrameRateRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.lowFrameRateRatio
}
// GetLowVideoProcessingCapabilityRatio gets the lowVideoProcessingCapabilityRatio property value. Fraction of the call that the client is running less than 70% expected video processing capability.
func (m *MediaStream) GetLowVideoProcessingCapabilityRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.lowVideoProcessingCapabilityRatio
}
// GetMaxAudioNetworkJitter gets the maxAudioNetworkJitter property value. Maximum audio network jitter over each 20-second window, as an ISO 8601 duration.
func (m *MediaStream) GetMaxAudioNetworkJitter()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {
    if m == nil {
        return nil
    }
    return m.maxAudioNetworkJitter
}
// GetMaxJitter gets the maxJitter property value. Maximum jitter (RFC 3550) as an ISO 8601 duration.
func (m *MediaStream) GetMaxJitter()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {
    if m == nil {
        return nil
    }
    return m.maxJitter
}
// GetMaxPacketLossRate gets the maxPacketLossRate property value. Maximum packet loss rate for the stream.
func (m *MediaStream) GetMaxPacketLossRate()(*float32) {
    if m == nil {
        return nil
    }
    return m.maxPacketLossRate
}
// GetMaxRatioOfConcealedSamples gets the maxRatioOfConcealedSamples property value. Maximum ratio of packets concealed by the healer.
func (m *MediaStream) GetMaxRatioOfConcealedSamples()(*float32) {
    if m == nil {
        return nil
    }
    return m.maxRatioOfConcealedSamples
}
// GetMaxRoundTripTime gets the maxRoundTripTime property value. Maximum network round-trip time (RFC 3550) as an ISO 8601 duration.
func (m *MediaStream) GetMaxRoundTripTime()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {
    if m == nil {
        return nil
    }
    return m.maxRoundTripTime
}
// GetPacketUtilization gets the packetUtilization property value. Packet count for the stream.
func (m *MediaStream) GetPacketUtilization()(*int64) {
    if m == nil {
        return nil
    }
    return m.packetUtilization
}
// GetPostForwardErrorCorrectionPacketLossRate gets the postForwardErrorCorrectionPacketLossRate property value. Packet loss rate after FEC has been applied.
func (m *MediaStream) GetPostForwardErrorCorrectionPacketLossRate()(*float32) {
    if m == nil {
        return nil
    }
    return m.postForwardErrorCorrectionPacketLossRate
}
// GetStartDateTime gets the startDateTime property value. UTC time when the stream started, ISO 8601 format.
func (m *MediaStream) GetStartDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {
    if m == nil {
        return nil
    }
    return m.startDateTime
}
// GetStreamDirection gets the streamDirection property value. Possible values are: callerToCallee, calleeToCaller.
func (m *MediaStream) GetStreamDirection()(*MediaStreamDirection) {
    if m == nil {
        return nil
    }
    return m.streamDirection
}
// GetStreamId gets the streamId property value. Unique identifier for the stream.
func (m *MediaStream) GetStreamId()(*string) {
    if m == nil {
        return nil
    }
    return m.streamId
}
// GetWasMediaBypassed gets the wasMediaBypassed property value. True if the media stream bypassed the Mediation Server.
func (m *MediaStream) GetWasMediaBypassed()(*bool) {
    if m == nil {
        return nil
    }
    return m.wasMediaBypassed
}
// Serialize serializes information the current object
func (m *MediaStream) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    // Fields are written in the same (alphabetical) order as the generated
    // code; serialization aborts on the first write error.
    if err := writer.WriteFloat32Value("averageAudioDegradation", m.GetAverageAudioDegradation()); err != nil {
        return err
    }
    if err := writer.WriteISODurationValue("averageAudioNetworkJitter", m.GetAverageAudioNetworkJitter()); err != nil {
        return err
    }
    if err := writer.WriteInt64Value("averageBandwidthEstimate", m.GetAverageBandwidthEstimate()); err != nil {
        return err
    }
    if err := writer.WriteISODurationValue("averageJitter", m.GetAverageJitter()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("averagePacketLossRate", m.GetAveragePacketLossRate()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("averageRatioOfConcealedSamples", m.GetAverageRatioOfConcealedSamples()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("averageReceivedFrameRate", m.GetAverageReceivedFrameRate()); err != nil {
        return err
    }
    if err := writer.WriteISODurationValue("averageRoundTripTime", m.GetAverageRoundTripTime()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("averageVideoFrameLossPercentage", m.GetAverageVideoFrameLossPercentage()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("averageVideoFrameRate", m.GetAverageVideoFrameRate()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("averageVideoPacketLossRate", m.GetAverageVideoPacketLossRate()); err != nil {
        return err
    }
    if err := writer.WriteTimeValue("endDateTime", m.GetEndDateTime()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("lowFrameRateRatio", m.GetLowFrameRateRatio()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("lowVideoProcessingCapabilityRatio", m.GetLowVideoProcessingCapabilityRatio()); err != nil {
        return err
    }
    if err := writer.WriteISODurationValue("maxAudioNetworkJitter", m.GetMaxAudioNetworkJitter()); err != nil {
        return err
    }
    if err := writer.WriteISODurationValue("maxJitter", m.GetMaxJitter()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("maxPacketLossRate", m.GetMaxPacketLossRate()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("maxRatioOfConcealedSamples", m.GetMaxRatioOfConcealedSamples()); err != nil {
        return err
    }
    if err := writer.WriteISODurationValue("maxRoundTripTime", m.GetMaxRoundTripTime()); err != nil {
        return err
    }
    if err := writer.WriteInt64Value("packetUtilization", m.GetPacketUtilization()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("postForwardErrorCorrectionPacketLossRate", m.GetPostForwardErrorCorrectionPacketLossRate()); err != nil {
        return err
    }
    if err := writer.WriteTimeValue("startDateTime", m.GetStartDateTime()); err != nil {
        return err
    }
    // streamDirection is an enum: write its string form, and only when set.
    if m.GetStreamDirection() != nil {
        cast := (*m.GetStreamDirection()).String()
        if err := writer.WriteStringValue("streamDirection", &cast); err != nil {
            return err
        }
    }
    if err := writer.WriteStringValue("streamId", m.GetStreamId()); err != nil {
        return err
    }
    if err := writer.WriteBoolValue("wasMediaBypassed", m.GetWasMediaBypassed()); err != nil {
        return err
    }
    // Preserve any additional (undeclared) properties captured at parse time.
    if err := writer.WriteAdditionalData(m.GetAdditionalData()); err != nil {
        return err
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *MediaStream) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetAverageAudioDegradation sets the averageAudioDegradation property value. Average Network Mean Opinion Score degradation for stream. Represents how much the network loss and jitter has impacted the quality of received audio.
func (m *MediaStream) SetAverageAudioDegradation(value *float32)() {
if m != nil {
m.averageAudioDegradation = value
}
}
// SetAverageAudioNetworkJitter sets the averageAudioNetworkJitter property value. Average jitter for the stream computed as specified in [RFC 3550][], denoted in [ISO 8601][] format. For example, 1 second is denoted as 'PT1S', where 'P' is the duration designator, 'T' is the time designator, and 'S' is the second designator.
func (m *MediaStream) SetAverageAudioNetworkJitter(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)() {
if m != nil {
m.averageAudioNetworkJitter = value
}
}
// SetAverageBandwidthEstimate sets the averageBandwidthEstimate property value. Average estimated bandwidth available between two endpoints in bits per second.
func (m *MediaStream) SetAverageBandwidthEstimate(value *int64)() {
if m != nil {
m.averageBandwidthEstimate = value
}
}
// SetAverageJitter sets the averageJitter property value. Average jitter for the stream computed as specified in [RFC 3550][], denoted in [ISO 8601][] format. For example, 1 second is denoted as 'PT1S', where 'P' is the duration designator, 'T' is the time designator, and 'S' is the second designator.
func (m *MediaStream) SetAverageJitter(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)() {
if m != nil {
m.averageJitter = value
}
}
// SetAveragePacketLossRate sets the averagePacketLossRate property value. Average packet loss rate for stream.
func (m *MediaStream) SetAveragePacketLossRate(value *float32)() {
if m != nil {
m.averagePacketLossRate = value
}
}
// SetAverageRatioOfConcealedSamples sets the averageRatioOfConcealedSamples property value. Ratio of the number of audio frames with samples generated by packet loss concealment to the total number of audio frames.
func (m *MediaStream) SetAverageRatioOfConcealedSamples(value *float32)() {
if m != nil {
m.averageRatioOfConcealedSamples = value
}
}
// SetAverageReceivedFrameRate sets the averageReceivedFrameRate property value. Average frames per second received for all video streams computed over the duration of the session.
func (m *MediaStream) SetAverageReceivedFrameRate(value *float32)() {
if m != nil {
m.averageReceivedFrameRate = value
}
}
// SetAverageRoundTripTime sets the averageRoundTripTime property value. Average network propagation round-trip time computed as specified in [RFC 3550][], denoted in [ISO 8601][] format. For example, 1 second is denoted as 'PT1S', where 'P' is the duration designator, 'T' is the time designator, and 'S' is the second designator.
func (m *MediaStream) SetAverageRoundTripTime(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)() {
if m != nil {
m.averageRoundTripTime = value
}
}
// SetAverageVideoFrameLossPercentage sets the averageVideoFrameLossPercentage property value. Average percentage of video frames lost as displayed to the user.
func (m *MediaStream) SetAverageVideoFrameLossPercentage(value *float32)() {
if m != nil {
m.averageVideoFrameLossPercentage = value
}
}
// SetAverageVideoFrameRate sets the averageVideoFrameRate property value. Average frames per second received for a video stream, computed over the duration of the session.
func (m *MediaStream) SetAverageVideoFrameRate(value *float32)() {
if m != nil {
m.averageVideoFrameRate = value
}
}
// SetAverageVideoPacketLossRate sets the averageVideoPacketLossRate property value. Average fraction of packets lost, as specified in [RFC 3550][], computed over the duration of the session.
func (m *MediaStream) SetAverageVideoPacketLossRate(value *float32)() {
if m != nil {
m.averageVideoPacketLossRate = value
}
}
// SetEndDateTime sets the endDateTime property value. UTC time when the stream ended. The DateTimeOffset type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z
func (m *MediaStream) SetEndDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
if m != nil {
m.endDateTime = value
}
}
// SetLowFrameRateRatio sets the lowFrameRateRatio property value. Fraction of the call where frame rate is less than 7.5 frames per second.
func (m *MediaStream) SetLowFrameRateRatio(value *float32)() {
if m != nil {
m.lowFrameRateRatio = value
}
}
// SetLowVideoProcessingCapabilityRatio sets the lowVideoProcessingCapabilityRatio property value. Fraction of the call that the client is running less than 70% expected video processing capability.
func (m *MediaStream) SetLowVideoProcessingCapabilityRatio(value *float32)() {
if m != nil {
m.lowVideoProcessingCapabilityRatio = value
}
}
// SetMaxAudioNetworkJitter sets the maxAudioNetworkJitter property value. Maximum of audio network jitter computed over each of the 20 second windows during the session, denoted in [ISO 8601][] format. For example, 1 second is denoted as 'PT1S', where 'P' is the duration designator, 'T' is the time designator, and 'S' is the second designator.
func (m *MediaStream) SetMaxAudioNetworkJitter(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)() {
if m != nil {
m.maxAudioNetworkJitter = value
}
}
// SetMaxJitter sets the maxJitter property value. Maximum jitter for the stream computed as specified in RFC 3550, denoted in [ISO 8601][] format. For example, 1 second is denoted as 'PT1S', where 'P' is the duration designator, 'T' is the time designator, and 'S' is the second designator.
func (m *MediaStream) SetMaxJitter(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)() {
if m != nil {
m.maxJitter = value
}
}
// SetMaxPacketLossRate sets the maxPacketLossRate property value. Maximum packet loss rate for the stream.
func (m *MediaStream) SetMaxPacketLossRate(value *float32)() {
if m != nil {
m.maxPacketLossRate = value
}
}
// SetMaxRatioOfConcealedSamples sets the maxRatioOfConcealedSamples property value. Maximum ratio of packets concealed by the healer.
func (m *MediaStream) SetMaxRatioOfConcealedSamples(value *float32)() {
if m != nil {
m.maxRatioOfConcealedSamples = value
}
}
// SetMaxRoundTripTime sets the maxRoundTripTime property value. Maximum network propagation round-trip time computed as specified in [RFC 3550][], denoted in [ISO 8601][] format. For example, 1 second is denoted as 'PT1S', where 'P' is the duration designator, 'T' is the time designator, and 'S' is the second designator.
func (m *MediaStream) SetMaxRoundTripTime(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)() {
if m != nil {
m.maxRoundTripTime = value
}
}
// SetPacketUtilization sets the packetUtilization property value. Packet count for the stream.
func (m *MediaStream) SetPacketUtilization(value *int64)() {
if m != nil {
m.packetUtilization = value
}
}
// SetPostForwardErrorCorrectionPacketLossRate sets the postForwardErrorCorrectionPacketLossRate property value. Packet loss rate after FEC has been applied aggregated across all video streams and codecs.
func (m *MediaStream) SetPostForwardErrorCorrectionPacketLossRate(value *float32)() {
if m != nil {
m.postForwardErrorCorrectionPacketLossRate = value
}
}
// SetStartDateTime records the UTC time at which the stream started
// (ISO 8601 date-time, always UTC, e.g. 2014-01-01T00:00:00Z).
func (m *MediaStream) SetStartDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {
    if m == nil {
        return
    }
    m.startDateTime = value
}
// SetStreamDirection records the direction of the media stream.
// Possible values are: callerToCallee, calleeToCaller.
func (m *MediaStream) SetStreamDirection(value *MediaStreamDirection)() {
    if m == nil {
        return
    }
    m.streamDirection = value
}
// SetStreamId records the unique identifier for the stream.
func (m *MediaStream) SetStreamId(value *string)() {
    if m == nil {
        return
    }
    m.streamId = value
}
// SetWasMediaBypassed sets the wasMediaBypassed property value. True if the media stream bypassed the Mediation Server and went straight between client and PSTN Gateway/PBX, false otherwise.
func (m *MediaStream) SetWasMediaBypassed(value *bool)() {
if m != nil {
m.wasMediaBypassed = value
}
} | models/callrecords/media_stream.go | 0.781414 | 0.587026 | media_stream.go | starcoder |
package props
import (
"github.com/rs/zerolog/log"
)
// Enumeration is a type to store different encoding schemes of an enumeration.
// Each value carries both its textual (JSON) form and its compact binary form,
// so it can be rendered either way without an extra lookup.
type Enumeration struct {
	// JSON encodes the enumeration as a string.
	JSON string
	// Binary encodes the enumeration as a uint8.
	Binary uint8
}
// Health is the known health of a resource, which is encoded as enumeration
// and based on Redfish. The binary codes below are assigned in alphabetical
// order of the JSON names.
type Health Enumeration
var (
	// HealthCritical indicates that a critical condition requires immediate attention.
	HealthCritical Health = Health{"Critical", 0}
	// HealthOK indicates that no conditions require special attention.
	HealthOK Health = Health{"OK", 1}
	// HealthWarning indicates that a condition requires attention.
	HealthWarning Health = Health{"Warning", 2}
)
// State is the known state of the resource, which is encoded as enumeration
// and based on Redfish. The binary codes 0..11 below are assigned in
// alphabetical order of the JSON names.
type State Enumeration
var (
	// StateAbsent indicates that this function or resource is either not present or detected.
	StateAbsent State = State{"Absent", 0}
	// StateDeferring indicates that the element does not process any commands but queues new requests.
	StateDeferring State = State{"Deferring", 1}
	// StateDisabled indicates that this function or resource is disabled.
	StateDisabled State = State{"Disabled", 2}
	// StateEnabled indicates that this function or resource is enabled.
	StateEnabled State = State{"Enabled", 3}
	// StateInTest indicates that this function or resource is undergoing testing, or is in the process of capturing information for debugging.
	StateInTest State = State{"InTest", 4}
	// StateQualified indicates that the element quality is within the acceptable range of operation.
	StateQualified State = State{"Qualified", 5}
	// StateQuiesced indicates that the element is enabled but only processes a restricted set of commands.
	StateQuiesced State = State{"Quiesced", 6}
	// StateStandbyOffline indicates that this function or resource is enabled but awaits an external action to activate it.
	StateStandbyOffline State = State{"StandbyOffline", 7}
	// StateStandbySpare indicates that this function or resource is part of a redundancy set and awaits a failover or other external action to activate it.
	StateStandbySpare State = State{"StandbySpare", 8}
	// StateStarting indicates that this function or resource is starting.
	StateStarting State = State{"Starting", 9}
	// StateUnavailableOffline indicates that this function or resource is present but cannot be used.
	StateUnavailableOffline State = State{"UnavailableOffline", 10}
	// StateUpdating indicates that the element is updating and might be unavailable or degraded.
	StateUpdating State = State{"Updating", 11}
)
// ManagerType is the type of manager that this resource represents, which is
// encoded as enumeration and based on Redfish. The binary codes 0..5 below
// double as indices into managerTypeSlice, so the two must stay in sync.
type ManagerType Enumeration
var (
	// ManagerTypeAuxiliaryController is a controller that provides management functions for a particular subsystem or group of devices.
	ManagerTypeAuxiliaryController ManagerType = ManagerType{"AuxiliaryController", 0}
	// ManagerTypeBMC is a controller that provides management functions for a single computer system.
	ManagerTypeBMC ManagerType = ManagerType{"BMC", 1}
	// ManagerTypeEnclosureManager is a controller that provides management functions for a chassis or group of devices or systems.
	ManagerTypeEnclosureManager ManagerType = ManagerType{"EnclosureManager", 2}
	// ManagerTypeManagementController is a controller that primarily monitors or manages the operation of a device or system.
	ManagerTypeManagementController ManagerType = ManagerType{"ManagementController", 3}
	// ManagerTypeRackManager is a controller that provides management functions for a whole or part of a rack.
	ManagerTypeRackManager ManagerType = ManagerType{"RackManager", 4}
	// ManagerTypeService is software-based service that provides management functions.
	ManagerTypeService ManagerType = ManagerType{"Service", 5}
)
// managerTypeSlice maps a ManagerType's binary code (the slice index) back to
// the value itself; the order must match the Binary codes 0..5 declared above.
var managerTypeSlice = []*ManagerType{
	&ManagerTypeAuxiliaryController,
	&ManagerTypeBMC,
	&ManagerTypeEnclosureManager,
	&ManagerTypeManagementController,
	&ManagerTypeRackManager,
	&ManagerTypeService,
}
// managerTypeMap maps a ManagerType's JSON string encoding back to the value itself.
var managerTypeMap = map[string]*ManagerType{
	ManagerTypeAuxiliaryController.JSON: &ManagerTypeAuxiliaryController,
	ManagerTypeBMC.JSON: &ManagerTypeBMC,
	ManagerTypeEnclosureManager.JSON: &ManagerTypeEnclosureManager,
	ManagerTypeManagementController.JSON: &ManagerTypeManagementController,
	ManagerTypeRackManager.JSON: &ManagerTypeRackManager,
	ManagerTypeService.JSON: &ManagerTypeService,
}
func NewManagerType(value interface{}) ManagerType {
stringValue, ok := value.(string)
if ok {
return *managerTypeMap[stringValue]
}
intValue, ok := value.(int)
if ok {
return *managerTypeSlice[intValue]
}
log.Error().Msgf("❌ NewManagerType: failed to parse: invalid value: %v", value)
return ManagerTypeAuxiliaryController
} | pkg/props/manager_type.go | 0.553264 | 0.441673 | manager_type.go | starcoder |
package main
import (
"bufio"
"bytes"
"fmt"
"os"
"runtime"
"sort"
)
// seqString is a nucleotide sequence in textual form: "ACGT...".
type seqString string

// seqChars is a nucleotide sequence as raw ASCII bytes: 'A', 'C', 'G', 'T'...
type seqChars []byte

// seqBits is a nucleotide sequence with one code per byte, using only the two
// low bits: 'A' -> 0, 'C' -> 1, 'T' -> 2, 'G' -> 3.
type seqBits []byte

// toBits converts the receiver *in place* (its storage is reused) from ASCII
// letters to 2-bit codes and returns the same bytes viewed as seqBits.
func (seq seqChars) toBits() seqBits {
	// The low two bits of each letter shifted right once are unique per
	// nucleotide: 'A'>>1&3 == 0, 'C'>>1&3 == 1, 'T'>>1&3 == 2, 'G'>>1&3 == 3.
	for i := range seq {
		seq[i] = seq[i] >> 1 & 3
	}
	return seqBits(seq)
}

// seqBits converts the string into a freshly allocated 2-bit-coded sequence.
func (seq seqString) seqBits() seqBits {
	return seqChars(seq).toBits()
}

// seq32 packs a short (<= 16 nucleotide) sequence, two bits per nucleotide;
// the length itself is not stored.
type seq32 uint32

// seq64 packs a medium (17..32 nucleotide) sequence, two bits per nucleotide;
// the length itself is not stored.
type seq64 uint64

// seq32 folds the 2-bit codes of seq into a single seq32 value.
func (seq seqBits) seq32() seq32 {
	var packed seq32
	for _, code := range seq {
		packed = packed<<2 | seq32(code)
	}
	return packed
}

// seq64 folds the 2-bit codes of seq into a single seq64 value.
func (seq seqBits) seq64() seq64 {
	var packed seq64
	for _, code := range seq {
		packed = packed<<2 | seq64(code)
	}
	return packed
}

// seqString decodes num back into human readable text; length is the number
// of nucleotides packed into num.
func (num seq32) seqString(length int) seqString {
	out := make(seqChars, length)
	for i := length - 1; i >= 0; i-- {
		// Lowest two bits hold the last nucleotide, so fill from the back.
		out[i] = "ACTG"[num&3]
		num >>= 2
	}
	return seqString(out)
}
// counter accumulates occurrence counts for one packed sequence.
type counter uint32
// count32 tallies every substring of the given length (<= 16 nucleotides) in
// dna, keyed by its seq32 packing. A *counter is stored per key so the hot
// loop can increment through the pointer without rewriting the map entry.
func (dna seqBits) count32(length int) map[seq32]*counter {
	counts := make(map[seq32]*counter)
	// Seed the rolling key with the first length-1 nucleotides; each loop
	// iteration shifts one more nucleotide in to complete the window.
	key := dna[0 : length-1].seq32()
	// mask keeps only the 2*length low bits, discarding the nucleotide that
	// falls out of the sliding window.
	mask := seq32(1)<<uint(2*length) - 1
	for index := length - 1; index < len(dna); index++ {
		key = key<<2&mask | seq32(dna[index])
		pointer := counts[key]
		if pointer == nil {
			n := counter(1)
			counts[key] = &n
		} else {
			*pointer++
		}
	}
	return counts
}
// count64 is count32 for lengths of 17..32 nucleotides, using seq64 keys.
func (dna seqBits) count64(length int) map[seq64]*counter {
	counts := make(map[seq64]*counter)
	key := dna[0 : length-1].seq64()
	mask := seq64(1)<<uint(2*length) - 1
	for index := length - 1; index < len(dna); index++ {
		key = key<<2&mask | seq64(dna[index])
		pointer := counts[key]
		if pointer == nil {
			n := counter(1)
			counts[key] = &n
		} else {
			*pointer++
		}
	}
	return counts
}
// job pairs a piece of work over the DNA sequence with the channel on which
// its formatted report is delivered.
type job struct {
	run    func(dna seqBits)
	result chan string
}

// makeJob wraps a report function into a job. The result channel is buffered
// (capacity 1) so the worker never blocks when publishing the report.
func makeJob(j func(dna seqBits) string) job {
	out := make(chan string, 1)
	return job{
		result: out,
		run: func(dna seqBits) {
			out <- j(dna)
		},
	}
}
// frequencyReportJob builds a job that reports the frequency table of all
// nucleotide substrings of the given length.
func frequencyReportJob(length int) job {
	return makeJob(func(dna seqBits) string { return frequencyReport(dna, length) })
}

// sequenceReportJob builds a job that reports the occurrence count of one
// specific nucleotide sequence.
func sequenceReportJob(sequence seqString) job {
	return makeJob(func(dna seqBits) string { return sequenceReport(dna, sequence) })
}
// jobs lists every report in its required output order: the 1- and
// 2-nucleotide frequency tables first, then counts for specific sequences of
// increasing length.
var jobs = [...]job{
	frequencyReportJob(1),
	frequencyReportJob(2),
	sequenceReportJob("GGT"),
	sequenceReportJob("GGTA"),
	sequenceReportJob("GGTATT"),
	sequenceReportJob("GGTATTTTAATT"),
	sequenceReportJob("GGTATTTTAATTTATAGT"),
}
// main reads the DNA sequence from stdin, runs every report concurrently, and
// prints the results in their fixed declaration order.
func main() {
	dna := input()
	scheduleJobs(dna)
	for i := range jobs {
		// Receiving in declaration order keeps output deterministic even
		// though the jobs themselves finish in arbitrary order.
		fmt.Println(<-jobs[i].result)
	}
}
// scheduleJobs starts one worker goroutine per CPU and feeds them job indices
// over a buffered channel. Indices are queued in reverse declaration order
// (the "longest job first" heuristic) so the most expensive reports start
// early; results are still printed in declaration order by main.
func scheduleJobs(dna seqBits) {
	command := make(chan int, len(jobs))
	for i := runtime.NumCPU(); i > 0; i-- {
		go worker(dna, command)
	}
	for i := range jobs {
		// longest job first, shortest job last
		command <- len(jobs) - 1 - i
	}
	// Closing the channel lets each worker's range loop terminate once the
	// queue drains.
	close(command)
}
// worker executes job indices received on command until the channel is
// closed. All workers share dna, which the report functions only read.
func worker(dna seqBits, command <-chan int) {
	for k := range command {
		jobs[k].run(dna)
	}
}
// input reads the third FASTA sequence (header ">THREE") from stdin and
// converts it, in place, to its 2-bit representation.
func input() (data seqBits) {
	return readSequence(">THREE").toBits()
}
// readSequence skips stdin forward to the FASTA header starting with prefix
// and returns the sequence that follows it with newlines stripped. Reading
// stops at EOF, at an empty line, or at the next header ('>') line.
func readSequence(prefix string) (data seqChars) {
	in, lineCount := findSequence(prefix)
	// Capacity hint: lines seen so far times 61 (assumes ~60-character
	// sequence lines plus newline — TODO confirm against the input format).
	data = make(seqChars, 0, lineCount*61)
	for {
		line, err := in.ReadSlice('\n')
		if len(line) <= 1 || line[0] == '>' {
			break
		}
		// Trim the trailing newline if present (the last line may lack one).
		last := len(line) - 1
		if line[last] == '\n' {
			line = line[0:last]
		}
		data = append(data, seqChars(line)...)
		if err != nil {
			// EOF (or any read error) after consuming the final chunk.
			break
		}
	}
	return
}
// findSequence reads stdin line by line until it reaches a FASTA header (a
// line starting with '>') that begins with prefix. It returns the buffered
// reader positioned just after that header, along with the number of lines
// consumed (used by the caller as a capacity hint).
func findSequence(prefix string) (in *bufio.Reader, lineCount int) {
	pfx := []byte(prefix)
	in = bufio.NewReaderSize(os.Stdin, 1<<20)
	for {
		line, err := in.ReadSlice('\n')
		if err != nil {
			// The header must exist before EOF; anything else is fatal.
			panic("read error")
		}
		lineCount++
		if line[0] == '>' && bytes.HasPrefix(line, pfx) {
			break
		}
	}
	return
}
// seqCount is one row of a frequency report: a decoded sequence and how many
// times it occurred.
type seqCount struct {
	seq   seqString
	count counter
}

// seqCounts implements sort.Interface over report rows.
type seqCounts []seqCount

func (ss seqCounts) Len() int      { return len(ss) }
func (ss seqCounts) Swap(i, j int) { ss[i], ss[j] = ss[j], ss[i] }

// Less orders rows by descending count; ties are broken by the
// lexicographically greater sequence text first.
func (ss seqCounts) Less(i, j int) bool {
	a, b := ss[i], ss[j]
	if a.count != b.count {
		return a.count > b.count
	}
	return a.seq > b.seq
}
// frequencyReport renders the percentage frequency of every substring of the
// given length, one "SEQ xx.xxx" line per substring, sorted most frequent
// first (ties ordered by descending sequence text).
func frequencyReport(dna seqBits, length int) string {
	counts := dna.count32(length)
	sortedSeqs := make(seqCounts, 0, len(counts))
	for num, pointer := range counts {
		sortedSeqs = append(
			sortedSeqs,
			seqCount{num.seqString(length), *pointer},
		)
	}
	sort.Sort(sortedSeqs)
	var buf bytes.Buffer
	// Each output line holds the sequence plus " xx.xxx\n" (about 8 bytes).
	buf.Grow((8 + length) * len(sortedSeqs))
	// Scale counts to percentages of the number of windows of this length.
	var scale float32 = 100.0 / float32(len(dna)-length+1)
	for _, sequence := range sortedSeqs {
		buf.WriteString(fmt.Sprintf(
			"%v %.3f\n", sequence.seq,
			float32(sequence.count)*scale),
		)
	}
	return buf.String()
}
// sequenceReport renders "<count>\t<sequence>" for one specific sequence,
// using the 32-bit packing when it fits (length <= 16) and the 64-bit
// packing otherwise.
func sequenceReport(dna seqBits, sequence seqString) string {
	var pointer *counter
	seq := sequence.seqBits()
	if len(sequence) <= 16 {
		counts := dna.count32(len(sequence))
		pointer = counts[seq.seq32()]
	} else {
		counts := dna.count64(len(sequence))
		pointer = counts[seq.seq64()]
	}
	// A missing map entry means the sequence never occurred: report zero.
	var sequenceCount counter
	if pointer != nil {
		sequenceCount = *pointer
	}
	return fmt.Sprintf("%v\t%v", sequenceCount, sequence)
}
package login
import (
"context"
"testing"
"github.com/ory/x/assertx"
"github.com/ory/kratos/ui/container"
"github.com/bxcodec/faker/v3"
"github.com/gofrs/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ory/kratos/selfservice/flow"
"github.com/ory/kratos/x"
)
type (
	// FlowPersister persists login flows.
	FlowPersister interface {
		// UpdateLoginFlow updates an existing login flow.
		UpdateLoginFlow(context.Context, *Flow) error
		// CreateLoginFlow stores a new login flow, assigning an ID if unset.
		CreateLoginFlow(context.Context, *Flow) error
		// GetLoginFlow looks up a login flow by its ID.
		GetLoginFlow(context.Context, uuid.UUID) (*Flow, error)
		// ForceLoginFlow forces the login flow with the given ID
		// (exact semantics are defined by the implementation — see its docs).
		ForceLoginFlow(ctx context.Context, id uuid.UUID) error
	}
	// FlowPersistenceProvider provides access to a FlowPersister.
	FlowPersistenceProvider interface {
		LoginFlowPersister() FlowPersister
	}
)
// TestFlowPersister returns a conformance test suite that any FlowPersister
// implementation must pass; callers run the returned function with their own
// concrete persister.
func TestFlowPersister(ctx context.Context, p FlowPersister) func(t *testing.T) {
	// clearids zeroes a flow's generated ID so the persister must assign one.
	var clearids = func(r *Flow) {
		r.ID = uuid.UUID{}
	}
	return func(t *testing.T) {
		t.Run("case=should error when the login flow does not exist", func(t *testing.T) {
			_, err := p.GetLoginFlow(ctx, x.NewUUID())
			require.Error(t, err)
		})
		// newFlow fabricates a randomized flow without an ID.
		var newFlow = func(t *testing.T) *Flow {
			var r Flow
			require.NoError(t, faker.FakeData(&r))
			clearids(&r)
			return &r
		}
		t.Run("case=should create with set ids", func(t *testing.T) {
			var r Flow
			require.NoError(t, faker.FakeData(&r))
			require.NoError(t, p.CreateLoginFlow(ctx, &r))
		})
		t.Run("case=should create a new login flow and properly set IDs", func(t *testing.T) {
			r := newFlow(t)
			err := p.CreateLoginFlow(ctx, r)
			require.NoError(t, err, "%#v", err)
			assert.NotEqual(t, uuid.Nil, r.ID)
		})
		t.Run("case=should create and fetch a login flow", func(t *testing.T) {
			expected := newFlow(t)
			err := p.CreateLoginFlow(ctx, expected)
			require.NoError(t, err)
			actual, err := p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.EqualValues(t, expected.ID, actual.ID)
			x.AssertEqualTime(t, expected.IssuedAt, actual.IssuedAt)
			x.AssertEqualTime(t, expected.ExpiresAt, actual.ExpiresAt)
			assert.EqualValues(t, expected.RequestURL, actual.RequestURL)
			assert.EqualValues(t, expected.Active, actual.Active)
			assertx.EqualAsJSON(t, expected.UI, actual.UI, "expected:\t%s\nactual:\t%s", expected.UI, actual.UI)
		})
		// Covers both persisting the initial flow type and updating it.
		t.Run("case=should properly set the flow type", func(t *testing.T) {
			expected := newFlow(t)
			expected.Forced = true
			expected.Type = flow.TypeAPI
			expected.UI = container.New("ory-sh")
			err := p.CreateLoginFlow(ctx, expected)
			require.NoError(t, err)
			actual, err := p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.Equal(t, flow.TypeAPI, actual.Type)
			actual.UI = container.New("not-ory-sh")
			actual.Type = flow.TypeBrowser
			actual.Forced = true
			require.NoError(t, p.UpdateLoginFlow(ctx, actual))
			actual, err = p.GetLoginFlow(ctx, actual.ID)
			require.NoError(t, err)
			assert.Equal(t, flow.TypeBrowser, actual.Type)
			assert.True(t, actual.Forced)
			assert.Equal(t, "not-ory-sh", actual.UI.Action)
		})
		// A fetch-then-update round trip with no changes must not lose data.
		t.Run("case=should not cause data loss when updating a request without changes", func(t *testing.T) {
			expected := newFlow(t)
			err := p.CreateLoginFlow(ctx, expected)
			require.NoError(t, err)
			actual, err := p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			require.NoError(t, p.UpdateLoginFlow(ctx, actual))
			actual, err = p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			assertx.EqualAsJSON(t, expected.UI, actual.UI)
		})
	}
}
package luhn
// Utility functions for generating valid luhn strings and validating against the Luhn algorithm
import (
"math/rand"
"strconv"
"strings"
"time"
)
// Valid reports whether luhnString passes the Luhn checksum: the weighted
// digit sum (with the rightmost digit undoubled) must be divisible by 10.
func Valid(luhnString string) bool {
	return calculateChecksum(luhnString, false)%10 == 0
}
// Generate creates and returns a string of the given total length that is
// valid according to the Luhn algorithm: size-1 random digits followed by
// the computed check digit.
func Generate(size int) string {
	body := randomString(size - 1)
	return body + strconv.Itoa(generateControlDigit(body))
}
// GenerateWithPrefix creates and returns a Luhn-valid string of the given
// total length that starts with prefix: the prefix, then random digits, then
// the computed check digit.
//
// NOTE(review): a prefix longer than size-1 would make the random part's
// length negative; callers are assumed to pass a shorter prefix — confirm.
func GenerateWithPrefix(size int, prefix string) string {
	body := prefix + randomString(size-1-len(prefix))
	return body + strconv.Itoa(generateControlDigit(body))
}
// randomString returns size decimal digits chosen at random.
//
// Fix: the previous implementation used rand.Intn(9), which yields 0..8 and
// could therefore never produce the digit 9. It also built an []int and
// joined it; writing digits directly into a strings.Builder avoids the
// intermediate allocations.
func randomString(size int) string {
	// NOTE(review): reseeding on every call is redundant and can repeat
	// output for calls within the same nanosecond; kept to preserve the
	// existing behaviour.
	rand.Seed(time.Now().UTC().UnixNano())
	var b strings.Builder
	b.Grow(size)
	for i := 0; i < size; i++ {
		b.WriteByte(byte('0' + rand.Intn(10)))
	}
	return b.String()
}
// generateControlDigit computes the Luhn check digit that, appended to
// luhnString, makes the whole string valid. The checksum is computed with
// the rightmost existing digit doubled, since the check digit will occupy
// the undoubled final position.
func generateControlDigit(luhnString string) int {
	remainder := calculateChecksum(luhnString, true) % 10
	if remainder == 0 {
		return 0
	}
	return 10 - remainder
}
// calculateChecksum computes the Luhn weighted digit sum of luhnString,
// walking from the rightmost character leftwards. When double is true the
// rightmost digit is doubled (and alternate digits after it); doubled digits
// greater than 9 have 9 subtracted, per the Luhn algorithm.
//
// The input is expected to be ASCII digits. Mirroring the previous
// implementation (which silently ignored strconv.ParseInt errors), any
// non-digit byte contributes zero to the sum.
//
// Fix: avoids the per-character strings.Split allocation and ParseInt calls
// by reading digit bytes directly.
func calculateChecksum(luhnString string, double bool) int {
	checksum := 0
	for i := len(luhnString) - 1; i >= 0; i-- {
		n := int(luhnString[i]) - '0'
		if n < 0 || n > 9 {
			n = 0 // non-digit: same as the old ignored-parse-error path
		}
		if double {
			n *= 2
		}
		double = !double
		if n >= 10 {
			n -= 9
		}
		checksum += n
	}
	return checksum
}
// integersToString concatenates the decimal representations of the given
// integers into a single string.
func integersToString(integers []int) string {
	var b strings.Builder
	for _, number := range integers {
		b.WriteString(strconv.Itoa(number))
	}
	return b.String()
}
package astilibav
import (
"github.com/asticode/go-astikit"
"github.com/asticode/goav/avutil"
)
// FrameRestamper represents an object capable of restamping frames
type FrameRestamper interface {
Restamp(f *avutil.Frame)
}
type frameRestamperWithValue struct {
lastValue *int64
}
func newFrameRestamperWithValue() *frameRestamperWithValue {
return &frameRestamperWithValue{}
}
func (r *frameRestamperWithValue) restamp(f *avutil.Frame, fn func(v *int64) *int64) {
// Compute new value
v := fn(r.lastValue)
// Restamp
f.SetPts(*v)
// Store new value
r.lastValue = v
}
type frameRestamperWithFrameDuration struct {
*frameRestamperWithValue
frameDuration int64
}
// NewFrameRestamperWithFrameDuration creates a new frame restamper that starts timestamps from 0 and increments them
// of frameDuration
// frameDuration must be a duration in frame time base
func NewFrameRestamperWithFrameDuration(frameDuration int64) FrameRestamper {
return &frameRestamperWithFrameDuration{
frameRestamperWithValue: newFrameRestamperWithValue(),
frameDuration: frameDuration,
}
}
// Restamp implements the FrameRestamper interface
func (r *frameRestamperWithFrameDuration) Restamp(f *avutil.Frame) {
r.restamp(f, func(v *int64) *int64 {
if v != nil {
return astikit.Int64Ptr(*v + r.frameDuration)
}
return astikit.Int64Ptr(0)
})
}
type frameRestamperWithModulo struct {
*frameRestamperWithValue
frameDuration int64
lastRealValue int64
}
// NewFrameRestamperWithModulo creates a new frame restamper that makes sure that PTS % frame duration = 0
// frameDuration must be a duration in frame time base
func NewFrameRestamperWithModulo(frameDuration int64) FrameRestamper {
return &frameRestamperWithModulo{
frameRestamperWithValue: newFrameRestamperWithValue(),
frameDuration: frameDuration,
}
}
// Restamp implements the FrameRestamper interface
func (r *frameRestamperWithModulo) Restamp(f *avutil.Frame) {
r.restamp(f, func(v *int64) *int64 {
defer func() { r.lastRealValue = f.Pts() }()
if v != nil {
nv := astikit.Int64Ptr(f.Pts() - (f.Pts() % r.frameDuration))
if *nv <= *v {
nv = astikit.Int64Ptr(*v + r.frameDuration)
}
return nv
}
return astikit.Int64Ptr(f.Pts() - (f.Pts() % r.frameDuration))
})
} | libav/frame_restamper.go | 0.713731 | 0.411466 | frame_restamper.go | starcoder |
package iterator
import "github.com/tsingson/gonum/graph"
// OrderedLines implements the graph.Lines and graph.LineSlicer interfaces.
// The iteration order of OrderedLines is the order of lines passed to
// NewOrderedLines.
//
// idx is -1 before the first Next, the index of the current line while
// iterating, and len(lines) once the iterator is exhausted.
type OrderedLines struct {
	idx   int
	lines []graph.Line
}
// NewOrderedLines returns an OrderedLines initialized with the provided lines.
func NewOrderedLines(lines []graph.Line) *OrderedLines {
	return &OrderedLines{idx: -1, lines: lines}
}
// Len returns the remaining number of lines to be iterated over. The count
// includes the current line, so idx <= 0 (fresh or at the first element)
// yields the full length.
func (e *OrderedLines) Len() int {
	if e.idx >= len(e.lines) {
		return 0
	}
	if e.idx <= 0 {
		return len(e.lines)
	}
	return len(e.lines[e.idx:])
}
// Next returns whether the next call of Line will return a valid line.
func (e *OrderedLines) Next() bool {
	// The uint conversion makes idx == -1 wrap so that +1 yields 0, letting
	// one comparison admit the first element and reject exhaustion.
	if uint(e.idx)+1 < uint(len(e.lines)) {
		e.idx++
		return true
	}
	e.idx = len(e.lines)
	return false
}
// Line returns the current line of the iterator. Next must have been
// called prior to a call to Line; outside the valid window it returns nil.
func (e *OrderedLines) Line() graph.Line {
	if e.idx >= len(e.lines) || e.idx < 0 {
		return nil
	}
	return e.lines[e.idx]
}
// LineSlice returns all the remaining lines in the iterator and advances
// the iterator to its exhausted state.
func (e *OrderedLines) LineSlice() []graph.Line {
	if e.idx >= len(e.lines) {
		return nil
	}
	idx := e.idx
	if idx == -1 {
		// Not yet started: the remainder is the whole slice.
		idx = 0
	}
	e.idx = len(e.lines)
	return e.lines[idx:]
}
// Reset returns the iterator to its initial (before-first) state.
func (e *OrderedLines) Reset() {
	e.idx = -1
}
// OrderedWeightedLines implements the graph.Lines and graph.LineSlicer interfaces.
// The iteration order of OrderedWeightedLines is the order of lines passed to
// NewOrderedWeightedLines.
//
// idx is -1 before the first Next, the index of the current line while
// iterating, and len(lines) once the iterator is exhausted.
type OrderedWeightedLines struct {
	idx   int
	lines []graph.WeightedLine
}
// NewOrderedWeightedLines returns an OrderedWeightedLines initialized with the
// provided lines.
func NewOrderedWeightedLines(lines []graph.WeightedLine) *OrderedWeightedLines {
	return &OrderedWeightedLines{idx: -1, lines: lines}
}
// Len returns the remaining number of lines to be iterated over. The count
// includes the current line, so idx <= 0 (fresh or at the first element)
// yields the full length.
func (e *OrderedWeightedLines) Len() int {
	if e.idx >= len(e.lines) {
		return 0
	}
	if e.idx <= 0 {
		return len(e.lines)
	}
	return len(e.lines[e.idx:])
}
// Next returns whether the next call of WeightedLine will return a valid line.
func (e *OrderedWeightedLines) Next() bool {
	// The uint conversion makes idx == -1 wrap so that +1 yields 0, letting
	// one comparison admit the first element and reject exhaustion.
	if uint(e.idx)+1 < uint(len(e.lines)) {
		e.idx++
		return true
	}
	e.idx = len(e.lines)
	return false
}
// WeightedLine returns the current line of the iterator. Next must have been
// called prior to a call to WeightedLine; outside the valid window it returns nil.
func (e *OrderedWeightedLines) WeightedLine() graph.WeightedLine {
	if e.idx >= len(e.lines) || e.idx < 0 {
		return nil
	}
	return e.lines[e.idx]
}
// WeightedLineSlice returns all the remaining lines in the iterator and
// advances the iterator to its exhausted state.
func (e *OrderedWeightedLines) WeightedLineSlice() []graph.WeightedLine {
	if e.idx >= len(e.lines) {
		return nil
	}
	idx := e.idx
	if idx == -1 {
		// Not yet started: the remainder is the whole slice.
		idx = 0
	}
	e.idx = len(e.lines)
	return e.lines[idx:]
}
// Reset returns the iterator to its initial (before-first) state.
func (e *OrderedWeightedLines) Reset() {
	e.idx = -1
}
package linear
import (
"fmt"
"math"
"time"
"github.com/m3db/m3/src/query/executor/transform"
)
// Date function type identifiers accepted by NewDateOp.
const (
	// DayOfMonthType returns the day of the month for each of the given times in UTC.
	// Returned values are from 1 to 31.
	DayOfMonthType = "day_of_month"
	// DayOfWeekType returns the day of the week for each of the given times in UTC.
	// Returned values are from 0 to 6, where 0 means Sunday etc.
	DayOfWeekType = "day_of_week"
	// DaysInMonthType returns number of days in the month for each of the given times in UTC.
	// Returned values are from 28 to 31.
	DaysInMonthType = "days_in_month"
	// HourType returns the hour of the day for each of the given times in UTC.
	// Returned values are from 0 to 23.
	HourType = "hour"
	// MinuteType returns the minute of the hour for each of the given times in UTC.
	// Returned values are from 0 to 59.
	MinuteType = "minute"
	// MonthType returns the month of the year for each of the given times in UTC.
	// Returned values are from 1 to 12, where 1 means January etc.
	MonthType = "month"
	// YearType returns the year for each of the given times in UTC.
	YearType = "year"
)
var (
	// datetimeFuncs maps each date function type to the extractor that
	// derives the corresponding component from a UTC timestamp.
	datetimeFuncs = map[string]func(time.Time) float64{
		DayOfMonthType: func(t time.Time) float64 { return float64(t.Day()) },
		DayOfWeekType:  func(t time.Time) float64 { return float64(t.Weekday()) },
		DaysInMonthType: func(t time.Time) float64 {
			// Day 32 normalizes into the following month; 32 minus the
			// normalized day-of-month is the number of days in t's month.
			return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day())
		},
		HourType:   func(t time.Time) float64 { return float64(t.Hour()) },
		MinuteType: func(t time.Time) float64 { return float64(t.Minute()) },
		MonthType:  func(t time.Time) float64 { return float64(t.Month()) },
		YearType:   func(t time.Time) float64 { return float64(t.Year()) },
	}
)
// NewDateOp creates a new date op based on the type
func NewDateOp(optype string) (BaseOp, error) {
if _, ok := datetimeFuncs[optype]; !ok {
return emptyOp, fmt.Errorf("unknown date type: %s", optype)
}
return BaseOp{
operatorType: optype,
processorFn: newDateNode,
}, nil
}
// newDateNode builds the processor for a date op, binding the extractor
// registered for the op's operator type.
func newDateNode(op BaseOp, controller *transform.Controller) Processor {
	return &dateNode{
		op:         op,
		controller: controller,
		dateFn:     datetimeFuncs[op.operatorType],
	}
}
type dateNode struct {
op BaseOp
dateFn func(t time.Time) float64
controller *transform.Controller
}
func (d *dateNode) Process(values []float64) []float64 {
for i := range values {
if math.IsNaN(values[i]) {
values[i] = math.NaN()
continue
}
t := time.Unix(int64(values[i]), 0).UTC()
values[i] = d.dateFn(t)
}
return values
} | src/query/functions/linear/datetime.go | 0.812198 | 0.783782 | datetime.go | starcoder |
package semt
import (
"encoding/xml"
"github.com/fairxio/finance-messaging/iso20022"
)
// Document04100102 is the XML document wrapper for the semt.041.001.02
// (SecuritiesBalanceTransparencyReport) message.
type Document04100102 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:semt.041.001.02 Document"`
	Message *SecuritiesBalanceTransparencyReportV02 `xml:"SctiesBalTrnsprncyRpt"`
}
// AddMessage allocates the document's message payload and returns it so the
// caller can populate it.
func (d *Document04100102) AddMessage() *SecuritiesBalanceTransparencyReportV02 {
	d.Message = new(SecuritiesBalanceTransparencyReportV02)
	return d.Message
}
// Scope
// The SecuritiesBalanceTransparencyReport message is sent by an account servicer, such as a custodian, central securities depository or international central securities depository, to the account owner to provide holdings information for the accounts that it services, to disclose underlying details of holdings on an omnibus account that the sender owns or operates at the receiver. The receiver may also be a custodian, central securities depository, international central securities depository, and the ultimate receiver may be a registrar, transfer agent, fund company, official agent of the reported instrument(s) and/or other parties.
// The SecuritiesBalanceTransparencyReport message provides transparency of holdings through layers of custody chains in a consolidated statement, to allow for an efficient gathering of investor data, which, in turn, may be used to measure marketing effectiveness, validation of compliance with prospectuses and regulatory requirements, and the calculation of trailer fees and other retrocessions.
// Usage
// The SecuritiesBalanceTransparencyReport message is used to provide aggregated holdings information and a breakdown of holdings information.
// A sender of the SecuritiesBalanceTransparencyReport message will identify its own safekeeping account (for example, an omnibus account in the ledger of the receiver) and holdings information at the level of account(s) for which the sender is the account servicer (that is, in the ledger of the sender). When relevant, the sender will aggregate its holdings information with holdings information of one or more sub levels and sub-sub levels of accounts, that is, with holdings information the sender has received from the owner(s) of the account(s) for which the sender is the account servicer.
// A sender of the SecuritiesBalanceTransparencyReport message may also use it to send statements to its account owning customers, and these can be enrichments of statements that the respective account owners have previously provided to the sender.
// Ultimately, the statement reaches the relevant fund company, for example, the transfer agent, that may use it for obtaining information about the custodians, distributors and commercial agreement references associated with holdings on an omnibus account at the ultimate place of safekeeping, for example, a central securities depository (CSD) or a register of shareholders.
// When the message is sent by the owner of the account specified in SafekeepingAccountAndHoldings/AccountIdentification, the message will disclose holding details of the underlying owner(s) of the sender’s holdings with the receiver. This direction is commonly referred to as ‘downstream’.
// When the sender is the account servicer of an account owned by the receiver, for example, the account in AccountSubLevel1/AccountIdentification or AccountSubLevel2/AccountIdentification, the message is providing a statement of the receiver’s holdings with sender. This direction is commonly referred to as ‘upstream’, and the safekeeping account should identify the ultimate place of safekeeping (for example, an account in a transfer agent's register of shareholders).
type SecuritiesBalanceTransparencyReportV02 struct {
	// Unique and unambiguous identification of the message. When the report has multiple pages, one message equals one page. Therefore, the MessageIdentification uniquely identifies the page.
	MessageIdentification *iso20022.MessageIdentification1 `xml:"MsgId"`
	// Identification of the party that is the sender of the message.
	SenderIdentification *iso20022.PartyIdentification100 `xml:"SndrId"`
	// Identification of the party that is the receiver of the message.
	ReceiverIdentification *iso20022.PartyIdentification100 `xml:"RcvrId,omitempty"`
	// Page number of the message (within a statement) and continuation indicator to indicate that the statement is to continue or that the message is the last page of the statement.
	Pagination *iso20022.Pagination `xml:"Pgntn"`
	// Provides general information on the statement.
	StatementGeneralDetails *iso20022.Statement59 `xml:"StmtGnlDtls"`
	// Details of the account, account sub-levels and the holdings.
	SafekeepingAccountAndHoldings []*iso20022.SafekeepingAccount7 `xml:"SfkpgAcctAndHldgs,omitempty"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddMessageIdentification allocates the MessageIdentification element and returns it for population.
func (s *SecuritiesBalanceTransparencyReportV02) AddMessageIdentification() *iso20022.MessageIdentification1 {
	s.MessageIdentification = new(iso20022.MessageIdentification1)
	return s.MessageIdentification
}
// AddSenderIdentification allocates the SenderIdentification element and returns it for population.
func (s *SecuritiesBalanceTransparencyReportV02) AddSenderIdentification() *iso20022.PartyIdentification100 {
	s.SenderIdentification = new(iso20022.PartyIdentification100)
	return s.SenderIdentification
}
// AddReceiverIdentification allocates the optional ReceiverIdentification element and returns it for population.
func (s *SecuritiesBalanceTransparencyReportV02) AddReceiverIdentification() *iso20022.PartyIdentification100 {
	s.ReceiverIdentification = new(iso20022.PartyIdentification100)
	return s.ReceiverIdentification
}
// AddPagination allocates the Pagination element and returns it for population.
func (s *SecuritiesBalanceTransparencyReportV02) AddPagination() *iso20022.Pagination {
	s.Pagination = new(iso20022.Pagination)
	return s.Pagination
}
// AddStatementGeneralDetails allocates the StatementGeneralDetails element and returns it for population.
func (s *SecuritiesBalanceTransparencyReportV02) AddStatementGeneralDetails() *iso20022.Statement59 {
	s.StatementGeneralDetails = new(iso20022.Statement59)
	return s.StatementGeneralDetails
}
// AddSafekeepingAccountAndHoldings appends a new, empty entry to the repeating
// SafekeepingAccountAndHoldings sequence and returns it for population.
func (s *SecuritiesBalanceTransparencyReportV02) AddSafekeepingAccountAndHoldings() *iso20022.SafekeepingAccount7 {
	newValue := new(iso20022.SafekeepingAccount7)
	s.SafekeepingAccountAndHoldings = append(s.SafekeepingAccountAndHoldings, newValue)
	return newValue
}
// AddSupplementaryData appends a new, empty entry to the repeating
// SupplementaryData sequence and returns it for population.
func (s *SecuritiesBalanceTransparencyReportV02) AddSupplementaryData() *iso20022.SupplementaryData1 {
	newValue := new(iso20022.SupplementaryData1)
	s.SupplementaryData = append(s.SupplementaryData, newValue)
	return newValue
}
package to
import "github.com/MaxSlyugrov/cldr"
// calendar holds the CLDR date/time formatting data for this locale (Tongan, "to"):
// date/time skeleton patterns plus month, weekday, and day-period names.
// NOTE(review): generated-style locale data — values should mirror CLDR; fix
// discrepancies upstream rather than editing by hand.
var calendar = cldr.Calendar{
Formats: cldr.CalendarFormats{
Date: cldr.CalendarDateFormat{Full: "EEEE d MMMM y", Long: "d MMMM y", Medium: "d MMM y", Short: "d/M/yy"},
Time: cldr.CalendarDateFormat{Full: "h:mm:ss a zzzz", Long: "h:mm:ss a z", Medium: "h:mm:ss a", Short: "h:mm a"},
DateTime: cldr.CalendarDateFormat{Full: "{1}, {0}", Long: "{1}, {0}", Medium: "{1}, {0}", Short: "{1} {0}"},
},
FormatNames: cldr.CalendarFormatNames{
Months: cldr.CalendarMonthFormatNames{
Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "Sān", Feb: "Fēp", Mar: "Maʻa", Apr: "ʻEpe", May: "Mē", Jun: "Sun", Jul: "Siu", Aug: "ʻAok", Sep: "Sep", Oct: "ʻOka", Nov: "Nōv", Dec: "Tīs"},
Narrow: cldr.CalendarMonthFormatNameValue{Jan: "S", Feb: "F", Mar: "M", Apr: "E", May: "M", Jun: "S", Jul: "S", Aug: "A", Sep: "S", Oct: "O", Nov: "N", Dec: "T"},
Short: cldr.CalendarMonthFormatNameValue{},
Wide: cldr.CalendarMonthFormatNameValue{Jan: "Sānuali", Feb: "Fēpueli", Mar: "Maʻasi", Apr: "ʻEpeleli", May: "Mē", Jun: "Sune", Jul: "Siulai", Aug: "ʻAokosi", Sep: "Sepitema", Oct: "ʻOkatopa", Nov: "Nōvema", Dec: "Tīsema"},
},
Days: cldr.CalendarDayFormatNames{
Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "Sāp", Mon: "Mōn", Tue: "Tūs", Wed: "Pul", Thu: "Tuʻa", Fri: "Fal", Sat: "Tok"},
Narrow: cldr.CalendarDayFormatNameValue{Sun: "S", Mon: "M", Tue: "T", Wed: "P", Thu: "T", Fri: "F", Sat: "T"},
Short: cldr.CalendarDayFormatNameValue{Sun: "Sāp", Mon: "Mōn", Tue: "Tūs", Wed: "Pul", Thu: "Tuʻa", Fri: "Fal", Sat: "Tok"},
Wide: cldr.CalendarDayFormatNameValue{Sun: "Sāpate", Mon: "Mōnite", Tue: "Tūsite", Wed: "Pulelulu", Thu: "Tuʻapulelulu", Fri: "Falaite", Sat: "Tokonaki"},
},
Periods: cldr.CalendarPeriodFormatNames{
Abbreviated: cldr.CalendarPeriodFormatNameValue{},
Narrow: cldr.CalendarPeriodFormatNameValue{AM: "AM", PM: "PM"},
Short: cldr.CalendarPeriodFormatNameValue{},
Wide: cldr.CalendarPeriodFormatNameValue{AM: "AM", PM: "PM"},
},
},
} | resources/locales/to/calendar.go | 0.516352 | 0.430925 | calendar.go | starcoder
// Test concurrency primitives: power series.
package ps
// ===========================================================================
// Specific power series
/*
--- https://en.wikipedia.org/wiki/Formal_power_series
1 / (1-x) <=> a(n) = 1
1 / (1+x) <=> a(n) = (-1)^n
x / (1-x)^2 <=> a(n) = n
--- e^x
U := a0 a1 a2 ...
e^x = c0 c1 c2 ...
- ci = 1 / i!
--- Sinus
U := a0 a1 a2 ...
Sin(U) = c0 c1 c2 ...
- c2i = 0
- c2i+1 = (-1)^i / (2i+1)!
--- Cosinus
U := a0 a1 a2 ...
Cos(U) = c0 c1 c2 ...
- c2i = (-1)^i / (2i)!
- c2i+1 = 0
--- Power-Exponentiation
U := a0 a1 a2 ...
U^n = c0 c1 c2 ...
- c0 = a0^n
- ci = 1/(i*a0) * sum k=1...i[ ( k*i - i + k ) * ak * ci-k ]
--- Division
U := a0 a1 a2 ...
V := b0 b1 b2 ...
U/V = c0 c1 c2 ...
- c0 = a0 / b0
- ci = (1/b0) * ( ai - sum k=1...i[ bk * ci-k ] )
--- Exponential
U := a0 a1 a2 ...
Exp(U) = c0 c1 c2 ...
- c0 = 1
- ci = 1 / i!
--- Composition
Subst == Composition
--- https://en.wikipedia.org/wiki/Formal_power_series#The_Lagrange_inversion_formula
*/
// ===========================================================================
// Ones are 1 1 1 1 1 ... = `1/(1-x)` with a simple pole at `x=1`.
func Ones() PS {
	return AdInfinitum(NewCoefficient(1, 1))
}

// Twos are 2 2 2 2 2 ... just for samples.
func Twos() PS {
	return AdInfinitum(NewCoefficient(2, 1))
}

// AdInfinitum emits the coefficient `c` forever,
// i.e. the series `c + c·x + c·x² + …`.
func AdInfinitum(c Coefficient) PS {
	out := New()
	go func(out PS, coeff Coefficient) {
		// Keep sending until the consumer signals it wants no more values.
		for out.Put(coeff) {
		}
	}(out, c)
	return out
}
// ===========================================================================
// Factorials starting from zero: 1, 1, 2, 6, 24, 120, 720, 5040 ...
func Factorials() PS {
	out := New()
	go func(out PS) {
		acc := aOne()
		// After emitting the current factorial, multiply the accumulator
		// by the loop counter to advance to the next one.
		for n := 1; out.Put(acc); n++ {
			acc = acc.Mul(acc, ratIby1(n))
		}
	}(out)
	return out
}

// OneByFactorial starting from zero: 1/1, 1/1, 1/2, 1/6, 1/24 ...
func OneByFactorial() PS {
	out := New()
	go func(out PS) {
		acc := aOne()
		// Emit the reciprocal of the running factorial each round.
		for n := 1; out.Put(cInv()(acc)); n++ {
			acc = acc.Mul(acc, ratIby1(n))
		}
	}(out)
	return out
}
// Fibonaccis: 1, 1, 2, 3, 5, 8, 13, 21, 34, 55 ...
// Seeded with (0, 1); each emitted coefficient is the sum of the previous two.
func Fibonaccis() PS {
	out := New()
	go func(out PS) {
		a, b := aZero(), aOne()
		for out.Put(b) {
			a, b = b, cAdd(b)(a)
		}
	}(out)
	return out
}

// OneByFibonacci: reciprocals of the Fibonacci numbers,
// 1/1, 1/1, 1/2, 1/3, 1/5, 1/8 ...
func OneByFibonacci() PS {
	out := New()
	go func(out PS) {
		a, b := aZero(), aOne()
		for out.Put(cInv()(b)) {
			a, b = b, cAdd(b)(a)
		}
	}(out)
	return out
}
// Harmonics: the partial sums 1, 1+1/2, 1+1/2+1/3, 1+1/2+1/3+1/4 ...
// Equals `1/(1-x) * ln( 1/(1-x) )`.
func Harmonics() PS {
	out := New()
	go func(out PS) {
		sum := aOne()
		// Each round adds the next reciprocal to the running partial sum.
		for n := 2; out.Put(sum); n++ {
			sum = cAdd(sum)(rat1byI(n))
		}
	}(out)
	return out
}
// Sincos returns the power series for sine and cosine (in radians).
// Both are derived from a single 1/n! series: its coefficients are split into
// two streams, and each output keeps only the terms of its own parity
// (odd-indexed for sine, even-indexed for cosine) with alternating signs,
// emitting a zero coefficient for the other parity.
func Sincos() (Sin PS, Cos PS) {
Sin = New()
Cos = New()
U := OneByFactorial()
U1, U2 := U.Split()
// f consumes one coefficient of U per iteration; when `odd` is set it
// forwards it with an alternating sign, otherwise it sends zero and the
// consumed coefficient is discarded. `odd` flips every iteration.
f := func(Z PS, U PS, odd bool) {
var minus bool
for {
if u, ok := Z.NextGetFrom(U); ok {
if odd {
if minus {
Z.Send(u.Neg(u))
} else {
Z.Send(u)
}
minus = !minus
} else {
Z.Send(aZero())
}
odd = !odd
} else {
return
}
}
}
// Sine starts with a zero constant term; cosine starts with 1/0! = 1.
go f(Sin, U1, false)
go f(Cos, U2, true)
return
}
// Sin returns the power series for sine (in radians).
func Sin() PS {
	sin, cos := Sincos()
	cos.Drop()
	return sin
}

// Cos returns the power series for cosine (in radians).
func Cos() PS {
	sin, cos := Sincos()
	sin.Drop()
	return cos
}

// Sec returns the power series for secans (in radians)
// as `1/Cos`.
func Sec() PS {
	sin, cos := Sincos()
	sin.Drop()
	return cos.Recip()
}

// CscX returns the power series for cosecans (in radians) * x
// as `1/(Sin*1/x)`.
func CscX() PS {
	sin, cos := Sincos()
	cos.Drop()
	sin.Receive() // discard sine's leading zero coefficient: divides by x
	return sin.Recip()
}

// Tan returns the power series for tangens (in radians)
// as `Sin/Cos`.
func Tan() PS {
	sin, cos := Sincos()
	return sin.Times(cos.Recip())
}

// CotX returns the power series for cotangens (in radians) * x
// as `Cos/(Sin*1/x)`.
func CotX() PS {
	sin, cos := Sincos()
	sin.Receive()
	return cos.Times(sin.Recip())
}
// =========================================================================== | series.go | 0.81457 | 0.640017 | series.go | starcoder |
package try
/*
Simplifies control flow by panicking on non-nil errors. Should be used in
conjunction with `Rec`.

If the error doesn't already have a stacktrace, one is attached via
"github.com/pkg/errors". Stacktraces are essential for such exception-like
control flow. Without them, debugging would be incredibly tedious.
*/
func To(err error) {
	if err == nil {
		return
	}
	panic(WithStack(err))
}
// ---- Scalar variants: each checks the error via To, then returns the value unchanged. ----
// A "try" function that takes and returns a value of type `interface{}`.
func Interface(val interface{}, err error) interface{} {
To(err)
return val
}
// A "try" function that takes and returns a value of type `bool`.
func Bool(val bool, err error) bool {
To(err)
return val
}
// A "try" function that takes and returns a value of type `uint8`.
func Uint8(val uint8, err error) uint8 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `uint16`.
func Uint16(val uint16, err error) uint16 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `uint32`.
func Uint32(val uint32, err error) uint32 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `uint64`.
func Uint64(val uint64, err error) uint64 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `byte`.
func Byte(val byte, err error) byte {
To(err)
return val
}
// A "try" function that takes and returns a value of type `int8`.
func Int8(val int8, err error) int8 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `int16`.
func Int16(val int16, err error) int16 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `rune`.
func Rune(val rune, err error) rune {
To(err)
return val
}
// A "try" function that takes and returns a value of type `int32`.
func Int32(val int32, err error) int32 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `int64`.
func Int64(val int64, err error) int64 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `float32`.
func Float32(val float32, err error) float32 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `float64`.
func Float64(val float64, err error) float64 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `complex64`.
func Complex64(val complex64, err error) complex64 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `complex128`.
func Complex128(val complex128, err error) complex128 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `string`.
func String(val string, err error) string {
To(err)
return val
}
// A "try" function that takes and returns a value of type `int`.
func Int(val int, err error) int {
To(err)
return val
}
// A "try" function that takes and returns a value of type `uint`.
func Uint(val uint, err error) uint {
To(err)
return val
}
// A "try" function that takes and returns a value of type `uintptr`.
func Uintptr(val uintptr, err error) uintptr {
To(err)
return val
}
// ---- Slice variants: same contract, for slices of the scalar types above. ----
// A "try" function that takes and returns a value of type `[]interface{}`.
func InterfaceSlice(val []interface{}, err error) []interface{} {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]bool`.
func BoolSlice(val []bool, err error) []bool {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]uint8`.
func Uint8Slice(val []uint8, err error) []uint8 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]uint16`.
func Uint16Slice(val []uint16, err error) []uint16 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]uint32`.
func Uint32Slice(val []uint32, err error) []uint32 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]uint64`.
func Uint64Slice(val []uint64, err error) []uint64 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]byte`.
func ByteSlice(val []byte, err error) []byte {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]int8`.
func Int8Slice(val []int8, err error) []int8 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]int16`.
func Int16Slice(val []int16, err error) []int16 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]rune`.
func RuneSlice(val []rune, err error) []rune {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]int32`.
func Int32Slice(val []int32, err error) []int32 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]int64`.
func Int64Slice(val []int64, err error) []int64 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]float32`.
func Float32Slice(val []float32, err error) []float32 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]float64`.
func Float64Slice(val []float64, err error) []float64 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]complex64`.
func Complex64Slice(val []complex64, err error) []complex64 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]complex128`.
func Complex128Slice(val []complex128, err error) []complex128 {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]string`.
func StringSlice(val []string, err error) []string {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]int`.
func IntSlice(val []int, err error) []int {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]uint`.
func UintSlice(val []uint, err error) []uint {
To(err)
return val
}
// A "try" function that takes and returns a value of type `[]uintptr`.
func UintptrSlice(val []uintptr, err error) []uintptr {
To(err)
return val
} | try_to.go | 0.717111 | 0.565959 | try_to.go | starcoder
package numerology
import (
"strconv"
"time"
)
// Days of the week for use in date searches (DateSearchOpts.Dow).
// Numbering matches time.Weekday: Sunday == 0 ... Saturday == 6, which is what
// dateSearch compares against via int(d.Weekday()).
const (
Sunday = iota
Monday
Tuesday
Wednesday
Thursday
Friday
Saturday
)
// DateNumerology stores the information required to calculate the
// numerological values of dates.
type DateNumerology struct {
// Date is either the date to use to calculate numerological values or the date
// used to start searches.
Date time.Time
// Pointers are used so that these options can be shared amongst a number of
// DateNumerology objects (see Dates, which reuses one DateOpts for all entries).
*DateOpts
*DateSearchOpts
}
// Event calculates the numerological number for a date. Unlike LifePath
// calculations, Master Numbers are always reduced. Typically used for events
// such as weddings or other special occasions.
func (d DateNumerology) Event() NumerologicalResult {
	return calculateDate(d.Date, d.MasterNumbers, false)
}

// LifePath calculates the numerological number for a date but stops the
// reduction at Master Numbers. Typically used with a date of birth.
func (d DateNumerology) LifePath() NumerologicalResult {
	return calculateDate(d.Date, d.MasterNumbers, true)
}

// Search scans forward from d.Date for dates satisfying the numerological
// criteria in opts. The returned offset is fed back into a follow-up query to
// fetch the next batch of results; an offset of 0 means there are no more.
func (d DateNumerology) Search(opts DateSearchOpts) ([]DateNumerology, int64) {
	return dateSearch(d.Date, d.MasterNumbers, &opts)
}
// NewDate is a wrapper to easily create a time.Time variable without entering hour, min, sec, nsec, loc
// since they are not important for any date calculation. Note: Golang time.Time always has a timezone.
// UTC is used, and the hour used is midday (12:00). This should help avoid situations where time.Time
// gets converted to a new time zone accidentally so the date isn't inadvertently changed.
func NewDate(year, month, day int) (newDate time.Time) {
return time.Date(year, time.Month(month), day, 12, 0, 0, 0, time.UTC)
}
// letterValuesFromNumber maps a date component into its digit breakdown.
// A Master Number is kept whole rather than being split into digits.
func letterValuesFromNumber(n int, masterNumbers []int) []letterValue {
	if isMasterNumber(n, masterNumbers) {
		return []letterValue{{Letter: strconv.Itoa(n), Value: n}}
	}
	// Split the number into its individual digits, e.g. 2020 -> 2, 0, 2, 0.
	digits := splitNumber(uint64(n))
	out := make([]letterValue, 0, len(digits))
	for _, digit := range digits {
		out = append(out, letterValue{Letter: strconv.Itoa(digit), Value: digit})
	}
	return out
}
// calculateDate produces the numerological result for a date: the year, month,
// and day are each reduced individually, their reduced values are summed, and
// the sum itself is reduced. When lifePath is true the final reduction stops
// at Master Numbers; otherwise it reduces all the way down.
func calculateDate(date time.Time, masterNumbers []int, lifePath bool) NumerologicalResult {
	components := []int{date.Year(), int(date.Month()), date.Day()}
	breakdown := make([]Breakdown, 0, len(components))
	total := 0
	for _, component := range components {
		steps := reduceNumbers(component, masterNumbers, []int{})
		part := Breakdown{
			Value:        steps[len(steps)-1],
			ReduceSteps:  steps,
			LetterValues: letterValuesFromNumber(component, masterNumbers),
		}
		breakdown = append(breakdown, part)
		total += part.Value
	}
	// Events reduce through Master Numbers; life paths stop at them.
	finalMasters := []int{}
	if lifePath {
		finalMasters = masterNumbers
	}
	steps := reduceNumbers(total, finalMasters, []int{})
	return NumerologicalResult{
		Value:       steps[len(steps)-1],
		ReduceSteps: steps,
		Breakdown:   breakdown,
	}
}
// Date wraps a date and Master Number set in a DateNumerology that can be used
// to calculate numerological values or search for significant dates.
func Date(date time.Time, masterNumbers []int) DateNumerology {
	opts := DateOpts{MasterNumbers: masterNumbers}
	return DateNumerology{Date: date, DateOpts: &opts}
}
// Dates returns a slice of DateNumerology objects that can be used to
// calculate numerological values or search for dates with numerological
// significance. All entries share a single DateOpts value.
func Dates(dates []time.Time, masterNumbers []int) (results []DateNumerology) {
	opts := DateOpts{MasterNumbers: masterNumbers}
	// Pre-size to avoid repeated slice growth; make keeps the result non-nil
	// (as before) so an empty input still serializes as [] rather than null.
	results = make([]DateNumerology, 0, len(dates))
	for _, date := range dates {
		results = append(results, DateNumerology{Date: date, DateOpts: &opts})
	}
	return results
}
// dateSearch walks forward one day at a time, starting opts.Offset days after
// startDate and ending opts.MonthsForward months after startDate, collecting
// dates whose numerological value is in opts.Match (empty Match accepts every
// value) and whose weekday is in opts.Dow (empty Dow accepts every weekday).
// Once opts.Count results have been collected, the day offset of the next
// candidate is returned so a follow-up call can resume the scan; offset 0
// means the scan completed.
// NOTE(review): if opts.Count is 0, the first matching day sets offset and
// stops the scan immediately, returning no results — confirm this is intended.
func dateSearch(startDate time.Time, masterNumbers []int, opts *DateSearchOpts) (searchResults []DateNumerology, offset int64) {
endDate := startDate.AddDate(0, opts.MonthsForward, 0)
// Adding 24h-per-day is safe here because all dates are pinned to UTC noon.
for d := startDate.Add(time.Duration(24*opts.Offset) * time.Hour); d.Before(endDate); d = d.AddDate(0, 0, 1) {
if len(opts.Dow) > 0 && !inIntSlice(int(d.Weekday()), opts.Dow) {
continue
}
calc := calculateDate(d, masterNumbers, opts.LifePath)
if inIntSlice(calc.Value, opts.Match) || len(opts.Match) == 0 {
if len(searchResults) == opts.Count {
// Report where to resume, measured in whole days from startDate.
offset = int64(d.Sub(startDate).Hours() / 24)
break
}
searchResults = append(searchResults, DateNumerology{
Date: d,
DateOpts: &DateOpts{masterNumbers},
DateSearchOpts: opts,
})
}
}
return searchResults, offset
} | numerology/calculateDates.go | 0.76869 | 0.599544 | calculateDates.go | starcoder
package iso20022
// CorporateActionRate46 is an ISO 20022 message component carrying the set of
// rates (dividend, tax, index, subscription, proration, etc.) attached to a
// corporate action. All elements are optional per the schema.
type CorporateActionRate46 struct {
// Cash dividend amount per equity before deductions or allowances have been made.
GrossDividendRate []*GrossDividendRateFormat10Choice `xml:"GrssDvddRate,omitempty"`
// Cash dividend amount per equity after deductions or allowances have been made.
NetDividendRate []*NetDividendRateFormat12Choice `xml:"NetDvddRate,omitempty"`
// Public index rate applied to the amount paid to adjust it to inflation.
IndexFactor *RateAndAmountFormat5Choice `xml:"IndxFctr,omitempty"`
// Actual interest rate used for the payment of the interest for the specified interest period.
InterestRateUsedForPayment []*InterestRateUsedForPaymentFormat2Choice `xml:"IntrstRateUsdForPmt,omitempty"`
// A maximum percentage of shares available through the over subscription privilege, usually a percentage of the basic subscription shares, for example, an account owner subscribing to 100 shares may over subscribe to a maximum of 50 additional shares when the over subscription maximum is 50 percent.
MaximumAllowedOversubscriptionRate *PercentageRate `xml:"MaxAllwdOvrsbcptRate,omitempty"`
// Proportionate allocation used for the offer.
ProrationRate *PercentageRate `xml:"PrratnRate,omitempty"`
// Percentage of the gross dividend rate on which tax must be paid .
TaxRelatedRate []*RateTypeAndAmountAndStatus6 `xml:"TaxRltdRate,omitempty"`
// Percentage of a cash distribution that will be withheld by a tax authority.
WithholdingTaxRate []*RateFormat11Choice `xml:"WhldgTaxRate,omitempty"`
// Rate used for additional tax that cannot be categorised.
AdditionalTax *RateAndAmountFormat5Choice `xml:"AddtlTax,omitempty"`
// Rate at which the income will be withheld by the jurisdiction to which the income was originally paid, for which relief at source and/or reclaim may be possible.
WithholdingOfForeignTax []*RateAndAmountFormat21Choice `xml:"WhldgOfFrgnTax,omitempty"`
// Amount included in the dividend/NAV that is identified as gains directly or indirectly derived from interest payments, for example, in the context of the EU Savings directive.
TaxableIncomePerDividendShare []*RateTypeAndAmountAndStatus11 `xml:"TaxblIncmPerDvddShr,omitempty"`
}
// The accessors below follow the generated iso20022 conventions: AddX appends
// to (or allocates) the corresponding element and returns the new value for
// population; SetX stores a primitive value via a type conversion.
func (c *CorporateActionRate46) AddGrossDividendRate() *GrossDividendRateFormat10Choice {
newValue := new(GrossDividendRateFormat10Choice)
c.GrossDividendRate = append(c.GrossDividendRate, newValue)
return newValue
}
func (c *CorporateActionRate46) AddNetDividendRate() *NetDividendRateFormat12Choice {
newValue := new(NetDividendRateFormat12Choice)
c.NetDividendRate = append(c.NetDividendRate, newValue)
return newValue
}
func (c *CorporateActionRate46) AddIndexFactor() *RateAndAmountFormat5Choice {
c.IndexFactor = new(RateAndAmountFormat5Choice)
return c.IndexFactor
}
func (c *CorporateActionRate46) AddInterestRateUsedForPayment() *InterestRateUsedForPaymentFormat2Choice {
newValue := new(InterestRateUsedForPaymentFormat2Choice)
c.InterestRateUsedForPayment = append(c.InterestRateUsedForPayment, newValue)
return newValue
}
func (c *CorporateActionRate46) SetMaximumAllowedOversubscriptionRate(value string) {
c.MaximumAllowedOversubscriptionRate = (*PercentageRate)(&value)
}
func (c *CorporateActionRate46) SetProrationRate(value string) {
c.ProrationRate = (*PercentageRate)(&value)
}
func (c *CorporateActionRate46) AddTaxRelatedRate() *RateTypeAndAmountAndStatus6 {
newValue := new(RateTypeAndAmountAndStatus6)
c.TaxRelatedRate = append(c.TaxRelatedRate, newValue)
return newValue
}
func (c *CorporateActionRate46) AddWithholdingTaxRate() *RateFormat11Choice {
newValue := new(RateFormat11Choice)
c.WithholdingTaxRate = append(c.WithholdingTaxRate, newValue)
return newValue
}
func (c *CorporateActionRate46) AddAdditionalTax() *RateAndAmountFormat5Choice {
c.AdditionalTax = new(RateAndAmountFormat5Choice)
return c.AdditionalTax
}
func (c *CorporateActionRate46) AddWithholdingOfForeignTax() *RateAndAmountFormat21Choice {
newValue := new(RateAndAmountFormat21Choice)
c.WithholdingOfForeignTax = append(c.WithholdingOfForeignTax, newValue)
return newValue
}
func (c *CorporateActionRate46) AddTaxableIncomePerDividendShare() *RateTypeAndAmountAndStatus11 {
newValue := new(RateTypeAndAmountAndStatus11)
c.TaxableIncomePerDividendShare = append(c.TaxableIncomePerDividendShare, newValue)
return newValue
} | CorporateActionRate46.go | 0.842086 | 0.608158 | CorporateActionRate46.go | starcoder
package engine
import (
"fmt"
. "github.com/tsatke/lua/internal/engine/value"
)
// cmpEqual implements the Lua `==` comparison for two runtime values and
// returns the boolean result wrapped in a single-element value slice.
func (e *Engine) cmpEqual(left, right Value) ([]Value, error) {
// Table equality: identity first, then the __eq metamethod.
if left.Type() == TypeTable && right.Type() == TypeTable {
if left == right {
// primitive equal check
return values(True), nil
}
// use metamethod if available
results, ok, err := e.binaryMetaMethodOperation("__eq", left, right)
if !ok {
if err != nil {
return nil, err
}
// NOTE(review): when both operands are tables with no __eq metamethod
// and different identities, control falls through to the switch below,
// which has no TypeTable case and so returns the "cannot compare"
// error. Standard Lua evaluates such a comparison to false — confirm
// this divergence is intended.
} else {
// The metamethod's first result decides equality by truthiness.
result := results[0]
if e.valueIsLogicallyTrue(result) {
return values(True), nil
}
return values(False), nil
}
}
// Values of different types are never equal.
if left.Type() != right.Type() {
return values(False), nil
}
switch left.Type() {
case TypeNil:
return values(Boolean(left == right)), nil
case TypeNumber:
leftNum := left.(Number).Value()
rightNum := right.(Number).Value()
return values(Boolean(leftNum == rightNum)), nil
case TypeString:
leftStr := left.(String).String()
rightStr := right.(String).String()
return values(Boolean(leftStr == rightStr)), nil
case TypeBoolean:
leftBool := left.(Boolean)
rightBool := right.(Boolean)
return values(Boolean(leftBool == rightBool)), nil
}
return nil, fmt.Errorf("cannot compare %s", left.Type())
}
// less reports whether left < right. Operands of different types are never
// ordered (false with no error); numbers and strings compare by value; any
// other type yields an error.
func (e *Engine) less(left, right Value) (bool, error) {
	if left.Type() != right.Type() {
		return false, nil
	}
	switch left.Type() {
	case TypeNumber:
		return left.(Number).Value() < right.(Number).Value(), nil
	case TypeString:
		return left.(String).String() < right.(String).String(), nil
	default:
		return false, fmt.Errorf("%s is not comparable", left.Type())
	}
}
// lessEqual reports whether left <= right. Operands of different types are
// never ordered (false with no error); numbers and strings compare by value;
// any other type yields an error.
func (e *Engine) lessEqual(left, right Value) (bool, error) {
	if left.Type() != right.Type() {
		return false, nil
	}
	switch left.Type() {
	case TypeNumber:
		return left.(Number).Value() <= right.(Number).Value(), nil
	case TypeString:
		return left.(String).String() <= right.(String).String(), nil
	default:
		return false, fmt.Errorf("%s is not comparable", left.Type())
	}
}
// equal reports primitive equality for numbers, strings, and booleans.
// Operands of different types are simply unequal; other types yield an error.
func (e *Engine) equal(left, right Value) (bool, error) {
	if left.Type() != right.Type() {
		return false, nil
	}
	switch left.Type() {
	case TypeNumber, TypeString, TypeBoolean:
		return left == right, nil
	default:
		return false, fmt.Errorf("%s can not be checked for equality", left.Type())
	}
} | internal/engine/compare.go | 0.669853 | 0.543954 | compare.go | starcoder
package pso
import (
"log"
"math"
"math/rand"
G "gorgonia.org/gorgonia"
T "gorgonia.org/tensor"
)
// Data is a single training example: an input feature vector and the
// expected output vector.
type Data struct {
Input []float64
Output []float64
}
// particle is one member of the swarm: a candidate weight assignment for the
// network together with its PSO search state.
type particle struct {
// feats is the number of input features (width of the x tensor).
feats int
// n is the neural network whose positions (weights) this particle explores.
n *nn
// velocities holds the per-position PSO velocity vector.
velocities []float64
// bestErrorLoss is the lowest loss this particle has observed so far.
bestErrorLoss float64
// bestLocalPositions is the position vector at which bestErrorLoss occurred.
bestLocalPositions []float64
}
// newParticle builds a particle around a fresh neural network and seeds its
// velocity vector uniformly at random in [-randRange, randRange].
// NOTE(review): the `outputs` parameter is currently unused — newNN is called
// with feats, hiddenSize, and bucketSize only. Confirm whether it should be
// forwarded.
func newParticle(feats, hiddenSize, outputs, bucketSize int) *particle {
n := newNN(feats, hiddenSize, bucketSize)
// Register reads so prediction and loss are observable from Go after each
// run of the tape machine.
var lossVal, predVal G.Value
G.Read(n.pred, &predVal) // read the predicted value out into something that can be accessed by Go
G.Read(n.loss, &lossVal) // read the loss value out into something that can be accessed by Go
p := particle{
feats: feats,
n: n,
bestErrorLoss: math.MaxFloat64,
}
p.bestLocalPositions = p.n.exportPositions()
p.velocities = make([]float64, len(p.bestLocalPositions))
randomizeUniform(p.velocities, randRange)
return &p
}
// Train runs particle-swarm optimisation of the network weights for the given
// number of iterations, tracking the best (lowest-loss) position seen and
// occasionally re-randomising the particle ("death") to escape local optima.
func (p *particle) Train(trainingData []Data, iterations int) {
n := p.n
trainingDataSize := len(trainingData)
xSize := len(trainingData[0].Input)
ySize := len(trainingData[0].Output)
// Flatten the training examples into row-major backing slices.
Xs := make([]float64, trainingDataSize*xSize)
Ys := make([]float64, trainingDataSize*ySize)
for i, d := range trainingData {
for j, x := range d.Input {
Xs[i*xSize+j] = x
}
for j, y := range d.Output {
Ys[i*ySize+j] = y
}
}
// training
yVal := T.New(T.WithShape(trainingDataSize), T.WithBacking(Ys)) // make Ys into a Tensor. No additional allocations
xVal := T.New(T.WithShape(trainingDataSize, p.feats), T.WithBacking(Xs)) // make Xs into a Tensor. No additional allocations are made.
G.Let(n.x, xVal)
G.Let(n.y, yVal)
m := G.NewTapeMachine(n.g)
for i := 0; i < iterations; i++ {
currentPositions := n.exportPositions()
revisedPositions := make([]float64, len(currentPositions)) //just for debugging I'm not overriding the currentPosition slice
rowCount := len(trainingData)
// NOTE(review): this input tensor is backed by T.Random(...), so every
// forward pass evaluates the loss on random data — the Xs bound to n.x
// above appear to be overridden and never used. Confirm whether the
// training inputs were meant to be passed here instead.
xTestVal := T.New(T.WithShape(rowCount, p.feats), T.WithBacking(T.Random(T.Float64, rowCount*p.feats)))
if err := n.fwd(m, xTestVal); err != nil {
log.Fatal(err)
}
m.Reset()
lossf64 := n.loss.Value().Data().(float64)
// Track this particle's personal best.
if lossf64 < p.bestErrorLoss {
copy(p.bestLocalPositions, currentPositions)
p.bestErrorLoss = lossf64
log.Printf("<%d> New best local error found: %f", i, lossf64)
}
//update positions+1: standard PSO velocity update (inertia + cognitive
//pull toward the personal best), with positions clamped to +-randRange.
for i, currentVelocity := range p.velocities {
currentPosition := currentPositions[i]
bestLocalPosition := p.bestLocalPositions[i]
oldVelocityFactor := inertiaWeight * currentVelocity
localRandomness := rand.Float64()
bestLocationDelta := bestLocalPosition - currentPosition
localPositionFactor := cognitiveWeight * localRandomness * bestLocationDelta
revisedVelocity := oldVelocityFactor + localPositionFactor
p.velocities[i] = revisedVelocity
revisedPositions[i] = math.Max(-randRange, math.Min(currentPosition+revisedVelocity, randRange))
}
n.importPositions(revisedPositions)
// Random "death": probability decays linearly over the run; on death the
// particle's position and velocity are re-randomised.
dieThreshold := maxDeathProbability * (1 - float64(i)/float64(iterations))
dieF := rand.Float64()
shouldDie := dieF < dieThreshold
if shouldDie {
log.Printf("<%d> Death %f:%f", i, dieF, dieThreshold)
randomizeUniform(revisedPositions, randRange)
randomizeUniform(p.velocities, 1)
n.importPositions(revisedPositions)
}
}
log.Printf("Final error: %f", p.bestErrorLoss)
}
// Predict runs a single forward pass of the particle's network on the given
// input features and returns the network's scalar output as a one-element slice.
func (p *particle) Predict(inputs ...float64) []float64 {
	// BUG FIX: the input tensor was previously backed by T.Random(...), which
	// discarded the caller's features entirely and predicted on noise. Back
	// the tensor with `inputs` so the forward pass actually sees them.
	xTestVal := T.New(T.WithShape(1, p.feats), T.WithBacking(inputs))
	G.Let(p.n.x, xTestVal)
	m := G.NewTapeMachine(p.n.g)
	if err := p.n.fwd(m, xTestVal); err != nil {
		log.Fatal(err)
	}
	m.Reset()
	output := p.n.last.Value().Data().(float64)
	return []float64{output}
} | pso/particle.go | 0.553988 | 0.487063 | particle.go | starcoder
package set
// Of[T] is a set of elements of type T.
// The zero value of Of is not safe for use.
// Create one with New instead.
type Of[T comparable] map[T]struct{}

// New produces a new set containing the given values.
// Duplicate values are collapsed into a single element.
func New[T comparable](vals ...T) Of[T] {
	// Pre-size for the common duplicate-free case (extra capacity from
	// duplicates is harmless), and drop the redundant double Of[T]
	// conversion the previous implementation performed.
	s := make(Of[T], len(vals))
	s.Add(vals...)
	return s
}

// Add adds the given values to the set.
// Items already present in the set are silently ignored.
func (s Of[T]) Add(vals ...T) {
	for _, val := range vals {
		s[val] = struct{}{}
	}
}

// Has tells whether the given value is in the set.
func (s Of[T]) Has(val T) bool {
	_, ok := s[val]
	return ok
}

// Del removes the given items from the set.
// Items already absent from the set are silently ignored.
func (s Of[T]) Del(vals ...T) {
	for _, val := range vals {
		delete(s, val)
	}
}

// Len tells the number of distinct values in the set.
func (s Of[T]) Len() int {
	return len(s)
}

// Each calls f on each element of the set in an indeterminate order, stopping
// at and returning the first non-nil error f produces.
// It is safe to add and remove items during a call to Each,
// but that can affect the sequence of values seen later during the same Each call.
func (s Of[T]) Each(f func(T) error) error {
	for val := range s {
		if err := f(val); err != nil {
			return err
		}
	}
	return nil
}

// Intersect produces a new set containing only items that appear in all the
// given sets. With no arguments it returns an empty set.
func Intersect[T comparable](sets ...Of[T]) Of[T] {
	s := New[T]()
	if len(sets) == 0 {
		return s
	}
	// Ranging directly (instead of going through Each with an error-less
	// closure) avoids discarding Each's error return value.
outer:
	for val := range sets[0] {
		for _, other := range sets[1:] {
			if !other.Has(val) {
				continue outer
			}
		}
		s.Add(val)
	}
	return s
}

// Union produces a new set containing all the items in all the given sets.
func Union[T comparable](sets ...Of[T]) Of[T] {
	result := New[T]()
	for _, s := range sets {
		for val := range s {
			result[val] = struct{}{}
		}
	}
	return result
}
// Diff produces a new set containing the items in s1 that are not also in s2.
func Diff[T comparable](s1, s2 Of[T]) Of[T] {
	out := New[T]()
	for val := range s1 {
		if !s2.Has(val) {
			out.Add(val)
		}
	}
	return out
} | set.go | 0.73307 | 0.65062 | set.go | starcoder
package dfl
import (
"fmt"
"reflect"
"strings"
"github.com/pkg/errors"
"github.com/spatialcurrent/go-reader-writer/pkg/io"
"github.com/spatialcurrent/go-try-get/pkg/gtg"
)
// parseExtractPath scans path for its first control token and reports where it
// sits. It returns, in order: the index of a null-safe "?" (which must be
// followed by "."), the index of the first ".", and the start/end indexes of a
// bracketed slice expression. Indexes that were not found are -1. A "?" not
// followed by "." yields an error.
//
// NOTE(review): the end index of a "[...]" expression is measured relative to
// the character after "[", not relative to the start of path — callers must
// account for that.
func parseExtractPath(path string) (int, int, int, int, error) {
	questionMark, period, sliceStart, sliceEnd := -1, -1, -1, -1
loop:
	for i, c := range path {
		switch c {
		case '?':
			questionMark = i
			if i+1 < len(path) && path[i+1] == '.' {
				period = i + 1
				break loop
			}
			return 0, 0, 0, 0, errors.New("Invalid path " + path)
		case '.':
			period = i
			break loop
		case '[':
			sliceStart = i
			// Scan forward for the bracket that balances this one,
			// tracking nesting depth.
			depth := 1
			for j, r := range path[i+1:] {
				if r == '[' {
					depth++
				} else if r == ']' {
					depth--
					if depth == 0 {
						sliceEnd = j
						break
					}
				}
			}
			break loop
		}
	}
	return questionMark, period, sliceStart, sliceEnd, nil
}
// checkStartIndex verifies that start is a valid index into obj when obj is an
// array, slice, or string; values of any other kind are accepted unchecked.
func checkStartIndex(obj interface{}, start int) error {
	switch reflect.TypeOf(obj).Kind() {
	case reflect.Array, reflect.Slice, reflect.String:
		if n := reflect.ValueOf(obj).Len(); start >= n {
			return errors.New("start index " + fmt.Sprint(start) + " is greater than or equal to the length of the object " + fmt.Sprint(n))
		}
	}
	return nil
}
// checkStartAndEndIndex verifies that [start, end] is a valid index pair for
// obj when obj is an array, slice, or string (end in range, start <= end);
// values of any other kind are accepted unchecked.
func checkStartAndEndIndex(obj interface{}, start int, end int) error {
	switch reflect.TypeOf(obj).Kind() {
	case reflect.Array, reflect.Slice, reflect.String:
		v := reflect.ValueOf(obj)
		switch {
		case end >= v.Len():
			return errors.New("end index " + fmt.Sprint(end) + " greater than or equal to the length of the object " + fmt.Sprint(v.Len()))
		case start > end:
			return errors.New("start index " + fmt.Sprint(start) + " is greater than end index " + fmt.Sprint(end) + " for object")
		}
	}
	return nil
}
// Extract is a function to extract a value from an object.
// Extract supports a standard dot (.) and null-safe (?.) indexing.
// Extract also supports wildcard indexing using *.
// Extract also support array indexing, including [A], [A:B], [A:], and [:B].
//
// Extract consumes one path segment per call and recurses on the remainder.
// Missing values are reported as Null{}: silently for null-safe (?.) access,
// and as an error for plain (.) access. vars, ctx, funcs, and quotes are
// threaded through unchanged so that bracket expressions can themselves be
// evaluated as expressions (via ParseCompileEvaluate*).
func Extract(path string, obj interface{}, vars map[string]interface{}, ctx interface{}, funcs FunctionMap, quotes []string) (interface{}, error) {
	// NOTE: slice_end_index is relative to the character after the opening
	// '[' (see parseExtractPath), not an absolute index into path.
	index_questionmark, index_period, slice_start_index, slice_end_index, err := parseExtractPath(path)
	if err != nil {
		return Null{}, errors.Wrap(err, "error parsing extract path")
	}
	if index_period != -1 {
		if index_questionmark != -1 {
			// Null-safe access: "key?.remainder". Missing keys yield Null{}
			// without an error.
			key := path[0:index_questionmark]
			remainder := path[index_period+1 : len(path)]
			t := reflect.TypeOf(obj)
			if t.Kind() == reflect.Map {
				m := reflect.ValueOf(obj)
				if m.Len() == 0 {
					return Null{}, nil
				}
				if key == "*" {
					// Wildcard: apply the remainder to every map value and
					// collect the results into a slice of the map's value type.
					values := reflect.MakeSlice(reflect.SliceOf(t.Elem()), 0, 0)
					for _, k := range m.MapKeys() {
						v, err := Extract(remainder, m.MapIndex(k).Interface(), vars, ctx, funcs, quotes)
						if err != nil {
							return v, err
						}
						values = reflect.Append(values, reflect.ValueOf(v))
					}
					return values.Interface(), nil
				}
				// NOTE(review): assumes the map is keyed by string — MapIndex
				// panics on a key-type mismatch; confirm callers only pass
				// string-keyed maps here.
				value := m.MapIndex(reflect.ValueOf(key))
				if !value.IsValid() {
					return Null{}, nil
				}
				return Extract(remainder, value.Interface(), vars, ctx, funcs, quotes)
			}
			switch o := obj.(type) {
			case *Context:
				if o.Has(key) {
					value := o.Get(key)
					if value == nil {
						return Null{}, nil
					}
					return Extract(remainder, value, vars, ctx, funcs, quotes)
				}
				return Null{}, nil
			}
			return Null{}, errors.New("object is invalid type " + reflect.TypeOf(obj).String())
		} else {
			// Plain access: "key.remainder". Missing keys are an error.
			key := path[0:index_period]
			remainder := path[index_period+1 : len(path)]
			t := reflect.TypeOf(obj)
			if t.Kind() == reflect.Map {
				m := reflect.ValueOf(obj)
				if m.Len() == 0 {
					return Null{}, errors.New("value " + key + " is null.")
				}
				if key == "*" {
					// Wildcard over map values, as in the null-safe branch.
					values := reflect.MakeSlice(reflect.SliceOf(t.Elem()), 0, 0)
					for _, k := range m.MapKeys() {
						v, err := Extract(remainder, m.MapIndex(k).Interface(), vars, ctx, funcs, quotes)
						if err != nil {
							return v, err
						}
						values = reflect.Append(values, reflect.ValueOf(v))
					}
					return values.Interface(), nil
				}
				value := m.MapIndex(reflect.ValueOf(key))
				if !value.IsValid() {
					return Null{}, errors.New("value " + key + " is null.")
				}
				return Extract(remainder, value.Interface(), vars, ctx, funcs, quotes)
			}
			switch o := obj.(type) {
			case *Context:
				if key == "*" {
					if o.Len() == 0 {
						return Null{}, errors.New("value " + key + " is null.")
					}
					// Wildcard over all context values.
					values := o.Values()
					results := make([]interface{}, 0, len(values))
					for _, v := range values {
						r, err := Extract(remainder, v, vars, ctx, funcs, quotes)
						if err != nil {
							return r, err
						}
						results = append(results, r)
					}
					return results, nil
				} else if o.Has(key) {
					value := o.Get(key)
					if value == nil {
						return Null{}, errors.New("value " + key + " is null.")
					}
					return Extract(remainder, value, vars, ctx, funcs, quotes)
				}
				return Null{}, errors.New("value " + key + " is null.")
			}
			return Null{}, errors.New("object is invalid type " + reflect.TypeOf(obj).String())
		}
	} else if slice_start_index != -1 && slice_end_index != -1 {
		if slice_start_index == 0 {
			// Path begins with a bracket expression: "[...]remainder".
			// Because slice_end_index is relative to path[1:], the closing
			// ']' sits at absolute index slice_end_index+1, so the remainder
			// starts at slice_end_index+2 and the expression body is
			// path[1:slice_end_index+1].
			remainder := path[slice_end_index+2:]
			pair := strings.Split(path[1:slice_end_index+1], ":")
			if len(pair) == 2 {
				// Range form: [A:B], [A:], or [:B].
				start := 0
				if len(pair[0]) > 0 {
					_, s, err := ParseCompileEvaluateInt(pair[0], vars, ctx, funcs, quotes)
					if err != nil {
						return Null{}, errors.New("slice start \"" + pair[0] + "\" is invalid")
					}
					err = checkStartIndex(obj, s)
					if err != nil {
						return Null{}, err
					}
					start = s
				}
				if len(pair[1]) > 0 {
					// Explicit end: [A:B] or [:B].
					_, end, err := ParseCompileEvaluateInt(pair[1], vars, ctx, funcs, quotes)
					if err != nil {
						return Null{}, errors.New("slice end \"" + pair[1] + "\" is invalid")
					}
					err = checkStartAndEndIndex(obj, start, end)
					if err != nil {
						return Null{}, err
					}
					t := reflect.TypeOf(obj)
					if t.Kind() == reflect.Slice || t.Kind() == reflect.String {
						v := reflect.ValueOf(obj).Slice(start, end)
						if len(remainder) > 0 {
							return Extract(remainder, v.Interface(), vars, ctx, funcs, quotes)
						}
						return v.Interface(), nil
					}
					// NOTE(review): end-1 suggests ReadRange is end-inclusive
					// — confirm against go-reader-writer's ByteReadCloser docs.
					if o, ok := obj.(io.ByteReadCloser); ok {
						return o.ReadRange(start, end-1)
					}
				} else {
					// Open end: [A:] slices from start to the end of obj.
					t := reflect.TypeOf(obj)
					if t.Kind() == reflect.Slice || t.Kind() == reflect.String {
						s := reflect.ValueOf(obj)
						if start >= s.Len() {
							return Null{}, errors.New("slice start index " + fmt.Sprint(start) + " greater than or equal to the length of the slice " + fmt.Sprint(s.Len()))
						}
						return s.Slice(start, s.Len()).Interface(), nil
					}
					if _, ok := obj.(io.ByteReadCloser); ok {
						// A stream has no known end, so [start:] is unsupported.
						return make([]byte, 0), errors.New("Reader cannot evaluate [start:]")
					}
				}
				return Null{}, errors.New("object \"" + fmt.Sprint(obj) + "\" (" + reflect.TypeOf(obj).String() + ") is not a slice.")
			} else if len(pair) == 1 {
				// Single-index form: [A]. The index expression is evaluated
				// dynamically and may also serve as a map key.
				_, i, err := ParseCompileEvaluate(pair[0], vars, ctx, funcs, quotes)
				if err != nil {
					return Null{}, errors.Wrap(err, "slice index \""+pair[0]+"\" is invalid ")
				}
				t := reflect.TypeOf(obj)
				if t.Kind() == reflect.Array || t.Kind() == reflect.Slice || t.Kind() == reflect.String {
					slice_index := 0
					switch i.(type) {
					case int:
						slice_index = i.(int)
					default:
						return Null{}, errors.New("slice index \"" + pair[0] + "\" is invalid type " + fmt.Sprint(reflect.TypeOf(i)))
					}
					s := reflect.ValueOf(obj)
					if slice_index >= s.Len() {
						return Null{}, errors.New("slice index " + fmt.Sprint(slice_index) + " greater than or equal to the length of the slice " + fmt.Sprint(s.Len()))
					}
					if len(remainder) > 0 {
						return Extract(remainder, s.Index(slice_index).Interface(), vars, ctx, funcs, quotes)
					}
					return s.Index(slice_index).Interface(), nil
				} else if t.Kind() == reflect.Map {
					// [A] on a map: use the evaluated expression as the key,
					// stringified when the map is keyed by string.
					m := reflect.ValueOf(obj)
					if len(remainder) > 0 {
						if t.Key().Kind() == reflect.String {
							return Extract(remainder, m.MapIndex(reflect.ValueOf(fmt.Sprint(i))).Interface(), vars, ctx, funcs, quotes)
						} else {
							return Extract(remainder, m.MapIndex(reflect.ValueOf(i)).Interface(), vars, ctx, funcs, quotes)
						}
					} else {
						if t.Key().Kind() == reflect.String {
							return m.MapIndex(reflect.ValueOf(fmt.Sprint(i))).Interface(), nil
						} else {
							return m.MapIndex(reflect.ValueOf(i)).Interface(), nil
						}
					}
				}
				if o, ok := obj.(io.ByteReadCloser); ok {
					// [A] on a byte stream: read the single byte at that offset.
					slice_index := 0
					switch i.(type) {
					case int:
						slice_index = i.(int)
					default:
						return Null{}, errors.New("slice index \"" + pair[0] + "\" is invalid type " + fmt.Sprint(reflect.TypeOf(i)))
					}
					values, err := o.ReadRange(slice_index, slice_index)
					if err != nil {
						return make([]byte, 0), err
					}
					return values[0], nil
				}
				return Null{}, errors.New("object \"" + fmt.Sprint(obj) + "\" (" + reflect.TypeOf(obj).String() + ") is not a slice.")
			}
			// NOTE(review): this message slices path[1:slice_end_index] while
			// the parse above used path[1:slice_end_index+1] — the quoted
			// range appears truncated by one character; verify intent.
			return Null{}, errors.New("slice range \"" + (path[1:slice_end_index]) + "\" is invalid ")
		} else {
			// Bracket after a key: "key[...]rest" — resolve key first, then
			// recurse with the bracket expression still at the front.
			key := path[0:slice_start_index]
			remainder := path[slice_start_index:len(path)]
			t := reflect.TypeOf(obj)
			if t.Kind() == reflect.Map {
				m := reflect.ValueOf(obj)
				v := m.MapIndex(reflect.ValueOf(key))
				if !v.IsValid() {
					return Null{}, errors.New("value " + key + " is not present.")
				}
				if v.IsNil() {
					return Null{}, errors.New("value " + key + " is nil.")
				}
				return Extract(remainder, v.Interface(), vars, ctx, funcs, quotes)
			}
			switch o := obj.(type) {
			case *Context:
				if o.Has(key) {
					value := o.Get(key)
					if value == nil {
						return Null{}, errors.New("value " + key + " is null.")
					}
					return Extract(remainder, value, vars, ctx, funcs, quotes)
				}
				return Null{}, errors.New("value " + key + " is null.")
			}
			return Null{}, errors.New("object is invalid type " + reflect.TypeOf(obj).String())
		}
	}
	// Terminal segment: path is a bare key (or "*") with no further structure.
	if obj == nil {
		return Null{}, nil
	}
	t := reflect.TypeOf(obj)
	if t.Kind() == reflect.Map {
		m := reflect.ValueOf(obj)
		if m.Len() == 0 {
			return Null{}, nil
		}
		if path == "*" {
			// Wildcard on a terminal segment returns all map values.
			values := reflect.MakeSlice(reflect.SliceOf(t.Elem()), 0, 0)
			for _, k := range m.MapKeys() {
				values = reflect.Append(values, m.MapIndex(k))
			}
			return values.Interface(), nil
		}
		// gtg.TryGet performs the lookup, returning the fallback (nil) when
		// the key is missing.
		value := gtg.TryGet(obj, path, nil)
		if value == nil {
			return Null{}, nil
		}
		return value, nil
	} else if t.Kind() == reflect.Struct {
		if path == "*" {
			// Wildcard is not defined for structs.
			return Null{}, errors.New("object is invalid type " + reflect.TypeOf(obj).String())
		}
		value := gtg.TryGet(obj, path, nil)
		if value == nil {
			return Null{}, nil
		}
		return value, nil
	}
	switch o := obj.(type) {
	case *Context:
		if o.Has(path) {
			value := o.Get(path)
			if value == nil {
				return Null{}, nil
			}
			return value, nil
		}
		return Null{}, nil
	}
	return Null{}, errors.New("object is invalid type " + reflect.TypeOf(obj).String())
}
package gotween
import (
"errors"
"math"
)
// version is the semantic version of the gotween package.
var version string = "0.0.1"

// EasingFunc defines a common interface that most of the easing functions
// conform to. The few that don't (the elastic and back variants, which take
// extra shape parameters) can be easily wrapped as required so that
// they match the interface.
type EasingFunc func(float64) (float64, error)
// GetPointOnLine linearly interpolates along the segment from (x1, y1) to
// (x2, y2), returning the point a proportion n of the way along it.
// n is not validated; values outside [0, 1] extrapolate past the endpoints.
func GetPointOnLine(x1, y1, x2, y2, n float64) (float64, float64) {
	px := x1 + (x2-x1)*n
	py := y1 + (y2-y1)*n
	return px, py
}
// Linear is the identity tween: progress maps straight to itself.
// n must lie in [0.0, 1.0].
func Linear(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return n, nil
}
// EaseInQuad maps n through the quadratic curve n², easing in: a slow
// start that accelerates toward the destination. n must lie in [0, 1].
func EaseInQuad(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return math.Pow(n, 2), nil
}
// EaseOutQuad maps n through the inverted quadratic -n(n-2), easing out:
// a fast start that decelerates into the destination. n must lie in [0, 1].
func EaseOutQuad(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return -n * (n - 2), nil
}
// EaseInOutQuad eases in quadratically until the midpoint, then eases out
// quadratically into the destination. n must lie in [0, 1].
func EaseInOutQuad(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	if n < 0.5 {
		return 2 * math.Pow(n, 2), nil
	}
	// Remap the second half onto [-1, 1) and apply the ease-out curve.
	n = n*2 - 1
	return -0.5 * (n*(n-2) - 1), nil
}
// EaseInCubic maps n through the cubic curve n³, easing in. n must lie
// in [0, 1].
func EaseInCubic(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return math.Pow(n, 3), nil
}
// EaseOutCubic maps n through the shifted cubic (n-1)³ + 1, easing out.
// n must lie in [0, 1].
func EaseOutCubic(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	n = n - 1
	return math.Pow(n, 3) + 1, nil
}
// EaseInOutCubic eases in cubically until the midpoint, then eases out
// cubically into the destination. n must lie in [0, 1].
func EaseInOutCubic(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	n = 2 * n
	if n < 1 {
		return 0.5 * math.Pow(n, 3), nil
	}
	n = n - 2
	return 0.5 * (math.Pow(n, 3) + 2), nil
}
// EaseInQuart maps n through the quartic curve n⁴, easing in. n must lie
// in [0, 1].
func EaseInQuart(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return math.Pow(n, 4), nil
}
// EaseOutQuart maps n through the flipped quartic 1 - (n-1)⁴, easing out.
// n must lie in [0, 1].
func EaseOutQuart(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	n = n - 1
	return -(math.Pow(n, 4) - 1), nil
}
// EaseInOutQuart eases in quartically until the midpoint, then eases out
// quartically into the destination. n must lie in [0, 1].
func EaseInOutQuart(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	n = 2 * n
	if n < 1 {
		return 0.5 * math.Pow(n, 4), nil
	}
	n = n - 2
	return -0.5 * (math.Pow(n, 4) - 2), nil
}
// EaseInQuint maps n through the quintic curve n⁵, easing in. n must lie
// in [0, 1].
func EaseInQuint(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return math.Pow(n, 5), nil
}
// EaseOutQuint maps n through the shifted quintic (n-1)⁵ + 1, easing out.
// n must lie in [0, 1].
func EaseOutQuint(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	n = n - 1
	return math.Pow(n, 5) + 1, nil
}
// EaseInOutQuint eases in quintically until the midpoint, then eases out
// quintically into the destination. n must lie in [0, 1].
func EaseInOutQuint(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	n = 2 * n
	if n < 1 {
		return 0.5 * math.Pow(n, 5), nil
	}
	n = n - 2
	return 0.5 * (math.Pow(n, 5) + 2), nil
}
// EaseInSine follows the first quarter of a cosine wave, easing in: slow
// start, accelerating finish. n must lie in [0, 1].
func EaseInSine(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return -1*math.Cos(n*math.Pi/2) + 1, nil
}
// EaseOutSine follows the first quarter of a sine wave, easing out: fast
// start, decelerating finish. n must lie in [0, 1].
func EaseOutSine(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return math.Sin(n * math.Pi / 2), nil
}
// EaseInOutSine follows half a cosine wave: slow start, fastest at the
// midpoint, slow finish. n must lie in [0, 1].
func EaseInOutSine(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return -0.5 * (math.Cos(math.Pi*n) - 1), nil
}
// EaseInExpo is an exponential tween: the value doubles at fixed intervals
// of n, so it starts very slowly and accelerates hard. n must lie in [0, 1].
func EaseInExpo(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	// 2^(10(n-1)) never reaches 0, so pin the start of the curve explicitly.
	if n == 0 {
		return 0, nil
	}
	return math.Pow(2, 10*(n-1)), nil
}
// EaseOutExpo is an exponential tween that begins fast and halves its speed
// at fixed intervals of n. n must lie in [0, 1].
func EaseOutExpo(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	// 1 - 2^(-10n) never reaches 1, so pin the end of the curve explicitly.
	if n == 1 {
		return 1, nil
	}
	return -(math.Pow(2, -10*n)) + 1, nil
}
// EaseInOutExpo accelerates exponentially to the midpoint, then decelerates
// exponentially into the destination. n must lie in [0, 1].
func EaseInOutExpo(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	// Pin both endpoints; the exponential halves never quite reach them.
	if n == 0 {
		return 0, nil
	}
	if n == 1 {
		return 1, nil
	}
	n = n * 2
	if n < 1 {
		return 0.5 * math.Pow(2, 10*(n-1)), nil
	}
	n -= 1
	return 0.5 * (-1*(math.Pow(2, -10*n)) + 2), nil
}
// EaseInCirc follows the lower quarter of a circle, easing in: nearly flat
// at the start, vertical at the end. n must lie in [0, 1].
func EaseInCirc(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	return -1 * (math.Sqrt(1-n*n) - 1), nil
}
// EaseOutCirc follows the upper quarter of a circle, easing out: vertical
// at the start, nearly flat at the end. n must lie in [0, 1].
func EaseOutCirc(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	n -= 1
	return math.Sqrt(1 - (n * n)), nil
}
// EaseInOutCirc combines two quarter-circle arcs: easing in to the
// midpoint, then easing out into the destination. n must lie in [0, 1].
func EaseInOutCirc(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	n = n * 2
	if n < 1 {
		return -0.5 * (math.Sqrt(1-math.Pow(n, 2)) - 1), nil
	}
	n = n - 2
	return 0.5 * (math.Sqrt(1-math.Pow(n, 2)) + 1), nil
}
// EaseInElastic is an elastic tween function that begins with an increasing
// wobble and then snaps into the destination. amplitude and period shape
// the oscillation; pass 0 for either to use the defaults (1 and 0.3).
// n must lie in [0, 1].
func EaseInElastic(n, amplitude, period float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	if period == 0.0 {
		period = 0.3
	}
	if amplitude == 0.0 {
		amplitude = 1
	}
	// Phase shift: amplitudes below 1 are clamped to 1 and use a quarter
	// period; otherwise the shift is derived from the amplitude.
	var s float64
	if amplitude < 1 {
		amplitude = 1
		s = period / 4
	} else {
		s = period / (2 * math.Pi) * math.Asin(1/amplitude)
	}
	n -= 1
	return -(amplitude * math.Pow(2, 10*n) * math.Sin((n-s)*(2*math.Pi)/period)), nil
}
// EaseOutElastic is an elastic tween function that overshoots the
// destination and then "rubber bands" into it. amplitude and period shape
// the oscillation; pass 0 for either to use the defaults (1 and 0.3).
// n must lie in [0, 1].
func EaseOutElastic(n, amplitude, period float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	if period == 0.0 {
		period = 0.3
	}
	if amplitude == 0.0 {
		amplitude = 1
	}
	// Phase shift, as in EaseInElastic.
	var s float64
	if amplitude < 1 {
		amplitude = 1
		s = period / 4
	} else {
		s = period / (2 * math.Pi) * math.Asin(1/amplitude)
	}
	return amplitude*math.Pow(2, -10*n)*math.Sin((n-s)*(2*math.Pi/period)) + 1, nil
}
// EaseInOutElastic wobbles in toward the midpoint and wobbles out of it.
// amplitude and period shape the oscillation; pass 0 for either to use the
// defaults (1 and 0.5). n must lie in [0, 1].
func EaseInOutElastic(n, amplitude, period float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	if period == 0.0 {
		period = 0.5
	}
	if amplitude == 0.0 {
		amplitude = 1
	}
	// Phase shift, as in EaseInElastic.
	var s float64
	if amplitude < 1 {
		amplitude = 1
		s = period / 4
	} else {
		s = period / (2 * math.Pi) * math.Asin(1/amplitude)
	}
	n *= 2
	if n < 1 {
		n = n - 1
		return -0.5 * (amplitude * math.Pow(2, (10*n)) * math.Sin((n-s)*2*math.Pi/period)), nil
	}
	n = n - 1
	return amplitude*math.Pow(2, (-10*n))*math.Sin((n-s)*2*math.Pi/period)*0.5 + 1, nil
}
// EaseInBack backs up slightly below 0 before accelerating to the
// destination. s controls the overshoot amount; pass 0 for the classic
// default of 1.70158 (~10% overshoot). n must lie in [0, 1].
func EaseInBack(n, s float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	if s == 0 {
		s = 1.70158
	}
	return n * n * ((s+1)*n - s), nil
}
// EaseOutBack overshoots the destination a little and then backs into it.
// s controls the overshoot amount; pass 0 for the classic default of
// 1.70158 (~10% overshoot). n must lie in [0, 1].
func EaseOutBack(n, s float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	if s == 0 {
		s = 1.70158
	}
	n = n - 1
	return n*n*((s+1)*n+s) + 1, nil
}
// EaseInOutBack is a "back-in" tween function that overshoots both the
// start and the destination: it dips below 0 leaving the start and rises
// above 1 before settling on the destination. s controls the overshoot
// amount; pass 0 for the classic default of 1.70158. n must lie in [0, 1].
//
// (The doc comment previously named the wrong function, "EaseInOutBounce".)
func EaseInOutBack(n, s float64) (float64, error) {
	if n < 0.0 || n > 1.0 {
		return 0.0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	if s == 0 {
		s = 1.70158
	}
	n = n * 2
	if n < 1 {
		// First half: a scaled EaseInBack with a boosted overshoot factor.
		s *= 1.525
		return 0.5 * (n * n * ((s+1)*n - s)), nil
	}
	// Second half: a scaled EaseOutBack, mirrored onto [0.5, 1].
	n -= 2
	s *= 1.525
	return 0.5 * (n*n*((s+1)*n+s) + 2), nil
}
// EaseInBounce is a bouncing tween function that begins bouncing and then
// jumps to the destination. It is EaseOutBounce reflected through the
// midpoint: EaseInBounce(n) == 1 - EaseOutBounce(1-n). n must lie in [0, 1].
func EaseInBounce(n float64) (float64, error) {
	if n < 0 || n > 1 {
		return 0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	out, err := EaseOutBounce(1 - n)
	if err != nil {
		return 0.0, err
	}
	return 1 - out, nil
}
// EaseOutBounce is a bouncing tween function that hits the destination and
// then bounces to rest. The curve is Robert Penner's easeOutBounce: four
// parabolic arcs of decreasing height. n must lie in [0, 1].
func EaseOutBounce(n float64) (float64, error) {
	if n < 0.0 || n > 1.0 {
		return 0.0, errors.New("`n` must be between 0.0 and 1.0.")
	}
	if n < (1 / 2.75) {
		return 7.5625 * n * n, nil
	} else if n < (2 / 2.75) {
		n -= (1.5 / 2.75)
		return 7.5625*n*n + 0.75, nil
	} else if n < (2.5 / 2.75) {
		n -= (2.25 / 2.75)
		return 7.5625*n*n + 0.9375, nil
	} else {
		// Canonical Penner offset is 2.625/2.75; the previous 2.65/2.75 made
		// EaseOutBounce(1) return ~0.994375 instead of landing exactly on 1.
		n -= (2.625 / 2.75)
		return 7.5625*n*n + 0.984375, nil
	}
}
// EaseInOutBounce is a bouncing tween function that bounces at the start and
// end.
func EaseInOutBounce(n float64) (float64, error) {
if n < 0.0 || n > 1.0 {
return 0.0, errors.New("`n` must be between 0.0 and 1.0.")
}
if n < 0.5 {
eob, err := EaseOutBounce(n * 2)
if err != nil {
return 0.0, err
}
return eob * 0.5, nil
} else {
eob, err := EaseOutBounce(n*2 - 1)
if err != nil {
return 0.0, err
}
return eob*0.5 + 0.5, nil
}
} | gotween.go | 0.81571 | 0.603143 | gotween.go | starcoder |
package pinapi
import (
"encoding/json"
)
// SpecialsFixturesEvent Optional event asscoaited with the special.
type SpecialsFixturesEvent struct {
// Event Id
Id *int `json:"id,omitempty"`
// The period of the match. For example in soccer 0 (Game), 1 (1st Half) & 2 (2nd Half)
PeriodNumber *int `json:"periodNumber,omitempty"`
// Home team name.
Home *string `json:"home,omitempty"`
// Away team name.
Away *string `json:"away,omitempty"`
}
// NewSpecialsFixturesEvent instantiates a new SpecialsFixturesEvent object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewSpecialsFixturesEvent() *SpecialsFixturesEvent {
	return &SpecialsFixturesEvent{}
}
// NewSpecialsFixturesEventWithDefaults instantiates a new SpecialsFixturesEvent object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewSpecialsFixturesEventWithDefaults() *SpecialsFixturesEvent {
	return &SpecialsFixturesEvent{}
}
// GetId returns the Id field value if set, zero value otherwise.
func (o *SpecialsFixturesEvent) GetId() int {
	if v, ok := o.GetIdOk(); ok {
		return *v
	}
	var zero int
	return zero
}

// GetIdOk returns a tuple with the Id field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SpecialsFixturesEvent) GetIdOk() (*int, bool) {
	if o == nil || o.Id == nil {
		return nil, false
	}
	return o.Id, true
}

// HasId returns a boolean if a field has been set.
func (o *SpecialsFixturesEvent) HasId() bool {
	_, ok := o.GetIdOk()
	return ok
}

// SetId gets a reference to the given int and assigns it to the Id field.
func (o *SpecialsFixturesEvent) SetId(v int) {
	o.Id = &v
}
// GetPeriodNumber returns the PeriodNumber field value if set, zero value otherwise.
func (o *SpecialsFixturesEvent) GetPeriodNumber() int {
	if v, ok := o.GetPeriodNumberOk(); ok {
		return *v
	}
	var zero int
	return zero
}

// GetPeriodNumberOk returns a tuple with the PeriodNumber field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SpecialsFixturesEvent) GetPeriodNumberOk() (*int, bool) {
	if o == nil || o.PeriodNumber == nil {
		return nil, false
	}
	return o.PeriodNumber, true
}

// HasPeriodNumber returns a boolean if a field has been set.
func (o *SpecialsFixturesEvent) HasPeriodNumber() bool {
	_, ok := o.GetPeriodNumberOk()
	return ok
}

// SetPeriodNumber gets a reference to the given int and assigns it to the PeriodNumber field.
func (o *SpecialsFixturesEvent) SetPeriodNumber(v int) {
	o.PeriodNumber = &v
}
// GetHome returns the Home field value if set, zero value otherwise.
func (o *SpecialsFixturesEvent) GetHome() string {
	if v, ok := o.GetHomeOk(); ok {
		return *v
	}
	var zero string
	return zero
}

// GetHomeOk returns a tuple with the Home field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SpecialsFixturesEvent) GetHomeOk() (*string, bool) {
	if o == nil || o.Home == nil {
		return nil, false
	}
	return o.Home, true
}

// HasHome returns a boolean if a field has been set.
func (o *SpecialsFixturesEvent) HasHome() bool {
	_, ok := o.GetHomeOk()
	return ok
}

// SetHome gets a reference to the given string and assigns it to the Home field.
func (o *SpecialsFixturesEvent) SetHome(v string) {
	o.Home = &v
}
// GetAway returns the Away field value if set, zero value otherwise.
func (o *SpecialsFixturesEvent) GetAway() string {
	if v, ok := o.GetAwayOk(); ok {
		return *v
	}
	var zero string
	return zero
}

// GetAwayOk returns a tuple with the Away field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SpecialsFixturesEvent) GetAwayOk() (*string, bool) {
	if o == nil || o.Away == nil {
		return nil, false
	}
	return o.Away, true
}

// HasAway returns a boolean if a field has been set.
func (o *SpecialsFixturesEvent) HasAway() bool {
	_, ok := o.GetAwayOk()
	return ok
}

// SetAway gets a reference to the given string and assigns it to the Away field.
func (o *SpecialsFixturesEvent) SetAway(v string) {
	o.Away = &v
}
// MarshalJSON encodes only the fields that have been set, matching the
// omitempty semantics of the struct tags.
func (o SpecialsFixturesEvent) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	if o.Id != nil {
		out["id"] = o.Id
	}
	if o.PeriodNumber != nil {
		out["periodNumber"] = o.PeriodNumber
	}
	if o.Home != nil {
		out["home"] = o.Home
	}
	if o.Away != nil {
		out["away"] = o.Away
	}
	return json.Marshal(out)
}
type NullableSpecialsFixturesEvent struct {
value *SpecialsFixturesEvent
isSet bool
}
func (v NullableSpecialsFixturesEvent) Get() *SpecialsFixturesEvent {
return v.value
}
func (v *NullableSpecialsFixturesEvent) Set(val *SpecialsFixturesEvent) {
v.value = val
v.isSet = true
}
func (v NullableSpecialsFixturesEvent) IsSet() bool {
return v.isSet
}
func (v *NullableSpecialsFixturesEvent) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableSpecialsFixturesEvent(val *SpecialsFixturesEvent) *NullableSpecialsFixturesEvent {
return &NullableSpecialsFixturesEvent{value: val, isSet: true}
}
func (v NullableSpecialsFixturesEvent) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableSpecialsFixturesEvent) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | pinapi/model_specials_fixtures_event.go | 0.828696 | 0.466906 | model_specials_fixtures_event.go | starcoder |
package imageutil
import (
"image"
"image/color"
)
// ImageReader implements the same methods as the standard image interface:
// per-pixel color access, the image's bounding rectangle, and its color
// model. Every standard image type satisfies it.
type ImageReader interface {
	At(x, y int) color.Color
	Bounds() image.Rectangle
	ColorModel() color.Model
}

// ImageWriter implements a method for setting colors at individual
// coordinates (which all standard image types implement).
type ImageWriter interface {
	Set(x, y int, c color.Color)
}

// ImageReadWriter implements both ImageReader and ImageWriter, i.e. a
// mutable image that can also be read back.
type ImageReadWriter interface {
	ImageReader
	ImageWriter
}
// Copy concurrently copies Color values from a source ImageReader to a
// destination ImageReadWriter. Copying a reader onto itself is a no-op.
// The copied region is the union of the two bounds; reads outside the
// source's bounds yield that image type's zero color.
func Copy(dst ImageReadWriter, src ImageReader) {
	if dst == src {
		return
	}
	QuickRP(
		AllPointsRP(
			func(pt image.Point) {
				dst.Set(pt.X, pt.Y, src.At(pt.X, pt.Y))
			},
		),
	)(dst.Bounds().Union(src.Bounds()))
}
// ConvertToRGBA returns src as an *image.RGBA: the value itself when it
// already has that type, otherwise a new *image.RGBA with the same bounds
// filled by a concurrent Copy.
func ConvertToRGBA(src ImageReader) *image.RGBA {
	if img, ok := src.(*image.RGBA); ok {
		return img
	}
	out := image.NewRGBA(src.Bounds())
	Copy(out, src)
	return out
}
// ConvertToRGBA64 returns src as an *image.RGBA64: the value itself when it
// already has that type, otherwise a new *image.RGBA64 with the same bounds
// filled by a concurrent Copy.
func ConvertToRGBA64(src ImageReader) *image.RGBA64 {
	if img, ok := src.(*image.RGBA64); ok {
		return img
	}
	out := image.NewRGBA64(src.Bounds())
	Copy(out, src)
	return out
}
// ConvertToNRGBA returns src as an *image.NRGBA: the value itself when it
// already has that type, otherwise a new *image.NRGBA with the same bounds
// filled by a concurrent Copy.
func ConvertToNRGBA(src ImageReader) *image.NRGBA {
	if img, ok := src.(*image.NRGBA); ok {
		return img
	}
	out := image.NewNRGBA(src.Bounds())
	Copy(out, src)
	return out
}
// ConvertToNRGBA64 returns src as an *image.NRGBA64: the value itself when
// it already has that type, otherwise a new *image.NRGBA64 with the same
// bounds filled by a concurrent Copy.
func ConvertToNRGBA64(src ImageReader) *image.NRGBA64 {
	if img, ok := src.(*image.NRGBA64); ok {
		return img
	}
	out := image.NewNRGBA64(src.Bounds())
	Copy(out, src)
	return out
}
// ConvertToAlpha returns src as an *image.Alpha: the value itself when it
// already has that type, otherwise a new *image.Alpha with the same bounds
// filled by a concurrent Copy.
func ConvertToAlpha(src ImageReader) *image.Alpha {
	if img, ok := src.(*image.Alpha); ok {
		return img
	}
	out := image.NewAlpha(src.Bounds())
	Copy(out, src)
	return out
}
// ConvertToAlpha16 returns src as an *image.Alpha16: the value itself when
// it already has that type, otherwise a new *image.Alpha16 with the same
// bounds filled by a concurrent Copy.
func ConvertToAlpha16(src ImageReader) *image.Alpha16 {
	if img, ok := src.(*image.Alpha16); ok {
		return img
	}
	out := image.NewAlpha16(src.Bounds())
	Copy(out, src)
	return out
}
// ConvertToGray returns src as an *image.Gray: the value itself when it
// already has that type, otherwise a new *image.Gray with the same bounds
// filled by a concurrent Copy.
func ConvertToGray(src ImageReader) *image.Gray {
	if img, ok := src.(*image.Gray); ok {
		return img
	}
	out := image.NewGray(src.Bounds())
	Copy(out, src)
	return out
}
// ConvertToGray16 returns an *image.Gray16 instance by asserting the given
// ImageReader has that type or, if it does not, using Copy to concurrently
// set the color.Color values of a new *image.Gray16 instance with the same
// bounds.
func ConvertToGray16(src ImageReader) *image.Gray16 {
if dst, ok := src.(*image.Gray16); ok {
return dst
}
dst := image.NewGray16(src.Bounds())
Copy(dst, src)
return dst
} | imageutil.go | 0.871584 | 0.57678 | imageutil.go | starcoder |
package astar
import (
"fmt"
"math"
)
// TableEntry is a row in the A* table: the bookkeeping kept per tile while
// the search runs.
type TableEntry struct {
	// Node is the tile this entry describes.
	Node *Tile
	// Distance from start. nil means no path to this tile has been found yet.
	G *float64
	// Heuristic distance from end.
	H float64
	// For tracking the path to how we got here.
	PreviousVertex *Tile
}
// F reports the A* priority f = g() + h(). While the node has no recorded
// g-cost yet, only the heuristic contributes.
func (te *TableEntry) F() float64 {
	if te.G != nil {
		return *te.G + te.H
	}
	return te.H
}
// FloatPtr returns a pointer to a fresh copy of f.
func FloatPtr(f float64) *float64 {
	v := f
	return &v
}
// AStarGraph holds the full state of an in-progress A* search over a Board.
type AStarGraph struct {
	// Board is the grid being searched.
	Board *Board
	// Entries maps each tile to its per-tile A* bookkeeping row.
	Entries map[*Tile]*TableEntry
	// OpenNodes is the frontier: discovered but not yet fully explored tiles.
	OpenNodes map[*Tile]struct{}
	// ClosedNodes holds tiles that have been fully explored.
	ClosedNodes map[*Tile]struct{}
	// CurrentNode is the tile most recently selected by Step.
	CurrentNode *Tile
}
// NewAStarGraph builds the initial search state for board b: one table
// entry per tile (g unknown, h precomputed against b.End()), and the
// neighbors of b.Start() opened as the initial frontier.
func NewAStarGraph(b *Board) *AStarGraph {
	g := &AStarGraph{
		Board:       b,
		OpenNodes:   map[*Tile]struct{}{},
		ClosedNodes: map[*Tile]struct{}{},
	}
	entries := map[*Tile]*TableEntry{}
	for _, tile := range b.tiles {
		entries[tile] = &TableEntry{
			Node:           tile,
			G:              nil,
			H:              tile.HeuristicDistanceFrom(b.End()),
			PreviousVertex: nil,
		}
	}
	g.Entries = entries
	// Initialize the graph - open the neighbors of start.
	for _, neighbor := range g.Neighbors(b.Start()) {
		g.OpenNodes[neighbor.Tile] = struct{}{}
		neighbor.Tile.SetKind(TypeOpen)
		entry := g.Entries[neighbor.Tile]
		// The edge cost from start is the neighbor's initial g-value.
		entry.G = &neighbor.Cost
		entry.PreviousVertex = b.Start()
		// The tile's display value shows its current f-value to the user.
		entry.Node.value = fmt.Sprintf("%.1f", entry.F())
	}
	return g
}
// MarkPath resets the isPath value for every tile in the grid, then paints
// the chain of predecessors leading to CurrentNode. This is inefficient and
// not required for solving A*; it exists purely to give the user a visual
// representation of what the algorithm is "doing".
func (a *AStarGraph) MarkPath() {
	// Clear any previously painted path.
	for _, t := range a.Board.tiles {
		t.isPath = false
	}
	// Walk predecessors back from the current node, painting each tile
	// until we reach the start (or run out of predecessors).
	cur := a.Entries[a.CurrentNode].PreviousVertex
	for cur != nil && cur != a.Board.Start() {
		cur.isPath = true
		cur = a.Entries[cur].PreviousVertex
	}
}
// Step explores the next "Open" node with the lowest F-value.
// If that node is End, then we have reached the end and found the shortest path.
// Each call performs one expansion: pick the cheapest frontier tile, relax
// its neighbors, close it, and repaint the visual path.
func (a *AStarGraph) Step() {
	if a.CurrentNode == a.Board.End() {
		a.MarkPath()
		return
	}
	// Get the open node with the lowest f-value.
	smallestF := math.MaxFloat64
	var tile *Tile
	for t := range a.OpenNodes {
		entry := a.Entries[t]
		if entry.F() < smallestF {
			smallestF = entry.F()
			tile = t
		}
	}
	// tile stays nil when the frontier is empty; the nil check below bails
	// out before any dereference of currentEntry.
	a.CurrentNode = tile
	currentEntry := a.Entries[tile]
	if a.CurrentNode == a.Board.End() || a.CurrentNode == nil {
		// We have found the shortest path.
		return
	}
	for _, neighbor := range a.Neighbors(tile) {
		// Open the node.
		a.OpenNodes[neighbor.Tile] = struct{}{}
		neighbor.Tile.SetKind(TypeOpen)
		// Update the entries table if the new f-value is smaller.
		// NOTE(review): this compares the neighbor's f-value (g+h) against a
		// tentative g-cost; textbook A* compares g against g. Confirm this is
		// intentional, as it can accept a worse g when h is large.
		entry := a.Entries[neighbor.Tile]
		if entry.G == nil || entry.F() > *currentEntry.G+neighbor.Cost {
			entry.G = FloatPtr(*currentEntry.G + neighbor.Cost)
			// Refresh the tile's displayed f-value.
			entry.Node.value = fmt.Sprintf("%.1f", entry.F())
			entry.PreviousVertex = tile
		}
	}
	// Close the tile.
	delete(a.OpenNodes, tile)
	a.ClosedNodes[tile] = struct{}{}
	tile.SetKind(TypeClosed)
	// Draw the path on the map.
	a.MarkPath()
}
// EdgeTo represents a weighted edge from a tile to one of its valid
// neighbors.
type EdgeTo struct {
	Tile *Tile   // the neighboring tile the edge leads to
	Cost float64 // traversal cost: 1 for cardinal moves, sqrt(2) for diagonal
}
// Neighbors returns all neighbors to the tile that are not a wall or closed.
// Neighbors in cardinal directions have a cost of 1 (Lef, Right, Up, Down).
// Neighbors in corners have a cost of sqrt2 (TopRight, TopLeft, BotRight, BotLeft).
// Walls and closed (fully visited) nodes are not returned.
// Neighbors returns the tiles adjacent to tile that can be entered, i.e.
// tiles that are on the board, are not walls, and have not been closed yet.
// Cardinal moves (left, right, up, down) cost 1; diagonal moves cost sqrt(2).
func (a *AStarGraph) Neighbors(tile *Tile) []*EdgeTo {
	edges := []*EdgeTo{}
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			n, ok := a.isNeighbor(tile.x+dx, tile.y+dy, tile)
			if !ok {
				continue
			}
			c := 1.0
			if dx != 0 && dy != 0 {
				c = math.Sqrt2
			}
			edges = append(edges, &EdgeTo{Tile: n, Cost: c})
		}
	}
	return edges
}
// isNeighbor takes a tile, and a relative position to the tile.
// If the neighbor is on the board, not closed, and not a wall we return it.
func (a *AStarGraph) isNeighbor(i, j int, tile *Tile) (*Tile, bool) {
// Neighbor is off the grid.
if i < 0 || i >= a.Board.width {
return nil, false
}
if j < 0 || j >= a.Board.height {
return nil, false
}
// Neighbor is the tile itself.
if tile.x == i && tile.y == j {
return nil, false
}
index := a.Board.Index(i, j)
neighbor := a.Board.tiles[index]
if neighbor.kind == TypeWall {
return nil, false
}
if _, closed := a.ClosedNodes[neighbor]; closed {
return nil, false
}
return neighbor, true
} | graph.go | 0.783658 | 0.509154 | graph.go | starcoder |
import "fmt"
func BinSearch(nums []int, target int, low int, high int) int {
if low > high {
return -1
}
mid := low + (high-low)/2
//fmt.Println(low, mid,high, nums[low],nums[mid],nums[high], target)
if nums[mid] < target {
return BinSearch(nums, target, mid+1, high)
}
if nums[mid] > target {
return BinSearch(nums, target, low, mid-1)
}
return mid
}
func FindPivot(nums []int, low int, high int) int {
mid := low + (high-low)/2
fmt.Println(low, mid, high)
if low >= high {
return low
}
if nums[mid] > nums[mid+1] {
return mid + 1
}
if nums[mid] > nums[low] {
return FindPivot(nums, mid, high)
}
return FindPivot(nums, low, mid)
}
func search(nums []int, target int) int {
L := len(nums)
if L == 0 {
return -1
} else if L == 1 {
if target == nums[0] {
return 0
}
return -1
}
P := FindPivot(nums, 0, len(nums)-1)
fmt.Println(P)
X := BinSearch(nums, target, 0, P-1)
if X < 0 && P < L {
X = BinSearch(nums, target, P, len(nums)-1)
}
return X
}
// when duplicates are allowed
func FindPivot(nums []int, lo int, hi int) int {
for lo < hi {
mid := lo + (hi-lo)/2
if nums[mid] > nums[hi] {
lo = mid + 1
} else if nums[mid] < nums[hi] {
hi = mid
} else {
//fmt.Println("*")
if nums[hi-1] > nums[hi] {
lo = hi
break
}
hi--
}
}
return lo
}
func search(nums []int, target int) bool {
L := len(nums)
if L == 0 {
return false
}
pos = FindPivot(nums, 0, L-1)
fmt.Println(pos)
if BinSearch(nums, target, 0, pos-1) {
return true
}
if nums[pos] == target {
return true
}
return BinSearch(nums, target, pos+1, L-1)
}
/*
A peak element is an element that is greater than its neighbors.
Given an input array nums, where nums[i] ≠ nums[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that nums[-1] = nums[n] = -∞.
Example 1:
Input: nums = [1,2,3,1]
Output: 2
Explanation: 3 is a peak element and your function should return the index number 2.
Example 2:
Input: nums = [1,2,1,3,5,6,4]
Output: 1 or 5
Explanation: Your function can return either index number 1 where the peak element is 2,
or index number 5 where the peak element is 6.
Follow up: Your solution should be in logarithmic complexity.
*/
func findPeakElement(nums []int) int {
lo, hi := 0, len(nums) - 1
for lo < hi {
mi := lo + (hi - lo)/2
//fmt.Println(lo,mi,hi)
if nums[mi] < nums[mi+1] {
lo = mi+1
} else {
hi = mi
}
}
return lo
} | submissions/0033.Search_in_Rotated_Array.go | 0.539954 | 0.480357 | 0033.Search_in_Rotated_Array.go | starcoder |
package scaling
import (
"knative.dev/serving/pkg/metrics"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
)
var (
desiredPodCountM = stats.Int64(
"desired_pods",
"Number of pods autoscaler wants to allocate",
stats.UnitDimensionless)
excessBurstCapacityM = stats.Float64(
"excess_burst_capacity",
"Excess burst capacity overserved over the stable window",
stats.UnitDimensionless)
stableRequestConcurrencyM = stats.Float64(
"stable_request_concurrency",
"Average of requests count per observed pod over the stable window",
stats.UnitDimensionless)
panicRequestConcurrencyM = stats.Float64(
"panic_request_concurrency",
"Average of requests count per observed pod over the panic window",
stats.UnitDimensionless)
targetRequestConcurrencyM = stats.Float64(
"target_concurrency_per_pod",
"The desired number of concurrent requests for each pod",
stats.UnitDimensionless)
stableRPSM = stats.Float64(
"stable_requests_per_second",
"Average requests-per-second per observed pod over the stable window",
stats.UnitDimensionless)
panicRPSM = stats.Float64(
"panic_requests_per_second",
"Average requests-per-second per observed pod over the panic window",
stats.UnitDimensionless)
targetRPSM = stats.Float64(
"target_requests_per_second",
"The desired requests-per-second for each pod",
stats.UnitDimensionless)
panicM = stats.Int64(
"panic_mode",
"1 if autoscaler is in panic mode, 0 otherwise",
stats.UnitDimensionless)
)
func init() {
register()
}
func register() {
// Create views to see our measurements. This can return an error if
// a previously-registered view has the same name with a different value.
// View name defaults to the measure name if unspecified.
if err := view.Register(
&view.View{
Description: "Number of pods autoscaler wants to allocate",
Measure: desiredPodCountM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
&view.View{
Description: "Average of requests count over the stable window",
Measure: stableRequestConcurrencyM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
&view.View{
Description: "Current excess burst capacity over average request count over the stable window",
Measure: excessBurstCapacityM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
&view.View{
Description: "Average of requests count over the panic window",
Measure: panicRequestConcurrencyM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
&view.View{
Description: "The desired number of concurrent requests for each pod",
Measure: targetRequestConcurrencyM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
&view.View{
Description: "1 if autoscaler is in panic mode, 0 otherwise",
Measure: panicM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
&view.View{
Description: "Average requests-per-second over the stable window",
Measure: stableRPSM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
&view.View{
Description: "Average requests-per-second over the panic window",
Measure: panicRPSM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
&view.View{
Description: "The desired requests-per-second for each pod",
Measure: targetRPSM,
Aggregation: view.LastValue(),
TagKeys: metrics.CommonRevisionKeys,
},
); err != nil {
panic(err)
}
} | pkg/autoscaler/scaling/metrics.go | 0.660501 | 0.412944 | metrics.go | starcoder |
package gremlingo
type Lambda struct {
Script string
Language string
}
// GraphTraversal stores a Traversal.
type GraphTraversal struct {
*Traversal
}
// NewGraphTraversal make a new GraphTraversal.
func NewGraphTraversal(graph *Graph, traversalStrategies *TraversalStrategies, bytecode *bytecode, remote *DriverRemoteConnection) *GraphTraversal {
gt := &GraphTraversal{
Traversal: &Traversal{
graph: graph,
traversalStrategies: traversalStrategies,
bytecode: bytecode,
remote: remote,
},
}
return gt
}
// Clone make a copy of a traversal that is reset for iteration.
func (g *GraphTraversal) Clone() *GraphTraversal {
return NewGraphTraversal(g.graph, g.traversalStrategies, newBytecode(g.bytecode), g.remote)
}
// V adds the v step to the GraphTraversal.
func (g *GraphTraversal) V(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("V", args...)
return g
}
// AddE adds the addE step to the GraphTraversal.
func (g *GraphTraversal) AddE(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("addE", args...)
return g
}
// AddV adds the addV step to the GraphTraversal.
func (g *GraphTraversal) AddV(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("addV", args...)
return g
}
// Aggregate adds the aggregate step to the GraphTraversal.
func (g *GraphTraversal) Aggregate(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("aggregate", args...)
return g
}
// And adds the and step to the GraphTraversal.
func (g *GraphTraversal) And(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("and", args...)
return g
}
// As adds the as step to the GraphTraversal.
func (g *GraphTraversal) As(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("as", args...)
return g
}
// Barrier adds the barrier step to the GraphTraversal.
func (g *GraphTraversal) Barrier(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("barrier", args...)
return g
}
// Both adds the both step to the GraphTraversal.
func (g *GraphTraversal) Both(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("both", args...)
return g
}
// BothE adds the bothE step to the GraphTraversal.
func (g *GraphTraversal) BothE(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("bothE", args...)
return g
}
// BothV adds the bothV step to the GraphTraversal.
func (g *GraphTraversal) BothV(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("bothV", args...)
return g
}
// Branch adds the branch step to the GraphTraversal.
func (g *GraphTraversal) Branch(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("branch", args...)
return g
}
// By adds the by step to the GraphTraversal.
func (g *GraphTraversal) By(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("by", args...)
return g
}
// Cap adds the cap step to the GraphTraversal.
func (g *GraphTraversal) Cap(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("cap", args...)
return g
}
// Choose adds the choose step to the GraphTraversal.
func (g *GraphTraversal) Choose(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("choose", args...)
return g
}
// Coalesce adds the coalesce step to the GraphTraversal.
func (g *GraphTraversal) Coalesce(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("coalesce", args...)
return g
}
// Coin adds the coint step to the GraphTraversal.
func (g *GraphTraversal) Coin(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("coin", args...)
return g
}
// ConnectedComponent adds the connectedComponent step to the GraphTraversal.
func (g *GraphTraversal) ConnectedComponent(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("connectedComponent", args...)
return g
}
// Constant adds the constant step to the GraphTraversal.
func (g *GraphTraversal) Constant(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("constant", args...)
return g
}
// Count adds the count step to the GraphTraversal.
func (g *GraphTraversal) Count(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("count", args...)
return g
}
// CyclicPath adds the cyclicPath step to the GraphTraversal.
func (g *GraphTraversal) CyclicPath(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("cyclicPath", args...)
return g
}
// Dedup adds the dedup step to the GraphTraversal.
func (g *GraphTraversal) Dedup(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("dedup", args...)
return g
}
// Drop adds the drop step to the GraphTraversal.
func (g *GraphTraversal) Drop(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("drop", args...)
return g
}
// ElementMap adds the elementMap step to the GraphTraversal.
func (g *GraphTraversal) ElementMap(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("elementMap", args...)
return g
}
// Emit adds the emit step to the GraphTraversal.
func (g *GraphTraversal) Emit(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("emit", args...)
return g
}
// Filter adds the filter step to the GraphTraversal.
func (g *GraphTraversal) Filter(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("filter", args...)
return g
}
// FlatMap adds the flatMap step to the GraphTraversal.
func (g *GraphTraversal) FlatMap(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("flatMap", args...)
return g
}
// Fold adds the fold step to the GraphTraversal.
func (g *GraphTraversal) Fold(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("fold", args...)
return g
}
// From adds the from step to the GraphTraversal.
func (g *GraphTraversal) From(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("from", args...)
return g
}
// Group adds the group step to the GraphTraversal.
func (g *GraphTraversal) Group(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("group", args...)
return g
}
// GroupCount adds the groupCount step to the GraphTraversal.
func (g *GraphTraversal) GroupCount(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("groupCount", args...)
return g
}
// Has adds the has step to the GraphTraversal.
func (g *GraphTraversal) Has(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("has", args...)
return g
}
// HasId adds the hasId step to the GraphTraversal.
func (g *GraphTraversal) HasId(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("hasId", args...)
return g
}
// HasKey adds the hasKey step to the GraphTraversal.
func (g *GraphTraversal) HasKey(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("hasKey", args...)
return g
}
// HasLabel adds the hasLabel step to the GraphTraversal.
func (g *GraphTraversal) HasLabel(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("hasLabel", args...)
return g
}
// HasNot adds the hasNot step to the GraphTraversal.
func (g *GraphTraversal) HasNot(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("hasNot", args...)
return g
}
// HasValue adds the hasValue step to the GraphTraversal.
func (g *GraphTraversal) HasValue(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("hasValue", args...)
return g
}
// Id adds the id step to the GraphTraversal.
func (g *GraphTraversal) Id(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("id", args...)
return g
}
// Identity adds the identity step to the GraphTraversal.
func (g *GraphTraversal) Identity(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("identity", args...)
return g
}
// InE adds the inE step to the GraphTraversal.
func (g *GraphTraversal) InE(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("inE", args...)
return g
}
// InV adds the inV step to the GraphTraversal.
func (g *GraphTraversal) InV(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("inV", args...)
return g
}
// In adds the in step to the GraphTraversal.
func (g *GraphTraversal) In(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("in", args...)
return g
}
// Index adds the index step to the GraphTraversal.
func (g *GraphTraversal) Index(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("index", args...)
return g
}
// Inject adds the inject step to the GraphTraversal.
func (g *GraphTraversal) Inject(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("inject", args...)
return g
}
// Is adds the is step to the GraphTraversal.
func (g *GraphTraversal) Is(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("is", args...)
return g
}
// Key adds the key step to the GraphTraversal.
func (g *GraphTraversal) Key(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("key", args...)
return g
}
// Label adds the label step to the GraphTraversal.
func (g *GraphTraversal) Label(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("label", args...)
return g
}
// Limit adds the limit step to the GraphTraversal.
func (g *GraphTraversal) Limit(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("limit", args...)
return g
}
// Local adds the local step to the GraphTraversal.
func (g *GraphTraversal) Local(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("local", args...)
return g
}
// Loops adds the loops step to the GraphTraversal.
func (g *GraphTraversal) Loops(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("loops", args...)
return g
}
// Map adds the map step to the GraphTraversal.
func (g *GraphTraversal) Map(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("map", args...)
return g
}
// Match adds the match step to the GraphTraversal.
func (g *GraphTraversal) Match(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("match", args...)
return g
}
// Math adds the math step to the GraphTraversal.
func (g *GraphTraversal) Math(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("math", args...)
return g
}
// Max adds the max step to the GraphTraversal.
func (g *GraphTraversal) Max(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("max", args...)
return g
}
// Mean adds the mean step to the GraphTraversal.
func (g *GraphTraversal) Mean(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("mean", args...)
return g
}
// Min adds the min step to the GraphTraversal.
func (g *GraphTraversal) Min(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("min", args...)
return g
}
// None adds the none step to the GraphTraversal.
func (g *GraphTraversal) None(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("none", args...)
return g
}
// Not adds the not step to the GraphTraversal.
func (g *GraphTraversal) Not(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("not", args...)
return g
}
// Option adds the option step to the GraphTraversal.
func (g *GraphTraversal) Option(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("option", args...)
return g
}
// Optional adds the optional step to the GraphTraversal.
func (g *GraphTraversal) Optional(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("optional", args...)
return g
}
// Or adds the or step to the GraphTraversal.
func (g *GraphTraversal) Or(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("or", args...)
return g
}
// Order adds the order step to the GraphTraversal.
func (g *GraphTraversal) Order(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("order", args...)
return g
}
// OtherV adds the otherV step to the GraphTraversal.
func (g *GraphTraversal) OtherV(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("otherV", args...)
return g
}
// Out adds the out step to the GraphTraversal.
func (g *GraphTraversal) Out(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("out", args...)
return g
}
// OutE adds the outE step to the GraphTraversal.
func (g *GraphTraversal) OutE(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("outE", args...)
return g
}
// OutV adds the outV step to the GraphTraversal.
func (g *GraphTraversal) OutV(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("outV", args...)
return g
}
// PageRank adds the pageRank step to the GraphTraversal.
func (g *GraphTraversal) PageRank(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("pageRank", args...)
return g
}
// Path adds the path step to the GraphTraversal.
func (g *GraphTraversal) Path(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("path", args...)
return g
}
// PeerPressure adds the peerPressure step to the GraphTraversal.
func (g *GraphTraversal) PeerPressure(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("peerPressure", args...)
return g
}
// Profile adds the profile step to the GraphTraversal.
func (g *GraphTraversal) Profile(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("profile", args...)
return g
}
// Program adds the program step to the GraphTraversal.
func (g *GraphTraversal) Program(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("program", args...)
return g
}
// Project adds the project step to the GraphTraversal.
func (g *GraphTraversal) Project(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("project", args...)
return g
}
// Properties adds the properties step to the GraphTraversal.
func (g *GraphTraversal) Properties(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("properties", args...)
return g
}
// Property adds the property step to the GraphTraversal.
func (g *GraphTraversal) Property(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("property", args...)
return g
}
// PropertyMap adds the propertyMap step to the GraphTraversal.
func (g *GraphTraversal) PropertyMap(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("propertyMap", args...)
return g
}
// Range adds the range step to the GraphTraversal.
func (g *GraphTraversal) Range(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("range", args...)
return g
}
// Read adds the read step to the GraphTraversal.
func (g *GraphTraversal) Read(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("read", args...)
return g
}
// Repeat adds the repeat step to the GraphTraversal.
func (g *GraphTraversal) Repeat(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("repeat", args...)
return g
}
// Sack adds the sack step to the GraphTraversal.
func (g *GraphTraversal) Sack(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("sack", args...)
return g
}
// Sample adds the sample step to the GraphTraversal.
func (g *GraphTraversal) Sample(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("sample", args...)
return g
}
// Select adds the select step to the GraphTraversal.
func (g *GraphTraversal) Select(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("select", args...)
return g
}
// ShortestPath adds the shortestPath step to the GraphTraversal.
func (g *GraphTraversal) ShortestPath(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("shortestPath", args...)
return g
}
// SideEffect adds the sideEffect step to the GraphTraversal.
func (g *GraphTraversal) SideEffect(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("sideEffect", args...)
return g
}
// SimplePath adds the simplePath step to the GraphTraversal.
func (g *GraphTraversal) SimplePath(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("simplePath", args...)
return g
}
// Skip adds the skip step to the GraphTraversal.
func (g *GraphTraversal) Skip(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("skip", args...)
return g
}
// Store adds the store step to the GraphTraversal.
func (g *GraphTraversal) Store(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("store", args...)
return g
}
// Subgraph adds the subgraph step to the GraphTraversal.
func (g *GraphTraversal) Subgraph(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("subgraph", args...)
return g
}
// Sum adds the sum step to the GraphTraversal.
func (g *GraphTraversal) Sum(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("sum", args...)
return g
}
// Tail adds the tail step to the GraphTraversal.
func (g *GraphTraversal) Tail(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("tail", args...)
return g
}
// TimeLimit adds the timeLimit step to the GraphTraversal.
func (g *GraphTraversal) TimeLimit(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("timeLimit", args...)
return g
}
// Times adds the times step to the GraphTraversal.
func (g *GraphTraversal) Times(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("times", args...)
return g
}
// To adds the to step to the GraphTraversal.
func (g *GraphTraversal) To(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("to", args...)
return g
}
// ToE adds the toE step to the GraphTraversal.
func (g *GraphTraversal) ToE(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("toE", args...)
return g
}
// ToV adds the toV step to the GraphTraversal.
func (g *GraphTraversal) ToV(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("toV", args...)
return g
}
// Tree adds the tree step to the GraphTraversal.
func (g *GraphTraversal) Tree(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("tree", args...)
return g
}
// Unfold adds the unfold step to the GraphTraversal.
func (g *GraphTraversal) Unfold(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("unfold", args...)
return g
}
// Union adds the union step to the GraphTraversal.
func (g *GraphTraversal) Union(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("union", args...)
return g
}
// Until adds the until step to the GraphTraversal.
func (g *GraphTraversal) Until(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("until", args...)
return g
}
// Value adds the value step to the GraphTraversal.
func (g *GraphTraversal) Value(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("value", args...)
return g
}
// ValueMap adds the valueMap step to the GraphTraversal.
func (g *GraphTraversal) ValueMap(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("valueMap", args...)
return g
}
// Values adds the values step to the GraphTraversal.
func (g *GraphTraversal) Values(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("values", args...)
return g
}
// Where adds the where step to the GraphTraversal.
func (g *GraphTraversal) Where(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("where", args...)
return g
}
// With adds the with step to the GraphTraversal.
func (g *GraphTraversal) With(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("with", args...)
return g
}
// Write adds the write step to the GraphTraversal.
func (g *GraphTraversal) Write(args ...interface{}) *GraphTraversal {
g.bytecode.addStep("write", args...)
return g
} | gremlin-go/driver/graphTraversal.go | 0.901948 | 0.50354 | graphTraversal.go | starcoder |
package three
import "math"
// NewMatrix3 :
func NewMatrix3() *Matrix3 {
elements := [9]float64{
1, 0, 0,
0, 1, 0,
0, 0, 1,
}
return &Matrix3{elements}
}
// Matrix3 :
type Matrix3 struct {
Elements [9]float64
}
// Set :
func (m Matrix3) Set(n11, n12, n13, n21, n22, n23, n31, n32, n33 float64) *Matrix3 {
te := m.Elements
te[0] = n11
te[1] = n21
te[2] = n31
te[3] = n12
te[4] = n22
te[5] = n32
te[6] = n13
te[7] = n23
te[8] = n33
return &m
}
// Identity :
func (m Matrix3) Identity() *Matrix3 {
m.Set(
1, 0, 0,
0, 1, 0,
0, 0, 1,
)
return &m
}
// Clone :
func (m Matrix3) Clone() *Matrix3 {
array := make([]float64, 0)
for _, elem := range m.Elements {
array = append(array, elem)
}
return NewMatrix3().FromArray(array, 0)
}
// Copy :
func (m Matrix3) Copy(n Matrix3) *Matrix3 {
te := m.Elements
me := n.Elements
te[0] = me[0]
te[1] = me[1]
te[2] = me[2]
te[3] = me[3]
te[4] = me[4]
te[5] = me[5]
te[6] = me[6]
te[7] = me[7]
te[8] = me[8]
return &m
}
// ExtractBasis :
func (m Matrix3) ExtractBasis(xAxis, yAxis, zAxis Vector3) *Matrix3 {
xAxis.SetFromMatrix3Column(m, 0)
yAxis.SetFromMatrix3Column(m, 1)
zAxis.SetFromMatrix3Column(m, 2)
return &m
}
// SetFromMatrix4 :
func (m Matrix3) SetFromMatrix4(n Matrix4) *Matrix3 {
me := n.Elements
m.Set(
me[0], me[4], me[8],
me[1], me[5], me[9],
me[2], me[6], me[10],
)
return &m
}
// Multiply :
func (m Matrix3) Multiply(n Matrix3) *Matrix3 {
return m.MultiplyMatrices(m, n)
}
// Premultiply :
func (m Matrix3) Premultiply(n Matrix3) *Matrix3 {
return m.MultiplyMatrices(n, m)
}
// MultiplyMatrices :
func (m Matrix3) MultiplyMatrices(a, b Matrix3) *Matrix3 {
ae := a.Elements
be := b.Elements
te := m.Elements
a11, a12, a13 := ae[0], ae[3], ae[6]
a21, a22, a23 := ae[1], ae[4], ae[7]
a31, a32, a33 := ae[2], ae[5], ae[8]
b11, b12, b13 := be[0], be[3], be[6]
b21, b22, b23 := be[1], be[4], be[7]
b31, b32, b33 := be[2], be[5], be[8]
te[0] = a11*b11 + a12*b21 + a13*b31
te[3] = a11*b12 + a12*b22 + a13*b32
te[6] = a11*b13 + a12*b23 + a13*b33
te[1] = a21*b11 + a22*b21 + a23*b31
te[4] = a21*b12 + a22*b22 + a23*b32
te[7] = a21*b13 + a22*b23 + a23*b33
te[2] = a31*b11 + a32*b21 + a33*b31
te[5] = a31*b12 + a32*b22 + a33*b32
te[8] = a31*b13 + a32*b23 + a33*b33
return &m
}
// MultiplyScalar :
func (m Matrix3) MultiplyScalar(s float64) *Matrix3 {
te := m.Elements
te[0] *= s
te[3] *= s
te[6] *= s
te[1] *= s
te[4] *= s
te[7] *= s
te[2] *= s
te[5] *= s
te[8] *= s
return &m
}
// Determinant :
func (m Matrix3) Determinant() float64 {
te := m.Elements
a, b, c := te[0], te[1], te[2]
d, e, f := te[3], te[4], te[5]
g, h, i := te[6], te[7], te[8]
return a*e*i - a*f*h - b*d*i + b*f*g + c*d*h - c*e*g
}
// GetInverse :
func (m Matrix3) GetInverse(matrix Matrix3) *Matrix3 {
me := matrix.Elements
te := m.Elements
n11, n21, n31 := me[0], me[1], me[2]
n12, n22, n32 := me[3], me[4], me[5]
n13, n23, n33 := me[6], me[7], me[8]
t11 := n33*n22 - n32*n23
t12 := n32*n13 - n33*n12
t13 := n23*n12 - n22*n13
det := n11*t11 + n21*t12 + n31*t13
if det == 0 {
return m.Set(0, 0, 0, 0, 0, 0, 0, 0, 0)
}
detInv := 1 / det
te[0] = t11 * detInv
te[1] = (n31*n23 - n33*n21) * detInv
te[2] = (n32*n21 - n31*n22) * detInv
te[3] = t12 * detInv
te[4] = (n33*n11 - n31*n13) * detInv
te[5] = (n31*n12 - n32*n11) * detInv
te[6] = t13 * detInv
te[7] = (n21*n13 - n23*n11) * detInv
te[8] = (n22*n11 - n21*n12) * detInv
return &m
}
// Transpose :
func (m Matrix3) Transpose() *Matrix3 {
te := m.Elements
tmp := te[1]
te[1] = te[3]
te[3] = tmp
tmp = te[2]
te[2] = te[6]
te[6] = tmp
tmp = te[5]
te[5] = te[7]
te[7] = tmp
return &m
}
// GetNormalMatrix :
func (m Matrix3) GetNormalMatrix(matrix4 Matrix4) *Matrix3 {
return m.SetFromMatrix4(matrix4).GetInverse(m).Transpose()
}
// TransposeIntoArray :
func (m Matrix3) TransposeIntoArray(r []float64) *Matrix3 {
if len(r) < 9 {
panic("array length should be greater than 9")
}
te := m.Elements
r[0] = te[0]
r[1] = te[3]
r[2] = te[6]
r[3] = te[1]
r[4] = te[4]
r[5] = te[7]
r[6] = te[2]
r[7] = te[5]
r[8] = te[8]
return &m
}
// SetUvTransform :
func (m Matrix3) SetUvTransform(tx, ty, sx, sy, rotation, cx, cy float64) *Matrix3 {
c := math.Cos(rotation)
s := math.Sin(rotation)
m.Set(
sx*c, sx*s, -sx*(c*cx+s*cy)+cx+tx,
-sy*s, sy*c, -sy*(-s*cx+c*cy)+cy+ty,
0, 0, 1,
)
return &m
}
// Scale :
func (m Matrix3) Scale(sx, sy float64) *Matrix3 {
var te = m.Elements
te[0] *= sx
te[3] *= sx
te[6] *= sx
te[1] *= sy
te[4] *= sy
te[7] *= sy
return &m
}
// Rotate :
func (m Matrix3) Rotate(theta float64) *Matrix3 {
c := math.Cos(theta)
s := math.Sin(theta)
te := m.Elements
a11, a12, a13 := te[0], te[3], te[6]
a21, a22, a23 := te[1], te[4], te[7]
te[0] = c*a11 + s*a21
te[3] = c*a12 + s*a22
te[6] = c*a13 + s*a23
te[1] = -s*a11 + c*a21
te[4] = -s*a12 + c*a22
te[7] = -s*a13 + c*a23
return &m
}
// Translate :
func (m Matrix3) Translate(tx, ty float64) *Matrix3 {
te := m.Elements
te[0] += tx * te[2]
te[3] += tx * te[5]
te[6] += tx * te[8]
te[1] += ty * te[2]
te[4] += ty * te[5]
te[7] += ty * te[8]
return &m
}
// Equals :
func (m Matrix3) Equals(matrix Matrix3) bool {
te := m.Elements
me := matrix.Elements
for i := 0; i < 9; i++ {
if te[i] != me[i] {
return false
}
}
return true
}
// FromArray :
func (m Matrix3) FromArray(array []float64, offset int) *Matrix3 {
if len(array) < offset+9 {
panic("array length should be greater than offset+9")
}
for i := 0; i < 9; i++ {
m.Elements[i] = array[i+offset]
}
return &m
}
// ToArray :
func (m Matrix3) ToArray(array []float64, offset int) []float64 {
if len(array) < offset+9 {
panic("array length should be greater than offset+9")
}
te := m.Elements
array[offset] = te[0]
array[offset+1] = te[1]
array[offset+2] = te[2]
array[offset+3] = te[3]
array[offset+4] = te[4]
array[offset+5] = te[5]
array[offset+6] = te[6]
array[offset+7] = te[7]
array[offset+8] = te[8]
return array
} | server/three/matrix3.go | 0.655115 | 0.625953 | matrix3.go | starcoder |
package compare
import (
"bytes"
"encoding/gob"
"encoding/json"
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
    // ErrInvalidMatchType is returned by Matches when the MatchType is not
    // one of the recognised constants below.
    ErrInvalidMatchType = errors.New("invalid match type")
    // ErrValueNotANumber is returned when a numeric comparison receives a
    // value that cannot be interpreted as an integer.
    ErrValueNotANumber = errors.New("value is not a number")
)

// MatchType defines the type of match to be performed.
type MatchType string

const (
    // MatchTypeLessThan is used to compare the response with the expected value.
    // If the response is less than the expected value the validation is successful.
    // If the response is equal or greater than the expected value the validation is not successful.
    // If the response is not a number the validation is not successful.
    MatchTypeLessThan MatchType = "lt"
    // MatchTypeLessThanOrEqual is used to compare the response with the expected value.
    // If the response is less than or equal to the expected value the validation is successful.
    // If the response is greater than the expected value the validation is not successful.
    // If the response is not a number the validation is not successful.
    MatchTypeLessThanOrEqual MatchType = "lte"
    // MatchTypeGreaterThan is used to compare the response with the expected value.
    // If the response is greater than the expected value the validation is successful.
    // If the response is equal or smaller than the expected value the validation is not successful.
    // If the response is not a number the validation is not successful.
    MatchTypeGreaterThan MatchType = "gt"
    // MatchTypeGreaterThanOrEqual is used to compare the response with the expected value.
    // If the response is greater than or equal to the expected value the validation is successful.
    // If the response is smaller than the expected value the validation is not successful.
    // If the response is not a number the validation is not successful.
    MatchTypeGreaterThanOrEqual MatchType = "gte"
    // MatchTypePercentageDeviation is used to compare the response with the expected value.
    // A percentage offset is calculated from the expected value and the response.
    // If the percentage offset is smaller than the calculated offset, the validation is successful.
    // The defined offset is added to the expected value and defines a failure tolerance.
    MatchTypePercentageDeviation MatchType = "pd"
    // MatchTypeRegex is used to compare the response with the expected value.
    // The regex defined in MatchValue is used to match the response.
    MatchTypeRegex MatchType = "re"
    // MatchTypeRange is used to compare the response with the expected value.
    // The range defined in MatchValue is used to match the response.
    // The value must be in between the range.
    // If the response is not a number the validation is not successful.
    MatchTypeRange MatchType = "rg"
    // MatchTypeEqual is used to compare the response with the expected value.
    // If the response is equal to the expected value the validation is successful.
    // If the response is not equal to the expected value the validation is not successful.
    MatchTypeEqual MatchType = "eq"
    // MatchTypeNotEqual is used to compare the response with the expected value.
    // If the response is not equal to the expected value the validation is successful.
    // If the response is equal to the expected value the validation is not successful.
    MatchTypeNotEqual MatchType = "neq"
    // MatchTypeEmpty is used to compare the response with the expected value.
    // If the response is empty the validation is successful.
    // If the response is not empty the validation is not successful.
    MatchTypeEmpty MatchType = "et"
    // MatchTypeNotEmpty is used to compare the response with the expected value.
    // If the response is not empty the validation is successful.
    // If the response is empty the validation is not successful.
    MatchTypeNotEmpty MatchType = "ne"
    // MatchTypeContains is used to compare the response with the expected value.
    // If the response contains the expected value the validation is successful.
    // If the response does not contain the expected value the validation is not successful.
    MatchTypeContains MatchType = "ct"
)

// Validation defines the validation specification to execute a test.
type Validation struct {
    // MatchType defines the type of the validation.
    // Possible values:
    // - lt: less than
    // - lte: less than or equal
    // - gt: greater than
    // - gte: greater than or equal
    // - pd: percentage deviation
    // - re: regex
    // - rg: range
    // - eq: equals
    // - neq: not equals
    // - ne: not empty
    // - et: empty
    // - ct: contains
    MatchType MatchType
    // MatchValue defines the comparison operand for the match types that
    // need one. Matches dereferences it for the "pd", "re", "rg" and "ct"
    // match types, so it must be non-nil for those; it is ignored otherwise.
    // Possible values:
    // - [0-9]-[0-9]: range definition
    // - [0-9]%: percentage deviation
    // - any: regex / substring
    MatchValue *string
    // ExpectedValue defines the expected value.
    ExpectedValue interface{}
}
// Matches validates the argument value against the validation specification.
// It returns true when value satisfies the configured MatchType. A non-nil
// error is returned when the comparison itself cannot be performed: unknown
// match type, a numeric comparison on a non-number, a missing MatchValue for
// a match type that requires one, an invalid regex, or an encoding failure.
func (d Validation) Matches(value interface{}) (bool, error) {
    switch d.MatchType {
    case MatchTypeLessThan, MatchTypeLessThanOrEqual,
        MatchTypeGreaterThan, MatchTypeGreaterThanOrEqual:
        // All four ordering comparisons share the same numeric conversion.
        expected, err := valueToInt64(d.ExpectedValue)
        if err != nil {
            return false, err
        }
        actual, err := valueToInt64(value)
        if err != nil {
            return false, err
        }
        switch d.MatchType {
        case MatchTypeLessThan:
            return actual < expected, nil
        case MatchTypeLessThanOrEqual:
            return actual <= expected, nil
        case MatchTypeGreaterThan:
            return actual > expected, nil
        default: // MatchTypeGreaterThanOrEqual
            return actual >= expected, nil
        }
    case MatchTypePercentageDeviation:
        if d.MatchValue == nil {
            return false, fmt.Errorf("match type %q requires a match value", d.MatchType)
        }
        btsFromValue, err := json.Marshal(value)
        if err != nil {
            return false, err
        }
        btsFromExpectedValue, err := json.Marshal(d.ExpectedValue)
        if err != nil {
            return false, err
        }
        // Deviation is measured as the share of differing bytes between the
        // JSON encodings of the actual and expected values.
        reports, err := BytesDifferent(btsFromValue, btsFromExpectedValue)
        if err != nil {
            return false, err
        }
        pcnt, err := NewPercentFromFloats(len(btsFromExpectedValue), len(reports))
        if err != nil {
            return false, err
        }
        pcntFromMatchValue, err := ParsePercentageValueFromString(*d.MatchValue)
        if err != nil {
            return false, err
        }
        return pcnt.Get() <= pcntFromMatchValue.Get(), nil
    case MatchTypeRegex:
        if d.MatchValue == nil {
            return false, fmt.Errorf("match type %q requires a match value", d.MatchType)
        }
        // Compile (rather than MustCompile) so an invalid pattern surfaces
        // as an error instead of a panic.
        rp, err := regexp.Compile(*d.MatchValue)
        if err != nil {
            return false, fmt.Errorf("invalid match value regex: %w", err)
        }
        buf := bytes.Buffer{}
        if err := gob.NewEncoder(&buf).Encode(value); err != nil {
            return false, fmt.Errorf("failed to encode value: %v", err)
        }
        return rp.Match(buf.Bytes()), nil
    case MatchTypeRange:
        if d.MatchValue == nil {
            return false, fmt.Errorf("match type %q requires a match value", d.MatchType)
        }
        r := rangeParser(*d.MatchValue)
        val, err := valueToInt64(value)
        if err != nil {
            return false, err
        }
        return val >= r[0] && val <= r[1], nil
    case MatchTypeEqual:
        return reflect.DeepEqual(d.ExpectedValue, value), nil
    case MatchTypeNotEqual:
        return !reflect.DeepEqual(d.ExpectedValue, value), nil
    case MatchTypeNotEmpty:
        // Strings are judged on content; everything else on nil-ness.
        if str, ok := value.(string); ok {
            return str != "", nil
        }
        return value != nil, nil
    case MatchTypeEmpty:
        if str, ok := value.(string); ok {
            return str == "", nil
        }
        return value == nil, nil
    case MatchTypeContains:
        if d.MatchValue == nil {
            return false, fmt.Errorf("match type %q requires a match value", d.MatchType)
        }
        buffer := bytes.Buffer{}
        if err := gob.NewEncoder(&buffer).Encode(value); err != nil {
            return false, fmt.Errorf("%w: gob encoding failed for value = %v", err, value)
        }
        return strings.Contains(buffer.String(), *d.MatchValue), nil
    default:
        return false, fmt.Errorf("%w: %s", ErrInvalidMatchType, d.MatchType)
    }
}
// valueToInt64 converts a numeric value to an int64. Besides int64 it now
// accepts the other common integer widths and whole-valued floats (JSON
// numbers typically decode to float64), returning ErrValueNotANumber for
// anything else.
func valueToInt64(value interface{}) (int64, error) {
    switch v := value.(type) {
    case int64:
        return v, nil
    case int:
        return int64(v), nil
    case int32:
        return int64(v), nil
    case int16:
        return int64(v), nil
    case int8:
        return int64(v), nil
    case uint:
        return int64(v), nil
    case uint32:
        return int64(v), nil
    case uint16:
        return int64(v), nil
    case uint8:
        return int64(v), nil
    case float64:
        // Only accept floats that carry a whole number.
        if v == float64(int64(v)) {
            return int64(v), nil
        }
    case float32:
        if float64(v) == float64(int64(v)) {
            return int64(v), nil
        }
    }
    return 0, fmt.Errorf("%w: %v", ErrValueNotANumber, value)
}
// rangeParser parses a range definition of the form "<low>-<high>"
// (e.g. "1-10") into a [low, high] pair. Any malformed input — wrong number
// of parts or a non-numeric bound — yields the zero pair [0, 0] instead of
// a partially-populated result (the old version silently ignored ParseInt
// errors, leaving half-parsed values behind).
func rangeParser(input string) [2]int64 {
    var result [2]int64
    parts := strings.Split(input, "-")
    if len(parts) != 2 {
        return result
    }
    low, err := strconv.ParseInt(parts[0], 10, 64)
    if err != nil {
        return result
    }
    high, err := strconv.ParseInt(parts[1], 10, 64)
    if err != nil {
        return result
    }
    result[0], result[1] = low, high
    return result
}
// Package caesar provides interface to encrypt and decrypt Caesar cipher.
package caesar
import (
"bytes"
"fmt"
"io"
)
var (
    // Classical bounds of the cipher: the lowercase and uppercase ASCII
    // letters, stored as inclusive [min, max] rune pairs.
    alphabet = []int{'a', 'z', 'A', 'Z'}
    // Printable ASCII as bounds: a single inclusive pair from space to tilde.
    printable = []int{' ', '~'}
)

// ROT13 rotates by 13 places. It's the most famous Caesar cipher.
var ROT13 = &Caesar{shift: 13, bounds: alphabet}
// Caesar represents a Caesar cipher.
// For more information on the Caesar's code:
// https://en.wikipedia.org/wiki/Caesar_cipher
type Caesar struct {
    // shift is the substitution key: how many positions a rune is moved.
    shift int
    // left reverses the substitution direction when true (see Reverse).
    left bool
    // bounds holds inclusive [min, max] rune pairs; only runes falling
    // inside one of these ranges are substituted.
    bounds []int
}
// Classic returns a classic Caesar cipher, only based on the alphabet.
// So, only the letters a to z or A to Z are substituted.
func Classic(key int) *Caesar {
    c := Caesar{shift: key, bounds: alphabet}
    return &c
}
// New returns a Caesar cipher using all the ASCII printable characters.
// It extends the number of characters classically used.
// See below the complete list:
// > https://en.wikipedia.org/wiki/ASCII#Printable_characters
func New(key int) *Caesar {
    c := Caesar{shift: key, bounds: printable}
    return &c
}
// Encrypt uses the current cipher to encrypt the given stream.
// It returns the encrypted bytes, or an error if the stream cannot be read.
func (c *Caesar) Encrypt(r io.Reader) ([]byte, error) {
    return c.write(r, false)
}

// Decrypt uses the current cipher to decrypt the given stream.
// It applies the substitution in the opposite direction of Encrypt.
func (c *Caesar) Decrypt(r io.Reader) ([]byte, error) {
    return c.write(r, true)
}
// write reads all of r and applies the cipher rune by rune, reversing the
// direction when reverse is true. A nil reader yields nil bytes and no error.
func (c *Caesar) write(r io.Reader, reverse bool) ([]byte, error) {
    if r == nil {
        return nil, nil
    }
    var in bytes.Buffer
    if _, err := in.ReadFrom(r); err != nil {
        return nil, err
    }
    var out bytes.Buffer
    for _, ch := range in.String() {
        if _, err := out.WriteString(c.Rune(ch, reverse).String()); err != nil {
            return nil, err
        }
    }
    return out.Bytes(), nil
}
// Rune applies the code on the given rune, reversing the substitution
// direction when reverse is true.
// The return implements the fmt.Stringer interface.
func (c *Caesar) Rune(r rune, reverse bool) fmt.Stringer {
    return &code{cipher: c, reverse: reverse, rune: int(r)}
}

// Reverse reverses the direction of the substitution.
// If actually defined to right, the substitution will go to the left after it.
// It returns the receiver for chaining.
func (c *Caesar) Reverse() *Caesar {
    c.left = !c.left
    return c
}

// code binds one rune to a cipher and a direction; its String method yields
// the substituted character.
type code struct {
    cipher *Caesar
    // reverse flips the cipher's direction for this rune (decryption).
    reverse bool
    // rune is the code point to substitute, stored as an int.
    rune int
}
// bounded reports whether the rune falls inside one of the cipher's
// inclusive [min, max] pairs, returning that pair on success. When no
// bounds are configured at all it returns ok=true with min=max=0, so that
// an empty cipher still treats every rune as "in range".
func (c code) bounded() (ok bool, min, max int) {
    if len(c.cipher.bounds) == 0 {
        return true, 0, 0
    }
    for i := 0; i+1 < len(c.cipher.bounds); i += 2 {
        lo, hi := c.cipher.bounds[i], c.cipher.bounds[i+1]
        if lo <= c.rune && c.rune <= hi {
            return true, lo, hi
        }
    }
    return false, 0, 0
}
// leftShifted reports whether the effective substitution direction is left:
// the cipher's configured direction, flipped when this code is reversed
// (logical XOR of the two flags).
func (c code) leftShifted() bool {
    return c.reverse != c.cipher.left
}
// String implements the fmt.Stinger interface.
func (c code) String() string {
ok, min, max := c.bounded()
if !ok {
// Do not care to this rune.
return string(c.rune)
}
// Direction.
diff := max - min + 1
shift := c.cipher.shift
if c.leftShifted() {
shift = diff - shift%diff
}
return string((c.rune-min+shift)%diff + min)
} | caesar/caesar.go | 0.834204 | 0.507446 | caesar.go | starcoder |
Package event implements the events that cause transitions between FSA states.
* Unicode classes: number, letter, upcase, lowcase, space
* Ranges: any, anyof, not
* CharLit
*/
package event
import (
"fmt"
"os"
"sort"
"unicode"
"github.com/goccmack/gogll/ast"
"github.com/goccmack/gogll/lex/item"
"github.com/goccmack/gogll/util/runeset"
)
// TriState has range: {True, False, Undefined}.
// It is the result type of Subset, which needs a third value for pairs of
// events whose subset relationship is not defined.
type TriState int

const (
    // Undefined is a TriState value: the relation is not defined.
    Undefined TriState = iota
    // False is a TriState value.
    False
    // True is a TriState value.
    True
)

// eventPair holds two lexer events whose precedence could not be ordered;
// collected by GetOrdered and reported by fail.
type eventPair struct {
    a, b ast.LexBase
}
// GetOrdered returns the set of unique transition events for items, ordered
// by the event precedence. Every pair of events must be comparable (one a
// subset of the other in at least one direction); if any pair is not, the
// conflict is reported via fail, which prints it and exits the process.
func GetOrdered(items ...*item.Item) (events []ast.LexBase) {
    events = getEvents(items...)
    incompatibleEvents := []eventPair{}
    // Check all unordered pairs for a defined subset relation.
    for i := 0; i < len(events)-1; i++ {
        for j := i + 1; j < len(events); j++ {
            if Subset(events[i], events[j]) == Undefined &&
                Subset(events[j], events[i]) == Undefined {
                incompatibleEvents = append(incompatibleEvents,
                    eventPair{events[i], events[j]})
            }
        }
    }
    if len(incompatibleEvents) > 0 {
        fail(items, incompatibleEvents)
    }
    sortEvents(events)
    return
}
// Subset returns True if a is a subset of b, False if a is not a subset of b,
// and Undefined if the subset relationship is not defined between a and b.
// The relation is decided by the concrete event types: Any (all runes),
// AnyOf (a rune set), CharLiteral (one rune), Not (complement of a rune set)
// and UnicodeClass (letter/upper/lower/number/space).
func Subset(a, b ast.LexBase) TriState {
    switch a1 := a.(type) {
    case *ast.Any:
        // Any is only a subset of Any itself.
        _, ok := b.(*ast.Any)
        return toTriState(ok)
    case *ast.AnyOf:
        switch b1 := b.(type) {
        case *ast.Any:
            return True
        case *ast.AnyOf:
            return toTriState(a1.Set.Subset(b1.Set))
        case *ast.CharLiteral:
            return toTriState(a1.Set.Contains(b1.Char()))
        case *ast.Not:
            // NOTE(review): A ⊆ complement(B) holds iff A and B are
            // disjoint; !Subset(A, B) also accepts partially overlapping
            // sets — confirm this approximation is intended.
            return toTriState(!a1.Set.Subset(b1.Set))
        case *ast.UnicodeClass:
            return toTriState(unicodeClassContains(b1, a1.Set.Elements()...))
        default:
            panic("Invalid")
        }
    case *ast.CharLiteral:
        switch b1 := b.(type) {
        case *ast.Any:
            return True
        case *ast.AnyOf:
            return toTriState(b1.Set.Contains(a1.Char()))
        case *ast.CharLiteral:
            return toTriState(a1.Char() == b1.Char())
        case *ast.Not:
            return toTriState(!b1.Set.Contains(a1.Char()))
        case *ast.UnicodeClass:
            return toTriState(unicodeClassContains(b1, a1.Char()))
        default:
            panic("Invalid")
        }
    case *ast.Not:
        switch b1 := b.(type) {
        case *ast.Any:
            return True
        case *ast.AnyOf:
            return Undefined
        case *ast.CharLiteral:
            return False
        case *ast.Not:
            // ¬A ⊆ ¬B iff B ⊆ A.
            return toTriState(b1.Set.Subset(a1.Set))
        case *ast.UnicodeClass:
            return Undefined
        default:
            panic("Invalid")
        }
    case *ast.UnicodeClass:
        switch b1 := b.(type) {
        case *ast.Any:
            return True
        case *ast.AnyOf:
            return Undefined
        case *ast.CharLiteral:
            return False
        case *ast.Not:
            // NOTE(review): treated as never a subset; a class disjoint
            // from b1's set would actually qualify — confirm intended.
            return False
        case *ast.UnicodeClass:
            // A class is a subset of itself, and the case sub-classes are
            // subsets of Letter.
            return toTriState(a1.Type == b1.Type ||
                (b1.Type == ast.Letter && (a1.Type == ast.Lowcase || a1.Type == ast.Upcase)))
        default:
            panic("Invalid")
        }
    default:
        panic("Invalid")
    }
}
// anyOf reports whether r belongs to the rune set rs.
func anyOf(r rune, rs *runeset.RuneSet) bool {
    return rs.Contains(r)
}
// contains reports whether event (compared with Equal) already occurs in
// events.
func contains(events []ast.LexBase, event ast.LexBase) bool {
    for _, existing := range events {
        if existing.Equal(event) {
            return true
        }
    }
    return false
}
// getEvents collects the distinct (by Equal) transition symbols of items,
// preserving first-seen order and skipping items without a symbol.
func getEvents(items ...*item.Item) []ast.LexBase {
    var events []ast.LexBase
    for _, it := range items {
        sym := it.Symbol()
        if sym == nil {
            continue
        }
        ev := sym.(ast.LexBase)
        if !contains(events, ev) {
            events = append(events, ev)
        }
    }
    return events
}
// noneOf reports whether r does not belong to the rune set rs.
func noneOf(r rune, rs *runeset.RuneSet) bool {
    return !rs.Contains(r)
}
// sortEvents sorts events in precedence order: more specific events (single
// characters, small sets) sort before the wider ones (classes, complements,
// Any). The comparison panics on an event type it does not know.
func sortEvents(events []ast.LexBase) {
    sort.Slice(events, func(i, j int) bool {
        switch e1 := events[i].(type) {
        case *ast.Any:
            // Any is the widest event and never precedes another.
            return false
        case *ast.AnyOf:
            switch e2 := events[j].(type) {
            case *ast.Any:
                return true
            case *ast.AnyOf:
                return e1.Set.Subset(e2.Set)
            case *ast.CharLiteral:
                return e1.Set.Contains(e2.Char())
            case *ast.Not:
                return e1.Set.Subset(e2.Set)
            case *ast.UnicodeClass:
                // BUG FIX: this result was previously computed but never
                // returned, so the comparison fell through to the
                // "Missing return" panic for AnyOf vs UnicodeClass pairs.
                return unicodeClassContains(e2, e1.Set.Elements()...)
            default:
                panic("Invalid")
            }
        case *ast.CharLiteral:
            switch e2 := events[j].(type) {
            case *ast.Any:
                return true
            case *ast.AnyOf:
                return true
            case *ast.CharLiteral:
                return e1.Char() < e2.Char()
            case *ast.Not:
                return true
            case *ast.UnicodeClass:
                return true
            default:
                panic("Invalid")
            }
        case *ast.Not:
            switch e2 := events[j].(type) {
            case *ast.Any:
                return true
            case *ast.AnyOf:
                return false
            case *ast.CharLiteral:
                return false
            case *ast.Not:
                return e1.Set.Subset(e2.Set)
            case *ast.UnicodeClass:
                return false
            default:
                panic("Invalid")
            }
        case *ast.UnicodeClass:
            switch e2 := events[j].(type) {
            case *ast.Any:
                return true
            case *ast.AnyOf:
                return false
            case *ast.CharLiteral:
                return false
            case *ast.Not:
                return !unicodeClassContains(e1, e2.Set.Elements()...)
            case *ast.UnicodeClass:
                return e2.Type == ast.Letter && (e1.Type == ast.Lowcase || e1.Type == ast.Upcase)
            default:
                panic("Invalid")
            }
        default:
            panic("Invalid")
        }
    })
}
// toTriState maps a bool onto the corresponding TriState value.
func toTriState(b bool) TriState {
    result := False
    if b {
        result = True
    }
    return result
}
// unicodeClassContains reports whether every rune in rs belongs to the
// unicode class c. It panics on an unknown class type (only when at least
// one rune is supplied, matching the per-rune check).
func unicodeClassContains(c *ast.UnicodeClass, rs ...rune) bool {
    for _, r := range rs {
        var inClass bool
        switch c.Type {
        case ast.Letter:
            inClass = unicode.IsLetter(r)
        case ast.Upcase:
            inClass = unicode.IsUpper(r)
        case ast.Lowcase:
            inClass = unicode.IsLower(r)
        case ast.Number:
            inClass = unicode.IsNumber(r)
        case ast.Space:
            inClass = unicode.IsSpace(r)
        default:
            panic("Invalid")
        }
        if !inClass {
            return false
        }
    }
    return true
}
// fail reports an unresolvable lexer-event conflict to stdout and terminates
// the process with status 1: first the item set, then every pair of events
// whose precedence could not be ordered.
func fail(items []*item.Item, incomatibleEvents []eventPair) {
    fmt.Println("Error in lexer events")
    fmt.Println("  Set:")
    for _, item := range items {
        fmt.Println("    ", item)
    }
    fmt.Println("  Incompatible events:")
    for _, ee := range incomatibleEvents {
        fmt.Println("    ", ee.a, "  ", ee.b)
    }
    os.Exit(1)
}
package xrandr
// Rotation values.
const (
RotationNormal Rotation = iota
RotationLeft
RotationInverted
RotationRight
)
// Rotation represents the rotation status of a CRTC, which is sent to an output.
type Rotation int
// String returns this Rotation as a string, ready for use with the xrandr
// command. Unknown values are reported as "normal".
func (r Rotation) String() string {
    switch r {
    case RotationLeft:
        return "left"
    case RotationInverted:
        return "inverted"
    case RotationRight:
        return "right"
    default:
        return "normal"
    }
}
// Reflection values: no reflection, reflected across the X axis, reflected
// across the Y axis (see Reflection.String).
const (
    ReflectionNormal Reflection = iota
    ReflectionX
    ReflectionY
)

// Reflection represents the reflection status of a CRTC, which is sent to an output.
type Reflection int
// String returns this Reflection as a string, ready for use with the xrandr
// command. Unknown values are reported as "normal".
func (r Reflection) String() string {
    switch r {
    case ReflectionX:
        return "x"
    case ReflectionY:
        return "y"
    default:
        return "normal"
    }
}
// Output represents an randr output, condensing the information we need into
// a simple struct. Width/Height are in pixels and OffsetX/OffsetY position
// the output within the screen (per the JSON tags).
type Output struct {
    Name        string     `json:"name"`
    IsConnected bool       `json:"is_connected"`
    IsEnabled   bool       `json:"is_enabled"`
    IsPrimary   bool       `json:"is_primary"`
    ModeName    string     `json:"mode_name"`
    Width       uint       `json:"width_px"`
    Height      uint       `json:"height_px"`
    OffsetX     int        `json:"offset_x"`
    OffsetY     int        `json:"offset_y"`
    Rotation    Rotation   `json:"rotation"`
    Reflection  Reflection `json:"reflection"`
    Properties  Properties `json:"properties,omitempty"`
    Modes       []Mode     `json:"modes"`
}

// Properties represents the properties of an output, keyed by property name
// with the raw property bytes as values.
type Properties map[string][]byte

// Mode represents an randr mode, only including the information we need.
type Mode struct {
    ID          uint   `json:"id"`
    Name        string `json:"name"`
    Width       uint   `json:"width"`
    Height      uint   `json:"height"`
    IsPreferred bool   `json:"is_preferred"`
}
package gocommon
import (
"math"
"strconv"
)
// Rotli - Rotate left int. The rotation is performed on the unsigned bit
// pattern so negative values rotate correctly (a signed >> is arithmetic and
// smears the sign bit in), and the count is reduced modulo the word size so
// counts of 0 or >= the width behave as true rotations.
func Rotli(value int, count uint) int {
    count %= strconv.IntSize
    return int((uint(value) << count) | (uint(value) >> (strconv.IntSize - count)))
}

// Rotlu - Rotate left uint. The count is reduced modulo the word size.
func Rotlu(value uint, count uint) uint {
    count %= strconv.IntSize
    return (value << count) | (value >> (strconv.IntSize - count))
}

// Rotli32 - Rotate left int32, operating on the unsigned bit pattern.
func Rotli32(value int32, count uint32) int32 {
    count %= 32
    return int32((uint32(value) << count) | (uint32(value) >> (32 - count)))
}

// Rotlu32 - Rotate left uint32. The count is reduced modulo 32.
func Rotlu32(value uint32, count uint32) uint32 {
    count %= 32
    return (value << count) | (value >> (32 - count))
}

// Rotli64 - Rotate left int64, operating on the unsigned bit pattern.
func Rotli64(value int64, count uint32) int64 {
    count %= 64
    return int64((uint64(value) << count) | (uint64(value) >> (64 - count)))
}

// Rotlu64 - Rotate left uint64. The count is reduced modulo 64.
func Rotlu64(value uint64, count uint32) uint64 {
    count %= 64
    return (value << count) | (value >> (64 - count))
}
// Rotri - Rotate right int. The rotation is performed on the unsigned bit
// pattern so negative values rotate correctly (a signed >> is arithmetic and
// smears the sign bit in), and the count is reduced modulo the word size so
// counts of 0 or >= the width behave as true rotations.
func Rotri(value int, count uint) int {
    count %= strconv.IntSize
    return int((uint(value) >> count) | (uint(value) << (strconv.IntSize - count)))
}

// Rotru - Rotate right uint. The count is reduced modulo the word size.
func Rotru(value uint, count uint) uint {
    count %= strconv.IntSize
    return (value >> count) | (value << (strconv.IntSize - count))
}

// Rotri32 - Rotate right int32, operating on the unsigned bit pattern.
func Rotri32(value int32, count uint32) int32 {
    count %= 32
    return int32((uint32(value) >> count) | (uint32(value) << (32 - count)))
}

// Rotru32 - Rotate right uint32. The count is reduced modulo 32.
func Rotru32(value uint32, count uint32) uint32 {
    count %= 32
    return (value >> count) | (value << (32 - count))
}

// Rotri64 - Rotate right int64, operating on the unsigned bit pattern.
func Rotri64(value int64, count uint32) int64 {
    count %= 64
    return int64((uint64(value) >> count) | (uint64(value) << (64 - count)))
}

// Rotru64 - Rotate right uint64. The count is reduced modulo 64.
func Rotru64(value uint64, count uint32) uint64 {
    count %= 64
    return (value >> count) | (value << (64 - count))
}
// Decompf32 - Decompose a float32 value into two numbers. Given the following:
//	3.141592
// The documented return values are:
//	base = 3
//	frac = 141592
//
// NOTE(review): math.Modf returns the fractional part as a value with
// magnitude < 1 (e.g. 0.141592), so int32(f) truncates frac to 0 for every
// non-integral input — the code does not produce the documented example.
// Confirm the intended contract (digit sequence vs. truncation) before
// relying on frac; Makef32 appears to expect the digit-sequence form.
func Decompf32(val float32) (base, frac int32) {
    b, f := math.Modf(float64(val))
    return int32(b), int32(f)
}

// Decompf64 - Decompose a float64 value into two numbers. Given the following:
//	3.141592
// The documented return values are:
//	base = 3
//	frac = 141592
//
// NOTE(review): same caveat as Decompf32 — int64(f) truncates the
// fractional part (< 1) to 0 for every non-integral input.
func Decompf64(val float64) (base, frac int64) {
    b, f := math.Modf(val)
    return int64(b), int64(f)
}
// Makef32 - Make a float32 variable from two numbers. Given base = 3 and
// frac = 141592, the result is 3.141592: the fraction digits are divided by
// 10 until they drop below or equal 1, then added to the base.
func Makef32(base, frac int32) float32 {
    whole, fraction := float32(base), float32(frac)
    for fraction > 1.0 {
        fraction /= 10.0
    }
    return whole + fraction
}

// Makef64 - Make a float64 variable from two numbers. Given base = 3 and
// frac = 141592, the result is 3.141592: the fraction digits are divided by
// 10 until they drop below or equal 1, then added to the base.
func Makef64(base, frac int64) float64 {
    whole, fraction := float64(base), float64(frac)
    for fraction > 1.0 {
        fraction /= 10.0
    }
    return whole + fraction
}
// getRangei16 returns maxVal-minVal, substituting 1 when the span is zero.
func getRangei16(minVal, maxVal int16) int16 {
    if r := maxVal - minVal; r != 0 {
        return r
    }
    return 1
}

// getRangeu16 returns maxVal-minVal, substituting 1 when the span is zero.
func getRangeu16(minVal, maxVal uint16) uint16 {
    if r := maxVal - minVal; r != 0 {
        return r
    }
    return 1
}

// getRangei32 returns maxVal-minVal, substituting 1 when the span is zero.
func getRangei32(minVal, maxVal int32) int32 {
    if r := maxVal - minVal; r != 0 {
        return r
    }
    return 1
}

// getRangeu32 returns maxVal-minVal, substituting 1 when the span is zero.
func getRangeu32(minVal, maxVal uint32) uint32 {
    if r := maxVal - minVal; r != 0 {
        return r
    }
    return 1
}

// getRangei64 returns maxVal-minVal, substituting 1 when the span is zero.
func getRangei64(minVal, maxVal int64) int64 {
    if r := maxVal - minVal; r != 0 {
        return r
    }
    return 1
}

// getRangeu64 returns maxVal-minVal, substituting 1 when the span is zero.
func getRangeu64(minVal, maxVal uint64) uint64 {
    if r := maxVal - minVal; r != 0 {
        return r
    }
    return 1
}

// getRangef32 returns maxVal-minVal, substituting 1.0 when the span is zero.
func getRangef32(minVal, maxVal float32) float32 {
    if r := maxVal - minVal; r != 0 {
        return r
    }
    return 1.0
}

// getRangef64 returns maxVal-minVal, substituting 1.0 when the span is zero.
func getRangef64(minVal, maxVal float64) float64 {
    if r := maxVal - minVal; r != 0 {
        return r
    }
    return 1.0
}
// Mini - Return the smaller int of x or y.
func Mini(x, y int) int {
    if y < x {
        return y
    }
    return x
}

// Minu - Return the smaller uint of x or y.
func Minu(x, y uint) uint {
    if y < x {
        return y
    }
    return x
}

// Mini32 - Return the smaller int32 of x or y.
func Mini32(x, y int32) int32 {
    if y < x {
        return y
    }
    return x
}

// Minu32 - Return the smaller uint32 of x or y.
func Minu32(x, y uint32) uint32 {
    if y < x {
        return y
    }
    return x
}

// Mini64 - Return the smaller int64 of x or y.
func Mini64(x, y int64) int64 {
    if y < x {
        return y
    }
    return x
}

// Minu64 - Return the smaller uint64 of x or y.
func Minu64(x, y uint64) uint64 {
    if y < x {
        return y
    }
    return x
}
// Maxi - Return the larger int of x or y.
func Maxi(x, y int) int {
    if y > x {
        return y
    }
    return x
}

// Maxu - Return the larger uint of x or y.
func Maxu(x, y uint) uint {
    if y > x {
        return y
    }
    return x
}

// Maxi32 - Return the larger int32 of x or y.
func Maxi32(x, y int32) int32 {
    if y > x {
        return y
    }
    return x
}

// Maxu32 - Return the larger uint32 of x or y.
func Maxu32(x, y uint32) uint32 {
    if y > x {
        return y
    }
    return x
}

// Maxi64 - Return the larger int64 of x or y.
func Maxi64(x, y int64) int64 {
    if y > x {
        return y
    }
    return x
}

// Maxu64 - Return the larger uint64 of x or y.
func Maxu64(x, y uint64) uint64 {
    if y > x {
        return y
    }
    return x
}
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// Reminder models an Outlook event reminder: the event's identity, timing
// and location, plus the time the reminder fires.
type Reminder struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Identifies the version of the reminder. Every time the reminder is changed, changeKey changes as well. This allows Exchange to apply changes to the correct version of the object.
    changeKey *string
    // The date, time and time zone that the event ends.
    eventEndTime DateTimeTimeZoneable
    // The unique ID of the event. Read only.
    eventId *string
    // The location of the event.
    eventLocation Locationable
    // The date, time, and time zone that the event starts.
    eventStartTime DateTimeTimeZoneable
    // The text of the event's subject line.
    eventSubject *string
    // The URL to open the event in Outlook on the web.The event will open in the browser if you are logged in to your mailbox via Outlook on the web. You will be prompted to login if you are not already logged in with the browser.This URL cannot be accessed from within an iFrame.
    eventWebLink *string
    // The date, time, and time zone that the reminder is set to occur.
    reminderFireTime DateTimeTimeZoneable
}
// NewReminder instantiates a new reminder and sets the default values
// (an empty additional-data map).
func NewReminder() *Reminder {
    r := &Reminder{}
    r.SetAdditionalData(map[string]interface{}{})
    return r
}
// CreateReminderFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// Reminder has no subtypes, so the parse node is ignored and a plain
// Reminder is always returned; the error result is always nil.
func CreateReminderFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewReminder(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *Reminder) GetAdditionalData() map[string]interface{} {
    if m == nil {
        return nil
    }
    return m.additionalData
}

// GetChangeKey gets the changeKey property value. Identifies the version of the reminder; it changes every time the reminder changes, letting Exchange apply changes to the correct version of the object.
func (m *Reminder) GetChangeKey() *string {
    if m == nil {
        return nil
    }
    return m.changeKey
}

// GetEventEndTime gets the eventEndTime property value. The date, time and time zone that the event ends.
func (m *Reminder) GetEventEndTime() DateTimeTimeZoneable {
    if m == nil {
        return nil
    }
    return m.eventEndTime
}

// GetEventId gets the eventId property value. The unique ID of the event. Read only.
func (m *Reminder) GetEventId() *string {
    if m == nil {
        return nil
    }
    return m.eventId
}

// GetEventLocation gets the eventLocation property value. The location of the event.
func (m *Reminder) GetEventLocation() Locationable {
    if m == nil {
        return nil
    }
    return m.eventLocation
}

// GetEventStartTime gets the eventStartTime property value. The date, time, and time zone that the event starts.
func (m *Reminder) GetEventStartTime() DateTimeTimeZoneable {
    if m == nil {
        return nil
    }
    return m.eventStartTime
}

// GetEventSubject gets the eventSubject property value. The text of the event's subject line.
func (m *Reminder) GetEventSubject() *string {
    if m == nil {
        return nil
    }
    return m.eventSubject
}

// GetEventWebLink gets the eventWebLink property value. The URL to open the event in Outlook on the web; it cannot be accessed from within an iFrame.
func (m *Reminder) GetEventWebLink() *string {
    if m == nil {
        return nil
    }
    return m.eventWebLink
}
// GetFieldDeserializers returns the deserialization information for the
// current model: one closure per wire field. Each closure reads its value
// from the parse node and, when the value is present (non-nil), stores it on
// the receiver through the matching setter; absent fields are left untouched.
func (m *Reminder) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["changeKey"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetChangeKey(val)
        }
        return nil
    }
    res["eventEndTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateDateTimeTimeZoneFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEventEndTime(val.(DateTimeTimeZoneable))
        }
        return nil
    }
    res["eventId"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEventId(val)
        }
        return nil
    }
    res["eventLocation"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateLocationFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEventLocation(val.(Locationable))
        }
        return nil
    }
    res["eventStartTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateDateTimeTimeZoneFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEventStartTime(val.(DateTimeTimeZoneable))
        }
        return nil
    }
    res["eventSubject"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEventSubject(val)
        }
        return nil
    }
    res["eventWebLink"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetEventWebLink(val)
        }
        return nil
    }
    res["reminderFireTime"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateDateTimeTimeZoneFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetReminderFireTime(val.(DateTimeTimeZoneable))
        }
        return nil
    }
    return res
}
// GetReminderFireTime gets the reminderFireTime property value. The date, time, and time zone that the reminder is set to occur.
func (m *Reminder) GetReminderFireTime() DateTimeTimeZoneable {
    if m == nil {
        return nil
    }
    return m.reminderFireTime
}
// Serialize writes every Reminder field to the given serialization writer —
// the scalar fields as strings, the time/location fields as objects — and
// finishes with the additional-data bag. It stops at the first write error.
func (m *Reminder) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    {
        err := writer.WriteStringValue("changeKey", m.GetChangeKey())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("eventEndTime", m.GetEventEndTime())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("eventId", m.GetEventId())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("eventLocation", m.GetEventLocation())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("eventStartTime", m.GetEventStartTime())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("eventSubject", m.GetEventSubject())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("eventWebLink", m.GetEventWebLink())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("reminderFireTime", m.GetReminderFireTime())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *Reminder) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetChangeKey sets the changeKey property value. Identifies the version of the reminder. Every time the reminder is changed, changeKey changes as well. This allows Exchange to apply changes to the correct version of the object.
func (m *Reminder) SetChangeKey(value *string)() {
    if m == nil {
        return
    }
    m.changeKey = value
}
// SetEventEndTime sets the eventEndTime property value. The date, time and time zone that the event ends.
func (m *Reminder) SetEventEndTime(value DateTimeTimeZoneable)() {
    if m == nil {
        return
    }
    m.eventEndTime = value
}
// SetEventId sets the eventId property value. The unique ID of the event. Read only.
func (m *Reminder) SetEventId(value *string)() {
    if m == nil {
        return
    }
    m.eventId = value
}
// SetEventLocation sets the eventLocation property value. The location of the event.
func (m *Reminder) SetEventLocation(value Locationable)() {
    if m == nil {
        return
    }
    m.eventLocation = value
}
// SetEventStartTime sets the eventStartTime property value. The date, time, and time zone that the event starts.
func (m *Reminder) SetEventStartTime(value DateTimeTimeZoneable)() {
    if m == nil {
        return
    }
    m.eventStartTime = value
}
// SetEventSubject sets the eventSubject property value. The text of the event's subject line.
func (m *Reminder) SetEventSubject(value *string)() {
    if m == nil {
        return
    }
    m.eventSubject = value
}
// SetEventWebLink sets the eventWebLink property value. The URL to open the event in Outlook on the web.The event will open in the browser if you are logged in to your mailbox via Outlook on the web. You will be prompted to login if you are not already logged in with the browser.This URL cannot be accessed from within an iFrame.
func (m *Reminder) SetEventWebLink(value *string)() {
    if m == nil {
        return
    }
    m.eventWebLink = value
}
// SetReminderFireTime sets the reminderFireTime property value. The date, time, and time zone that the reminder is set to occur.
func (m *Reminder) SetReminderFireTime(value DateTimeTimeZoneable)() {
if m != nil {
m.reminderFireTime = value
}
} | models/reminder.go | 0.604165 | 0.466299 | reminder.go | starcoder |
package compare
import (
"fmt"
"reflect"
"github.com/golang/protobuf/proto"
)
// Action is the optional return value type of functions passed to
// Register and Custom.Register.
type Action int
const (
	// Done is returned by custom comparison functions when the two objects
	// require no further comparisons.
	Done Action = iota
	// Fallback is returned by custom comparison functions when the comparison of
	// the two objects should continue with the fallback comparison method.
	Fallback
)
// customKey identifies a registered comparator by the (identical) static
// types of the two values it compares.
type customKey struct {
	reference reflect.Type
	value reflect.Type
}
var (
	// globalCustom holds the package-level comparator registrations; every
	// Custom falls back to it (see Custom.fallback).
	globalCustom = &Custom{}
	// Reflected types used to validate registered comparator signatures.
	comparatorType = reflect.TypeOf(Comparator{})
	actionType = reflect.TypeOf(Done)
	// proto.Message values get a dedicated comparator based on proto.Equal.
	protoType = reflect.TypeOf((*proto.Message)(nil)).Elem()
	protoComparator = reflect.ValueOf(compareProtos)
)
// Custom is a collection of custom comparators that will be used instead of
// the default comparison methods when comparing objects of the registered types.
type Custom struct {
	// funcs is lazily initialized by Register.
	funcs map[customKey]reflect.Value
}
// Register assigns the function f with signature func(comparator, T, T) to
// be used as the comparator for instances of type T when using
// Custom.Compare(). f may return nothing or an Action.
// Register will panic if f does not match the expected signature, or if a
// comparator for type T has already been registered with this Custom.
func (c *Custom) Register(f interface{}) {
	v := reflect.ValueOf(f)
	t := v.Type()
	if t.Kind() != reflect.Func {
		panic(fmt.Sprintf("Invalid function %v", t))
	}
	if t.NumIn() != 3 {
		panic(fmt.Sprintf("Compare functions must have 3 args, got %v", t))
	}
	if t.In(0) != comparatorType {
		panic(fmt.Sprintf("First argument must be compare.Comparator, got %v", t.In(0)))
	}
	// Either no results, or exactly one result of type Action.
	if !(t.NumOut() == 0 || (t.NumOut() == 1 && t.Out(0) == actionType)) {
		// The message has no format verbs, so the previous fmt.Sprintf wrapper
		// was redundant (flagged by go vet's printf check).
		panic("Compare functions must either have no return values or a single Action")
	}
	// The two compared arguments must share one static type; that type keys
	// the registration.
	key := customKey{t.In(1), t.In(2)}
	if key.reference != key.value {
		panic(fmt.Sprintf("Comparison arguments must be of the same type, got %v and %v", key.reference, key.value))
	}
	if c.funcs == nil {
		c.funcs = map[customKey]reflect.Value{}
	} else if _, found := c.funcs[key]; found {
		panic(fmt.Sprintf("%v to %v already registered", key.reference, key.value))
	}
	c.funcs[key] = v
}
// Compare delivers all the differences it finds to the specified Handler.
// Compare uses the list of custom comparison handlers registered with
// Custom.Register(), falling back to the default comparison method for the type
// when no custom comparison function has been registered with this custom.
// If the reference and value are equal, the handler will never be invoked.
func (c *Custom) Compare(reference, value interface{}, handler Handler) {
	// Delegate to the package comparison engine, passing c so its
	// registered comparators take precedence over the defaults.
	compare(reference, value, handler, c)
}
// Diff returns the differences between the reference and the value.
// Diff uses the list of custom comparison handlers registered with
// Custom.Register(), falling back to the default comparison method for the type
// when no custom comparison function has been registered with this custom.
// The maximum number of differences is controlled by limit, which must be >0.
// If they compare equal, the length of the returned slice will be 0.
func (c *Custom) Diff(reference, value interface{}, limit int) []Path {
	// Collect up to `limit` differences, then expose them as a []Path.
	found := make(collect, 0, limit)
	c.Compare(reference, value, found.add)
	return []Path(found)
}
// call invokes the comparator registered for key with args and resolves any
// Fallback chaining by consulting the fallback Custom (and ultimately the
// built-in comparison). A nil receiver reports Fallback so recursion
// terminates at the bottom of the chain.
func (c *Custom) call(key customKey, args []reflect.Value) Action {
	if c == nil {
		// No custom comparators at this level; let the caller fall back.
		return Fallback
	}
	comparator, found := c.funcs[key]
	if !found {
		if !key.reference.Implements(protoType) {
			// No direct registration: try the fallback registrations.
			return c.fallback().call(key, args)
		}
		// proto.Message values use the built-in proto.Equal comparator.
		comparator = protoComparator
	}
	// Comparators that return no value implicitly mean Done.
	action := Done
	if res := comparator.Call(args); len(res) > 0 {
		action = res[0].Interface().(Action)
	}
	switch action {
	case Done:
		return Done
	case Fallback:
		// The comparator asked to continue with the fallback method.
		return c.fallback().call(key, args)
	default:
		panic(fmt.Errorf("Unknown action %v", action))
	}
}
// fallback returns the next Custom to consult when this one has no handler:
// the package-global registrations, or nil when c already is the global set.
func (c *Custom) fallback() *Custom {
	if c != globalCustom {
		return globalCustom
	}
	return nil
}
// compareProtos is the built-in comparator for proto.Message references:
// a single diff covering the whole message is emitted unless value is also a
// proto.Message and proto.Equal reports the two equal (no field-level
// drill-down is performed).
func compareProtos(c Comparator, reference proto.Message, value interface{}) {
	if v, ok := value.(proto.Message); !ok || !proto.Equal(reference, v) {
		c.AddDiff(reference, value)
	}
}
package cy
import "github.com/rannoch/cldr"
// calendar holds the CLDR-derived date/time formatting data for the Welsh
// ("cy") locale: skeleton date/time/combined patterns plus month, day and
// day-period names at each CLDR width (abbreviated/narrow/short/wide).
var calendar = cldr.Calendar{
	Formats: cldr.CalendarFormats{
		Date: cldr.CalendarDateFormat{Full: "EEEE, d MMMM y", Long: "d MMMM y", Medium: "d MMM y", Short: "dd/MM/yy"},
		Time: cldr.CalendarDateFormat{Full: "HH:mm:ss zzzz", Long: "HH:mm:ss z", Medium: "HH:mm:ss", Short: "HH:mm"},
		DateTime: cldr.CalendarDateFormat{Full: "{1} 'am' {0}", Long: "{1} 'am' {0}", Medium: "{1} {0}", Short: "{1} {0}"},
	},
	FormatNames: cldr.CalendarFormatNames{
		Months: cldr.CalendarMonthFormatNames{
			Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "Ion", Feb: "Chw", Mar: "Maw", Apr: "Ebr", May: "Mai", Jun: "Meh", Jul: "Gor", Aug: "Awst", Sep: "Medi", Oct: "Hyd", Nov: "Tach", Dec: "Rhag"},
			Narrow: cldr.CalendarMonthFormatNameValue{Jan: "I", Feb: "Ch", Mar: "M", Apr: "E", May: "M", Jun: "M", Jul: "G", Aug: "A", Sep: "M", Oct: "H", Nov: "T", Dec: "Rh"},
			Short: cldr.CalendarMonthFormatNameValue{},
			Wide: cldr.CalendarMonthFormatNameValue{Jan: "Ionawr", Feb: "Chwefror", Mar: "Mawrth", Apr: "Ebrill", May: "Mai", Jun: "Mehefin", Jul: "Gorffennaf", Aug: "Awst", Sep: "Medi", Oct: "Hydref", Nov: "Tachwedd", Dec: "Rhagfyr"},
		},
		Days: cldr.CalendarDayFormatNames{
			Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "Sul", Mon: "Llun", Tue: "Maw", Wed: "Mer", Thu: "Iau", Fri: "Gwe", Sat: "Sad"},
			Narrow: cldr.CalendarDayFormatNameValue{Sun: "S", Mon: "Ll", Tue: "M", Wed: "M", Thu: "I", Fri: "G", Sat: "S"},
			Short: cldr.CalendarDayFormatNameValue{Sun: "Su", Mon: "Ll", Tue: "Ma", Wed: "Me", Thu: "Ia", Fri: "Gw", Sat: "Sa"},
			Wide: cldr.CalendarDayFormatNameValue{Sun: "Dydd Sul", Mon: "Dydd Llun", Tue: "Dydd Mawrth", Wed: "Dydd Mercher", Thu: "Dydd Iau", Fri: "Dydd Gwener", Sat: "Dydd Sadwrn"},
		},
		Periods: cldr.CalendarPeriodFormatNames{
			Abbreviated: cldr.CalendarPeriodFormatNameValue{},
			Narrow: cldr.CalendarPeriodFormatNameValue{AM: "AM", PM: "PM"},
			Short: cldr.CalendarPeriodFormatNameValue{},
			Wide: cldr.CalendarPeriodFormatNameValue{AM: "AM", PM: "PM"},
		},
	},
}
package arc
import (
"context"
"time"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arc/motioninput"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/coords"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// mouseInputParams holds a collection of tests to run in the given test setup.
type mouseInputParams struct {
	tests []motioninput.WMTestParams
}
func init() {
	// Two parameterized variants are registered: "tablet" (which requires an
	// internal display and touchscreen) and "clamshell"; each carries the set
	// of window-management states to exercise.
	testing.AddTest(&testing.Test{
		Func: MouseInput,
		Desc: "Verifies mouse input in various window states on Android",
		Contacts: []string{"<EMAIL>", "<EMAIL>"},
		Attr: []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome", "android_vm"},
		Fixture: "arcBooted",
		Timeout: 10 * time.Minute,
		Params: []testing.Param{{
			Name: "tablet",
			ExtraHardwareDeps: hwdep.D(hwdep.InternalDisplay(), hwdep.TouchScreen()),
			Val: mouseInputParams{[]motioninput.WMTestParams{
				{
					Name: "Tablet",
					TabletMode: true,
					WmEventToSend: nil,
				}, {
					Name: "Tablet Snapped Left",
					TabletMode: true,
					WmEventToSend: ash.WMEventSnapLeft,
				}, {
					Name: "Tablet Snapped Right",
					TabletMode: true,
					WmEventToSend: ash.WMEventSnapRight,
				},
			}},
		}, {
			Name: "clamshell",
			Val: mouseInputParams{[]motioninput.WMTestParams{
				{
					Name: "Clamshell Normal",
					TabletMode: false,
					WmEventToSend: ash.WMEventNormal,
				}, {
					Name: "Clamshell Fullscreen",
					TabletMode: false,
					WmEventToSend: ash.WMEventFullscreen,
				}, {
					Name: "Clamshell Maximized",
					TabletMode: false,
					WmEventToSend: ash.WMEventMaximize,
				},
			}},
		}},
	})
}
// MouseInput runs several sub-tests, where each sub-test sets up the Chrome WM environment as
// specified by the motioninput.WMTestParams. Each sub-test installs and runs the test application
// (ArcMotionInputTest.apk), injects various input events into ChromeOS through the test API,
// and verifies that those events were received by the Android application in the expected screen
// locations.
func MouseInput(ctx context.Context, s *testing.State) {
	p := s.FixtValue().(*arc.PreData)
	cr := p.Chrome
	a := p.ARC
	d := p.UIDevice
	testParams := s.Param().(mouseInputParams)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create test API connection: ", err)
	}
	// Install the companion APK once; each sub-test runs it via
	// RunTestWithWMParams below.
	if err := a.Install(ctx, arc.APKPath(motioninput.APK)); err != nil {
		s.Fatal("Failed installing ", motioninput.APK, ": ", err)
	}
	for _, params := range testParams.tests {
		s.Run(ctx, params.Name+": Verify Mouse", func(ctx context.Context, s *testing.State) {
			motioninput.RunTestWithWMParams(ctx, s, tconn, d, a, &params, verifyMouse)
		})
	}
}
// mouseMatcher returns a motionEventMatcher that matches events from a Mouse device.
// Move, down, button-press and hover-exit events carry full pressure (1.0);
// everything else carries zero pressure.
func mouseMatcher(a motioninput.Action, p coords.Point) motioninput.Matcher {
	var pressure float64
	switch a {
	case motioninput.ActionMove, motioninput.ActionDown, motioninput.ActionButtonPress, motioninput.ActionHoverExit:
		pressure = 1.
	}
	return motioninput.SinglePointerMatcher(a, motioninput.SourceMouse, p, pressure)
}
// initialEventMatcher returns a motionEventMatcher that matches the first mouse event
// that should be received by an app: either HOVER_ENTER or HOVER_MOVE.
func initialEventMatcher(p coords.Point) motioninput.Matcher {
	return motioninput.MatcherOr(mouseMatcher(motioninput.ActionHoverEnter, p), mouseMatcher(motioninput.ActionHoverMove, p))
}
// verifyMouse tests the behavior of mouse events injected into Ash on Android apps. It tests hover,
// button, and drag events. It does not use the uinput mouse to inject events because the scale
// relation between the relative movements injected by a relative mouse device and the display
// pixels is determined by ChromeOS and could vary between devices.
func verifyMouse(ctx context.Context, s *testing.State, tconn *chrome.TestConn, t *motioninput.WMTestState, tester *motioninput.Tester) {
	s.Log("Verifying Mouse")
	// The sequence of events reported for ARC++ P and newer versions differ, and this test
	// takes those into account. In particular, P reports HOVER_EXIT and HOVER_ENTER before
	// and after button down and up respectively, which newer versions do not report.
	version, err := arc.SDKVersion()
	if err != nil {
		s.Fatal("Failed to get ARC SDK version: ", err)
	}
	// Phase 1: move the pointer into the window and wait for the app to see it.
	p := t.CenterOfWindow()
	e := t.ExpectedPoint(p)
	s.Log("Injected initial move, waiting... ")
	if err := mouse.Move(tconn, p, 0)(ctx); err != nil {
		s.Fatalf("Failed to inject move at %v: %v", e, err)
	}
	if err := tester.WaitUntilEvent(ctx, initialEventMatcher(e)); err != nil {
		s.Fatal("Failed to wait for the initial hover event: ", err)
	}
	if err := tester.ClearMotionEvents(ctx); err != nil {
		s.Fatal("Failed to clear events: ", err)
	}
	// numMouseMoveIterations is the number of times certain motion events should be repeated in
	// a test. Increasing this number will increase the time it takes to run the test.
	const numMouseMoveIterations = 1
	// deltaDP is the amount we want to move the mouse pointer between each successive injected
	// event. We use an arbitrary value that is not too large so that we can safely assume that
	// the injected events stay within the bounds of the application in the various WM states, so
	// that clicks performed after moving the mouse are still inside the application.
	const deltaDP = 5
	// Phase 2: hover moves (no button held) must produce HOVER_MOVE events.
	for i := 0; i < numMouseMoveIterations; i++ {
		p.X += deltaDP
		p.Y += deltaDP
		e = t.ExpectedPoint(p)
		s.Log("Verifying mouse move event at ", e)
		if err := mouse.Move(tconn, p, 0)(ctx); err != nil {
			s.Fatalf("Failed to inject move at %v: %v", e, err)
		}
		if err := tester.ExpectEventsAndClear(ctx, mouseMatcher(motioninput.ActionHoverMove, e)); err != nil {
			s.Fatal("Failed to expect events and clear: ", err)
		}
	}
	// Phase 3: press the left button; P additionally reports HOVER_EXIT first.
	if err := mouse.Press(tconn, mouse.LeftButton)(ctx); err != nil {
		s.Fatal("Failed to press button on mouse: ", err)
	}
	var pressEvents []motioninput.Matcher
	if version <= arc.SDKP {
		pressEvents = append(pressEvents, mouseMatcher(motioninput.ActionHoverExit, e))
	}
	pressEvents = append(pressEvents, mouseMatcher(motioninput.ActionDown, e), mouseMatcher(motioninput.ActionButtonPress, e))
	if err := tester.ExpectEventsAndClear(ctx, pressEvents...); err != nil {
		s.Fatal("Failed to expect events and clear: ", err)
	}
	// Phase 4: drag (moves with button held) must produce MOVE events.
	for i := 0; i < numMouseMoveIterations; i++ {
		p.X -= deltaDP
		p.Y -= deltaDP
		e = t.ExpectedPoint(p)
		s.Log("Verifying mouse move event at ", e)
		if err := mouse.Move(tconn, p, 0)(ctx); err != nil {
			s.Fatalf("Failed to inject move at %v: %v", e, err)
		}
		if err := tester.ExpectEventsAndClear(ctx, mouseMatcher(motioninput.ActionMove, e)); err != nil {
			s.Fatal("Failed to expect events and clear: ", err)
		}
	}
	// Phase 5: release; post-P versions additionally report HOVER_MOVE after up.
	if err := mouse.Release(tconn, mouse.LeftButton)(ctx); err != nil {
		s.Fatal("Failed to release mouse button: ", err)
	}
	var releaseEvents []motioninput.Matcher
	releaseEvents = append(releaseEvents, mouseMatcher(motioninput.ActionButtonRelease, e), mouseMatcher(motioninput.ActionUp, e))
	if version > arc.SDKP {
		releaseEvents = append(releaseEvents, mouseMatcher(motioninput.ActionHoverMove, e))
	}
	if err := tester.ExpectEventsAndClear(ctx, releaseEvents...); err != nil {
		s.Fatal("Failed to expect events and clear: ", err)
	}
	// Phase 6: a final hover move; P reports a HOVER_ENTER first.
	p.X -= deltaDP
	p.Y -= deltaDP
	e = t.ExpectedPoint(p)
	if err := mouse.Move(tconn, p, 0)(ctx); err != nil {
		s.Fatalf("Failed to inject move at %v: %v", e, err)
	}
	var moveEvents []motioninput.Matcher
	if version <= arc.SDKP {
		moveEvents = append(moveEvents, mouseMatcher(motioninput.ActionHoverEnter, e))
	}
	moveEvents = append(moveEvents, mouseMatcher(motioninput.ActionHoverMove, e))
	if err := tester.ExpectEventsAndClear(ctx, moveEvents...); err != nil {
		s.Fatal("Failed to expect events and clear: ", err)
	}
}
Common 3D shapes.
*/
//-----------------------------------------------------------------------------
package sdf
import (
"errors"
"fmt"
"math"
)
//-----------------------------------------------------------------------------
// CounterBoredHole3D returns the SDF3 for a counterbored hole.
func CounterBoredHole3D(
	l float64, // total length
	r float64, // hole radius
	cbRadius float64, // counter bore radius
	cbDepth float64, // counter bore depth
) SDF3 {
	// Main hole through the full length.
	hole := Cylinder3D(l, r, 0)
	// Counterbore recess, shifted up so it is flush with the top face.
	bore := Transform3D(Cylinder3D(cbDepth, cbRadius, 0), Translate3d(V3{0, 0, (l - cbDepth) / 2}))
	return Union3D(hole, bore)
}
// ChamferedHole3D returns the SDF3 for a chamfered hole (45 degrees).
func ChamferedHole3D(
	l float64, // total length
	r float64, // hole radius
	chRadius float64, // chamfer radius
) SDF3 {
	// Straight hole plus a 45-degree cone at the top face forming the chamfer.
	hole := Cylinder3D(l, r, 0)
	chamfer := Transform3D(Cone3D(chRadius, r, r+chRadius, 0), Translate3d(V3{0, 0, (l - chRadius) / 2}))
	return Union3D(hole, chamfer)
}
// CounterSunkHole3D returns the SDF3 for a countersunk hole (45 degrees).
// A countersink is a chamfered hole whose chamfer radius equals the hole radius.
func CounterSunkHole3D(
	l float64, // total length
	r float64, // hole radius
) SDF3 {
	return ChamferedHole3D(l, r, r)
}
//-----------------------------------------------------------------------------
// HexHead3D returns the rounded hex head for a nut or bolt.
// round selects which faces are dome-rounded: "t" (top), "b" (bottom),
// "tb" (both) or "" (none). Any other value silently applies no rounding.
func HexHead3D(
	r float64, // radius
	h float64, // height
	round string, // (t)top, (b)bottom, (tb)top/bottom
) SDF3 {
	// basic hex body: shrink the hexagon by the corner radius, then offset
	// it back out so the corners are rounded rather than sharp.
	cornerRound := r * 0.08
	hex2d := Polygon2D(Nagon(6, r-cornerRound))
	hex2d = Offset2D(hex2d, cornerRound)
	hex3d := Extrude3D(hex2d, h)
	// round out the top and/or bottom as required
	if round != "" {
		topRound := r * 1.6
		// d is the apothem (face-to-center distance) of the hexagon; zOfs
		// places the rounding sphere so its surface meets the hex faces at
		// the top/bottom of the head.
		d := r * math.Cos(DtoR(30))
		sphere3d := Sphere3D(topRound)
		zOfs := math.Sqrt(topRound*topRound-d*d) - h/2
		if round == "t" || round == "tb" {
			hex3d = Intersect3D(hex3d, Transform3D(sphere3d, Translate3d(V3{0, 0, -zOfs})))
		}
		if round == "b" || round == "tb" {
			hex3d = Intersect3D(hex3d, Transform3D(sphere3d, Translate3d(V3{0, 0, zOfs})))
		}
	}
	return hex3d
}
// KnurledHead3D returns a cylindrical knurled head.
func KnurledHead3D(
	r float64, // radius
	h float64, // height
	pitch float64, // knurl pitch
) SDF3 {
	// Round the cylinder edges slightly, and fit a whole number of knurl
	// pitches into the remaining height.
	edgeRound := r * 0.05
	knurlHeight := pitch * math.Floor((h-edgeRound)/pitch)
	knurl := Knurl3D(knurlHeight, r, pitch, pitch*0.3, DtoR(45))
	return Union3D(Cylinder3D(h, r, edgeRound), knurl)
}
//-----------------------------------------------------------------------------
// KnurlProfile returns a 2D knurl profile: a triangular tooth one pitch
// wide, sitting on top of the cylinder radius.
func KnurlProfile(
	radius float64, // radius of knurled cylinder
	pitch float64, // pitch of the knurl
	height float64, // height of the knurl
) SDF2 {
	profile := NewPolygon()
	profile.Add(pitch/2, 0)
	profile.Add(pitch/2, radius)
	profile.Add(0, radius+height)
	profile.Add(-pitch/2, radius)
	profile.Add(-pitch/2, 0)
	return Polygon2D(profile.Vertices())
}
// Knurl3D returns a knurled cylinder.
func Knurl3D(
	length float64, // length of cylinder
	radius float64, // radius of cylinder
	pitch float64, // knurl pitch
	height float64, // knurl height
	theta float64, // knurl helix angle
) SDF3 {
	// A knurl is the intersection of left- and right-handed multistart
	// "threads". The number of starts follows from the desired helix angle.
	starts := int(Tau * radius * math.Tan(theta) / pitch)
	profile := KnurlProfile(radius, pitch, height)
	lhs := Screw3D(profile, length, pitch, starts)
	rhs := Screw3D(profile, length, pitch, -starts)
	return Intersect3D(lhs, rhs)
}
//-----------------------------------------------------------------------------
// WasherParms defines the parameters for a washer.
type WasherParms struct {
	Thickness float64 // thickness
	InnerRadius float64 // inner radius
	OuterRadius float64 // outer radius
	Remove float64 // fraction of complete washer removed
}
// Washer3D returns a washer.
// This is also used to create circular walls.
// It panics when Thickness <= 0, InnerRadius >= OuterRadius, or Remove is
// outside [0, 1).
func Washer3D(k *WasherParms) SDF3 {
	if k.Thickness <= 0 {
		panic("Thickness <= 0")
	}
	if k.InnerRadius >= k.OuterRadius {
		panic("InnerRadius >= OuterRadius")
	}
	if k.Remove < 0 || k.Remove >= 1.0 {
		panic("Remove must be [0..1)")
	}
	var s SDF3
	if k.Remove == 0 {
		// difference of cylinders
		outer := Cylinder3D(k.Thickness, k.OuterRadius, 0)
		inner := Cylinder3D(k.Thickness, k.InnerRadius, 0)
		s = Difference3D(outer, inner)
	} else {
		// build a 2d profile box (the washer's radial cross-section)
		dx := k.OuterRadius - k.InnerRadius
		dy := k.Thickness
		xofs := 0.5 * (k.InnerRadius + k.OuterRadius)
		b := Box2D(V2{dx, dy}, 0)
		b = Transform2D(b, Translate2d(V2{xofs, 0}))
		// rotate about the z-axis, sweeping only the kept fraction
		theta := Tau * (1.0 - k.Remove)
		s = RevolveTheta3D(b, theta)
		// center the removed portion on the x-axis
		dtheta := 0.5 * (Tau - theta)
		s = Transform3D(s, RotateZ(dtheta))
	}
	return s
}
//-----------------------------------------------------------------------------
// Board standoffs
// StandoffParms defines the parameters for a board standoff pillar.
type StandoffParms struct {
	PillarHeight float64 // height of the standoff pillar
	PillarDiameter float64 // diameter of the standoff pillar
	HoleDepth float64 // > 0 is a hole, < 0 is a support stub
	HoleDiameter float64 // diameter of the hole/stub
	NumberWebs int // number of triangular gussets around the standoff base
	WebHeight float64 // height of each web gusset
	WebDiameter float64 // diameter swept by the webs at the base
	WebWidth float64 // thickness of each web
}
// pillarWeb returns a single triangular gusset web for a standoff.
func pillarWeb(k *StandoffParms) SDF3 {
	// 2D right-triangle profile: base WebDiameter/2 long, WebHeight tall.
	tri := NewPolygon()
	tri.Add(0, 0)
	tri.Add(0.5*k.WebDiameter, 0)
	tri.Add(0, k.WebHeight)
	web := Extrude3D(Polygon2D(tri.Vertices()), k.WebWidth)
	// Stand the web upright (rotate about X) and drop it to the pillar base.
	return Transform3D(web, Translate3d(V3{0, 0, -0.5 * k.PillarHeight}).Mul(RotateX(DtoR(90.0))))
}
// pillarWebs returns NumberWebs copies of the gusset web, rotated evenly
// around the pillar axis, or nil when no webs are requested.
func pillarWebs(k *StandoffParms) SDF3 {
	if k.NumberWebs == 0 {
		return nil
	}
	return RotateCopy3D(pillarWeb(k), k.NumberWebs)
}
// pillar returns the plain cylindrical body of the standoff.
func pillar(k *StandoffParms) SDF3 {
	return Cylinder3D(k.PillarHeight, 0.5*k.PillarDiameter, 0)
}
// pillarHole returns the hole (or stub) cylinder for a standoff, centered at
// the top of the pillar, or nil when no hole/stub is configured.
func pillarHole(k *StandoffParms) SDF3 {
	if k.HoleDiameter == 0.0 || k.HoleDepth == 0.0 {
		return nil
	}
	hole := Cylinder3D(Abs(k.HoleDepth), 0.5*k.HoleDiameter, 0)
	return Transform3D(hole, Translate3d(V3{0, 0, 0.5 * (k.PillarHeight - k.HoleDepth)}))
}
// Standoff3D returns a single board standoff.
// The sign of HoleDepth selects between a drilled hole (positive, subtracted)
// and a raised support stub (negative, unioned).
func Standoff3D(k *StandoffParms) SDF3 {
	s0 := Union3D(pillar(k), pillarWebs(k))
	if k.NumberWebs != 0 {
		// Cut off any part of the webs that protrude from the top of the pillar
		s0 = Intersect3D(s0, Cylinder3D(k.PillarHeight, k.WebDiameter, 0))
	}
	// Add the pillar hole/stub
	if k.HoleDepth >= 0.0 {
		// hole
		s0 = Difference3D(s0, pillarHole(k))
	} else {
		// support stub
		s0 = Union3D(s0, pillarHole(k))
	}
	return s0
}
//-----------------------------------------------------------------------------
// truncated rectangular pyramid (with rounded edges)
// TruncRectPyramidParms defines the parameters for a truncated rectangular pyramid.
type TruncRectPyramidParms struct {
	Size V3 // size of truncated pyramid
	BaseAngle float64 // base angle of pyramid (radians)
	BaseRadius float64 // base corner radius
	RoundRadius float64 // edge rounding radius
}
// TruncRectPyramid3D returns a truncated rectangular pyramid with rounded edges.
func TruncRectPyramid3D(k *TruncRectPyramidParms) SDF3 {
	h := k.Size.Z
	// dr is the radial shrink from base to top implied by the base angle
	// over height h; the top radius is clamped at zero.
	dr := h / math.Tan(k.BaseAngle)
	rb := k.BaseRadius + dr
	rt := Max(k.BaseRadius-dr, 0)
	round := Min(0.5*rt, k.RoundRadius)
	// Build a cone of twice the height, stretch it to the rectangular
	// footprint, then cut at z = 0 to truncate.
	s := Cone3D(2.0*h, rb, rt, round)
	wx := Max(k.Size.X-2.0*k.BaseRadius, 0)
	wy := Max(k.Size.Y-2.0*k.BaseRadius, 0)
	s = Elongate3D(s, V3{wx, wy, 0})
	s = Cut3D(s, V3{0, 0, 0}, V3{0, 0, 1})
	return s
}
//-----------------------------------------------------------------------------
// ChamferedCylinder intersects a chamfered cylinder with an SDF3.
// NOTE(review): length and radius are taken from s's bounding box (Max.Z and
// Max.X), which assumes s is centered on the origin — confirm for
// non-centered inputs.
func ChamferedCylinder(s SDF3, kb, kt float64) SDF3 {
	// get the length and radius from the bounding box
	l := s.BoundingBox().Max.Z
	r := s.BoundingBox().Max.X
	// 2D half profile: chamfer the bottom corner by kb*r and the top by
	// kt*r, then revolve about the Z axis.
	p := NewPolygon()
	p.Add(0, -l)
	p.Add(r, -l).Chamfer(r * kb)
	p.Add(r, l).Chamfer(r * kt)
	p.Add(0, l)
	return Intersect3D(s, Revolve3D(Polygon2D(p.Vertices())))
}
//-----------------------------------------------------------------------------
// Simple Bolt for 3d printing.
// BoltParms defines the parameters for a bolt (see Bolt).
type BoltParms struct {
	Thread string // name of thread (resolved via ThreadLookup)
	Style string // head style "hex" or "knurl"
	Tolerance float64 // subtract from external thread radius
	TotalLength float64 // threaded length + shank length
	ShankLength float64 // non threaded length
}
// Bolt returns a simple bolt suitable for 3d printing.
// The bolt consists of a head (hex or knurled), an unthreaded shank and an
// ISO external thread occupying the remaining length. An error is returned
// for an unknown thread name, an unknown head style, or negative dimensions.
func Bolt(k *BoltParms) (SDF3, error) {
	// validate parameters
	t, err := ThreadLookup(k.Thread)
	if err != nil {
		return nil, err
	}
	if k.TotalLength < 0 {
		return nil, errors.New("total length < 0")
	}
	if k.ShankLength < 0 {
		return nil, errors.New("shank length < 0")
	}
	if k.Tolerance < 0 {
		return nil, errors.New("tolerance < 0")
	}
	// head
	hr := t.HexRadius()
	hh := t.HexHeight()
	var head SDF3
	switch k.Style {
	case "hex":
		head = HexHead3D(hr, hh, "b")
	case "knurl":
		head = KnurledHead3D(hr, hh, hr*0.25)
	default:
		// %q quotes (and escapes) the style string; idiomatic replacement
		// for the hand-escaped \"%s\".
		return nil, fmt.Errorf("unknown style %q", k.Style)
	}
	// shank: extends half way into the head so the two pieces fuse cleanly.
	shankLength := k.ShankLength + hh/2
	shankOffset := shankLength / 2
	shank := Cylinder3D(shankLength, t.Radius, hh*0.08)
	shank = Transform3D(shank, Translate3d(V3{0, 0, shankOffset}))
	// external thread: whatever length remains after the shank (clamped at 0).
	threadLength := k.TotalLength - k.ShankLength
	if threadLength < 0 {
		threadLength = 0
	}
	var thread SDF3
	if threadLength != 0 {
		// The tolerance shrinks the external thread so it fits a nominal nut.
		r := t.Radius - k.Tolerance
		threadOffset := threadLength/2 + shankLength
		thread = Screw3D(ISOThread(r, t.Pitch, "external"), threadLength, t.Pitch, 1)
		// chamfer the thread
		thread = ChamferedCylinder(thread, 0, 0.5)
		thread = Transform3D(thread, Translate3d(V3{0, 0, threadOffset}))
	}
	return Union3D(head, shank, thread), nil
}
//-----------------------------------------------------------------------------
// Simple Nut for 3d printing.
// NutParms defines the parameters for a nut (see Nut).
type NutParms struct {
	Thread string // name of thread (resolved via ThreadLookup)
	Style string // head style "hex" or "knurl"
	Tolerance float64 // add to internal thread radius
}
// Nut returns a simple nut suitable for 3d printing.
// The nut is a hex or knurled body with an ISO internal thread subtracted
// from it. An error is returned for an unknown thread name, an unknown
// style, or a negative tolerance.
func Nut(k *NutParms) (SDF3, error) {
	// validate parameters
	t, err := ThreadLookup(k.Thread)
	if err != nil {
		return nil, err
	}
	if k.Tolerance < 0 {
		return nil, errors.New("tolerance < 0")
	}
	// nut body
	var nut SDF3
	nr := t.HexRadius()
	nh := t.HexHeight()
	switch k.Style {
	case "hex":
		nut = HexHead3D(nr, nh, "tb")
	case "knurl":
		nut = KnurledHead3D(nr, nh, nr*0.25)
	default:
		// %q quotes (and escapes) the style string; idiomatic replacement
		// for the hand-escaped \"%s\" (consistent with Bolt).
		return nil, fmt.Errorf("unknown style %q", k.Style)
	}
	// internal thread: enlarged by the tolerance so a nominal bolt fits.
	thread := Screw3D(ISOThread(t.Radius+k.Tolerance, t.Pitch, "internal"), nh, t.Pitch, 1)
	return Difference3D(nut, thread), nil
}
//----------------------------------------------------------------------------- | sdf/shapes3.go | 0.89526 | 0.458591 | shapes3.go | starcoder |
package ckks
import (
"math/bits"
)
// PowerOf2 computes op^(2^logPow2), consuming logPow2 levels, and returns the result on opOut. Providing an evaluation
// key is necessary when logPow2 > 1.
func (eval *evaluator) PowerOf2(op *Ciphertext, logPow2 int, opOut *Ciphertext) {
	if logPow2 == 0 {
		// Degree 2^0 = 1: the result is just a copy of the operand.
		if op != opOut {
			opOut.Copy(op.El())
		}
	} else {
		// Square once from op into opOut, then keep squaring opOut in place.
		// Each squaring is followed by a rescale, consuming one level.
		eval.MulRelin(op.El(), op.El(), opOut)
		if err := eval.Rescale(opOut, eval.scale, opOut); err != nil {
			panic(err)
		}
		for i := 1; i < logPow2; i++ {
			eval.MulRelin(opOut.El(), opOut.El(), opOut)
			if err := eval.Rescale(opOut, eval.scale, opOut); err != nil {
				panic(err)
			}
		}
	}
}
// PowerNew computes op^degree, consuming log(degree) levels, and returns the result on a new element. Providing an evaluation
// key is necessary when degree > 2.
func (eval *evaluator) PowerNew(op *Ciphertext, degree int) (opOut *Ciphertext) {
	result := NewCiphertext(eval.params, 1, op.Level(), op.Scale())
	eval.Power(op, degree, result)
	return result
}
// Power computes op^degree, consuming log(degree) levels, and returns the result on opOut. Providing an evaluation
// key is necessary when degree > 2.
func (eval *evaluator) Power(op *Ciphertext, degree int, opOut *Ciphertext) {
	if degree < 1 {
		panic("eval.Power -> degree cannot be smaller than 1")
	}
	tmpct0 := op.CopyNew()
	var logDegree, po2Degree int
	// Square-and-multiply over the binary decomposition of degree:
	// start from op^(largest power of two <= degree)...
	logDegree = bits.Len64(uint64(degree)) - 1
	po2Degree = 1 << logDegree
	eval.PowerOf2(tmpct0.Ciphertext(), logDegree, opOut)
	degree -= po2Degree
	// ...then multiply in op^(2^k) for each remaining set bit of degree,
	// rescaling after every multiplication.
	for degree > 0 {
		logDegree = bits.Len64(uint64(degree)) - 1
		po2Degree = 1 << logDegree
		tmp := NewCiphertext(eval.params, 1, tmpct0.Level(), tmpct0.Scale())
		eval.PowerOf2(tmpct0.Ciphertext(), logDegree, tmp)
		eval.MulRelin(opOut.El(), tmp.El(), opOut)
		if err := eval.Rescale(opOut, eval.scale, opOut); err != nil {
			panic(err)
		}
		degree -= po2Degree
	}
}
// InverseNew computes 1/op and returns the result on a new element, iterating for n steps and consuming n levels. The algorithm requires the encrypted values to be in the range
// [-1.5 - 1.5i, 1.5 + 1.5i] or the result will be wrong. Each iteration increases the precision.
func (eval *evaluator) InverseNew(op *Ciphertext, steps int) (opOut *Ciphertext) {
	// Uses the identity 1/(1-x) = prod_{i>=0} (1 + x^(2^i)) with
	// x = cbar = 1 - op, truncated after `steps` factors.
	cbar := eval.NegNew(op)
	eval.AddConst(cbar, 1, cbar)
	tmp := eval.AddConstNew(cbar, 1)
	opOut = tmp.CopyNew().Ciphertext()
	for i := 1; i < steps; i++ {
		// Square cbar to get the next x^(2^i) term.
		eval.MulRelin(cbar.El(), cbar.El(), cbar.Ciphertext())
		if err := eval.Rescale(cbar, eval.scale, cbar); err != nil {
			panic(err)
		}
		// Multiply the running product by (1 + cbar^(2^i)).
		tmp = eval.AddConstNew(cbar, 1)
		eval.MulRelin(tmp.El(), opOut.El(), tmp.Ciphertext())
		if err := eval.Rescale(tmp, eval.scale, tmp); err != nil {
			panic(err)
		}
		opOut = tmp.CopyNew().Ciphertext()
	}
	return opOut
}
package utils
// reference: https://github.com/mohae/deepcopy
import (
"bytes"
"encoding/gob"
"encoding/json"
"reflect"
)
// deepCopy recursively copies the value in src into dst.
// dst must be a settable reflect.Value of the same type as src.
// Pointers, interfaces, maps and slices are re-allocated so the copy shares
// no mutable state with the original; struct fields that are unexported (not
// settable) or tagged `deepcopy:"-"` are skipped. Map keys are reused as-is
// (only values are deep-copied). All remaining kinds are copied by plain
// assignment (shallow for arrays).
func deepCopy(dst, src reflect.Value) {
	switch src.Kind() {
	case reflect.Interface:
		value := src.Elem()
		if !value.IsValid() {
			// nil interface: leave dst at its zero value.
			return
		}
		// Deep-copy the dynamic value, then re-box it into the interface.
		newValue := reflect.New(value.Type()).Elem()
		deepCopy(newValue, value)
		dst.Set(newValue)
	case reflect.Ptr:
		value := src.Elem()
		if !value.IsValid() {
			// nil pointer: leave dst nil.
			return
		}
		dst.Set(reflect.New(value.Type()))
		deepCopy(dst.Elem(), value)
	case reflect.Map:
		dst.Set(reflect.MakeMap(src.Type()))
		keys := src.MapKeys()
		for _, key := range keys {
			value := src.MapIndex(key)
			newValue := reflect.New(value.Type()).Elem()
			deepCopy(newValue, value)
			dst.SetMapIndex(key, newValue)
		}
	case reflect.Slice:
		// Preserve both length and capacity of the source slice.
		dst.Set(reflect.MakeSlice(src.Type(), src.Len(), src.Cap()))
		for i := 0; i < src.Len(); i++ {
			deepCopy(dst.Index(i), src.Index(i))
		}
	case reflect.Struct:
		typeSrc := src.Type()
		for i := 0; i < src.NumField(); i++ {
			value := src.Field(i)
			tag := typeSrc.Field(i).Tag
			// Skip unexported fields (CanSet is false) and fields opting
			// out via the `deepcopy:"-"` struct tag.
			if value.CanSet() && tag.Get("deepcopy") != "-" {
				deepCopy(dst.Field(i), value)
			}
		}
	default:
		dst.Set(src)
	}
}
// DeepCoderCopy deep-copies src into dst by round-tripping the value through
// encoding/gob. dst must be a pointer; only exported fields survive the trip,
// and gob's usual type restrictions apply. Returns any encode/decode error.
func DeepCoderCopy(dst, src interface{}) error {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		return err
	}
	// Decode straight from the same buffer; the previous
	// bytes.NewBuffer(buf.Bytes()) round-trip allocated a redundant copy.
	return gob.NewDecoder(&buf).Decode(dst)
}
// DeepCopy deep-copies *src into *dst. Both arguments must be pointers of
// the same type; the function panics on mismatched types, non-pointer
// arguments, or invalid values.
func DeepCopy(dst, src interface{}) {
	dt, st := reflect.TypeOf(dst), reflect.TypeOf(src)
	if dt != st {
		panic("DeepCopy: " + dt.String() + " != " + st.String())
	}
	if st.Kind() != reflect.Ptr {
		panic("DeepCopy: pass arguments by address")
	}
	dv, sv := reflect.ValueOf(dst).Elem(), reflect.ValueOf(src).Elem()
	if !dv.IsValid() || !sv.IsValid() {
		panic("DeepCopy: invalid arguments")
	}
	deepCopy(dv, sv)
}
// DeepClone returns a deep copy of v as an interface value of the same
// dynamic type.
func DeepClone(v interface{}) interface{} {
	clone := reflect.New(reflect.TypeOf(v)).Elem()
	deepCopy(clone, reflect.ValueOf(v))
	return clone.Interface()
}
// CopyFloat32Map returns a shallow copy of source; mutating the result does
// not affect the original. A nil source yields an empty, non-nil map.
func CopyFloat32Map(source map[int32]float32) map[int32]float32 {
	// Pre-size the copy to avoid rehashing while filling.
	target := make(map[int32]float32, len(source))
	for key, value := range source {
		target[key] = value
	}
	return target
}
// CopyFloat64Map returns a shallow copy of source; mutating the result does
// not affect the original. A nil source yields an empty, non-nil map.
func CopyFloat64Map(source map[int32]float64) map[int32]float64 {
	// Pre-size the copy to avoid rehashing while filling.
	target := make(map[int32]float64, len(source))
	for key, value := range source {
		target[key] = value
	}
	return target
}
// CopyInt32Map returns a shallow copy of source; mutating the result does
// not affect the original. A nil source yields an empty, non-nil map.
func CopyInt32Map(source map[int32]int32) map[int32]int32 {
	// Pre-size the copy to avoid rehashing while filling.
	target := make(map[int32]int32, len(source))
	for key, value := range source {
		target[key] = value
	}
	return target
}
// CopyInt64Map returns a shallow copy of source; mutating the result does
// not affect the original. A nil source yields an empty, non-nil map.
func CopyInt64Map(source map[int32]int64) map[int32]int64 {
	// Pre-size the copy to avoid rehashing while filling.
	target := make(map[int32]int64, len(source))
	for key, value := range source {
		target[key] = value
	}
	return target
}
// MargeMap merges b into a in place; entries in b overwrite entries in a
// that share the same key. (The name is a historical typo for "MergeMap",
// kept for API compatibility.)
func MargeMap(a map[int32]int32, b map[int32]int32) {
	for k, v := range b {
		a[k] = v
	}
}
// CopyMapInt32 returns a shallow copy of a; mutating the result does not
// affect the original. A nil input yields an empty, non-nil map.
// NOTE(review): duplicates CopyInt32Map; kept because both names are exported.
func CopyMapInt32(a map[int32]int32) map[int32]int32 {
	// Pre-size the copy to avoid rehashing while filling.
	results := make(map[int32]int32, len(a))
	for key, value := range a {
		results[key] = value
	}
	return results
}
// CopyJSON deep-copies marshaler into unMarshaler by serializing it to JSON
// and deserializing the bytes again. unMarshaler must be a pointer. Only
// JSON-visible (exported, non-ignored) fields are copied.
func CopyJSON(marshaler interface{}, unMarshaler interface{}) error {
	// The original named this variable "error", shadowing the builtin error
	// type inside the function; renamed to the conventional err.
	data, err := json.Marshal(marshaler)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, unMarshaler)
}
package generate_sp
import (
"fmt"
"github.com/swamp/assembler/lib/assembler_sp"
decorated "github.com/swamp/compiler/src/decorated/expression"
dectype "github.com/swamp/compiler/src/decorated/types"
"github.com/swamp/opcodes/instruction_sp"
opcode_sp_type "github.com/swamp/opcodes/type"
)
// booleanToBinaryIntOperatorType maps a decorated boolean comparison operator
// onto the corresponding integer comparison opcode. It panics for operators
// that have no integer form.
func booleanToBinaryIntOperatorType(operatorType decorated.BooleanOperatorType) instruction_sp.BinaryOperatorType {
	switch operatorType {
	case decorated.BooleanEqual:
		return instruction_sp.BinaryOperatorBooleanIntEqual
	case decorated.BooleanNotEqual:
		return instruction_sp.BinaryOperatorBooleanIntNotEqual
	case decorated.BooleanLess:
		return instruction_sp.BinaryOperatorBooleanIntLess
	case decorated.BooleanLessOrEqual:
		return instruction_sp.BinaryOperatorBooleanIntLessOrEqual
	case decorated.BooleanGreater:
		return instruction_sp.BinaryOperatorBooleanIntGreater
	case decorated.BooleanGreaterOrEqual:
		return instruction_sp.BinaryOperatorBooleanIntGreaterOrEqual
	default:
		panic(fmt.Errorf("not allowed int operator type"))
	}
	// The trailing "return 0" was unreachable: a switch whose cases all
	// terminate and whose default panics is itself a terminating statement
	// (cf. the sibling enum mapper below, which has no trailing return).
}
// booleanToBinaryEnumOperatorType maps a boolean operator onto the enum
// comparison opcodes; enums only support equality and inequality tests and
// any other operator panics.
func booleanToBinaryEnumOperatorType(operatorType decorated.BooleanOperatorType) instruction_sp.BinaryOperatorType {
	if operatorType == decorated.BooleanEqual {
		return instruction_sp.BinaryOperatorBooleanEnumEqual
	}
	if operatorType == decorated.BooleanNotEqual {
		return instruction_sp.BinaryOperatorBooleanEnumNotEqual
	}
	panic(fmt.Errorf("not allowed enum operator type"))
}
// booleanToBinaryStringOperatorType maps a boolean operator onto the string
// comparison opcodes; strings only support equality and inequality tests and
// any other operator panics.
func booleanToBinaryStringOperatorType(operatorType decorated.BooleanOperatorType) instruction_sp.BinaryOperatorType {
	switch operatorType {
	case decorated.BooleanEqual:
		return instruction_sp.BinaryOperatorBooleanStringEqual
	case decorated.BooleanNotEqual:
		return instruction_sp.BinaryOperatorBooleanStringNotEqual
	default:
		panic(fmt.Errorf("not allowed string operator type"))
	}
	// Unreachable "return 0" removed (the switch is terminating).
}
// booleanToBinaryBooleanOperatorType maps a boolean operator onto the boolean
// comparison opcodes; only equality and inequality are supported and any
// other operator panics.
func booleanToBinaryBooleanOperatorType(operatorType decorated.BooleanOperatorType) instruction_sp.BinaryOperatorType {
	switch operatorType {
	case decorated.BooleanEqual:
		return instruction_sp.BinaryOperatorBooleanBooleanEqual
	case decorated.BooleanNotEqual:
		return instruction_sp.BinaryOperatorBooleanBooleanNotEqual
	default:
		panic(fmt.Errorf("not allowed binary operator type"))
	}
	// Unreachable "return 0" removed (the switch is terminating).
}
// generateBinaryOperatorBooleanResult emits the opcode for a boolean
// comparison (==, !=, <, <=, >, >=) of operator's two operands, writing the
// boolean result into target. Both operand expressions are generated first;
// the opcode family is then chosen from the unaliased type of the LEFT
// operand only (custom type/enum, String, Int/Char/Fixed, or Bool).
func generateBinaryOperatorBooleanResult(code *assembler_sp.Code, target assembler_sp.TargetStackPosRange, operator *decorated.BooleanOperator, genContext *generateContext) error {
	leftVar, leftErr := generateExpressionWithSourceVar(code, operator.Left(), genContext, "bool-left")
	if leftErr != nil {
		return leftErr
	}
	rightVar, rightErr := generateExpressionWithSourceVar(code, operator.Right(), genContext, "bool-right")
	if rightErr != nil {
		return rightErr
	}
	filePosition := genContext.toFilePosition(operator.FetchPositionLength())
	// Presumably the type checker guarantees both operands share a type, so
	// only the left operand is inspected here - TODO confirm.
	unaliasedTypeLeft := dectype.UnaliasWithResolveInvoker(operator.Left().Type())
	foundPrimitive, _ := unaliasedTypeLeft.(*dectype.PrimitiveAtom)
	if foundPrimitive == nil {
		foundCustomType, _ := unaliasedTypeLeft.(*dectype.CustomTypeAtom)
		if foundCustomType == nil {
			panic(fmt.Errorf("not implemented binary operator boolean %v", unaliasedTypeLeft.HumanReadable()))
		} else {
			// Custom types compare as enums; only == and != are supported.
			// unaliasedTypeRight := dectype.UnaliasWithResolveInvoker(operator.Right().Type())
			opcodeBinaryOperator := booleanToBinaryEnumOperatorType(operator.OperatorType())
			code.EnumBinaryOperator(target.Pos, leftVar.Pos, rightVar.Pos, opcodeBinaryOperator, filePosition)
			// panic(fmt.Errorf("not implemented yet %v", unaliasedTypeRight))
		}
	} else if foundPrimitive.AtomName() == "String" {
		opcodeBinaryOperator := booleanToBinaryStringOperatorType(operator.OperatorType())
		code.StringBinaryOperator(target.Pos, leftVar.Pos, rightVar.Pos, opcodeBinaryOperator, filePosition)
	} else if foundPrimitive.AtomName() == "Int" || foundPrimitive.AtomName() == "Char" || foundPrimitive.AtomName() == "Fixed" {
		// Char and Fixed share the integer comparison opcodes.
		opcodeBinaryOperator := booleanToBinaryIntOperatorType(operator.OperatorType())
		code.IntBinaryOperator(target.Pos, leftVar.Pos, rightVar.Pos, opcodeBinaryOperator, filePosition)
	} else if foundPrimitive.AtomName() == "Bool" {
		// NOTE(review): Bool comparisons are emitted with IntBinaryOperator -
		// presumably booleans are integers at runtime; confirm.
		opcodeBinaryOperator := booleanToBinaryBooleanOperatorType(operator.OperatorType())
		code.IntBinaryOperator(target.Pos, leftVar.Pos, rightVar.Pos, opcodeBinaryOperator, filePosition)
	} else {
		panic(fmt.Errorf("generate sp: what operator is this for %v", foundPrimitive.AtomName()))
	}
	return nil
}
func handleBinaryOperatorBooleanResult(code *assembler_sp.Code, operator *decorated.BooleanOperator, genContext *generateContext) (assembler_sp.SourceStackPosRange, error) {
target := genContext.context.stackMemory.Allocate(uint(opcode_sp_type.SizeofSwampBool), uint32(opcode_sp_type.AlignOfSwampBool), "booleanOperatorTarget")
if err := generateBinaryOperatorBooleanResult(code, target, operator, genContext); err != nil {
return assembler_sp.SourceStackPosRange{}, err
}
return targetToSourceStackPosRange(target), nil
} | src/generate_sp/binary_operator.go | 0.642769 | 0.446615 | binary_operator.go | starcoder |
package util
import (
	"bytes"
	"fmt"
	"image"
	"image/color"
	"image/draw"
	"image/png"
	"os"
	"path/filepath"
	"strconv"

	"github.com/anthonynsimon/bild/blend"
	"github.com/lucasb-eyer/go-colorful"
)
// outfitColors is the fixed palette of hex RGB strings used to recolor
// outfit parts; the head/body/legs/feet values passed in by callers are
// indexes into this slice.
var outfitColors = []string{
	"FFFFFF", "FFD4BF", "FFE9BF", "FFFFBF", "E9FFBF", "D4FFBF",
	"BFFFBF", "BFFFD4", "BFFFE9", "BFFFFF", "BFE9FF", "BFD4FF",
	"BFBFFF", "D4BFFF", "E9BFFF", "FFBFFF", "FFBFE9", "FFBFD4",
	"FFBFBF", "DADADA", "BF9F8F", "BFAF8F", "BFBF8F", "AFBF8F",
	"9FBF8F", "8FBF8F", "8FBF9F", "8FBFAF", "8FBFBF", "8FAFBF",
	"8F9FBF", "8F8FBF", "9F8FBF", "AF8FBF", "BF8FBF", "BF8FAF",
	"BF8F9F", "BF8F8F", "B6B6B6", "BF7F5F", "BFAF8F", "BFBF5F",
	"9FBF5F", "7FBF5F", "5FBF5F", "5FBF7F", "5FBF9F", "5FBFBF",
	"5F9FBF", "5F7FBF", "5F5FBF", "7F5FBF", "9F5FBF", "BF5FBF",
	"BF5F9F", "BF5F7F", "BF5F5F", "919191", "BF6A3F", "BF943F",
	"BFBF3F", "94BF3F", "6ABF3F", "3FBF3F", "3FBF6A", "3FBF94",
	"3FBFBF", "3F94BF", "3F6ABF", "3F3FBF", "6A3FBF", "943FBF",
	"BF3FBF", "BF3F94", "BF3F6A", "BF3F3F", "6D6D6D", "FF5500",
	"FFAA00", "FFFF00", "AAFF00", "54FF00", "00FF00", "00FF54",
	"00FFAA", "00FFFF", "00A9FF", "0055FF", "0000FF", "5500FF",
	"A900FF", "FE00FF", "FF00AA", "FF0055", "FF0000", "484848",
	"BF3F00", "BF7F00", "BFBF00", "7FBF00", "3FBF00", "00BF00",
	"00BF3F", "00BF7F", "00BFBF", "007FBF", "003FBF", "0000BF",
	"3F00BF", "7F00BF", "BF00BF", "BF007F", "BF003F", "BF0000",
	"242424", "7F2A00", "7F5500", "7F7F00", "557F00", "2A7F00",
	"007F00", "007F2A", "007F55", "007F7F", "00547F", "002A7F",
	"00007F", "2A007F", "54007F", "7F007F", "7F0055", "7F002A",
	"7F0000",
}
// GenerateOutfitImage generates an outfit image for the given values: outfit
// type t, per-part color indexes into outfitColors (feet/legs/body/head) and
// the addon selector (>= 2 draws the first addon layer, >= 3 additionally
// the second). The result is returned as encoded PNG bytes.
func GenerateOutfitImage(t, feet, legs, body, head, addons int) ([]byte, error) {
	// Resolve the palette indexes into concrete colors.
	feetColor, err := outfitColor(feet)
	if err != nil {
		return nil, err
	}
	legsColor, err := outfitColor(legs)
	if err != nil {
		return nil, err
	}
	bodyColor, err := outfitColor(body)
	if err != nil {
		return nil, err
	}
	headColor, err := outfitColor(head)
	if err != nil {
		return nil, err
	}
	// Base sprite without any addons.
	baseImage, err := paintOutfitLayer(t, "1_1_1_3", feetColor, legsColor, bodyColor, headColor)
	if err != nil {
		return nil, err
	}
	if addons >= 2 {
		// First addon layer.
		if err := overlayOutfitLayer(baseImage, t, "1_1_2_3", feetColor, legsColor, bodyColor, headColor); err != nil {
			return nil, err
		}
	}
	if addons >= 3 {
		// Second addon layer.
		if err := overlayOutfitLayer(baseImage, t, "1_1_3_3", feetColor, legsColor, bodyColor, headColor); err != nil {
			return nil, err
		}
	}
	// Encode the composed image as PNG.
	buff := &bytes.Buffer{}
	if err := png.Encode(buff, baseImage); err != nil {
		return nil, err
	}
	return buff.Bytes(), nil
}

// outfitColor parses the palette entry at the given index into a color.
func outfitColor(index int) (colorful.Color, error) {
	return colorful.Hex("#" + outfitColors[index])
}

// paintOutfitLayer renders one sprite layer (named by part, e.g. "1_1_2_3")
// of outfit type t, recoloring it with the four part colors.
func paintOutfitLayer(t int, part string, feetColor, legsColor, bodyColor, headColor color.Color) (*image.RGBA, error) {
	dir := filepath.Join("public", "images", "outfits", "generator", strconv.Itoa(t))
	return paintOutfitPart(
		filepath.Join(dir, part+".png"),
		filepath.Join(dir, part+"_template.png"),
		feetColor, legsColor, bodyColor, headColor,
	)
}

// overlayOutfitLayer paints the given addon layer and composites it over dst.
func overlayOutfitLayer(dst *image.RGBA, t int, part string, feetColor, legsColor, bodyColor, headColor color.Color) error {
	layer, err := paintOutfitLayer(t, part, feetColor, legsColor, bodyColor, headColor)
	if err != nil {
		return err
	}
	draw.Draw(dst, dst.Bounds(), layer, image.Point{}, draw.Over)
	return nil
}
// paintOutfitPart loads a sprite image and its color template, recolors the
// template's marker colors (blue=feet, green=legs, red=body, yellow=head)
// and multiply-blends the recolored template with the sprite to produce the
// final layer.
func paintOutfitPart(generator, template string, feetColor, legsColor, bodyColor, headColor color.Color) (*image.RGBA, error) {
	// Open generator image
	generatorImageFile, err := os.Open(generator)
	if err != nil {
		return nil, err
	}
	// Close generator image
	defer generatorImageFile.Close()
	// Open template image
	templateImageFile, err := os.Open(template)
	if err != nil {
		return nil, err
	}
	// Close template image
	defer templateImageFile.Close()
	// Get generator image from file
	generatorImage, err := png.Decode(generatorImageFile)
	if err != nil {
		return nil, err
	}
	// Get template image from file
	templateImageRaw, err := png.Decode(templateImageFile)
	if err != nil {
		return nil, err
	}
	// The previous unchecked type assertion panicked whenever the template
	// PNG was not stored as NRGBA; return an error instead.
	templateImage, ok := templateImageRaw.(*image.NRGBA)
	if !ok {
		return nil, fmt.Errorf("template image %s is not in NRGBA format", template)
	}
	// Replace each marker color with the requested part color.
	paintPixels(templateImage, color.RGBA{0, 0, 255, 255}, feetColor)
	paintPixels(templateImage, color.RGBA{0, 255, 0, 255}, legsColor)
	paintPixels(templateImage, color.RGBA{255, 0, 0, 255}, bodyColor)
	paintPixels(templateImage, color.RGBA{255, 255, 0, 255}, headColor)
	// Use multiply blend mode to combine template and sprite.
	outfitImage := blend.Multiply(templateImage, generatorImage)
	return outfitImage, nil
}
// paintPixels paints colors from an image
func paintPixels(img *image.NRGBA, base color.Color, dst color.Color) {
br, bg, bb, ba := base.RGBA()
dr, dg, db, _ := dst.RGBA()
for x := 0; x < img.Bounds().Dx(); x++ {
for y := 0; y < img.Bounds().Dy(); y++ {
r, g, b, a := img.At(x, y).RGBA()
if br == r && bg == g && bb == b && ba == a {
img.Set(x, y, color.RGBA{uint8(dr), uint8(dg), uint8(db), 255})
}
}
}
} | app/util/outfit.go | 0.514156 | 0.435902 | outfit.go | starcoder |
package inverted
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"github.com/pkg/errors"
)
// LexicographicallySortableFloat64 converts a float64 into a byte slice
// whose byte-wise big endian ordering matches the numeric ordering of the
// inputs. The sign bit is flipped in every case; for negative numbers every
// remaining bit is flipped as well. (Fixes doc typos "transforms a
// conversion" / "notatino" from the original comment.)
func LexicographicallySortableFloat64(in float64) ([]byte, error) {
	buf := bytes.NewBuffer(nil)
	if err := binary.Write(buf, binary.BigEndian, in); err != nil {
		return nil, errors.Wrap(err, "serialize float64 value as big endian")
	}
	// Flip bits in place on the buffer's bytes instead of building new
	// slices; buf is function-local, so mutating its backing array is safe.
	out := buf.Bytes()
	if in >= 0 {
		// on positive numbers only flip the sign bit
		out[0] ^= 0x80
	} else {
		// on negative numbers flip every bit
		for i := range out {
			out[i] ^= 0xFF
		}
	}
	return out, nil
}
// ParseLexicographicallySortableFloat64 reverses the transformation applied
// by LexicographicallySortableFloat64, returning the original float64.
func ParseLexicographicallySortableFloat64(in []byte) (float64, error) {
	if len(in) != 8 {
		return 0, fmt.Errorf("float64 must be 8 bytes long, got: %d", len(in))
	}
	flipped := make([]byte, 8)
	if in[0]&0x80 == 0x80 {
		// A set sign bit means the original number was positive: restore the
		// sign bit and keep the remaining bytes untouched.
		copy(flipped, in)
		flipped[0] ^= 0x80
	} else {
		// Otherwise the original was negative: undo the full bit flip.
		for i := range in {
			flipped[i] = in[i] ^ 0xFF
		}
	}
	var value float64
	if err := binary.Read(bytes.NewReader(flipped), binary.BigEndian, &value); err != nil {
		return 0, errors.Wrap(err, "deserialize float64 value as big endian")
	}
	return value, nil
}
// LexicographicallySortableInt64 performs a conversion to a lexicographically
// sortable byte slice. For this, big endian notation is required and the
// sign bit must be flipped so negative numbers order before positive ones.
// (Fixes the "Fro this" typo from the original comment.)
func LexicographicallySortableInt64(in int64) ([]byte, error) {
	buf := bytes.NewBuffer(nil)
	// flip the sign bit; the redundant int64(in) self-conversion is removed
	flipped := in ^ math.MinInt64
	if err := binary.Write(buf, binary.BigEndian, flipped); err != nil {
		return nil, errors.Wrap(err, "serialize int value as big endian")
	}
	return buf.Bytes(), nil
}
// ParseLexicographicallySortableInt64 reverses the transformation applied by
// LexicographicallySortableInt64, returning the original int64.
func ParseLexicographicallySortableInt64(in []byte) (int64, error) {
	if len(in) != 8 {
		return 0, fmt.Errorf("int64 must be 8 bytes long, got: %d", len(in))
	}
	var value int64
	if err := binary.Read(bytes.NewReader(in), binary.BigEndian, &value); err != nil {
		return 0, errors.Wrap(err, "deserialize int64 value as big endian")
	}
	// undo the sign-bit flip that was applied during encoding
	return value ^ math.MinInt64, nil
}
// LexicographicallySortableUint64 performs a conversion to a
// lexicographically sortable byte slice; unsigned values only need big
// endian byte order, no sign handling.
func LexicographicallySortableUint64(in uint64) ([]byte, error) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.BigEndian, in); err != nil {
		return nil, errors.Wrap(err, "serialize int value as big endian")
	}
	return buf.Bytes(), nil
}
// ParseLexicographicallySortableUint64 reverses the changes in
// LexicographicallySortableUint64
func ParseLexicographicallySortableUint64(in []byte) (uint64, error) {
if len(in) != 8 {
return 0, fmt.Errorf("uint64 must be 8 bytes long, got: %d", len(in))
}
r := bytes.NewReader(in)
var value uint64
err := binary.Read(r, binary.BigEndian, &value)
if err != nil {
return 0, errors.Wrap(err, "deserialize uint64 value as big endian")
}
return value, nil
} | adapters/repos/db/inverted/serialization.go | 0.727395 | 0.406243 | serialization.go | starcoder |
package rendering
import (
"github.com/llgcode/draw2d"
"github.com/llgcode/draw2d/draw2dimg"
"github.com/samlecuyer/ecumene/geom"
"github.com/samlecuyer/ecumene/mapping"
"github.com/samlecuyer/ecumene/query"
"github.com/samlecuyer/ecumene/util"
"code.google.com/p/sadbox/color"
"image"
"image/draw"
"log"
"math"
"reflect"
"sync"
)
// Renderer draws a mapping.Map into a raster image of a fixed pixel size.
type Renderer struct {
	m *mapping.Map // map (layers, styles, projection) being rendered
	width, height float64 // output size in pixels
	bbox geom.Bbox // projected clip rectangle the output is fitted to
	layers [][]geom.Shape // NOTE(review): never written within this file; confirm it is still used
	matrix draw2d.Matrix // world-to-pixel transform, computed by Draw
	sync.Mutex
}
// SubImage is satisfied by image types whose SubImage method returns a
// cropped view sharing the same pixel storage (e.g. *image.RGBA).
type SubImage interface {
	SubImage(r image.Rectangle) image.Image
}
// NewRenderer builds a Renderer for m targeting an output image of the
// given pixel dimensions. Call ClipTo or ClipToMap before Draw.
func NewRenderer(m *mapping.Map, width, height int) *Renderer {
	return &Renderer{
		m:      m,
		width:  float64(width),
		height: float64(height),
	}
}
// ClipTo restricts rendering to the rectangle spanned by the two corner
// coordinates.
func (r *Renderer) ClipTo(lng0, lat0, lng1, lat1 float64) {
	r.bbox = geom.Bbox{lng0, lat0, lng1, lat1}
	log.Println("clipped to: ", r.bbox)
}
// ClipToMap sets the clip rectangle to the projected extent of the whole
// map: the two diagonals of the map bounds plus the prime meridian edge are
// projected, and the clip box is grown to cover all of them.
func (r *Renderer) ClipToMap() error {
	b := r.m.Bounds()
	corners := [][4]float64{
		{b[0], b[1], b[2], b[3]},
		{b[0], b[3], b[2], b[1]},
		{0, b[3], 0, b[1]},
	}
	for i, c := range corners {
		x0, y0, _ := r.m.Srs.Forward(c[0], c[1])
		x1, y1, _ := r.m.Srs.Forward(c[2], c[3])
		box := geom.Bbox{x0, y0, x1, y1}
		if i == 0 {
			r.bbox = box
		} else {
			r.bbox = r.bbox.ExpandToFit(box)
		}
	}
	log.Println("clipped to: ", r.bbox)
	return nil
}
// DrawToFile renders the map and writes the result to filename as a PNG.
func (r *Renderer) DrawToFile(filename string) error {
	return draw2dimg.SaveToPngFile(filename, r.Draw())
}
// DrawAdjustedToFile renders the map, crops the result to the renderer's
// bbox when the image type supports SubImage, and writes it as a PNG.
func (r *Renderer) DrawAdjustedToFile(filename string) error {
	img := r.Draw()
	if cropper, ok := img.(SubImage); ok {
		bb := r.bbox
		img = cropper.SubImage(image.Rect(int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3])))
	}
	return draw2dimg.SaveToPngFile(filename, img)
}
// Draw renders every layer of the map into a new RGBA image of the
// renderer's pixel size and returns it. The clip bbox is fitted into the
// image with a uniform scale (aspect ratio preserved, letterboxed via the
// ox/oy offsets).
func (r *Renderer) Draw() image.Image {
	pixelsX, pixelsY := int(r.width), int(r.height)
	dest := image.NewRGBA(image.Rect(0, 0, pixelsX, pixelsY))
	// Fill the canvas with the map's background color first.
	draw.Draw(dest, dest.Bounds(), &image.Uniform{r.m.BgColor}, image.ZP, draw.Src)
	// NOTE(review): hard-coded macOS font folder; other platforms will not
	// find fonts here.
	draw2d.SetFontFolder("/Library/Fonts/")
	draw2d.SetFontNamer(func(fontData draw2d.FontData) string {
		return fontData.Name + ".ttf"
	})
	gc := draw2dimg.NewGraphicContext(dest)
	// gc.DPI = 300
	gc.SetLineCap(draw2d.RoundCap)
	gc.SetFillColor(r.m.BgColor)
	gc.SetStrokeColor(r.m.Stroke)
	gc.SetFontData(draw2d.FontData{Name: "Georgia", Family: draw2d.FontFamilySerif, Style: draw2d.FontStyleNormal})
	// Compute a uniform scale that fits the clip bbox into the image and
	// center the resulting drawing area.
	dx := math.Abs(r.bbox[2] - r.bbox[0])
	dy := math.Abs(r.bbox[3] - r.bbox[1])
	pxf, pyf := float64(pixelsX), float64(pixelsY)
	r1, r2 := (pxf / dx), (pyf / dy)
	r0 := math.Min(r1, r2)
	w, h := dx*r0, dy*r0
	ox, oy := (pxf-w)/2, (pyf-h)/2
	img_box := [4]float64{ox, oy, ox + w, oy + h}
	// matrix maps projected world coordinates to image pixel coordinates.
	r.matrix = draw2d.NewMatrixFromRects(r.bbox, img_box)
	for _, layer := range r.m.Layers {
		q := query.NewQuery(r.m.Bounds()).Select(layer.SourceQuery())
		if ds := layer.LoadSource(); ds != nil {
			// NOTE(review): defer inside a loop keeps every data source open
			// until Draw returns; consider closing per iteration.
			defer ds.Close()
			for shp := range ds.Query(q) {
				// Choose the symbolizer category from the concrete shape type.
				var symbolizerType util.SymbolizerType
				switch shp.(type) {
				case geom.LineShape, geom.MultiLineShape:
					symbolizerType = util.PathType
				case geom.PolygonShape:
					symbolizerType = util.PolygonType
				}
				for _, symbolizer := range r.findSymbolizers(layer, symbolizerType) {
					symbolizer.Draw(gc, shp)
				}
				// Text symbolizers always run after the geometry pass.
				for _, symbolizer := range r.findSymbolizers(layer, util.TextType) {
					symbolizer.Draw(gc, shp)
				}
			}
		}
	}
	return dest
}
// graticule strokes a latitude/longitude grid over the map bounds, sampling
// every dxy units along each grid line so projected lines appear curved.
func (r *Renderer) graticule(gc draw2d.GraphicContext) {
	b := r.m.Bounds()
	d2r := math.Pi/180.
	gc.SetFillColor(AlphaHex("#ce4251"))
	// iterate over all the latitudes
	// NOTE(review): padding mixes units (20 * pi/180 radians) while the loop
	// compares against raw bounds values - confirm the bounds' units.
	padding := 20 * d2r
	dxy := 0.001
	for phi := b[1]; phi > b[3]; phi -= padding {
		x, y, _ := r.m.Srs.Forward(b[0], phi)
		x, y = r.matrix.TransformPoint(x, y)
		// NOTE(review): MoveTo receives the raw (phi, b[0]) pair rather than
		// the transformed x/y computed above - looks like a bug; confirm.
		gc.MoveTo(phi, b[0])
		for lam := b[0] + dxy; lam < b[2]; lam += dxy {
			x, y, _ = r.m.Srs.Forward(lam, phi)
			x, y = r.matrix.TransformPoint(x, y)
			gc.LineTo(x, y)
		}
		gc.Stroke()
	}
	// iterate over all the longitudes
	for lam := b[0]; lam <= b[2]; lam += padding {
		x, y, _ := r.m.Srs.Forward(lam, b[1])
		x, y = r.matrix.TransformPoint(x, y)
		gc.MoveTo(lam, b[1])
		for phi := b[1] + dxy; phi >= b[3]; phi -= dxy {
			x, y, _ = r.m.Srs.Forward(lam, phi)
			x, y = r.matrix.TransformPoint(x, y)
			gc.LineTo(x, y)
		}
		gc.Stroke()
	}
}
// coordsAsPath projects each coordinate through the map's SRS and the
// renderer's world-to-pixel matrix, returning the resulting polyline path.
// Points that project to NaN or +Inf are skipped.
func (r *Renderer) coordsAsPath(coords geom.Coordinates) *draw2d.Path {
	path := new(draw2d.Path)
	started := false
	for _, point := range coords {
		x, y, _ := r.m.Srs.Forward(point[0], point[1])
		x, y = r.matrix.TransformPoint(x, y)
		if math.IsNaN(x) || math.IsInf(x, 1) {
			continue
		}
		// Start the path at the first *valid* point. The original keyed on
		// i == 0, which emitted a LineTo with no current point whenever the
		// first coordinate projected to NaN/Inf.
		if !started {
			path.MoveTo(x, y)
			started = true
		} else {
			path.LineTo(x, y)
		}
	}
	return path
}
// findSymbolizers collects, across all of layer's styles, the symbolizers of
// the requested category (path/polygon/text), pairing each mapping
// symbolizer with its rule's filter.
func (r *Renderer) findSymbolizers(layer *mapping.Layer, filter util.SymbolizerType) []Symbolizer {
	var symbolizers []Symbolizer
	for _, styleName := range layer.Styles() {
		if style := r.m.FindStyle(styleName); style != nil {
			for _, rule := range style.Rules {
				if ps, ok := rule.Symbolizers[filter]; ok {
					switch specific := ps.(type) {
					case *mapping.PolygonSymbolizer:
						symbolizers = append(symbolizers, &PolygonSymbolizer{query.Filter(rule.Filter), r, specific})
					case *mapping.PathSymbolizer:
						symbolizers = append(symbolizers, &PathSymbolizer{query.Filter(rule.Filter), r, specific})
					case *mapping.TextSymbolizer:
						symbolizers = append(symbolizers, &TextSymbolizer{query.Filter(rule.Filter), r, specific})
					default:
						// Unknown symbolizer type: log it and skip.
						log.Println(reflect.TypeOf(ps).Elem())
					}
				}
			}
		}
	}
	return symbolizers
}
package chron
import (
"time"
"github.com/dustinevan/chron/dura"
"fmt"
"reflect"
"database/sql/driver"
"strings"
)
type Milli struct {
time.Time
}
func NewMilli(year int, month time.Month, day, hour, min, sec, milli int) Milli {
return Milli{time.Date(year, month, day, hour, min, sec, milli*1000000, time.UTC)}
}
// ThisMilli returns the current instant truncated to millisecond precision.
func ThisMilli() Milli {
	return Now().AsMilli()
}
// MilliOf converts t to UTC and truncates it to the containing millisecond.
func MilliOf(t time.Time) Milli {
	return Milli{t.UTC().Truncate(time.Millisecond)}
}
// Granularity conversions: each As* helper re-truncates the underlying time
// at the named precision via the corresponding *Of constructor; AsMilli and
// AsTime are identity/unwrap accessors.
func (m Milli) AsYear() Year { return YearOf(m.Time) }
func (m Milli) AsMonth() Month { return MonthOf(m.Time) }
func (m Milli) AsDay() Day { return DayOf(m.Time) }
func (m Milli) AsHour() Hour { return HourOf(m.Time) }
func (m Milli) AsMinute() Minute { return MinuteOf(m.Time) }
func (m Milli) AsSecond() Second { return SecondOf(m.Time) }
func (m Milli) AsMilli() Milli { return m }
func (m Milli) AsMicro() Micro { return MicroOf(m.Time) }
func (m Milli) AsChron() Chron { return TimeOf(m.Time) }
func (m Milli) AsTime() time.Time { return m.Time }
// Increment moves the time forward by l's calendar parts (years, months,
// days) plus its fixed time.Duration component, widening the result to a
// Chron (nanosecond precision).
func (m Milli) Increment(l dura.Time) Chron {
	return Chron{m.AddDate(l.Years(), l.Months(), l.Days()).Add(l.Duration())}
}

// Decrement is the inverse of Increment: it subtracts the calendar parts
// and the fixed duration component.
func (m Milli) Decrement(l dura.Time) Chron {
	return Chron{m.AddDate(-1*l.Years(), -1*l.Months(), -1*l.Days()).Add(-1 * l.Duration())}
}
// AddN returns the Milli n milliseconds after m (n may be negative).
func (m Milli) AddN(n int) Milli {
	offset := time.Duration(int(time.Millisecond) * n)
	return Milli{m.Add(offset)}
}
// span.Time implementation

// Start returns the inclusive beginning of the millisecond span.
func (m Milli) Start() Chron {
	return m.AsChron()
}

// End returns the last representable instant of the span: the start of the
// next millisecond minus one nanosecond.
func (m Milli) End() Chron {
	return m.AddN(1).Decrement(dura.Nano)
}

// Contains reports whether t lies entirely within this millisecond.
func (m Milli) Contains(t Span) bool {
	return !m.Before(t) && !m.After(t)
}

// Before reports whether this span ends before t starts.
func (m Milli) Before(t Span) bool {
	return m.End().AsTime().Before(t.Start().AsTime())
}

// After reports whether this span starts after t ends.
func (m Milli) After(t Span) bool {
	return m.Start().AsTime().After(t.End().AsTime())
}

// Duration returns the fixed width of this span (one millisecond).
func (m Milli) Duration() dura.Time {
	return dura.Milli
}
// AddYears returns the Milli shifted by y calendar years.
func (m Milli) AddYears(y int) Milli {
	return m.Increment(dura.Years(y)).AsMilli()
}

// AddMonths returns the Milli shifted by ms calendar months.
func (m Milli) AddMonths(ms int) Milli {
	return m.Increment(dura.Months(ms)).AsMilli()
}

// AddDays returns the Milli shifted by d calendar days.
func (m Milli) AddDays(d int) Milli {
	return m.Increment(dura.Days(d)).AsMilli()
}

// AddHours returns the Milli shifted by h hours.
func (m Milli) AddHours(h int) Milli {
	return m.Increment(dura.Hours(h)).AsMilli()
}

// AddMinutes returns the Milli shifted by ms minutes.
func (m Milli) AddMinutes(ms int) Milli {
	return m.Increment(dura.Mins(ms)).AsMilli()
}

// AddSeconds returns the Milli shifted by s seconds.
func (m Milli) AddSeconds(s int) Milli {
	return m.Increment(dura.Secs(s)).AsMilli()
}

// AddMillis returns the Milli shifted by ms milliseconds.
func (m Milli) AddMillis(ms int) Milli {
	return m.AddN(ms)
}

// AddMicros shifts by ms microseconds, widening to a Micro.
func (m Milli) AddMicros(ms int) Micro {
	return m.AsMicro().AddN(ms)
}

// AddNanos shifts by n nanoseconds, widening to a Chron.
func (m Milli) AddNanos(n int) Chron {
	return m.AsChron().AddN(n)
}
// Scan implements database/sql.Scanner: NULL becomes the zero value and a
// driver-supplied time.Time is truncated to a Milli; any other type is an
// error.
func (m *Milli) Scan(value interface{}) error {
	if value == nil {
		*m = ZeroValue().AsMilli()
		return nil
	}
	if t, ok := value.(time.Time); ok {
		*m = MilliOf(t)
		return nil
	}
	// Fixed copy-paste bug: the message previously named *chron.Day.
	return fmt.Errorf("unsupported Scan, storing %s into type *chron.Milli", reflect.TypeOf(value))
}
// Value implements database/sql/driver.Valuer, storing the Milli as its
// underlying time.Time.
func (m Milli) Value() (driver.Value, error) {
	// todo: error check the range.
	return m.Time, nil
}
func (m *Milli) UnmarshalJSON(data []byte) error {
if string(data) == "null" {
return nil
}
s := strings.Trim(string(data), `"`)
t, err := Parse(s)
*m = MilliOf(t)
return err
} | milli.go | 0.65379 | 0.461077 | milli.go | starcoder |
package ConcurrentSkipList
import (
"errors"
"math"
"sync/atomic"
"github.com/OneOfOne/xxhash"
)
// Comes from redis's implementation.
// Also you can see more detail in <NAME>'s paper <Skip Lists: A Probabilistic Alternative to Balanced Trees>.
// The paper is in ftp://ftp.cs.umd.edu/pub/skipLists/skiplists.pdf
const (
	MAX_LEVEL = 32 // upper bound for any skip list level
	PROBABILITY = 0.25 // chance a node is promoted one level higher
	SHARDS = 32 // number of independent skip lists the key space is split into
)
// shardIndexes holds, in ascending order, the inclusive upper bound of the
// key range owned by each shard; getShardIndex scans it to route a key.
var shardIndexes = make([]uint64, SHARDS)

// init fills shardIndexes by splitting the uint64 key space into SHARDS
// equally sized contiguous ranges, working down from the maximum value.
func init() {
	const step uint64 = 1 << 59 // 2^64 / SHARDS
	upper := uint64(math.MaxUint64)
	for i := SHARDS - 1; i >= 0; i-- {
		shardIndexes[i] = upper
		upper -= step
	}
}
// ConcurrentSkipList is a struct containing a slice of skip lists. Keys are
// partitioned across SHARDS independent skip lists by key range, reducing
// contention between operations on distant keys.
type ConcurrentSkipList struct {
	skipLists []*skipList // one shard per contiguous slice of the uint64 key space
	level     int // level shared by every shard
}
// NewConcurrentSkipList creates a sharded skip list whose shards all use
// the given level, which must lie in [1, 32] or an error is returned.
// A practical way to pick the level is L(N) = log(1/PROBABILITY)(N) where N
// is the expected element count and PROBABILITY is 0.25 here; e.g. for
// N = 10000000 expected elements, L(N) is roughly 12 (see the paper linked
// at the top of this file). Each fresh shard's head has the requested level
// and points at its tail.
func NewConcurrentSkipList(level int) (*ConcurrentSkipList, error) {
	if level <= 0 || level > MAX_LEVEL {
		return nil, errors.New("invalid level, level must between 1 to 32")
	}
	lists := make([]*skipList, SHARDS)
	for i := range lists {
		lists[i] = newSkipList(level)
	}
	return &ConcurrentSkipList{
		skipLists: lists,
		level:     level,
	}, nil
}
// Level will return the level of skip list (the value supplied to
// NewConcurrentSkipList).
func (s *ConcurrentSkipList) Level() int {
	return s.level
}
// Length returns the total number of elements across all shards. The value
// is a point-in-time sum; concurrent writers may change it immediately.
func (s *ConcurrentSkipList) Length() int32 {
	var total int32
	for _, shard := range s.skipLists {
		total += shard.getLength()
	}
	return total
}
// Search will search the skip list with the given index.
// If the index exists, return the value and true, otherwise return nil and false.
func (s *ConcurrentSkipList) Search(index uint64) (*Node, bool) {
	// Only the shard owning this index needs to be consulted.
	sl := s.skipLists[getShardIndex(index)]
	if atomic.LoadInt32(&sl.length) == 0 {
		// Fast path: an empty shard cannot contain the index.
		return nil, false
	}
	result := sl.searchWithoutPreviousNodes(index)
	return result, result != nil
}
// Insert will insert a value into the skip list. If the list already has
// this index, the value is overwritten, otherwise it is added. Nil values
// are silently ignored.
func (s *ConcurrentSkipList) Insert(index uint64, value interface{}) {
	// Ignore nil value.
	if value == nil {
		return
	}
	// Only the shard owning this index is touched, keeping contention local.
	sl := s.skipLists[getShardIndex(index)]
	sl.insert(index, value)
}
// Delete the node with the given index, if present.
func (s *ConcurrentSkipList) Delete(index uint64) {
	sl := s.skipLists[getShardIndex(index)]
	if atomic.LoadInt32(&sl.length) == 0 {
		// Nothing to delete in an empty shard.
		return
	}
	sl.delete(index)
}
// ForEach takes a snapshot of each shard in turn and calls f for every node
// in it, stopping as soon as f returns false. Inserts or deletes performed
// while iterating do not affect the snapshot being traversed. Note that
// each shard's snapshot is materialized in memory, so this is not cheap for
// large lists.
func (s *ConcurrentSkipList) ForEach(f func(node *Node) bool) {
outer:
	for _, sl := range s.skipLists {
		if sl.getLength() == 0 {
			continue
		}
		for _, node := range sl.snapshot() {
			if !f(node) {
				break outer
			}
		}
	}
}
// Sub returns up to length nodes starting at global position startNumber
// (0-based, counting nodes in key order across all shards, like a slice).
// Invalid parameters yield nil.
func (s *ConcurrentSkipList) Sub(startNumber int32, length int32) []*Node {
	// Ignore invalid parameter.
	if startNumber > s.Length() || startNumber < 0 || length <= 0 {
		return nil
	}
	var result []*Node
	// position counts nodes passed over; count counts nodes collected.
	var position, count int32 = 0, 0
	for _, sl := range s.skipLists {
		// Skip whole shards that end before startNumber.
		if l := sl.getLength(); l == 0 || position+l <= startNumber {
			position += l
			continue
		}
		nodes := sl.snapshot()
		for _, node := range nodes {
			// Advance within the shard until the start position is reached.
			if position < startNumber {
				position++
				continue
			}
			if count == length {
				break
			}
			result = append(result, node)
			count++
		}
		if count == length {
			break
		}
	}
	return result
}
// getShardIndex locates the shard whose key range contains index by
// scanning the ascending list of per-shard upper bounds.
func getShardIndex(index uint64) int {
	for i, ceiling := range shardIndexes {
		if index <= ceiling {
			return i
		}
	}
	// Unreachable in practice: the final ceiling is math.MaxUint64.
	return -1
}
// Hash will calculate the input's hash value using the xxHash algorithm.
// It can be used to derive a skip list index from arbitrary bytes.
// See more detail in https://cyan4973.github.io/xxHash/
func Hash(input []byte) uint64 {
	h := xxhash.New64()
	// Write on a hash never returns an error, so it is safe to ignore here.
	h.Write(input)
	return h.Sum64()
}
package viql
import (
"fmt"
"io"
)
// qlHelpText is the full VIQL language reference written by Help. Typos in
// the user-visible text have been corrected (VimInfo, "true if",
// "forestalls", "case-insensitive", "grammar").
const qlHelpText = `
VIQL - The VimInfo Query Language
Viql is a simple boolean query language for selecting VimInfo records
as created by examining a Vim swapfile with 'toolman.org/file/viminfo'.
Query statements are boolean expressions composed of declarations or
comparisons combined with boolean operators which are evaluated against
a series of VimInfo structures to derive a boolean value.
As an example:
cryptmethod = plaintext and not(running or missing)
Here, 'cryptmethod = plaintext' is a comparison while 'running' and
'missing' are declarations. Therefore, the above statement is true for
VimInfo structs describing a plaintext file whose edit session is no
longer running and it's associated file still exists. Otherwise, it
returns false.
See GRAMMAR below for the formal grammar specification.
DECLARATIONS
A declaration is a single token that evaluates to true or false in the
context of a specific VimInfo structure. The following declarations are
currently supported:
all - true for all VimInfo
none - false for all VimInfo
changed - true if the edit session has unsaved changes
modified
currhost - true if the edit session is on the current host
thishost
thisuser - true if the edit session was initiated by
curruser the current user
running - true if the edit session is currently running
(implies "thishost")
missing - true if the file associated with this edit session
no longer exists
COMPARISONS
A comparison is used to test the value of a particular VimInfo field.
All comparisons are either for equality or a regular expression match;
there are no inequality comparisons. Regular expression matching is
only available for string fields and other fields may have a limited
set of allowed values. See FIELDS AND VALUES below for details.
Note that boolean fields are not supported via comparisons. Supported
boolean tests are available as declarations.
COMPARITORS
The following comparitors are allowed:
= Is equal to
== (note that '=' and '==' mean the same thing).
!= Is not equal to
=~ A regular expression match (value must be a regex)
!~ A negative regular expression match
FIELDS AND VALUES
Certain VimInfo fields are only comparable against a particular type of
data or a limited set of values. For example, only string fields may be
use with regular expression matches and the PID and Inode fields must be
compared against an integer value.
String values containing spaces must be double quoted otherwise no quoting
is required. Integer values and values for one of the enumerated fields
(e.g. 'cryptmethod' or 'fileformat') should NOT be quoted.
The following list enumerates all supported fields and their associated
value restrictions, if any:
cm - One of: plaintext, zip, blowfish or blowfish2
cryptmethod
filename - A string field; may be compared using a regexp
format - One of: unix, dos or mac
fileformat
host - A string field; may be compared using a regexp
hostname
inode - Integer field; value must be a number
pid - Integer field; value must be a number
user - A string field; may be compared using a regexp
REGULAR EXPRESSIONS
The supported regular expression syntax is that accepted by RE2 as
implemented by the standard Go "regexp" package and is specified in
a manner inspired by Perl's "qr//" operator as described below.
Regular expression values are surrounded by a pair of '/' characters
and are not anchored; if you'd like your regexp to be anchored to one
end or the other you must use '^' and '$' accordingly. If a '/' is to
be included in the regular expression, it must be escaped by preceding
it with a backslash '\' character. Likewise, a literal backslash may
also be included by specifying two of them together (e.g. '\\').
Otherwise, no escaping is needed for backslash characters intended as
regexp meta characters.
Consider the following regular expression value:
/foo\/bar\\..*\.bla/
Here "\/" forestalls the end of the regexp and matches a literal "/", "\\"
matches a literal backslash and does not escape the following ".", but "\."
is escaped and matches a literal "." character.
Regular expression options may be specified with one or more of the
following characters immediately following the closing '/' character:
i case-insensitive match
m multi-line mode: ^ and $ match begin/end line in addition to
begin/end text
s let . match \n
U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc
Capture groups may also be used to limit the scope of certain options to a
subset of the regular expression such as:
xxx(?flags:re)xxx
...beyond that however, capture groups are ignored.
GRAMMAR
The formal grammar for the language is specified by the following modified
BNF:
expression ::= parenthetic | operation | declaration | comparison
parenthetic ::= '(' expression ')'
operation ::= 'not' expression
| expression 'and' expression
| expression 'or' expression
declaration ::= 'all' | 'changed' | 'currhost' | 'curruser'
| 'missing' | 'modified' | 'none' | 'running'
| 'thishost' | 'thisuser'
comparison ::= field comparitor value
field ::= 'cm' | 'cryptmethod' | 'filename' | 'fileformat'
| 'format' | 'host' | 'hostname' | 'inode'
| 'pid' | 'user'
comparitor ::= '=' | '==' | '!=' | '=~' | '!~'
value ::= BAREWORD | QUOTED | INTEGER | REGEXP
`
func Help(w io.Writer) {
fmt.Fprint(w, qlHelpText)
} | viql/help.go | 0.762513 | 0.508361 | help.go | starcoder |
package generator
// HotspotIntegerGenerator generates integers resembling a hotspot
// distribution where x% of operations access y% of data items. The
// parameters specify the bounds for the numbers, the percentage of the
// interval which comprises the hot set and the percentage of operations
// that access the hot set. Numbers of the hot set are always smaller than
// any number in the cold set. Elements from the hot set and the cold set
// are chosen using a uniform distribution.
type HotspotIntegerGenerator struct {
	*IntegerGeneratorBase
	lowerBound     int64   // smallest value ever produced
	upperBound     int64   // largest value ever produced
	hotInterval    int64   // number of values in the hot set
	coldInterval   int64   // number of values in the cold set
	hotsetFraction float64 // fraction of the interval that is hot
	hotOpnFraction float64 // fraction of operations hitting the hot set
}
// checkFraction validates a fraction-like input: values inside [0.0, 1.0]
// are returned unchanged, anything out of range is treated as invalid and
// replaced with 0.0.
func checkFraction(value float64) float64 {
	if value >= 0.0 && value <= 1.0 {
		return value
	}
	// Fraction out of range: fall back to 0.0.
	return 0.0
}
// Create a generator for hotspot distribution over [lowerBound, upperBound].
// hotsetFraction is the share of the interval that forms the hot set;
// hotOpnFraction is the share of operations directed at it. Out-of-range
// fractions are clamped to 0 and swapped bounds are reordered.
func NewHotspotIntegerGenerator(
	lowerBound, upperBound int64,
	hotsetFraction, hotOpnFraction float64) *HotspotIntegerGenerator {
	// check whether hotset fraction is out of range
	hotsetFraction = checkFraction(hotsetFraction)
	// check whether hot operation fraction is out of range
	hotOpnFraction = checkFraction(hotOpnFraction)
	if lowerBound > upperBound {
		// upper bound of hotspot Generator smaller than the lower one
		// swap the values
		lowerBound, upperBound = upperBound, lowerBound
	}
	interval := upperBound - lowerBound + 1
	// size of the hot interval, truncated towards zero
	hotInterval := int64(float64(interval) * hotsetFraction)
	return &HotspotIntegerGenerator{
		IntegerGeneratorBase: NewIntegerGeneratorBase(0),
		lowerBound:           lowerBound,
		upperBound:           upperBound,
		hotInterval:          hotInterval,
		coldInterval:         interval - hotInterval,
		hotsetFraction:       hotsetFraction,
		hotOpnFraction:       hotOpnFraction,
	}
}
// NextInt draws the next value: with probability hotOpnFraction a uniform
// value from the hot interval, otherwise a uniform value from the cold
// interval. The drawn value is recorded as the generator's last value.
// NOTE(review): relies on package-level NextFloat64/NextInt64 helpers —
// assumed to return uniform values in [0,1) and [0,n); confirm.
func (self *HotspotIntegerGenerator) NextInt() int64 {
	var value int64
	if NextFloat64() < self.hotOpnFraction {
		// Choose a value from the hot set.
		value = self.lowerBound + NextInt64(self.hotInterval)
	} else {
		// Choose a value from the cold set.
		value = self.lowerBound + self.hotInterval + NextInt64(self.coldInterval)
	}
	self.SetLastInt(value)
	return value
}
// NextString returns the next drawn value formatted as a string, delegating
// to the embedded IntegerGeneratorBase with this generator as the source.
func (self *HotspotIntegerGenerator) NextString() string {
	return self.IntegerGeneratorBase.NextString(self)
}
// Mean returns the expected value of the distribution: the weighted average
// of the midpoints of the hot and cold intervals, weighted by the
// probability of drawing from each set.
func (self *HotspotIntegerGenerator) Mean() float64 {
	// Compute midpoints in floating point. The previous code divided
	// hotInterval/coldInterval by the constant 2.0 while still in int64,
	// which truncated the midpoint for odd interval sizes.
	hotMid := float64(self.lowerBound) + float64(self.hotInterval)/2.0
	coldMid := float64(self.lowerBound) + float64(self.hotInterval) + float64(self.coldInterval)/2.0
	return self.hotOpnFraction*hotMid + (1-self.hotOpnFraction)*coldMid
}
// GetLowerBound returns the smallest value the generator can produce.
func (self *HotspotIntegerGenerator) GetLowerBound() int64 {
	return self.lowerBound
}
// GetUpperBound returns the largest value the generator can produce.
func (self *HotspotIntegerGenerator) GetUpperBound() int64 {
	return self.upperBound
}
// GetHotsetFraction returns the fraction of the interval that is hot.
func (self *HotspotIntegerGenerator) GetHotsetFraction() float64 {
	return self.hotsetFraction
}
func (self *HotspotIntegerGenerator) GetHotOpnFraction() float64 {
return self.hotOpnFraction
} | generator/hotspot_integer_generator.go | 0.894881 | 0.495484 | hotspot_integer_generator.go | starcoder |
package common
import "math"
// Helper functions and data types for flux computations.
// Flux holds the conserved variables (conservative flux).
type Flux struct {
	Density float64 // rho
	MomX float64 // rho*u
	MomY float64 // rho*v
	Energy float64 // rho*E
}
// PrimtiveFlux holds the primitive variables (density, velocity, pressure).
// The name keeps the original's spelling for compatibility.
type PrimtiveFlux struct {
	Density float64
	VelocityX float64
	VelocityY float64
	Pressure float64
}
// ConvectiveFlux is the convective flux through a face with normal (nx, ny),
// where V is the face-normal velocity and H the total enthalpy.
type ConvectiveFlux struct {
	ConvFlux1 float64 // rho*V
	ConvFlux2 float64 // rho*u*V+nx*p
	ConvFlux3 float64 // rho*v*V+ny*p
	ConvFlux4 float64 // rho*H*V
}
// ScalarMultiFlux returns the convective flux scaled component-wise by scalar.
func (flux ConvectiveFlux) ScalarMultiFlux(scalar float64) ConvectiveFlux {
	var res ConvectiveFlux
	res = ConvectiveFlux{ConvFlux1: scalar * flux.ConvFlux1, ConvFlux2: scalar * flux.ConvFlux2, ConvFlux3: scalar * flux.ConvFlux3, ConvFlux4: scalar * flux.ConvFlux4}
	return res
}
// FluxMinus returns the component-wise difference flux - flux1.
func (flux ConvectiveFlux) FluxMinus(flux1 ConvectiveFlux) ConvectiveFlux {
	var res ConvectiveFlux
	res = ConvectiveFlux{ConvFlux1: flux.ConvFlux1 - flux1.ConvFlux1, ConvFlux2: flux.ConvFlux2 - flux1.ConvFlux2,
		ConvFlux3: flux.ConvFlux3 - flux1.ConvFlux3, ConvFlux4: flux.ConvFlux4 - flux1.ConvFlux4}
	return res
}
// FluxPlus returns the component-wise sum flux + flux1.
func (flux ConvectiveFlux) FluxPlus(flux1 ConvectiveFlux) ConvectiveFlux {
	var res ConvectiveFlux
	res = ConvectiveFlux{ConvFlux1: flux.ConvFlux1 + flux1.ConvFlux1, ConvFlux2: flux.ConvFlux2 + flux1.ConvFlux2,
		ConvFlux3: flux.ConvFlux3 + flux1.ConvFlux3, ConvFlux4: flux.ConvFlux4 + flux1.ConvFlux4}
	return res
}
// FluxMinusConvc updates the solution: conserved variables minus a
// convective-flux residual, component-wise.
func (flux Flux) FluxMinusConvc(flux1 ConvectiveFlux) Flux {
	var res Flux
	res = Flux{Density: flux.Density - flux1.ConvFlux1, MomX: flux.MomX - flux1.ConvFlux2, MomY: flux.MomY - flux1.ConvFlux3, Energy: flux.Energy - flux1.ConvFlux4}
	return res
}
// Conv2Prim converts conserved variables to primitive variables using the
// ideal-gas relation p = (gamma-1) * (rho*E - 0.5*rho*|V|^2).
func (flux Flux) Conv2Prim(gamma float64) PrimtiveFlux {
	density := flux.Density
	momX := flux.MomX
	momY := flux.MomY
	totale := flux.Energy
	pr := PrimtiveFlux{Density: density, VelocityX: momX / density, VelocityY: momY / density, Pressure: (gamma - 1.0) * (totale - 0.5*(momX*momX+momY*momY)/density)}
	return pr
}
// Prim2Conv converts primitive variables back to conserved variables.
func (pr PrimtiveFlux) Prim2Conv(gamma float64) Flux {
	var flux Flux
	density := pr.Density
	velocityX := pr.VelocityX
	velocityY := pr.VelocityY
	totale := TotalEnergy(pr, gamma)
	flux = Flux{Density: density, MomX: density * velocityX, MomY: density * velocityY, Energy: density * totale}
	return flux
}
// FluxMinus returns the component-wise difference flux - flux1 (primitive form).
func (flux PrimtiveFlux) FluxMinus(flux1 PrimtiveFlux) PrimtiveFlux {
	var res PrimtiveFlux
	res = PrimtiveFlux{Density: flux.Density - flux1.Density, VelocityX: flux.VelocityX - flux1.VelocityX, VelocityY: flux.VelocityY - flux1.VelocityY, Pressure: flux.Pressure - flux1.Pressure}
	return res
}
// FluxMinus returns the component-wise difference flux - flux1 (conserved form).
func (flux Flux) FluxMinus(flux1 Flux) Flux {
	var res Flux
	res = Flux{Density: flux.Density - flux1.Density, MomX: flux.MomX - flux1.MomX, MomY: flux.MomY - flux1.MomY, Energy: flux.Energy - flux1.Energy}
	return res
}
// FluxPlus returns the component-wise sum flux + flux1 (conserved form).
func (flux Flux) FluxPlus(flux1 Flux) Flux {
	var res Flux
	res = Flux{Density: flux.Density + flux1.Density, MomX: flux.MomX + flux1.MomX, MomY: flux.MomY + flux1.MomY, Energy: flux.Energy + flux1.Energy}
	return res
}
// FluxPlus returns the component-wise sum flux + flux1 (primitive form).
func (flux PrimtiveFlux) FluxPlus(flux1 PrimtiveFlux) PrimtiveFlux {
	var res PrimtiveFlux
	res = PrimtiveFlux{Density: flux.Density + flux1.Density, VelocityX: flux.VelocityX + flux1.VelocityX, VelocityY: flux.VelocityY + flux1.VelocityY, Pressure: flux.Pressure + flux1.Pressure}
	return res
}
// ScalarMultiFlux scales every component by the given factor.
// (The parameter name "saclar" is a historical typo for "scalar".)
func (flux Flux) ScalarMultiFlux(saclar float64) Flux {
	var res Flux
	res = Flux{Density: saclar * flux.Density, MomX: saclar * flux.MomX, MomY: saclar * flux.MomY, Energy: saclar * flux.Energy}
	return res
}
// ScalarMultiFlux scales every component by the given factor.
func (flux PrimtiveFlux) ScalarMultiFlux(saclar float64) PrimtiveFlux {
	var res PrimtiveFlux
	res = PrimtiveFlux{Density: saclar * flux.Density, VelocityX: saclar * flux.VelocityX, VelocityY: saclar * flux.VelocityY, Pressure: saclar * flux.Pressure}
	return res
}
// FluxMinDenominator subtracts flux1 from flux component-wise while making
// sure no component of the result is numerically zero, so the result can be
// used safely as a limiter denominator. Components with magnitude below
// epsilon are replaced by +/-epsilon, keeping the sign of the difference.
func (flux PrimtiveFlux) FluxMinDenominator(flux1 PrimtiveFlux) PrimtiveFlux {
	const epsilon = 1.0e-6
	return PrimtiveFlux{
		Density:   safeDiff(flux.Density-flux1.Density, epsilon),
		VelocityX: safeDiff(flux.VelocityX-flux1.VelocityX, epsilon),
		VelocityY: safeDiff(flux.VelocityY-flux1.VelocityY, epsilon),
		Pressure:  safeDiff(flux.Pressure-flux1.Pressure, epsilon),
	}
}

// safeDiff returns diff unchanged unless |diff| < epsilon, in which case it
// returns epsilon carrying the sign of diff (a zero diff yields +epsilon,
// matching the original behavior).
// BUG FIX: the original code derived the sign for the VelocityY and
// Pressure components from the VelocityX difference (copy-paste error);
// each component's sign is now taken from its own difference.
func safeDiff(diff, epsilon float64) float64 {
	if math.Abs(diff) >= epsilon {
		return diff
	}
	if diff < 0 {
		return -epsilon
	}
	return epsilon
}
// LimiterMulti multiplies the limiter vector with this variable vector,
// component-wise.
// Parameter: the limiter vector.
func (flux PrimtiveFlux) LimiterMulti(flux1 PrimtiveFlux) PrimtiveFlux {
	var res PrimtiveFlux
	res = PrimtiveFlux{flux.Density * flux1.Density, flux.VelocityX * flux1.VelocityX, flux.VelocityY * flux1.VelocityY, flux.Pressure * flux1.Pressure}
	return res
}
// FluxDivision divides component-wise; used in the flux limiter to compute
// the ratio r.
// NOTE(review): assumes flux1 has no zero components — denominators appear
// to come from FluxMinDenominator, which guarantees this; confirm callers.
func (flux PrimtiveFlux) FluxDivision(flux1 PrimtiveFlux) PrimtiveFlux {
	var res PrimtiveFlux
	densityDiv := flux.Density / flux1.Density
	velocityXDiv := flux.VelocityX / flux1.VelocityX
	velocityYDiv := flux.VelocityY / flux1.VelocityY
	pressureDiv := flux.Pressure / flux1.Pressure
	res = PrimtiveFlux{Density: densityDiv, VelocityX: velocityXDiv, VelocityY: velocityYDiv, Pressure: pressureDiv}
	return res
}
// LimiterFun evaluates the limiter function phi(r) component-wise for an
// already computed ratio vector r. This is the min-mod limiter:
// phi(r) = max(0, min(r, 1)).
func LimiterFun(flux PrimtiveFlux) PrimtiveFlux {
	minmod := func(r float64) float64 {
		return math.Max(math.Min(r, 1.0), 0.0)
	}
	return PrimtiveFlux{
		Density:   minmod(flux.Density),
		VelocityX: minmod(flux.VelocityX),
		VelocityY: minmod(flux.VelocityY),
		Pressure:  minmod(flux.Pressure),
	}
}
// TotalEnergy computes the specific total energy from primitive variables:
// E = p/((gamma-1)*rho) + |V|^2/2.
func TotalEnergy(pr PrimtiveFlux, gamma float64) float64 {
	density := pr.Density
	velocityX := pr.VelocityX
	velocityY := pr.VelocityY
	pressure := pr.Pressure
	totale := (1.0/(gamma-1.0))*(pressure/density) + 0.5*(velocityX*velocityX+velocityY*velocityY)
	return totale
}
// SoundSpeed computes the local speed of sound c = sqrt(gamma*p/rho).
func SoundSpeed(pr PrimtiveFlux, gamma float64) float64 {
	dens := pr.Density
	pres := pr.Pressure
	c := math.Sqrt(gamma * pres / dens)
	return c
}
// 计算总焓
func TotalEnthalpy(pr PrimtiveFlux, gamma float64) float64 {
dens := pr.Density
velocityX := pr.VelocityX
velocityY := pr.VelocityY
press := pr.Pressure
return (gamma/(gamma-1.0))*(press/dens) + 0.5*(velocityX*velocityX+velocityY*velocityY)
} | Common/Flux.go | 0.671255 | 0.584657 | Flux.go | starcoder |
package graphproc
//Vertex : graph vertex type. joined counts inputs satisfied so far and
//forked counts outgoing branches explored while building the execution path.
type Vertex struct {
	Name string
	Prev []*Edge
	Next []*Edge
	Vstage *Stage
	joined int
	forked int
	produced int
}
//Edge : graph edge type carrying a payload toward its Out vertex
type Edge struct {
	//In *Vertex
	Out *Vertex
	Epayload *Payload
	Estate *State
}
//Graph : graph type of vertexes and edges and execution path. current is
//the read cursor used by NextVertex; step and forking are working state
//for BuildPath.
type Graph struct {
	V []*Vertex
	E []*Edge
	Path []*Vertex
	current int
	step *Vertex
	forking []*Vertex
}
// NewGraph creates an empty graph with preallocated vertex, edge and path
// storage.
func NewGraph() *Graph {
	return &Graph{
		V:       make([]*Vertex, 0, 32),
		E:       make([]*Edge, 0, 32),
		Path:    make([]*Vertex, 0, 32),
		forking: make([]*Vertex, 0, 32),
	}
}
// NewVertex creates a vertex named n with a fresh stage, registers it with
// the graph and returns it.
func (g *Graph) NewVertex(n string) *Vertex {
	v := &Vertex{
		Name:   n,
		Prev:   make([]*Edge, 0, 32),
		Next:   make([]*Edge, 0, 32),
		Vstage: NewStage(),
	}
	g.V = append(g.V, v)
	return v
}
// NewEdge creates an edge with an empty payload buffer, registers it with
// the graph and returns it.
func (g *Graph) NewEdge() *Edge {
	e := &Edge{
		Epayload: &Payload{Raw: make([]byte, 0, 2048)},
	}
	g.E = append(g.E, e)
	return e
}
// NextVertex returns the next vertex on the execution path and advances the
// cursor; it returns nil once the end of the path has been reached.
func (g *Graph) NextVertex() *Vertex {
	if g.current == len(g.Path) {
		return nil
	}
	next := g.Path[g.current]
	g.current++
	return next
}
//Link : link two vertexes by creating an edge from vertex 1 to vertex 2.
//The edge is appended to v1's outgoing and v2's incoming edge lists.
func (g *Graph) Link(v1 *Vertex, v2 *Vertex) {
	e := g.NewEdge()
	e.Out = v2
	v1.Next = append(v1.Next, e)
	v2.Prev = append(v2.Prev, e)
}
//Path : build the execution path by walking through the graph depth first until a vertex is found with incomplete inputs
// go back to the last vertex with a fork in the path and start the next walk
func (g *Graph) BuildPath() {
if g.step == nil { //First step in building the path
g.step = g.V[0]
g.Path = append(g.Path, g.step)
}
for {
if len(g.step.Prev) == g.step.joined { //All inputs are present
if len(g.step.Next) == 0 { // We are done
break
}
if len(g.step.Next) == 1 { // Simple serial step in the path
g.step = g.step.Next[0].Out
g.step.joined++
g.Path = append(g.Path, g.step)
} else { //This vertex is a fork in the path
if g.step.forked < len(g.step.Next) {
if g.step.forked == 0 { //If first time, add it to the slice of vertexes forking
g.forking = append(g.forking, g.step)
}
g.step.forked++
g.step = g.step.Next[g.step.forked-1].Out
g.step.joined++
g.Path = append(g.Path, g.step)
} else { //Finished with this vertex's forking, remove it from the forking slice
g.forking = g.forking[1:]
g.step = g.forking[0]
}
}
} else if len(g.forking) > 0 { //Go back to a vertex that is forking since this vertex is waiting for inputs
g.step = g.forking[0]
} else {
continue
}
}
} | graphproc/graph.go | 0.518546 | 0.47859 | graph.go | starcoder |
package colorful
import (
"fmt"
"math"
"math/rand"
)
// The algorithm works in L*a*b* color space and converts to RGB in the end.
// L* in [0..1], a* and b* in [-1..1]
// SoftPaletteSettings configures palette generation (see SoftPaletteEx).
type SoftPaletteSettings struct {
	// A function which can be used to restrict the allowed color-space.
	CheckColor func(c ColorLab) bool
	// The higher, the better quality but the slower. Usually two figures.
	Iterations int
	// Use up to 160000 or 8000 samples of the L*a*b* space (and thus calls to CheckColor).
	// Set this to true only if your CheckColor shapes the Lab space weirdly.
	ManySamples bool
}
// SoftPaletteEx uses K-means to cluster the color-space and returns the
// means of the clusters as a new palette of distinctive colors. It falls
// back to K-medoid if a mean happens to fall outside of the color-space,
// which can only happen if a CheckColor function is specified.
// (Windows-style Foo/FooEx naming: SoftPalette is the convenience wrapper.)
func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, error) {

	// check reports whether a color is a valid RGB color and also fulfills
	// the potentially provided constraint.
	check := func(col ColorLab) bool {
		c := col.Color()
		return c.IsValid() && (settings.CheckColor == nil || settings.CheckColor(col))
	}

	// Sample the color space. These will be the points k-means is run on.
	dl := 0.05
	dab := 0.1
	if settings.ManySamples {
		dl = 0.01
		dab = 0.05
	}

	samples := make([]ColorLab, 0, int(1.0/dl*2.0/dab*2.0/dab))
	for l := 0.0; l <= 1.0; l += dl {
		for a := -1.0; a <= 1.0; a += dab {
			for b := -1.0; b <= 1.0; b += dab {
				if check(ColorLab{l, a, b}) {
					samples = append(samples, ColorLab{l, a, b})
				}
			}
		}
	}

	// Fewer samples than requested colors would cause infinite loops below.
	if len(samples) < colorsCount {
		// BUG FIX: the format string had two %v verbs but no arguments were
		// supplied; pass the requested count and the sample count.
		return nil, fmt.Errorf("palettegen: more colors requested (%v) than samples available (%v). Your requested color count may be wrong, you might want to use many samples or your constraint function makes the valid color space too small.", colorsCount, len(samples))
	} else if len(samples) == colorsCount {
		return labs2cols(samples), nil // Oops?
	}

	// We take the initial means out of the samples, so they are in fact
	// medoids. This helps us avoid infinite loops or arbitrary cutoffs with
	// too restrictive constraints.
	means := make([]ColorLab, colorsCount)
	for i := 0; i < colorsCount; i++ {
		for means[i] = samples[rand.Intn(len(samples))]; in(means, i, means[i]); means[i] = samples[rand.Intn(len(samples))] {
		}
	}

	clusters := make([]int, len(samples))
	samplesUsed := make([]bool, len(samples))

	// The actual k-means/medoid iterations.
	for i := 0; i < settings.Iterations; i++ {

		// Reassign the samples to clusters, i.e. to their closest mean.
		// Along the way, mark any sample that is currently used as a medoid.
		for isample, sample := range samples {
			samplesUsed[isample] = false
			mindist := math.Inf(+1)
			for imean, mean := range means {
				dist := sample.Dist(mean)
				if dist < mindist {
					mindist = dist
					clusters[isample] = imean
				}

				// Mark samples which are used as a medoid.
				if sample.Eq(mean) {
					samplesUsed[isample] = true
				}
			}
		}

		// Compute new means according to the samples.
		for imean := range means {
			// The new mean is the average of all samples belonging to it.
			nsamples := 0
			newmean := ColorLab{0.0, 0.0, 0.0}
			for isample, sample := range samples {
				if clusters[isample] == imean {
					nsamples++
					newmean.L += sample.L
					newmean.A += sample.A
					newmean.B += sample.B
				}
			}
			if nsamples > 0 {
				newmean.L /= float64(nsamples)
				newmean.A /= float64(nsamples)
				newmean.B /= float64(nsamples)
			} else {
				// That mean doesn't have any samples? Pick a fresh, unused
				// sample from the sample list instead.
				var inewmean int
				for inewmean = rand.Intn(len(samplesUsed)); samplesUsed[inewmean]; inewmean = rand.Intn(len(samplesUsed)) {
				}
				newmean = samples[inewmean]
				samplesUsed[inewmean] = true
			}

			// But now we still need to check whether the new mean is an
			// allowed color.
			if nsamples > 0 && check(newmean) {
				// It is; life's good (TM).
				means[imean] = newmean
			} else {
				// The new mean isn't an allowed color or doesn't have any
				// samples! Switch to medoid mode and pick the closest
				// (unused) sample. This always finds something thanks to
				// len(samples) >= colorsCount.
				mindist := math.Inf(+1)
				for isample, sample := range samples {
					if !samplesUsed[isample] {
						dist := sample.Dist(newmean)
						if dist < mindist {
							mindist = dist
							newmean = sample
						}
					}
				}
				// BUG FIX: the chosen medoid was previously computed but
				// never stored, leaving means[imean] at its old (possibly
				// invalid) value and making the fallback a no-op.
				means[imean] = newmean
			}
		}
	}
	return labs2cols(means), nil
}
// SoftPalette is a wrapper around SoftPaletteEx which uses common
// parameters: no color constraint, 50 iterations, coarse sampling.
func SoftPalette(colorsCount int) ([]Color, error) {
	return SoftPaletteEx(colorsCount, SoftPaletteSettings{nil, 50, false})
}
// in reports whether needle occurs among the first upto elements of
// haystack (clamped to the slice length).
func in(haystack []ColorLab, upto int, needle ColorLab) bool {
	limit := upto
	if limit > len(haystack) {
		limit = len(haystack)
	}
	if limit < 0 {
		limit = 0
	}
	for _, candidate := range haystack[:limit] {
		if candidate == needle {
			return true
		}
	}
	return false
}
func labs2cols(labs []ColorLab) (cols []Color) {
cols = make([]Color, len(labs))
for k, v := range labs {
cols[k] = v.Color()
}
return cols
} | soft_palettegen.go | 0.849784 | 0.598518 | soft_palettegen.go | starcoder |
package testutils
import (
"github.com/stretchr/testify/require"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// Format strings for assertion failure messages.
const (
	messageExpectedToBeNotRequired = "Expected %s to be not required"
	messageExpectedToBeRequired    = "Expected %s to be required"
	// BUG FIX: previously read "to be required" (copy-paste error).
	messageExpectedToBeComputed = "Expected %s to be computed"
	messageExpectedToBeOptional = "Expected %s to be optional"
	messageExpectedDefaultValue = "Expected default value %t"
)
//NewTerraformSchemaAssert creates a new instance of TerraformSchemaAssert
//bound to the given schema map and testing.T.
func NewTerraformSchemaAssert(schemaMap map[string]*schema.Schema, t *testing.T) TerraformSchemaAssert {
	return &terraformSchemaAssertImpl{schemaMap: schemaMap, t: t}
}
//TerraformSchemaAssert a test util to verify terraform schema fields
type TerraformSchemaAssert interface {
	//AssertSchemaIsRequiredAndOfTypeString checks if the given schema field is required and of type string
	AssertSchemaIsRequiredAndOfTypeString(fieldName string)
	//AssertSchemaIsRequiredAndOfTypeInt checks if the given schema field is required and of type int
	AssertSchemaIsRequiredAndOfTypeInt(fieldName string)
	//AssertSchemaIsRequiredAndOfTypeFloat checks if the given schema field is required and of type float
	AssertSchemaIsRequiredAndOfTypeFloat(fieldName string)
	//AssertSchemaIsOptionalAndOfTypeString checks if the given schema field is optional and of type string
	AssertSchemaIsOptionalAndOfTypeString(fieldName string)
	//AssertSchemaIsOptionalAndOfTypeStringWithDefault checks if the given schema field is optional and of type string and has the given default value
	AssertSchemaIsOptionalAndOfTypeStringWithDefault(fieldName string, defaultValue string)
	//AssertSchemaIsOptionalAndOfTypeInt checks if the given schema field is optional and of type int
	AssertSchemaIsOptionalAndOfTypeInt(fieldName string)
	//AssertSchemaIsOptionalAndOfTypeFloat checks if the given schema field is optional and of type float
	AssertSchemaIsOptionalAndOfTypeFloat(fieldName string)
	//AssertSchemaIsOfTypeBooleanWithDefault checks if the given schema field is an optional boolean field with an expected default value
	AssertSchemaIsOfTypeBooleanWithDefault(fieldName string, defaultValue bool)
	//AssertSchemaIsRequiredAndOfTypeListOfStrings checks if the given schema field is required and of type list of string
	AssertSchemaIsRequiredAndOfTypeListOfStrings(fieldName string)
	//AssertSchemaIsOptionalAndOfTypeListOfStrings checks if the given schema field is optional and of type list of string
	AssertSchemaIsOptionalAndOfTypeListOfStrings(fieldName string)
	//AssertSchemaIsRequiredAndOfTypeSetOfStrings checks if the given schema field is required and of type set of string
	AssertSchemaIsRequiredAndOfTypeSetOfStrings(fieldName string)
	//AssertSchemaIsOptionalAndOfTypeSetOfStrings checks if the given schema field is optional and of type set of string
	AssertSchemaIsOptionalAndOfTypeSetOfStrings(fieldName string)
	//AssertSchemaIsComputedAndOfTypeString checks if the given schema field is computed and of type string
	AssertSchemaIsComputedAndOfTypeString(fieldName string)
	//AssertSchemaIsComputedAndOfTypeInt checks if the given schema field is computed and of type int
	AssertSchemaIsComputedAndOfTypeInt(fieldName string)
	//AssertSchemaIsComputedAndOfTypeBool checks if the given schema field is computed and of type bool
	AssertSchemaIsComputedAndOfTypeBool(fieldName string)
}
// terraformSchemaAssertImpl is the default TerraformSchemaAssert
// implementation, bound to one schema map and one testing.T.
type terraformSchemaAssertImpl struct {
	schemaMap map[string]*schema.Schema
	t *testing.T
}
// AssertSchemaIsRequiredAndOfTypeString checks that the field is required and of type string.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsRequiredAndOfTypeString(schemaField string) {
	inst.AssertSchemaIsRequiredAndType(schemaField, schema.TypeString)
}
// AssertSchemaIsRequiredAndOfTypeInt checks that the field is required and of type int.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsRequiredAndOfTypeInt(schemaField string) {
	inst.AssertSchemaIsRequiredAndType(schemaField, schema.TypeInt)
}
// AssertSchemaIsRequiredAndOfTypeFloat checks that the field is required and of type float.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsRequiredAndOfTypeFloat(schemaField string) {
	inst.AssertSchemaIsRequiredAndType(schemaField, schema.TypeFloat)
}
// AssertSchemaIsRequiredAndType checks that the field exists, has the given
// type and description, and is marked Required.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsRequiredAndType(schemaField string, dataType schema.ValueType) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	inst.assertSchemaIsOfType(s, dataType)
	require.True(inst.t, s.Required)
}
// AssertSchemaIsOptionalAndOfTypeString checks that the field is optional and of type string.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsOptionalAndOfTypeString(schemaField string) {
	inst.assertSchemaIsOptionalAndOfType(schemaField, schema.TypeString)
}
// AssertSchemaIsOptionalAndOfTypeStringWithDefault additionally verifies the
// field's default value.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsOptionalAndOfTypeStringWithDefault(schemaField string, defaultValue string) {
	inst.assertSchemaIsOptionalAndOfType(schemaField, schema.TypeString)
	s := inst.schemaMap[schemaField]
	require.Equal(inst.t, defaultValue, s.Default)
}
// AssertSchemaIsOptionalAndOfTypeInt checks that the field is optional and of type int.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsOptionalAndOfTypeInt(schemaField string) {
	inst.assertSchemaIsOptionalAndOfType(schemaField, schema.TypeInt)
}
// AssertSchemaIsOptionalAndOfTypeFloat checks that the field is optional and of type float.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsOptionalAndOfTypeFloat(schemaField string) {
	inst.assertSchemaIsOptionalAndOfType(schemaField, schema.TypeFloat)
}
// assertSchemaIsOptionalAndOfType checks existence, type, description and
// the Optional flag.
func (inst *terraformSchemaAssertImpl) assertSchemaIsOptionalAndOfType(schemaField string, dataType schema.ValueType) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	inst.assertSchemaIsOfType(s, dataType)
	require.True(inst.t, s.Optional)
}
// AssertSchemaIsOfTypeBooleanWithDefault checks that the field is a
// non-required boolean with the expected default value.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsOfTypeBooleanWithDefault(schemaField string, defaultValue bool) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	inst.assertSchemaIsOfType(s, schema.TypeBool)
	require.False(inst.t, s.Required)
	require.Equal(inst.t, defaultValue, s.Default)
}
// assertSchemaIsOfType checks the field's type and that a non-empty
// description is set.
func (inst *terraformSchemaAssertImpl) assertSchemaIsOfType(s *schema.Schema, dataType schema.ValueType) {
	require.Equal(inst.t, dataType, s.Type)
	require.Greater(inst.t, len(s.Description), 0)
}
// AssertSchemaIsOptionalAndOfTypeListOfStrings checks that the field is
// optional (and not required) and is a list of strings.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsOptionalAndOfTypeListOfStrings(schemaField string) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	require.False(inst.t, s.Required)
	require.True(inst.t, s.Optional)
	inst.assertSchemaIsOfTypeListOfStrings(s)
}
// AssertSchemaIsRequiredAndOfTypeListOfStrings checks that the field is
// required and is a list of strings.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsRequiredAndOfTypeListOfStrings(schemaField string) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	require.True(inst.t, s.Required)
	inst.assertSchemaIsOfTypeListOfStrings(s)
}
// assertSchemaIsOfTypeListOfStrings checks list type, string element type
// and a non-empty description.
func (inst *terraformSchemaAssertImpl) assertSchemaIsOfTypeListOfStrings(s *schema.Schema) {
	require.Equal(inst.t, schema.TypeList, s.Type)
	require.NotNil(inst.t, s.Elem)
	require.Equal(inst.t, schema.TypeString, s.Elem.(*schema.Schema).Type)
	require.Greater(inst.t, len(s.Description), 0)
}
// AssertSchemaIsOptionalAndOfTypeSetOfStrings checks that the field is
// optional (and not required) and is a set of strings.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsOptionalAndOfTypeSetOfStrings(schemaField string) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	require.False(inst.t, s.Required)
	require.True(inst.t, s.Optional)
	inst.assertSchemaIsOfTypeSetOfStrings(s)
}
// AssertSchemaIsRequiredAndOfTypeSetOfStrings checks that the field is
// required and is a set of strings.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsRequiredAndOfTypeSetOfStrings(schemaField string) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	require.True(inst.t, s.Required)
	inst.assertSchemaIsOfTypeSetOfStrings(s)
}
// assertSchemaIsOfTypeSetOfStrings checks set type, string element type and
// a non-empty description.
func (inst *terraformSchemaAssertImpl) assertSchemaIsOfTypeSetOfStrings(s *schema.Schema) {
	require.Equal(inst.t, schema.TypeSet, s.Type)
	require.NotNil(inst.t, s.Elem)
	require.Equal(inst.t, schema.TypeString, s.Elem.(*schema.Schema).Type)
	require.Greater(inst.t, len(s.Description), 0)
}
// AssertSchemaIsComputedAndOfTypeString checks that the field is computed and of type string.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsComputedAndOfTypeString(schemaField string) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	inst.assertSchemaIsOfType(s, schema.TypeString)
	require.True(inst.t, s.Computed)
}
// AssertSchemaIsComputedAndOfTypeInt checks that the field is computed and of type int.
func (inst *terraformSchemaAssertImpl) AssertSchemaIsComputedAndOfTypeInt(schemaField string) {
	s := inst.schemaMap[schemaField]
	require.NotNil(inst.t, s)
	inst.assertSchemaIsOfType(s, schema.TypeInt)
	require.True(inst.t, s.Computed)
}
func (inst *terraformSchemaAssertImpl) AssertSchemaIsComputedAndOfTypeBool(schemaField string) {
s := inst.schemaMap[schemaField]
require.NotNil(inst.t, s)
inst.assertSchemaIsOfType(s, schema.TypeBool)
require.True(inst.t, s.Computed)
} | testutils/terraform-schema-asserts.go | 0.736401 | 0.62986 | terraform-schema-asserts.go | starcoder |
package lbpCalc
import (
"image"
"github.com/bejohi/golbp/model"
)
// Offsets of the 8 neighbours of a pixel; index pos in both slices
// addresses the same neighbour, enumerated clockwise from the top-left.
var pixelNeighboursY = []int{-1,-1,-1,0,1,1,1,0}
var pixelNeighboursX = []int{-1,0,1,1,1,0,-1,-1}
// createUniformMatrix creates a 2d binary matrix for the given uniform
// struct. A 'true' in a cell means that the pixel's LBP pattern matches one
// of the given LBP uniform numbers from the uniform object. Computing the
// pattern and the membership test in a single pass avoids materializing an
// intermediate LBP matrix, which roughly halves the running time compared
// to the two-step approach.
func createUniformMatrix(img *image.Gray16, uniform model.LbpUniform)*[][]bool {
	width := (*img).Bounds().Max.X
	height := (*img).Bounds().Max.Y
	uniformArray:= make([][]bool,height)
	for y := 0; y < height; y++{
		uniformArray[y] = make([]bool,width)
		for x := 0; x < width; x++{
			// Border pixels have no full 8-neighbourhood; mark them false.
			if !pointNotAtBorder(x,y,width,height){
				uniformArray[y][x] = false
				continue
			}
			lbpByte := calculateLbpPatternForPixel(img,x,y)
			uniformArray[y][x] = uniform.IsByteInUniform(lbpByte)
		}
	}
	return &uniformArray
}
// CreateLbpMatrix creates a 2d matrix of bytes from a given Gray16 image.
// Every byte in a cell represents an lbp value (e.g 0b01001010); border
// cells, which lack a full 8-neighbourhood, are set to 0.
func CreateLbpMatrix(img *image.Gray16) *[][]byte{
	width := (*img).Bounds().Max.X
	height := (*img).Bounds().Max.Y
	lbpArray := make([][]byte,height)
	for y := 0; y < height; y++{
		lbpArray[y] = make([]byte,width)
		for x := 0; x < width; x++{
			// Border pixel: no full neighbourhood, store the zero pattern.
			if !pointNotAtBorder(x,y,width,height){
				lbpArray[y][x] = 0
				continue
			}
			lbpArray[y][x] = calculateLbpPatternForPixel(img,x,y)
		}
	}
	return &lbpArray
}
// calculateLbpPatternForPixel compares the pixel at (x, y) with its 8
// neighbours and returns the LBP pattern as a byte: bit pos is set when the
// neighbour's gray value is >= the center's value. The caller must ensure
// (x, y) is not on the image border.
func calculateLbpPatternForPixel(img *image.Gray16, x int, y int) byte {
	var pattern byte
	// The center value is loop-invariant; the original re-read it on every
	// neighbour comparison.
	center := (*img).Gray16At(x, y).Y
	for pos := uint(0); pos < 8; pos++ {
		ny := y + pixelNeighboursY[pos]
		nx := x + pixelNeighboursX[pos]
		if (*img).Gray16At(nx, ny).Y >= center {
			// Set the bit at the current position to 1.
			pattern |= 1 << pos
		}
		// A smaller neighbour leaves the bit at its zero value; the
		// original's explicit clear was a no-op.
	}
	return pattern
}
// pointNotAtBorder reports whether (x, y) lies strictly inside a matrix of
// the given width and height, i.e. not on its outermost rows or columns.
func pointNotAtBorder(x int, y int, width int, height int) bool {
	return x > 0 && y > 0 && x < width-1 && y < height-1
}
package main
import (
"bufio"
"fmt"
"image"
"image/color"
"image/png"
"math/cmplx"
"os"
)
/*
just take a point called z in the complex plane
let z1 be z^2 plus z
and z2 is z1^2 plus z
and z3 is z2^2 plus z
and if the series of z's should always stay
close to z and never trend away
that point is in the mandelbrot set
*/
// Settings
const xMin = -2.0
const xMax = 1.0
const yMin = -1.0
const yMax = 1.0
const outFileName = "out.png"
const imgCols = 6144
const imgRows = 4096
const scale = 6 // Supersample by this much in both dimensions
const escapeThresh = 100.0 // Treat a point as escaping if it exceeds this
const workerNum = 6
const chunkNum = 100 // Divide the image into this many chunks
// Image implementation

// img is a simple in-memory RGBA raster implementing image.Image, backed by
// a flat row-major pixel slice.
type img struct {
	cols, rows int
	px []color.RGBA
}
// ColorModel reports the RGBA color model.
func (img) ColorModel() color.Model {
	return color.RGBAModel
}
// Bounds reports the rectangle (0,0)-(cols,rows).
func (i img) Bounds() image.Rectangle {
	return image.Rectangle{
		image.Point{0, 0},
		image.Point{i.cols, i.rows},
	}
}
// At returns the pixel at (x, y); out-of-bounds reads yield opaque red as a
// visible error marker.
func (i img) At(x, y int) color.Color {
	if 0 <= x && x < i.cols && 0 <= y && y < i.rows {
		return i.px[y * i.cols + x]
	}
	return color.RGBA{255, 0, 0, 255}
}
// get is like At but returns the concrete color.RGBA.
func (i img) get(x, y int) color.RGBA {
	if 0 <= x && x < i.cols && 0 <= y && y < i.rows {
		return i.px[y * i.cols + x]
	}
	return color.RGBA{255, 0, 0, 255}
}
// set writes the pixel at (x, y); out-of-bounds writes are silently
// dropped. The value receiver still mutates the shared pixel data because
// px is a slice header pointing at the same backing array.
func (i img) set(x, y int, c color.RGBA) {
	if 0 <= x && x < i.cols && 0 <= y && y < i.rows {
		i.px[y * i.cols + x] = c
	}
}
// img utilities

// mkImg allocates a zero-initialized cols x rows image.
func mkImg(cols, rows int) img {
	return img{
		cols,
		rows,
		make([]color.RGBA, cols * rows),
	}
}
// downScale box-filters in by averaging each scale x scale block of source
// pixels, producing an image 1/scale the size in each dimension. Used to
// resolve the supersampled render into the final output.
func downScale(in img, scale int) img {
	out := mkImg(in.cols / scale, in.rows / scale)
	samples := scale * scale
	for outRow := 0; outRow < out.rows; outRow++ {
		for outCol := 0; outCol < out.cols; outCol++ {
			outRed, outGreen, outBlue := 0, 0, 0
			// Sum the scale*scale block of source pixels per channel...
			for subRow := 0; subRow < scale; subRow++ {
				for subCol := 0; subCol < scale; subCol++ {
					inRow := outRow * scale + subRow
					inCol := outCol * scale + subCol
					inColor := in.get(inCol, inRow)
					outRed += int(inColor.R)
					outGreen += int(inColor.G)
					outBlue += int(inColor.B)
				}
			}
			// ...then take the mean of each channel.
			outRed /= samples
			outGreen /= samples
			outBlue /= samples
			outColor := color.RGBA{uint8(outRed), uint8(outGreen), uint8(outBlue), 255}
			out.set(outCol, outRow, outColor)
		}
	}
	return out
}
// Math!
// linear maps x linearly from the interval [x1, x2] onto [y1, y2].
func linear(x, x1, x2, y1, y2 float64) float64 {
	slope := (y2 - y1) / (x2 - x1)
	return x*slope + (y1 - x1*slope)
}
// mandelbrot returns the number of iterations (capped at 255) before the
// orbit of c under z -> z*z + c gets "far away" (exceeds escapeThresh).
// Points that never escape within the budget yield 255.
func mandelbrot(c complex128) int {
	z := c
	for i := 0; i < 255; i++ {
		z = z*z + c
		if cmplx.Abs(z) > escapeThresh {
			return i
		}
	}
	// BUG FIX: the original loop ran to i == 256 for non-escaping points,
	// which wrapped to 0 when callers converted the result to uint8 and
	// colored interior points like instantly-escaping ones.
	return 255
}
// rowRange describes a half-open range of image rows [startRow, stopRow)
// rendered as one unit of work.
type rowRange struct {
	startRow, stopRow int
}
// Render chunks

// work renders row ranges received from chunks into i until the channel is
// closed, then signals completion on flag. Multiple workers are safe
// because chunks never overlap, so each goroutine writes disjoint rows.
func work(i img, chunks chan rowRange, flag chan int) {
	for {
		chunk, ok := <- chunks
		if !ok {
			break
		}
		for r := chunk.startRow; r < chunk.stopRow; r++ {
			// Map the pixel row to the imaginary axis (top row = yMax).
			y := linear(float64(r), 0.0, float64(i.rows-1), yMax, yMin)
			for c := 0; c < i.cols; c++ {
				// Map the pixel column to the real axis.
				x := linear(float64(c), 0.0, float64(i.cols-1), xMin, xMax)
				v := mandelbrot(complex(x, y))
				i.set(c, r, color.RGBA{0, uint8(v), uint8(v), 255})
			}
		}
	}
	flag <- 0 // Signal completion
}
func main() {
render := mkImg(imgCols * scale, imgRows * scale)
chunkRows := render.rows / chunkNum
// Queue up chunks of work on a channel
chunks := make(chan rowRange, chunkNum)
startRow := 0;
stopRow := chunkRows
for i := 0; i < chunkNum-1; i++ {
chunks <- rowRange{startRow, stopRow}
startRow, stopRow = stopRow, stopRow + chunkRows
}
chunks <- rowRange{startRow, render.rows}
close(chunks)
// Create a channel for each worker on which to signal completion
flags := make([]chan int, workerNum)
for i := 0; i < workerNum; i++ {
flags[i] = make(chan int)
}
// Start workers
for i := 0; i < workerNum; i++ {
go work(render, chunks, flags[i])
}
// Wait for workers to finish
for i := 0; i < workerNum; i++ {
<- flags[i]
}
renderSmall := downScale(render, scale)
outFile, err := os.Create(outFileName)
defer outFile.Close()
if err != nil {
fmt.Println(err)
return
}
outWriter := bufio.NewWriter(outFile)
err = png.Encode(outWriter, renderSmall)
if err != nil {
fmt.Println(err)
return
}
outWriter.Flush()
} | src/mandelbarf/mandelbarf.go | 0.628977 | 0.448547 | mandelbarf.go | starcoder |
package design
import (
. "goa.design/goa/v3/dsl"
)
var _ = Service("codeset", func() {
Description("The codeset service performs operations on Codesets.")
// Method describes a service method (endpoint)
Method("list", func() {
Description("Retrieve information about Codesets registered in FuseML.")
// Payload describes the method payload.
// Here the payload is an object that consists of two fields.
Payload(func() {
// Field describes an object field given a field index, a field
// name, a type and a description.
Field(1, "project", String, "List only Codesets that belong to given project", func() {
Example("mlflow-project-01")
})
Field(2, "label", String, "List only Codesets with matching label", func() {
Example("mlflow")
})
})
// Result describes the method result.
// Here the result is a collection of codeset value.
Result(ArrayOf(Codeset), "Return all registered Codesets matching the query.")
Error("NotFound", func() {
Description("If the Codeset is not found, should return 404 Not Found.")
})
// HTTP describes the HTTP transport mapping.
HTTP(func() {
// Requests to the service consist of HTTP GET requests.
// The payload fields are encoded as path parameters.
GET("/codesets")
Param("project", String, "List only Codesets that belong to given project", func() {
Example("mlflow-project-01")
})
Param("label", String, "List only Codesets with matching label", func() {
Example("mlflow")
})
// Responses use a "200 OK" HTTP status.
// The result is encoded in the response body (default).
Response(StatusOK)
Response("NotFound", StatusNotFound)
})
// GRPC describes the gRPC transport mapping.
GRPC(func() {
// Responses use a "OK" gRPC code.
// The result is encoded in the response message (default).
Response(CodeOK)
Response("NotFound", CodeNotFound)
})
})
Method("register", func() {
Description("Register a Codeset with the FuseML codeset store.")
Payload(func() {
Field(1, "codeset", Codeset, "Codeset descriptor")
Field(2, "location", String, "Path to the code that should be registered as Codeset", func() {
Example("mlflow-project-01")
})
Required("codeset", "location")
})
Error("BadRequest", func() {
Description("If the Codeset does not have the required fields, should return 400 Bad Request.")
})
Result(Codeset)
HTTP(func() {
POST("/codesets")
Param("location", String, "Path to the code that should be registered as Codeset", func() {
Example("work/ml/mlflow-code")
})
Response(StatusCreated)
Response("BadRequest", StatusBadRequest)
})
GRPC(func() {
Response(CodeOK)
Response("BadRequest", CodeInvalidArgument)
})
})
Method("get", func() {
Description("Retrieve an Codeset from FuseML.")
Payload(func() {
Field(1, "project", String, "Project name", func() {
Example("mlflow-project-01")
})
Field(2, "name", String, "Codeset name", func() {
Example("mlflow-app-01")
})
Required("project", "name")
})
Error("BadRequest", func() {
Description("If neither name or project is not given, should return 400 Bad Request.")
})
Error("NotFound", func() {
Description("If there is no codeset with the given name and project, should return 404 Not Found.")
})
Result(Codeset)
HTTP(func() {
GET("/codesets/{project}/{name}")
Response(StatusOK)
Response("BadRequest", StatusBadRequest)
Response("NotFound", StatusNotFound)
})
GRPC(func() {
Response(CodeOK)
Response("BadRequest", CodeInvalidArgument)
Response("NotFound", CodeNotFound)
})
})
})
// Codeset describes the Codeset
var Codeset = Type("Codeset", func() {
Field(1, "name", String, "The name of the Codeset", func() {
Example("mlflow-app-01")
})
Field(2, "project", String, "The project this Codeset belongs to", func() {
Example("mlflow-project-01")
})
Field(3, "description", String, "Codeset description", func() {
Example("My first MLFlow application with FuseML")
})
Field(4, "labels", ArrayOf(String), "Additional Codeset labels that helps with identifying the type", func() {
Example([]string{"mlflow", "playground"})
})
Field(5, "url", String, "Full URL to the Codeset", func() {
Example("http://my-gitea.server/project/repository.git")
})
Required("name", "project")
}) | design/codeset.go | 0.617397 | 0.415017 | codeset.go | starcoder |
package azuremonitorexporter
// Contains code common to both trace and metrics exporters
import (
"strconv"
"time"
"github.com/microsoft/ApplicationInsights-Go/appinsights/contracts"
"go.opentelemetry.io/collector/consumer/pdata"
"go.opentelemetry.io/collector/translator/conventions"
tracetranslator "go.opentelemetry.io/collector/translator/trace"
"go.uber.org/zap"
)
// Transforms a tuple of pdata.Resource, pdata.InstrumentationLibrary, pdata.LogRecord into an AppInsights contracts.Envelope
// This is the only method that should be targeted in the unit tests
func metricToEnvelopes(
resource pdata.Resource,
instrumentationLibrary pdata.InstrumentationLibrary,
metric pdata.Metric,
logger *zap.Logger) ([]*contracts.Envelope, error) {
dropped := 0
var envelopes []*contracts.Envelope
switch metric.DataType() {
case pdata.MetricDataTypeIntGauge:
intGauge := metric.IntGauge()
dps := intGauge.DataPoints()
for i := 0; i < dps.Len(); i++ {
dp := dps.At(i)
envelope := contracts.NewEnvelope()
envelope.Tags = make(map[string]string)
envelope.Time = toTime(dp.Timestamp()).Format(time.RFC3339Nano)
data := contracts.NewMetricData()
data.Properties = map[string]string{}
envelope.Name = data.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
data.Properties[k] = v
return true
})
dataPoint := contracts.NewDataPoint()
dataPoint.Name = metric.Name()
dataPoint.Value = float64(dp.Value())
data.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper := contracts.NewData()
dataWrapper.BaseType = data.BaseType()
dataWrapper.BaseData = data
envelope.Data = dataWrapper
envelopes = append(envelopes, envelope)
}
case pdata.MetricDataTypeDoubleGauge:
doubleGauge := metric.DoubleGauge()
dps := doubleGauge.DataPoints()
for i := 0; i < dps.Len(); i++ {
dp := dps.At(i)
envelope := contracts.NewEnvelope()
envelope.Tags = make(map[string]string)
envelope.Time = toTime(dp.Timestamp()).Format(time.RFC3339Nano)
data := contracts.NewMetricData()
data.Properties = map[string]string{}
envelope.Name = data.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
data.Properties[k] = v
return true
})
dataPoint := contracts.NewDataPoint()
dataPoint.Name = metric.Name()
dataPoint.Value = dp.Value()
data.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper := contracts.NewData()
dataWrapper.BaseType = data.BaseType()
dataWrapper.BaseData = data
envelope.Data = dataWrapper
envelopes = append(envelopes, envelope)
}
case pdata.MetricDataTypeIntSum:
intSum := metric.IntSum()
dps := intSum.DataPoints()
for i := 0; i < dps.Len(); i++ {
dp := dps.At(i)
envelope := contracts.NewEnvelope()
envelope.Tags = make(map[string]string)
envelope.Time = toTime(dp.Timestamp()).Format(time.RFC3339Nano)
data := contracts.NewMetricData()
data.Properties = map[string]string{}
envelope.Name = data.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
data.Properties[k] = v
return true
})
dataPoint := contracts.NewDataPoint()
dataPoint.Name = metric.Name()
dataPoint.Value = float64(dp.Value())
data.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper := contracts.NewData()
dataWrapper.BaseType = data.BaseType()
dataWrapper.BaseData = data
envelope.Data = dataWrapper
envelopes = append(envelopes, envelope)
}
case pdata.MetricDataTypeDoubleSum:
doubleSum := metric.DoubleSum()
dps := doubleSum.DataPoints()
for i := 0; i < dps.Len(); i++ {
dp := dps.At(i)
envelope := contracts.NewEnvelope()
envelope.Tags = make(map[string]string)
envelope.Time = toTime(dp.Timestamp()).Format(time.RFC3339Nano)
data := contracts.NewMetricData()
data.Properties = map[string]string{}
envelope.Name = data.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
data.Properties[k] = v
return true
})
dataPoint := contracts.NewDataPoint()
dataPoint.Name = metric.Name()
dataPoint.Value = dp.Value()
data.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper := contracts.NewData()
dataWrapper.BaseType = data.BaseType()
dataWrapper.BaseData = data
envelope.Data = dataWrapper
envelopes = append(envelopes, envelope)
}
case pdata.MetricDataTypeIntHistogram:
intHistogram := metric.IntHistogram()
dps := intHistogram.DataPoints()
for i := 0; i < dps.Len(); i++ {
dp := dps.At(i)
envelopeCount := contracts.NewEnvelope()
envelopeCount.Tags = make(map[string]string)
envelopeCount.Time = toTime(dp.Timestamp()).Format(time.RFC3339Nano)
data := contracts.NewMetricData()
data.Properties = map[string]string{}
envelopeCount.Name = data.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
data.Properties[k] = v
return true
})
dataPoint := contracts.NewDataPoint()
dataPoint.Name = metric.Name() + "_count"
dataPoint.Value = float64(dp.Count())
data.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper := contracts.NewData()
dataWrapper.BaseType = data.BaseType()
dataWrapper.BaseData = data
envelopeCount.Data = dataWrapper
envelope := contracts.NewEnvelope()
envelope.Tags = make(map[string]string)
envelope.Time = toTime(dp.Timestamp()).Format(time.RFC3339Nano)
data = contracts.NewMetricData()
data.Properties = map[string]string{}
envelope.Name = data.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
data.Properties[k] = v
return true
})
dataPoint = contracts.NewDataPoint()
dataPoint.Name = metric.Name()
dataPoint.Value = float64(dp.Sum())
data.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper = contracts.NewData()
dataWrapper.BaseType = data.BaseType()
dataWrapper.BaseData = data
envelope.Data = dataWrapper
envelopes = append(envelopes, envelope, envelopeCount)
bucketName := metric.Name() + "_bucket"
for j := 0; j < len(dp.BucketCounts()); j++ {
envelopeBucket := contracts.NewEnvelope()
envelopeBucket.Tags = make(map[string]string)
envelopeBucket.Time = envelope.Time
md := contracts.NewMetricData()
md.Properties = map[string]string{}
envelopeBucket.Name = md.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
md.Properties[k] = v
return true
})
if j < len(dp.ExplicitBounds()) {
md.Properties["upper_bound"] = strconv.FormatFloat(dp.ExplicitBounds()[j], 'f', -1, 64)
}
dataPoint := contracts.NewDataPoint()
dataPoint.Name = bucketName
dataPoint.Value = float64(dp.BucketCounts()[j])
md.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper = contracts.NewData()
dataWrapper.BaseType = md.BaseType()
dataWrapper.BaseData = md
envelopeBucket.Data = dataWrapper
envelopes = append(envelopes, envelopeBucket)
}
}
case pdata.MetricDataTypeHistogram:
doubleHistogram := metric.Histogram()
dps := doubleHistogram.DataPoints()
for i := 0; i < dps.Len(); i++ {
dp := dps.At(i)
envelopeCount := contracts.NewEnvelope()
envelopeCount.Tags = make(map[string]string)
envelopeCount.Time = toTime(dp.Timestamp()).Format(time.RFC3339Nano)
data := contracts.NewMetricData()
data.Properties = map[string]string{}
envelopeCount.Name = data.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
data.Properties[k] = v
return true
})
dataPoint := contracts.NewDataPoint()
dataPoint.Name = metric.Name() + "_count"
dataPoint.Value = float64(dp.Count())
data.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper := contracts.NewData()
dataWrapper.BaseType = data.BaseType()
dataWrapper.BaseData = data
envelopeCount.Data = dataWrapper
envelope := contracts.NewEnvelope()
envelope.Tags = make(map[string]string)
envelope.Time = toTime(dp.Timestamp()).Format(time.RFC3339Nano)
data = contracts.NewMetricData()
data.Properties = map[string]string{}
envelope.Name = data.EnvelopeName("")
dp.LabelsMap().Range(func(k string, v string) bool {
data.Properties[k] = v
return true
})
dataPoint = contracts.NewDataPoint()
dataPoint.Name = metric.Name()
dataPoint.Value = dp.Sum()
data.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper = contracts.NewData()
dataWrapper.BaseType = data.BaseType()
dataWrapper.BaseData = data
envelope.Data = dataWrapper
envelopes = append(envelopes, envelope, envelopeCount)
bucketName := metric.Name() + "_bucket"
for j := 0; j < len(dp.BucketCounts()); j++ {
envelopeBucket := contracts.NewEnvelope()
envelopeBucket.Tags = make(map[string]string)
envelopeBucket.Time = envelope.Time
md := contracts.NewMetricData()
md.Properties = map[string]string{}
envelopeBucket.Name = md.EnvelopeName("")
if j < len(dp.ExplicitBounds()) {
md.Properties["upper_bound"] = strconv.FormatFloat(dp.ExplicitBounds()[j], 'f', -1, 64)
}
dataPoint := contracts.NewDataPoint()
dataPoint.Name = bucketName
dataPoint.Value = float64(dp.BucketCounts()[j])
md.Metrics = []*contracts.DataPoint { dataPoint }
dataWrapper = contracts.NewData()
dataWrapper.BaseType = md.BaseType()
dataWrapper.BaseData = md
envelopeBucket.Data = dataWrapper
envelopes = append(envelopes, envelopeBucket)
}
}
default:
// Unknown type, so just increment dropped by 1 as a best effort.
dropped++
}
resourceAttributes := resource.Attributes()
for i := 0; i < len(envelopes); i++ {
envelope := envelopes[i]
data := envelope.Data.(*contracts.Data).BaseData.(contracts.MetricData)
// Copy all the resource labels into the base data properties.
resource.Attributes().Range(func(k string, v pdata.AttributeValue) bool {
data.Properties[k] = tracetranslator.AttributeValueToString(v)
return true
})
// Copy the instrumentation properties
if instrumentationLibrary.Name() != "" {
data.Properties[instrumentationLibraryName] = instrumentationLibrary.Name()
}
if instrumentationLibrary.Version() != "" {
data.Properties[instrumentationLibraryVersion] = instrumentationLibrary.Version()
}
// Extract key service.* labels from the Resource labels and construct CloudRole and CloudRoleInstance envelope tags
// https://github.com/open-telemetry/opentelemetry-specification/tree/master/specification/resource/semantic_conventions
if serviceName, serviceNameExists := resourceAttributes.Get(conventions.AttributeServiceName); serviceNameExists {
cloudRole := serviceName.StringVal()
if serviceNamespace, serviceNamespaceExists := resourceAttributes.Get(conventions.AttributeServiceNamespace); serviceNamespaceExists {
cloudRole = serviceNamespace.StringVal() + "." + cloudRole
}
envelope.Tags[contracts.CloudRole] = cloudRole
}
if serviceInstance, exists := resourceAttributes.Get(conventions.AttributeServiceInstance); exists {
envelope.Tags[contracts.CloudRoleInstance] = serviceInstance.StringVal()
}
// Sanitize the base data, the envelope and envelope tags
sanitize(func() []string { return data.Sanitize() }, logger)
sanitize(func() []string { return envelope.Data.(*contracts.Data).Sanitize() }, logger)
sanitize(func() []string { return envelope.Sanitize() }, logger)
sanitize(func() []string { return contracts.SanitizeTags(envelope.Tags) }, logger)
}
return envelopes, nil
} | exporter/azuremonitorexporter/metric_to_envelopes.go | 0.641198 | 0.462898 | metric_to_envelopes.go | starcoder |
package charvol
import (
"fmt"
"github.com/zellyn/adventofcode/charmap"
"github.com/zellyn/adventofcode/geom"
)
// V is a map of geom.Vec3 to rune.
type V map[geom.Vec3]rune
// V4 is a map of geom.Vec4 to rune.
type V4 map[geom.Vec4]rune
// MinMax returns a geom.Vec3 for minimum coordinates, and one for maximum.
func (v V) MinMax() (geom.Vec3, geom.Vec3) {
return MinMax(v)
}
// MinMax4 returns a geom.Vec4 for minimum coordinates, and one for maximum.
func (v V4) MinMax() (geom.Vec4, geom.Vec4) {
return MinMax4(v)
}
// AsString stringifies a charvol.
// It uses the `unknown` param for positions within the min/max range
// of X/Y/Z, but not in the map. Rows are terminated by newlines.
func (v V) AsString(unknown rune) string {
return String(v, unknown)
}
// Count returns a count of the number of cells in v that hold the matching rune.
func (v V4) Count(which rune) int {
count := 0
for _, ch := range v {
if ch == which {
count++
}
}
return count
}
// Count returns a count of the number of cells in v that hold the matching rune.
func (v V) Count(which rune) int {
count := 0
for _, ch := range v {
if ch == which {
count++
}
}
return count
}
// MinMax returns a geom.Vec3 for minimum coordinates, and one for maximum.
func MinMax(vol map[geom.Vec3]rune) (geom.Vec3, geom.Vec3) {
minx, miny, minz, maxx, maxy, maxz := 0, 0, 0, 0, 0, 0
for k := range vol {
if k.X < minx {
minx = k.X
}
if k.X > maxx {
maxx = k.X
}
if k.Y < miny {
miny = k.Y
}
if k.Y > maxy {
maxy = k.Y
}
if k.Z < minz {
minz = k.Z
}
if k.Z > maxz {
maxz = k.Z
}
}
return geom.Vec3{X: minx, Y: miny, Z: minz}, geom.Vec3{X: maxx, Y: maxy, Z: maxz}
}
// MinMax4 returns a geom.Vec4 for minimum coordinates, and one for maximum.
func MinMax4(vol map[geom.Vec4]rune) (geom.Vec4, geom.Vec4) {
var minw, minx, miny, minz, maxw, maxx, maxy, maxz int
for k := range vol {
if k.W < minw {
minw = k.W
}
if k.W > maxw {
maxw = k.W
}
if k.X < minx {
minx = k.X
}
if k.X > maxx {
maxx = k.X
}
if k.Y < miny {
miny = k.Y
}
if k.Y > maxy {
maxy = k.Y
}
if k.Z < minz {
minz = k.Z
}
if k.Z > maxz {
maxz = k.Z
}
}
return geom.Vec4{W: minw, X: minx, Y: miny, Z: minz}, geom.Vec4{W: maxw, X: maxx, Y: maxy, Z: maxz}
}
// Draw takes a map of `geom.Vec3` to `rune`, and prints it out.
// It uses the `unknown` param for positions within the min/max range
// of X/Y/Z, but not in the map.
func Draw(vol map[geom.Vec3]rune, unknown rune) {
fmt.Print(String(vol, unknown))
}
// String takes a map of `geom.Vec3` to `rune`, and stringifies it out.
// It uses the `unknown` param for positions within the min/max range
// of X/Y/Z, but not in the map. Rows are terminated by newlines.
func String(vol map[geom.Vec3]rune, unknown rune) string {
result := ""
min, max := MinMax(vol)
for z := min.Z; z <= max.Z; z++ {
for y := min.Y; y <= max.Y; y++ {
for x := min.X; x <= max.X; x++ {
c, ok := vol[geom.Vec3{X: x, Y: y, Z: z}]
if !ok {
c = unknown
}
result += string(c)
}
result += "\n"
}
result += "\n"
}
return result
}
// New creates a new charvol, filled with the given fill rune.
func New(width, height int, depth int, fill rune) V {
v := map[geom.Vec3]rune{}
for z := 0; z < depth; z++ {
for x := 0; x < width; x++ {
for y := 0; y < height; y++ {
v[geom.Vec3{X: x, Y: y, Z: z}] = fill
}
}
}
return v
}
// Copy creates a copy of a charvol.
func (v V) Copy() map[geom.Vec3]rune {
cp := make(map[geom.Vec3]rune, len(v))
for k, c := range v {
cp[k] = c
}
return cp
}
// Equal tests two charvols for equality.
func (v V) Equal(w V) bool {
if len(v) != len(w) {
return false
}
for k, c := range v {
v2, ok := w[k]
if !ok {
return false
}
if c != v2 {
return false
}
}
return true
}
// FromCharmap promotes a charmap.M to a charvol.V, at the given z-index.
func FromCharmap(m charmap.M, z int) V {
v := make(map[geom.Vec3]rune, len(m))
for k, c := range m {
v[geom.Vec3{X: k.X, Y: k.Y, Z: z}] = c
}
return v
}
// FromCharmap4 promotes a charmap.M to a charvol.V4, at the given w- and z-index.
func FromCharmap4(m charmap.M, w int, z int) V4 {
v := make(map[geom.Vec4]rune, len(m))
for k, c := range m {
v[geom.Vec4{W: w, X: k.X, Y: k.Y, Z: z}] = c
}
return v
} | charvol/charvol.go | 0.810816 | 0.425665 | charvol.go | starcoder |
package lit
import (
"fmt"
"reflect"
"xelf.org/xelf/cor"
"xelf.org/xelf/knd"
"xelf.org/xelf/typ"
)
// Reg is a registry context for type references, reflected types and proxies. Many functions and
// container literals have an optional registry to aid in value conversion and construction.
type Reg struct {
refs map[string]refInfo
proxy map[reflect.Type]Prx
param map[reflect.Type]typInfo
}
type refInfo struct {
Type typ.Type
Mut Mut
}
type typInfo struct {
typ.Type
*params
}
type params struct {
ps []typ.Param
idx [][]int
}
// SetRef registers type and optionally a mutable implementation for ref.
func (reg *Reg) SetRef(ref string, t typ.Type, mut Mut) {
if reg.refs == nil {
reg.refs = make(map[string]refInfo)
}
reg.refs[ref] = refInfo{t, mut}
}
// RefType returns a type for ref or an error.
func (reg *Reg) RefType(ref string) (typ.Type, error) {
nfo, ok := reg.refs[cor.Keyed(ref)]
if ok && nfo.Type != typ.Void {
return nfo.Type, nil
}
return typ.Void, fmt.Errorf("no type found named %s", ref)
}
// Zero returns a zero mutable value for t or an error.
func (reg *Reg) Zero(t typ.Type) (m Mut, err error) {
if t.Kind&knd.List != 0 {
n := typ.Name(typ.ContEl(t))
if n != "" {
nfo := reg.refs[n]
if nfo.Mut != nil {
if s, ok := nfo.Mut.(interface{ Slice() Mut }); ok {
return s.Slice(), nil
}
}
}
} else {
n := typ.Name(t)
if n != "" {
nfo := reg.refs[n]
if nfo.Mut != nil {
return nfo.Mut.New()
}
}
}
k := t.Kind & knd.All
if k.Count() != 1 {
switch {
case k&knd.Num != 0 && k&^knd.Num == 0:
m = new(Int)
case k&knd.Str != 0 && k&^knd.Char == 0:
m = new(Str)
case k&knd.List != 0 && k&^knd.Idxr == 0:
m = &List{Reg: reg, El: typ.ContEl(t)}
case k&knd.Dict != 0 && k&^knd.Keyr == 0:
m = &Dict{Reg: reg, El: typ.ContEl(t)}
default:
return newAnyPrx(reg, t), nil
}
} else {
switch k {
case knd.Typ:
t = typ.El(t)
m = &t
case knd.Bool:
m = new(Bool)
case knd.Int:
m = new(Int)
case knd.Real:
m = new(Real)
case knd.Str:
m = new(Str)
case knd.Raw:
m = new(Raw)
case knd.UUID:
m = new(UUID)
case knd.Time:
m = new(Time)
case knd.Span:
m = new(Span)
case knd.List:
m = &List{Reg: reg, El: typ.ContEl(t)}
case knd.Dict:
m = &Dict{Reg: reg, El: typ.ContEl(t)}
case knd.Rec, knd.Obj:
m, err = NewStrc(reg, t)
if err != nil {
return nil, err
}
default:
return newAnyPrx(reg, t), nil
}
}
if t.Kind&knd.None != 0 {
m = &OptMut{m, nil, true}
}
return m, nil
}
// AddFrom updates the registry with entries from o.
func (reg *Reg) AddFrom(o *Reg) {
for ref, r := range o.refs {
if ri, ok := reg.refs[ref]; !ok || ri.Type == typ.Void || ri.Mut == nil {
reg.SetRef(ref, r.Type, reg.copyMut(r.Mut))
}
}
for rt, p := range o.proxy {
if _, ok := reg.proxy[rt]; !ok && p != nil {
reg.setProxy(rt, reg.copyMut(p).(Prx))
}
}
for rt, p := range o.param {
if _, ok := reg.param[rt]; !ok {
reg.setParam(rt, p)
}
}
}
func (reg *Reg) setParam(rt reflect.Type, nfo typInfo) {
if reg.param == nil {
reg.param = make(map[reflect.Type]typInfo)
}
reg.param[rt] = nfo
}
func (reg *Reg) setProxy(rt reflect.Type, prx Prx) {
if reg.proxy == nil {
reg.proxy = make(map[reflect.Type]Prx)
}
reg.proxy[rt] = prx
}
func (reg *Reg) copyMut(p Mut) Mut {
if p != nil {
p, _ = p.New()
if wr, ok := p.(interface{ WithReg(*Reg) }); ok {
wr.WithReg(reg)
}
}
return p
} | lit/reg.go | 0.607663 | 0.425307 | reg.go | starcoder |
package dendrolog
import (
"fmt"
"regexp"
"strings"
"testing"
)
type trieTree struct {
value string
left *trieTree
middle *trieTree
right *trieTree
}
func (trie *trieTree) setChildren(left string, middle string, right string) (leftTree *trieTree, middleTree *trieTree, rightTree *trieTree) {
if left != "" {
leftTree = &trieTree{
value: left,
}
trie.left = leftTree
}
if middle != "" {
middleTree = &trieTree{
value: middle,
}
trie.middle = middleTree
}
if right != "" {
rightTree = &trieTree{
value: right,
}
trie.right = rightTree
}
return
}
func trieCollector(node interface{}, child func(childPointer interface{})) {
trie := (node).(trieTree)
if trie.left != nil {
child(*trie.left)
} else {
child(nil)
}
if trie.middle != nil {
child(*trie.middle)
} else {
child(nil)
}
if trie.right != nil {
child(*trie.right)
} else {
child(nil)
}
}
func trieCollectorNoNil(node interface{}, child func(childPointer interface{})) {
trie := (node).(trieTree)
if trie.left != nil {
child(*trie.left)
}
if trie.middle != nil {
child(*trie.middle)
}
if trie.right != nil {
child(*trie.right)
}
}
func trieRenderer(node interface{}) string {
if node == nil {
return "nil"
}
trie := (node).(trieTree)
return trie.value
}
func testMatch(t *testing.T, expected string, stringified string) {
re := regexp.MustCompile(`&([^;&]*);`)
matches := re.FindAllStringSubmatch(expected, -1)
expectedRows := make([]string, len(matches))
for i, row := range matches {
expectedRows[i] = strings.TrimRight(row[1], " ")
//fmt.Printf("'%s'\n", expectedRows[i])
}
for i, row := range strings.Split(strings.TrimRight(stringified, "\n "), "\n") {
trimmedRow := strings.TrimRight(row, " ")
if trimmedRow != expectedRows[i] {
fmt.Print(stringified)
t.Fatalf("Expected row %d to be equal.\n Expected row: '%s'\n Got row: '%s'\n Expected Tree:\n%s\n Got Tree:\n%s", i, expectedRows[i], trimmedRow, strings.Join(expectedRows, "\n"), stringified)
}
}
} | testUtils.go | 0.594198 | 0.431225 | testUtils.go | starcoder |
package task
const ProtocolV1 = "1"
const ProtocolV1_1 = "1.1"
const ProtocolV2 = "2"
const HydroStartBlockNumberV1 = 6885289
const HydroExchangeAddressV1 = "0x2cB4B49C0d6E9db2164d94Ce48853BF77C4D883E"
const HydroMatchTopicV1 = "0xdcc6682c66bde605a9e21caeb0cb8f1f6fbd5bbfb2250c3b8d1f43bb9b06df3f"
const HydroExchangeABIV1 = `[
{
"anonymous": false,
"inputs": [
{ "indexed": false, "name": "baseToken", "type": "address" },
{ "indexed": false, "name": "quoteToken", "type": "address" },
{ "indexed": false, "name": "relayer", "type": "address" },
{ "indexed": false, "name": "maker", "type": "address" },
{ "indexed": false, "name": "taker", "type": "address" },
{ "indexed": false, "name": "baseTokenAmount", "type": "uint256" },
{ "indexed": false, "name": "quoteTokenAmount", "type": "uint256" },
{ "indexed": false, "name": "makerFee", "type": "uint256" },
{ "indexed": false, "name": "takerFee", "type": "uint256" },
{ "indexed": false, "name": "makerGasFee", "type": "uint256" },
{ "indexed": false, "name": "makerRebate", "type": "uint256" },
{ "indexed": false, "name": "takerGasFee", "type": "uint256" }
],
"name": "Match",
"type": "event"
}
]`
const HydroStartBlockNumberV1_1 = 7454912
const HydroExchangeAddressV1_1 = "0xE2a0BFe759e2A4444442Da5064ec549616FFF101"
const HydroMatchTopicV1_1 = "0xd3ac06c3b34b93617ba2070b8b7a925029035b3f30fecd2d0fa8e5845724f310"
const HydroExchangeABIV1_1 = `[
{
"anonymous": false,
"inputs": [
{
"components": [
{ "name": "baseToken", "type": "address" },
{ "name": "quoteToken", "type": "address" },
{ "name": "relayer", "type": "address" }
],
"indexed": false,
"name": "addressSet",
"type": "tuple"
},
{
"components": [
{ "name": "maker", "type": "address" },
{ "name": "taker", "type": "address" },
{ "name": "buyer", "type": "address" },
{ "name": "makerFee", "type": "uint256" },
{ "name": "makerRebate", "type": "uint256" },
{ "name": "takerFee", "type": "uint256" },
{ "name": "makerGasFee", "type": "uint256" },
{ "name": "takerGasFee", "type": "uint256" },
{ "name": "baseTokenFilledAmount", "type": "uint256" },
{ "name": "quoteTokenFilledAmount", "type": "uint256" }
],
"indexed": false,
"name": "result",
"type": "tuple"
}
],
"name": "Match",
"type": "event"
}
]`
const HydroStartBlockNumberV2 = 8399662
const HydroExchangeAddressV2 = "0x241e82C79452F51fbfc89Fac6d912e021dB1a3B7"
const HydroMatchTopicV2 = "0x6bf96fcc2cec9e08b082506ebbc10114578a497ff1ea436628ba8996b750677c"
// const HydroExchangeABIV2 = `` | task/contract.go | 0.516839 | 0.417271 | contract.go | starcoder |
package pgkebab
import (
"encoding/json"
"fmt"
"strconv"
"time"
)
// Row holds a single record
type Row struct {
tuple map[string]interface{}
}
// Ready returns true if the tuple contains at least one filled column
func (r Row) Ready() bool {
return len(r.tuple) > 0
}
// Columns returns an string array filled with column names
func (r Row) Columns() []string {
if len(r.tuple) < 1 {
return []string{}
}
var a []string
for x := range r.tuple {
a = append(a, x)
}
return a
}
// has returns true if the tuple is initialized and contains the given key
func (r Row) has(key string) bool {
if len(r.tuple) > 0 {
if _, ok := r.tuple[key]; ok {
return true
}
}
return false
}
// String returns the specified field as string
func (r Row) String(key string) string {
if !r.has(key) {
return ""
}
switch x := r.tuple[key].(type) {
case string:
return x
case int64:
return strconv.Itoa(int(x))
case float64:
return strconv.FormatFloat(x, 'f', -1, 64)
case time.Time:
return x.Format(time.RFC3339)
case bool:
return fmt.Sprintf("%t", x)
case []byte:
return string(x)
}
return ""
}
// Int64 returns the specified field as Int64
func (r Row) Int64(key string) int64 {
if !r.has(key) {
return 0
}
switch x := r.tuple[key].(type) {
case int64:
return x
case string:
{
i, _ := strconv.ParseInt(x, 10, 64)
return i
}
case float64:
return int64(x)
case []byte:
{
if len(x) == 1 {
return int64(x[0])
}
return 0
}
}
return 0
}
// Int returns the specified field as Int
func (r Row) Int(key string) int {
if !r.has(key) {
return 0
}
switch x := r.tuple[key].(type) {
case int64:
return int(x)
case string:
{
i, _ := strconv.ParseInt(x, 10, 64)
return int(i)
}
case float64:
return int(x)
case []byte:
{
if len(x) == 1 {
return int(x[0])
}
return 0
}
}
return 0
}
// Float64 returns the specified field as float64
// Check for conversion details: https://stackoverflow.com/questions/31946344/why-does-go-treat-a-postgresql-numeric-decimal-columns-as-uint8
func (r Row) Float64(key string) float64 {
if !r.has(key) {
return 0
}
switch x := r.tuple[key].(type) {
case float64:
return x
case string:
{
f, _ := strconv.ParseFloat(x, 64)
return f
}
case []uint8:
{
f, _ := strconv.ParseFloat(string(x), 64)
return f
}
case int64:
return float64(x)
}
return 0
}
// Bool returns the specified field as boolean
func (r Row) Bool(key string) bool {
if !r.has(key) {
return false
}
switch x := r.tuple[key].(type) {
case bool:
return x
case string:
{
b, _ := strconv.ParseBool(x)
return b
}
}
return false
}
// Time returns the specified field as time.Time
func (r Row) Time(key string) time.Time {
if !r.has(key) {
return time.Time{}
}
switch x := r.tuple[key].(type) {
case time.Time:
return x
case string:
{
b, _ := time.Parse(time.RFC3339, x)
return b
}
}
return time.Time{}
}
// Timef returns the specified field as time.Time
// it tries to interpret the date/time value according the given format
func (r Row) Timef(key, format string) time.Time {
if !r.has(key) {
return time.Time{}
}
switch x := r.tuple[key].(type) {
case time.Time:
return x
case string:
{
b, _ := time.Parse(format, x)
return b
}
}
return time.Time{}
}
// JSON returns the column content serialized as JSON string
func (r Row) JSON() (string, error) {
if len(r.tuple) < 1 {
return "", fmt.Errorf("empty row")
}
bs, err := json.Marshal(r.tuple)
return string(bs), err
}
// JSONMap returns the column content desserialized and a bool indicating if the map has some content
func (r Row) JSONMap(key string) (map[string]interface{}, bool) {
if !r.has(key) {
return nil, false
}
m := make(map[string]interface{})
switch x := r.tuple[key].(type) {
case []uint8:
if err := json.Unmarshal(x, &m); err != nil {
return m, false
}
case string:
if err := json.Unmarshal([]byte(x), &m); err != nil {
return m, false
}
}
return m, len(m) > 0
}
// JSONStruct returns the column content desserialized to given target struct
func (r Row) JSONStruct(key string, target interface{}) error {
if !r.has(key) {
return fmt.Errorf("unknow column %s", key)
}
switch x := r.tuple[key].(type) {
case []uint8:
if err := json.Unmarshal(x, &target); err != nil {
return err
}
case string:
if err := json.Unmarshal([]byte(x), &target); err != nil {
return err
}
}
return nil
}
// Map returns the current tuple as map[string]interface{}
func (r Row) Map() map[string]interface{} {
m := make(map[string]interface{})
for k, v := range r.tuple {
m[k] = v
}
return m
} | row.go | 0.796925 | 0.468487 | row.go | starcoder |
package soyutil;
import (
"bytes"
"math"
"math/rand"
"strconv"
"strings"
)
// Lener is implemented by any value that can report its length,
// such as strings, slices, and maps wrapped in container types.
type Lener interface {
	// Len returns the number of elements (or bytes) in the value.
	Len() int
}
func Conditional(cond bool, iftrue SoyData, iffalse SoyData) SoyData {
if cond {
return iftrue
}
return iffalse
}
// InsertWordBreaks inserts a "<wbr>" tag whenever maxCharsBetweenWordBreaks
// consecutive non-space characters have been emitted, while leaving HTML
// markup intact: characters inside a tag are not counted, and a complete
// entity (&...;) counts as a single rendered character.
func InsertWordBreaks(value string, maxCharsBetweenWordBreaks int) string {
	out := bytes.NewBuffer(make([]byte, 0, (len(value) + (len(value) / maxCharsBetweenWordBreaks) + 2)))
	inTag := false         // currently between '<' and '>'
	maybeEntity := false   // saw '&' and have not ruled out an entity yet
	runSinceBreak := 0     // rendered characters since the last break point
	for _, r := range value {
		// Force a break before the next visible character once the run is
		// long enough; a literal space already acts as a natural break.
		if runSinceBreak >= maxCharsBetweenWordBreaks && r != ' ' {
			out.WriteString("<wbr>")
			runSinceBreak = 0
		}
		switch {
		case inTag:
			if r == '>' {
				inTag = false
			}
		case maybeEntity:
			switch r {
			case ';':
				// The entity just ended; it renders as one character.
				maybeEntity = false
				runSinceBreak++
			case '<':
				// Not an entity after all; we are now inside a tag.
				maybeEntity = false
				inTag = true
			case ' ':
				// Not an entity; the space resets the run length.
				maybeEntity = false
				runSinceBreak = 0
			}
		default:
			switch r {
			case '<':
				inTag = true
			case '&':
				maybeEntity = true
			case ' ':
				runSinceBreak = 0
			default:
				runSinceBreak++
			}
		}
		// The original character is always emitted, whatever the state.
		out.WriteRune(r)
	}
	return out.String()
}
// ChangeNewlineToBr converts every \r\n, \r, and \n in str to a "<br/>"
// tag and returns the result. The original string is returned unchanged
// when it contains no newline characters.
func ChangeNewlineToBr(str string) string {
	// This quick test helps in the case when there are no chars to replace, in
	// the worst case this makes barely a difference to the time taken.
	if !_CHANGE_NEWLINE_TO_BR_RE.MatchString(str) {
		return str
	}
	return _CHANGE_NEWLINE_TO_BR2_RE.ReplaceAllString(str, "<br/>")
}
// Negative returns -a as a Float64Data; a nil operand is treated as
// NilDataInstance.
func Negative(a SoyData) Float64Data {
	if a == nil {
		a = NilDataInstance
	}
	return NewFloat64Data(-a.NumberValue())
}
// Plus returns a + b; nil operands are treated as NilDataInstance.
func Plus(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	return NewFloat64Data(a.NumberValue() + b.NumberValue())
}
// Divide returns a / b; nil operands are treated as NilDataInstance.
func Divide(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	return NewFloat64Data(a.NumberValue() / b.NumberValue())
}
// Minus returns a - b; nil operands are treated as NilDataInstance.
func Minus(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	return NewFloat64Data(a.NumberValue() - b.NumberValue())
}
// Times returns a * b; nil operands are treated as NilDataInstance.
func Times(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	return NewFloat64Data(a.NumberValue() * b.NumberValue())
}
// LessThan reports whether a < b numerically; nil operands are treated
// as NilDataInstance.
func LessThan(a, b SoyData) BooleanData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	return NewBooleanData(a.NumberValue() < b.NumberValue())
}
// GreaterThan reports whether a > b numerically; nil operands are
// treated as NilDataInstance.
// NOTE(review): declared to return SoyData while LessThan returns
// BooleanData — kept as-is for interface compatibility.
func GreaterThan(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	return NewBooleanData(a.NumberValue() > b.NumberValue())
}
// LessThanOrEqual reports whether a <= b numerically; nil operands are
// treated as NilDataInstance.
func LessThanOrEqual(a, b SoyData) BooleanData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	return NewBooleanData(a.NumberValue() <= b.NumberValue())
}
// GreaterThanOrEqual reports whether a >= b numerically; nil operands
// are treated as NilDataInstance.
// NOTE(review): returns SoyData, mirroring GreaterThan — kept as-is.
func GreaterThanOrEqual(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	return NewBooleanData(a.NumberValue() >= b.NumberValue())
}
// round rounds a to the nearest integer, with halfway values rounded
// away from zero (round(2.5) == 3, round(-2.5) == -3).
// math.Round implements exactly these semantics, making the previous
// hand-rolled truncate-and-adjust sign handling unnecessary.
func round(a float64) float64 {
	return math.Round(a)
}
// Round rounds a to the nearest integer (halfway values away from
// zero); a nil operand yields the default float value.
func Round(a SoyData) SoyData {
	if a == nil {
		return NewFloat64Data(defaultFloat64Value())
	}
	return NewFloat64Data(round(a.NumberValue()))
}
// Round2 rounds a to b decimal places (halfway values away from zero);
// nil operands are treated as NilDataInstance.
func Round2(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	scale := math.Pow10(b.IntegerValue())
	return NewFloat64Data(round(a.NumberValue()*scale) / scale)
}
// Min returns whichever operand has the smaller numeric value (b on a
// tie); nil operands are treated as NilDataInstance.
func Min(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	if a.NumberValue() < b.NumberValue() {
		return a
	}
	return b
}
// Max returns whichever operand has the larger numeric value (b on a
// tie); nil operands are treated as NilDataInstance.
func Max(a, b SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if b == nil {
		b = NilDataInstance
	}
	if a.NumberValue() > b.NumberValue() {
		return a
	}
	return b
}
// Floor returns the largest integer value <= a, wrapped as Float64Data.
func Floor(a float64) SoyData {
	return NewFloat64Data(math.Floor(a))
}
// Ceiling returns the smallest integer value >= a, wrapped as Float64Data.
func Ceiling(a float64) SoyData {
	return NewFloat64Data(math.Ceil(a))
}
// Len returns a's length when it implements Lener, otherwise 0.
func Len(a SoyData) SoyData {
	if a == nil {
		a = NilDataInstance
	}
	if l, ok := a.(Lener); ok {
		return NewIntegerData(l.Len())
	}
	return NewIntegerData(0)
}
// HasData always reports true.
func HasData() bool {
	return true
}
// RandomInt returns a pseudo-random integer in [0, a) drawn from the
// package-level math/rand source.
func RandomInt(a int) IntegerData {
	return IntegerData(rand.Intn(a))
}
// GetData resolves a dotted path (e.g. "foo.0.bar") against data.
// Each path segment indexes either a SoyListData (numeric segment) or a
// SoyMapData (string segment); resolution recurses on the remainder of
// the path. NilDataInstance is returned when data is nil, a segment
// cannot be resolved, or data is neither a list nor a map.
func GetData(data SoyData, key string) SoyData {
	if data == nil {
		return NilDataInstance
	}
	keypart := key
	keyleft := ""
	if dotIndex := strings.Index(key, "."); dotIndex >= 0 {
		keypart = key[:dotIndex]
		keyleft = key[dotIndex+1:]
	}
	switch d := data.(type) {
	case SoyListData:
		// The current segment (keypart) is the list index; the previous
		// code parsed the *remainder* (keyleft) and also inverted the
		// error check, so list indexing never worked.
		lindex, err := strconv.Atoi(keypart)
		if err != nil {
			return NilDataInstance
		}
		v := d.At(lindex)
		if len(keyleft) == 0 {
			return v
		}
		return GetData(v, keyleft)
	case SoyMapData:
		v, found := d[keypart]
		if !found {
			return NilDataInstance
		}
		if len(keyleft) == 0 {
			return v
		}
		return GetData(v, keyleft)
	default:
		return NilDataInstance
	}
}
// AugmentData builds the data object passed when one template calls
// another with both the original data and additional params. Keys that
// appear in both maps take their value from the additional params (b),
// hiding the value from the original data (a).
// Neither input map is modified; a fresh map is returned. The previous
// implementation copied b's entries into a, mutating the caller's
// original data despite the documented promise not to.
func AugmentData(a, b SoyMapData) SoyMapData {
	merged := NewSoyMapData()
	// Ranging over a nil map is a no-op, so nil inputs need no guard.
	for k, v := range a {
		merged[k] = v
	}
	for k, v := range b {
		merged[k] = v
	}
	return merged
}
// BoolToInt converts a boolean to an integer: 1 for true, 0 for false.
func BoolToInt(value bool) int {
	if !value {
		return 0
	}
	return 1
}
package expect
import (
"testing"
)
// Fault is an expectation that always results in an error
type Fault struct {
*testing.T
err error
}
// Faulty returns a new Fault
func Faulty(t *testing.T, err error) Expectation {
return &Fault{t, err}
}
// To returns the current expectation
func (f *Fault) To() Expectation {
return f
}
// Be returns the current expectation
func (f *Fault) Be() Expectation {
return f
}
// Is returns the current expectation
func (f *Fault) Is() Expectation {
return f
}
// Should returns the current expectation
func (f *Fault) Should() Expectation {
return f
}
// Not returns the current expectation
func (f *Fault) Not() Expectation {
return f
}
// IsNot returns the current expectation
func (f *Fault) IsNot() Expectation {
return f
}
// DoesNot returns the current expectation
func (f *Fault) DoesNot() Expectation {
return f
}
// At returns the current expectation
func (f *Fault) At(index interface{}) Expectation {
return f
}
// Nil always results in an error
func (f *Fault) Nil() {
f.Error(f.err)
}
// True always results in an error
func (f *Fault) True() {
f.Error(f.err)
}
// False always results in an error
func (f *Fault) False() {
f.Error(f.err)
}
// Empty always results in an error
func (f *Fault) Empty() {
f.Error(f.err)
}
// HasLength always results in an error
func (f *Fault) HasLength(expected int) {
f.Error(f.err)
}
// HaveLength always results in an error
func (f *Fault) HaveLength(expected int) {
f.Error(f.err)
}
// Equals always results in an error
func (f *Fault) Equals(expected interface{}) {
f.Error(f.err)
}
// Eq always results in an error
func (f *Fault) Eq(expected interface{}) {
f.Error(f.err)
}
// Equal always results in an error
func (f *Fault) Equal(expected interface{}) {
f.Error(f.err)
}
// EqualTo always results in an error
func (f *Fault) EqualTo(expected interface{}) {
f.Error(f.err)
}
// Matches always results in an error
func (f *Fault) Matches(pattern string) {
f.Error(f.err)
}
// Match always results in an error
func (f *Fault) Match(pattern string) {
f.Error(f.err)
} | expect/fault.go | 0.820757 | 0.462048 | fault.go | starcoder |
package codecs
// H264Payloader payloads H264 packets into RTP-sized byte slices.
type H264Payloader struct{}
const (
	// fuaHeaderSize is the FU indicator + FU header octet count (RFC 6184).
	fuaHeaderSize = 2
)
// emitNalus scans nals for Annex B start codes (00 00 01 / 00 00 00 01)
// and invokes emit once per NAL unit found between them. When no start
// code is present the whole input is emitted as a single unit.
func emitNalus(nals []byte, emit func([]byte)) {
	// nextInd returns the offset and length of the next start code at or
	// after start, or (-1, -1) when none remains.
	nextInd := func(nalu []byte, start int) (indStart int, indLen int) {
		zeroCount := 0
		for i, b := range nalu[start:] {
			if b == 0 {
				zeroCount++
				continue
			} else if b == 1 {
				if zeroCount >= 2 {
					return start + i - zeroCount, zeroCount + 1
				}
			}
			zeroCount = 0
		}
		return -1, -1
	}
	nextIndStart, nextIndLen := nextInd(nals, 0)
	if nextIndStart == -1 {
		emit(nals)
	} else {
		for nextIndStart != -1 {
			prevStart := nextIndStart + nextIndLen
			nextIndStart, nextIndLen = nextInd(nals, prevStart)
			if nextIndStart != -1 {
				emit(nals[prevStart:nextIndStart])
			} else {
				// Emit until end of stream, no end indicator found.
				emit(nals[prevStart:])
			}
		}
	}
}
// Payload fragments a H264 packet across one or more byte arrays.
// NAL units that fit within mtu are emitted as single-NALU packets;
// larger units are split into FU-A fragments (RFC 6184, section 5.8).
// Access-unit delimiters (type 9) and filler data (type 12) are dropped.
func (p *H264Payloader) Payload(mtu int, payload []byte) [][]byte {
	var payloads [][]byte
	emitNalus(payload, func(nalu []byte) {
		// Guard against empty units (empty payload, or a trailing start
		// code); indexing nalu[0] below would otherwise panic.
		if len(nalu) == 0 {
			return
		}
		naluType := nalu[0] & 0x1F
		naluRefIdc := nalu[0] & 0x60
		if naluType == 9 || naluType == 12 {
			return
		}
		// Single NALU packetization: the unit fits as-is.
		if len(nalu) <= mtu {
			out := make([]byte, len(nalu))
			copy(out, nalu)
			payloads = append(payloads, out)
			return
		}
		// FU-A fragmentation.
		maxFragmentSize := mtu - fuaHeaderSize
		// The FU payload consists of fragments of the payload of the fragmented
		// NAL unit so that if the fragmentation unit payloads of consecutive
		// FUs are sequentially concatenated, the payload of the fragmented NAL
		// unit can be reconstructed. The NAL unit type octet of the fragmented
		// NAL unit is not included as such in the fragmentation unit payload,
		// but rather the information of the NAL unit type octet of the
		// fragmented NAL unit is conveyed in the F and NRI fields of the FU
		// indicator octet of the fragmentation unit and in the type field of
		// the FU header.
		naluData := nalu
		// According to the RFC, the first octet is skipped due to redundant information.
		naluDataIndex := 1
		naluDataLength := len(nalu) - naluDataIndex
		naluDataRemaining := naluDataLength
		for naluDataRemaining > 0 {
			currentFragmentSize := min(maxFragmentSize, naluDataRemaining)
			out := make([]byte, fuaHeaderSize+currentFragmentSize)
			// FU indicator: F|NRI from the original NALU, type 28 (FU-A).
			out[0] = 28
			out[0] |= naluRefIdc
			// FU header: S|E|R|Type, carrying the original NALU type.
			out[1] = naluType
			if naluDataRemaining == naluDataLength {
				// Set start bit on the first fragment.
				out[1] |= 1 << 7
			} else if naluDataRemaining-currentFragmentSize == 0 {
				// Set end bit on the last fragment.
				out[1] |= 1 << 6
			}
			copy(out[fuaHeaderSize:], naluData[naluDataIndex:naluDataIndex+currentFragmentSize])
			payloads = append(payloads, out)
			naluDataRemaining -= currentFragmentSize
			naluDataIndex += currentFragmentSize
		}
	})
	return payloads
}
package cubicSpline
import (
"github.com/helloworldpark/gonaturalspline/knot"
"gonum.org/v1/gonum/mat"
)
// CubicSpline is a univariate basis function f(x).
type CubicSpline func(float64) float64
// NaturalCubicSplines is a natural cubic smoothing spline.
// Reference: p.141-156, Hastie et al., The Elements of Statistical Learning.
type NaturalCubicSplines struct {
	splines []CubicSpline // basis functions built from the knots
	knots knot.Knot // knot positions
	coefs *mat.VecDense // fitted basis coefficients
	lambda float64 // smoothing penalty weight, set by Solve
	solverMatrix *mat.Dense // (N^T N + lambda S)^-1 N^T, set by Solve
}
// NewNaturalCubicSplines builds the natural cubic basis for knots and
// wraps the given coefficients.
// NOTE(review): mat.NewVecDense panics unless coefs is nil or
// len(coefs) == knots.Count() — confirm call sites guarantee this.
func NewNaturalCubicSplines(knots knot.Knot, coefs []float64) *NaturalCubicSplines {
	return &NaturalCubicSplines{
		splines: buildNaturalCubicSplines(knots),
		knots: knots,
		coefs: mat.NewVecDense(knots.Count(), coefs),
	}
}
// Solve precomputes the matrix needed to fit a smoothing spline with
// penalty weight lambda. It forms N^T*N + lambda*S (N: basis matrix
// from calcBasisMatrix, S: roughness-penalty matrix from
// calcSmoothMatrix), inverts it via Cholesky factorization, and stores
// (N^T*N + lambda*S)^-1 * N^T for later use by Interpolate.
// It panics when the penalized matrix is not positive definite.
func (ncs *NaturalCubicSplines) Solve(lambda float64) {
	ncs.lambda = lambda
	N := ncs.calcBasisMatrix()
	S := ncs.calcSmoothMatrix()
	var NtN mat.Dense
	NtN.Mul(N.T(), N)
	S.Scale(lambda, S)
	n, _ := NtN.Dims()
	NtNSym := mat.NewSymDense(n, NtN.RawMatrix().Data)
	SSym := mat.NewSymDense(n, S.RawMatrix().Data)
	NtNSym.AddSym(NtNSym, SSym)
	var chol mat.Cholesky
	if ok := chol.Factorize(NtNSym); !ok {
		// Descriptive message instead of the former ">>>>>>>>>>" marker.
		panic("cubicSpline: Cholesky factorization failed: matrix is not positive definite")
	}
	var cholInv mat.SymDense
	chol.InverseTo(&cholInv)
	var all mat.Dense
	all.Mul(&cholInv, N.T())
	ncs.solverMatrix = &all
}
// Interpolate fits the spline coefficients to the sample values y using
// the solver matrix prepared by a prior call to Solve.
func (ncs *NaturalCubicSplines) Interpolate(y []float64) {
	var fitted mat.VecDense
	fitted.MulVec(ncs.solverMatrix, mat.NewVecDense(len(y), y))
	ncs.coefs = &fitted
}
// At evaluates the fitted smoothing spline at x by summing the weighted
// basis functions.
func (ncs *NaturalCubicSplines) At(x float64) float64 {
	var sum float64
	for i, basis := range ncs.splines {
		sum += ncs.coefs.AtVec(i) * basis(x)
	}
	return sum
}
// calcBasisMatrix evaluates every basis spline at every knot, producing
// the knots.Count() x len(splines) design matrix N.
func (ncs *NaturalCubicSplines) calcBasisMatrix() *mat.Dense {
	rows := ncs.knots.Count()
	cols := len(ncs.splines)
	m := mat.NewDense(rows, cols, nil)
	for i := 0; i < rows; i++ {
		x := ncs.knots.At(i)
		for j := 0; j < cols; j++ {
			m.Set(i, j, ncs.splines[j](x))
		}
	}
	return m
}
// calcSmoothMatrix builds the roughness-penalty matrix (integrals of
// products of second derivatives of the basis functions) used by Solve.
// The first two basis functions (constant and linear) have zero second
// derivative, so rows and columns 0 and 1 remain zero.
// NOTE(review): the closed-form entries below appear to follow the
// natural-spline penalty from Hastie et al.; verify the coefficients
// against the reference before modifying.
func (ncs *NaturalCubicSplines) calcSmoothMatrix() *mat.Dense {
	n := len(ncs.splines)
	p := mat.NewDense(ncs.knots.Count(), n, nil)
	knotEnd := ncs.knots.At(ncs.knots.Count() - 1)
	knotEndEnd := ncs.knots.At(ncs.knots.Count() - 2)
	// Diagonal entries.
	for j := 2; j < n; j++ {
		v := 12.0 * (knotEnd - ncs.knots.At(j-2))
		p.Set(j, j, v)
	}
	// Upper-triangle entries.
	for j := 2; j < n; j++ {
		knotJ := ncs.knots.At(j - 2)
		diffJ := knotEndEnd - knotJ
		for m := j + 1; m < n; m++ {
			knotM := ncs.knots.At(m - 2)
			diffM := knotEndEnd - knotM
			v := 12.0*(knotEnd-knotEndEnd) + 6.0*(diffM/diffJ)*(2*knotEndEnd-3*knotM+knotJ)
			p.Set(j, m, v)
		}
	}
	// Mirror the upper triangle into the lower triangle (symmetry).
	for j := 2; j < n; j++ {
		for m := 0; m < j; m++ {
			p.Set(j, m, p.At(m, j))
		}
	}
	return p
}
// piecewiseCubic returns the truncated cubic basis function
// f(x) = max(0, x-k)^3: zero below the knot k, (x-k)^3 at and above it.
func piecewiseCubic(k float64) CubicSpline {
	return func(x float64) float64 {
		if x < k {
			return 0.0
		}
		d := x - k
		return d * d * d
	}
}
// buildNaturalCubicSplines constructs the natural cubic spline basis
// (Hastie et al., eq. 5.4-5.5): a constant, the identity, and for each
// interior knot a difference of scaled truncated cubics that is linear
// beyond the boundary knots.
func buildNaturalCubicSplines(knots knot.Knot) []CubicSpline {
	splines := make([]CubicSpline, knots.Count())
	// N1(x) = 1 and N2(x) = x.
	splines[0] = func(float64) float64 { return 1 }
	splines[1] = func(x float64) float64 { return x }
	knotEnd := knots.At(knots.Count() - 1)
	pEnd := piecewiseCubic(knotEnd)
	// dEnd is d_{K-1}(x), built from the last two knots.
	dEnd := func(x float64) float64 {
		knotLastToSecond := knots.At(knots.Count() - 2)
		p := piecewiseCubic(knotLastToSecond)
		return (p(x) - pEnd(x)) / (knotEnd - knotLastToSecond)
	}
	// N_{k+2}(x) = d_k(x) - d_{K-1}(x); l is per-iteration, so each
	// closure captures its own knot.
	for k := 0; k < knots.Count()-2; k++ {
		l := knots.At(k)
		splines[k+2] = func(x float64) float64 {
			p := piecewiseCubic(l)
			return (p(x)-pEnd(x))/(knotEnd-l) - dEnd(x)
		}
	}
	return splines
}
package simpleio
import (
"fmt"
"log"
"math/big"
"strconv"
"strings"
)
// BytesToInt parses the decimal integer contained in b; any parse
// failure is routed through StdError.
func BytesToInt(b []byte) int {
	return StringToInt(string(b))
}
// StringToInt parses the decimal integer contained in s; any parse
// failure is routed through StdError.
func StringToInt(s string) int {
	n, err := strconv.Atoi(s)
	StdError(err)
	return n
}
// StringToUInt16 parses s as a base-10 uint16 and panics (via
// log.Panicf) when s is not a valid uint16. The previous doc comment
// named the function incorrectly ("StringToUint16").
func StringToUInt16(s string) uint16 {
	n, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		// log.Panicf replaces the log.Panic(fmt.Sprintf(...)) pairing.
		log.Panicf("Error: trouble converting \"%s\" to a uint16\n", s)
	}
	return uint16(n)
}
// StringToUInt32 parses the decimal integer in s and converts it to
// uint32; any parse failure is routed through StdError.
// NOTE(review): the value goes through Atoi (signed int), so negative
// input or values above the uint32 range silently wrap on conversion —
// confirm whether callers rely on this before tightening to ParseUint.
func StringToUInt32(s string) uint32 {
	answer, err := strconv.Atoi(s)
	StdError(err)
	return uint32(answer)
}
// StringToFloat parses s as a float32; parse failures are routed
// through StdError.
func StringToFloat(s string) float32 {
	f, err := strconv.ParseFloat(s, 32)
	StdError(err)
	return float32(f)
}
// StringToFloat64 parses s as a float64; parse failures are routed
// through StdError.
func StringToFloat64(s string) float64 {
	f, err := strconv.ParseFloat(s, 64)
	StdError(err)
	return f
}
// ScientificNotation parses s (which may use scientific notation, e.g.
// "1.5e-8") into a float64 via math/big; parse failures are routed
// through StdError.
func ScientificNotation(s string) float64 {
	parsed, _, err := big.ParseFloat(s, 10, 0, big.ToNearestEven)
	StdError(err)
	result, _ := parsed.Float64()
	return result
}
// IntToString formats i as a base-10 string.
// strconv.Itoa replaces fmt.Sprintf("%d", i): identical output without
// fmt's reflection-based formatting overhead.
func IntToString(i int) string {
	return strconv.Itoa(i)
}
// StringToIntSlice splits comma-separated column data into a slice of
// ints. A single trailing comma is tolerated ("1,2," parses as [1 2]).
// An empty input returns nil; the previous implementation indexed
// column[len(column)-1] unconditionally and panicked on "".
// (The doc comment previously named the function "StringToInts".)
func StringToIntSlice(column string) []int {
	if column == "" {
		return nil
	}
	work := strings.Split(column, ",")
	size := len(work)
	// Drop the empty trailing element produced by a trailing comma.
	if column[len(column)-1] == ',' {
		size--
	}
	answer := make([]int, size)
	for i := 0; i < size; i++ {
		answer[i] = StringToInt(work[i])
	}
	return answer
}
// IntSliceToString renders nums as a comma-terminated list, e.g.
// [1 2 3] -> "1,2,3,". Note the trailing comma after every element,
// matching the trailing-comma tolerance of StringToIntSlice.
func IntSliceToString(nums []int) string {
	var sb strings.Builder
	sb.Grow(2 * len(nums))
	for _, n := range nums {
		sb.WriteString(strconv.Itoa(n))
		sb.WriteByte(',')
	}
	return sb.String()
}
// Int16ToString formats num as a base-10 string.
func Int16ToString(num int16) string {
	return strconv.FormatInt(int64(num), 10)
}
// Float32ToString formats num with exactly three digits after the
// decimal point (e.g. 1.5 -> "1.500").
func Float32ToString(num float32) string {
	return strconv.FormatFloat(float64(num), 'f', 3, 32)
}
// Float64ToString formats num with exactly three digits after the
// decimal point.
func Float64ToString(num float64) string {
	return strconv.FormatFloat(num, 'f', 3, 64)
}
package maps
import (
"golang.org/x/exp/constraints"
. "github.com/noxer/nox/dot"
"github.com/noxer/nox/slice"
"github.com/noxer/nox/tuple"
)
// Keys collects the keys of m into a freshly allocated slice, in map
// iteration (i.e. unspecified) order.
func Keys[K comparable, V any](m map[K]V) []K {
	out := make([]K, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	return out
}
// SortedKeys returns the keys of m sorted in ascending order.
func SortedKeys[K constraints.Ordered, V any](m map[K]V) []K {
	keys := Keys(m)
	slice.Sort(keys)
	return keys
}
// Values collects the values of m into a freshly allocated slice, in
// map iteration (i.e. unspecified) order.
func Values[K comparable, V any](m map[K]V) []V {
	out := make([]V, 0, len(m))
	for _, value := range m {
		out = append(out, value)
	}
	return out
}
// SortedValues returns the values of m sorted in ascending order.
func SortedValues[K comparable, V constraints.Ordered](m map[K]V) []V {
	values := Values(m)
	slice.Sort(values)
	return values
}
// ValuesBySortedKeys returns the values of m ordered by their
// corresponding keys sorted ascending.
func ValuesBySortedKeys[K constraints.Ordered, V any](m map[K]V) []V {
	keys := SortedKeys(m)
	values := make([]V, len(m))
	for i, k := range keys {
		values[i] = m[k]
	}
	return values
}
// KeyValues returns the entries of m as key-value pairs, in map
// iteration (i.e. unspecified) order.
func KeyValues[K comparable, V any](m map[K]V) []tuple.T2[K, V] {
	tuples := make([]tuple.T2[K, V], 0, len(m))
	for k, v := range m {
		tuples = append(tuples, tuple.T2[K, V]{A: k, B: v})
	}
	return tuples
}
// SortedKeyValues returns the entries of m as key-value pairs sorted
// by key ascending.
func SortedKeyValues[K constraints.Ordered, V any](m map[K]V) []tuple.T2[K, V] {
	keyValues := KeyValues(m)
	slice.SortBy(keyValues, func(t tuple.T2[K, V]) K { return t.A })
	return keyValues
}
// SumKeys returns the sum of all keys of m (the zero value for an
// empty map).
func SumKeys[K Number, V any](m map[K]V) (sum K) {
	for k := range m {
		sum += k
	}
	return sum
}
// SumValues returns the sum of all values of m (the zero value for an
// empty map).
func SumValues[K comparable, V Number](m map[K]V) (sum V) {
	for _, v := range m {
		sum += v
	}
	return sum
}
// ProdKeys returns the product of all keys of m (1 for an empty map).
// The accumulator now starts at 1: it previously started at the zero
// value, which made every product 0.
func ProdKeys[K Number, V any](m map[K]V) K {
	prod := K(1)
	for k := range m {
		prod *= k
	}
	return prod
}
// ProdValues returns the product of all values of m (1 for an empty
// map). Same fix as ProdKeys: the accumulator starts at 1 instead of 0.
func ProdValues[K comparable, V Number](m map[K]V) V {
	prod := V(1)
	for _, v := range m {
		prod *= v
	}
	return prod
}
type mapEnumerator[K comparable, V any] struct {
m map[K]V
keys []K
}
func (e mapEnumerator[K, V]) Next() bool {
switch len(e.keys) {
case 1:
e.keys = nil
fallthrough
case 0:
return false
}
e.keys = e.keys[1:]
return true
}
func (e mapEnumerator[K, V]) Value() tuple.T2[K, V] {
return tuple.T2[K, V]{A: e.keys[0], B: e.m[e.keys[0]]}
} | maps/maps.go | 0.818047 | 0.433322 | maps.go | starcoder |
package quality
import (
"github.com/biogo/biogo/alphabet"
"github.com/biogo/biogo/seq"
)
// Qsolexas is a slice of Solexa quality scores that satisfies the
// alphabet.Slice interface.
type Qsolexas []alphabet.Qsolexa
// Make allocates a new Qsolexas with the given length and capacity.
func (q Qsolexas) Make(len, cap int) alphabet.Slice { return make(Qsolexas, len, cap) }
// Len returns the number of scores in the slice.
func (q Qsolexas) Len() int { return len(q) }
// Cap returns the capacity of the underlying slice.
func (q Qsolexas) Cap() int { return cap(q) }
// Slice returns the subslice [start, end) as an alphabet.Slice.
func (q Qsolexas) Slice(start, end int) alphabet.Slice { return q[start:end] }
// Append concatenates a (which must be a Qsolexas) onto q.
func (q Qsolexas) Append(a alphabet.Slice) alphabet.Slice {
	return append(q, a.(Qsolexas)...)
}
// Copy copies scores from a (which must be a Qsolexas) into q and
// returns the number of elements copied.
func (q Qsolexas) Copy(a alphabet.Slice) int { return copy(q, a.(Qsolexas)) }
// Solexa is a Solexa-scored quality sequence: per-position scores plus
// the annotation (ID, Offset, ...) and the encoding used when rendering
// scores as letters. Positions passed to the positional methods are
// absolute and are mapped into the backing slice via i - q.Offset.
type Solexa struct {
	seq.Annotation
	Qual Qsolexas
	Encode alphabet.Encoding
}
// NewSolexa creates a new Solexa quality sequence with the given ID,
// a private copy of the scores q, and the given encoding.
func NewSolexa(id string, q []alphabet.Qsolexa, encode alphabet.Encoding) *Solexa {
	return &Solexa{
		Annotation: seq.Annotation{ID: id},
		Qual: append([]alphabet.Qsolexa(nil), q...),
		Encode: encode,
	}
}
// Slice returns the underlying quality score slice.
func (q *Solexa) Slice() alphabet.Slice { return q.Qual }
// SetSlice replaces the underlying quality score slice; sl must be a
// Qsolexas.
func (q *Solexa) SetSlice(sl alphabet.Slice) { q.Qual = sl.(Qsolexas) }
// Append appends the given scores to the sequence.
func (q *Solexa) Append(a ...alphabet.Qsolexa) { q.Qual = append(q.Qual, a...) }
// At returns the raw score at position i.
func (q *Solexa) At(i int) alphabet.Qsolexa { return q.Qual[i-q.Offset] }
// EAt returns the error probability at position i.
func (q *Solexa) EAt(i int) float64 { return q.Qual[i-q.Offset].ProbE() }
// Set sets the raw score at position i to qual; the error is always nil.
func (q *Solexa) Set(i int, qual alphabet.Qsolexa) error { q.Qual[i-q.Offset] = qual; return nil }
// SetE sets the score at position i from the error probability e; the
// error is always nil.
func (q *Solexa) SetE(i int, e float64) error {
	q.Qual[i-q.Offset] = alphabet.Esolexa(e)
	return nil
}
// QEncode encodes the quality at position i to a letter using the
// sequence's Encode setting.
func (q *Solexa) QEncode(i int) byte {
	return q.Qual[i-q.Offset].Encode(q.Encode)
}
// QDecode decodes a quality letter to a Solexa score using the
// sequence's Encode setting.
func (q *Solexa) QDecode(l byte) alphabet.Qsolexa { return q.Encode.DecodeToQsolexa(l) }
// Encoding returns the quality encoding in use.
func (q *Solexa) Encoding() alphabet.Encoding { return q.Encode }
// SetEncoding sets the quality encoding to e; the error is always nil.
func (q *Solexa) SetEncoding(e alphabet.Encoding) error { q.Encode = e; return nil }
// Len returns the length of the score sequence.
func (q *Solexa) Len() int { return len(q.Qual) }
// Start returns the start position of the score sequence.
func (q *Solexa) Start() int { return q.Offset }
// End returns the end position of the score sequence.
func (q *Solexa) End() int { return q.Offset + q.Len() }
// Copy returns a deep copy of the quality sequence (annotation copied
// by value, scores cloned).
func (q *Solexa) Copy() seq.Quality {
	c := *q
	c.Qual = append([]alphabet.Qsolexa(nil), q.Qual...)
	return &c
}
// Reverse reverses the order of the scores in place.
func (q *Solexa) Reverse() {
	l := q.Qual
	for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
		l[i], l[j] = l[j], l[i]
	}
}
// String renders the scores as their encoded letters.
func (q *Solexa) String() string {
	qs := make([]byte, 0, len(q.Qual))
	for _, s := range q.Qual {
		qs = append(qs, s.Encode(q.Encode))
	}
	return string(qs)
}
package pops
import (
"time"
"sort"
)
// Periods is a collection of Period values held behind a private slice.
type Periods struct {
	ps []Period
}
// NewPeriods builds a Periods holding a private copy of periods, so
// later changes to the caller's slice cannot leak in.
func NewPeriods(periods []Period) Periods {
	copied := make([]Period, len(periods))
	copy(copied, periods)
	return Periods{ps: copied}
}
// NewPeriodsWithSingleTimeRange builds a Periods containing the single
// period [startIncl, endExcl); it fails when the range is invalid.
func NewPeriodsWithSingleTimeRange(startIncl, endExcl time.Time) (periods Periods, err error) {
	var period Period
	period, err = NewPeriod(startIncl, endExcl)
	if err != nil {
		return Periods{}, err
	}
	return NewPeriods([]Period{period}), nil
}
// AsSlice returns a defensive copy of the contained periods.
func (periods Periods) AsSlice() []Period {
	out := make([]Period, len(periods.ps))
	copy(out, periods.ps)
	return out
}
// appendTimedBoundaries appends, for every period, a start boundary and
// an end boundary carrying the given impact, and returns the extended
// slice.
func appendTimedBoundaries(inputTimedBoundaries []timedBoundary, periods []Period, impact int) []timedBoundary {
	out := inputTimedBoundaries
	for _, p := range periods {
		out = append(out,
			timedBoundary{time: p.startIncl, isStart: true, impact: impact},
			timedBoundary{time: p.endExcl, isStart: false, impact: impact})
	}
	return out
}
// stitchAdjacentPeriods coalesces runs of periods in which each period
// starts exactly where the previous one ends, returning the merged
// periods. It expects its input ordered by time, as produced by apply.
func stitchAdjacentPeriods(periods []Period) (stitched []Period, err error) {
	for pos := 0; pos < len(periods); pos++ {
		lowerBoundary := periods[pos].startIncl
		upperBoundary := periods[pos].endExcl
		// Absorb every following period that is exactly adjacent; pos is
		// advanced past the absorbed periods.
		for scan := pos + 1; scan < len(periods); scan++ {
			periodToMerge := periods[scan]
			if periodToMerge.startIncl.Equal(upperBoundary) {
				upperBoundary = periodToMerge.endExcl
				pos ++
				continue
			}
			break
		}
		newPeriod, err := NewPeriod(lowerBoundary, upperBoundary)
		if err != nil {
			return nil, err
		}
		stitched = append(stitched, newPeriod)
	}
	return
}
// apply merges the boundary events of the two period sets t and o.
// Each period contributes a start event (+impact) and an end event
// (-impact); scanning the time-sorted events, spans where the running
// impact is positive become result periods. With impacts (1, 1) this is
// a union, with (1, -1) a subtraction. Adjacent results are stitched
// together before returning.
func apply(t, o []Period, tImpact, oImpact int) (result Periods, err error) {
	// Start with length 0: the previous make seeded the slice with
	// len(t)+len(o) zero-valued boundary entries that were then sorted
	// and scanned along with the real ones. Each period contributes two
	// boundaries, hence the doubled capacity.
	tbs := make([]timedBoundary, 0, 2*(len(t)+len(o)))
	tbs = appendTimedBoundaries(tbs, o, oImpact)
	tbs = appendTimedBoundaries(tbs, t, tImpact)
	sort.Sort(timedBoundaries(tbs))
	var newPeriods []Period
	var currentPeriodStart time.Time
	previousImpact, currentImpact := 0, 0
	for _, tb := range tbs {
		if tb.isStart {
			currentImpact += tb.impact
		} else {
			currentImpact -= tb.impact
		}
		// Rising edge: a covered span begins.
		if currentImpact > 0 && previousImpact == 0 {
			currentPeriodStart = tb.time
		}
		// Falling edge: the covered span ends (zero-length spans dropped).
		if currentImpact == 0 && previousImpact > 0 && !currentPeriodStart.Equal(tb.time) {
			period, err := NewPeriod(currentPeriodStart, tb.time)
			if err != nil {
				return Periods{}, err
			}
			newPeriods = append(newPeriods, period)
		}
		previousImpact = currentImpact
	}
	ps, err := stitchAdjacentPeriods(newPeriods)
	return NewPeriods(ps), err
}
// Subtract returns the spans covered by periods but not by o.
func (periods Periods) Subtract(o Periods) (result Periods, err error) {
	return apply(periods.ps, o.ps, 1, -1)
}
// Union returns the spans covered by periods, by o, or by both.
func (periods Periods) Union(o Periods) (result Periods, err error) {
	return apply(periods.ps, o.ps, 1, 1)
}
package validator
import (
"reflect"
"regexp"
"strconv"
)
var (
	// fixedLengthRegex matches "string(n)" and captures the exact length n.
	fixedLengthRegex = regexp.MustCompile(`^string\((\d+)\)$`)
	// variableLengthRegex matches "string(a,b)" (optional space after the
	// comma) and captures the inclusive bounds a and b.
	variableLengthRegex = regexp.MustCompile(`^string\((\d+), ?(\d+)\)$`)
)
// StringType makes the types below available in the aicra configuration:
//   - "string":      any string is valid
//   - "string(n)":   strings with an exact size of n are valid
//   - "string(a,b)": strings with a size between a and b are valid
//     (both bounds included)
type StringType struct{}
// GoType reports that values produced by this validator are plain Go
// strings.
func (StringType) GoType() reflect.Type {
	return reflect.TypeOf("")
}
// Validator returns the validation function for the given typename, or
// nil when typename is not one of "string", "string(n)", "string(a,b)".
// The returned function accepts string and []byte values; length bounds
// are checked against the byte length (not the rune count).
func (s StringType) Validator(typename string, avail ...Type) ValidateFunc {
	var (
		simple                = (typename == "string")
		fixedLengthMatches    = fixedLengthRegex.FindStringSubmatch(typename)
		variableLengthMatches = variableLengthRegex.FindStringSubmatch(typename)
	)
	// Unknown typename: let another validator handle it.
	if !simple && fixedLengthMatches == nil && variableLengthMatches == nil {
		return nil
	}
	// Extract the length bounds up front. The previous implementation
	// also carried a `mustFail` flag that was tested in the closure but
	// never set anywhere — dead code, now removed.
	var min, max int
	switch {
	case fixedLengthMatches != nil:
		exact, ok := s.getFixedLength(fixedLengthMatches)
		if !ok {
			return nil
		}
		min, max = exact, exact
	case variableLengthMatches != nil:
		lo, hi, ok := s.getVariableLength(variableLengthMatches)
		if !ok {
			return nil
		}
		min, max = lo, hi
	}
	return func(value interface{}) (interface{}, bool) {
		// Accept string directly and []byte by conversion.
		strValue, isString := value.(string)
		if !isString {
			if byteSlice, isByteSlice := value.([]byte); isByteSlice {
				strValue = string(byteSlice)
				isString = true
			}
		}
		if !isString {
			return "", false
		}
		if simple {
			return strValue, true
		}
		// Check the byte length against the extracted bounds.
		l := len(strValue)
		return strValue, l >= min && l <= max
	}
}
// getFixedLength returns the fixed length from regex matches and a success state.
func (StringType) getFixedLength(regexMatches []string) (int, bool) {
// incoherence error
if regexMatches == nil || len(regexMatches) < 2 {
return 0, false
}
// extract length
fixedLength, err := strconv.ParseInt(regexMatches[1], 10, 64)
if err != nil || fixedLength < 0 {
return 0, false
}
return int(fixedLength), true
}
// getVariableLength returns the length min and max from regex matches and a success state.
func (StringType) getVariableLength(regexMatches []string) (int, int, bool) {
// incoherence error
if regexMatches == nil || len(regexMatches) < 3 {
return 0, 0, false
}
// extract minimum length
minLen, err := strconv.ParseInt(regexMatches[1], 10, 64)
if err != nil || minLen < 0 {
return 0, 0, false
}
// extract maximum length
maxLen, err := strconv.ParseInt(regexMatches[2], 10, 64)
if err != nil || maxLen < 0 {
return 0, 0, false
}
return int(minLen), int(maxLen), true
} | validator/string.go | 0.698124 | 0.528533 | string.go | starcoder |
package mparser
import (
"bytes"
"encoding/xml"
"log"
"github.com/gomarkdown/markdown/ast"
"github.com/mmarkdown/mmark/mast"
"github.com/mmarkdown/mmark/mast/reference"
)
// CitationToBibliography walks the AST, collects all citations and all
// raw <reference> HTML blocks, and groups the citations into normative
// and informative bibliography nodes. Either result may be nil when no
// citation of that kind exists.
func CitationToBibliography(doc ast.Node) (normative ast.Node, informative ast.Node) {
	// seen de-duplicates citations by anchor; raw maps a lower-cased
	// anchor to the raw <reference> XML found in the document.
	seen := map[string]*mast.BibliographyItem{}
	raw := map[string][]byte{}
	// Gather all citations.
	// Gather all reference HTML Blocks to see if we have XML we can output.
	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
		switch c := node.(type) {
		case *ast.Citation:
			for i, d := range c.Destination {
				// NOTE(review): the de-dup lookup lower-cases the anchor
				// but the insert below stores the original case, so
				// mixed-case duplicates could slip through — confirm.
				if _, ok := seen[string(bytes.ToLower(d))]; ok {
					continue
				}
				ref := &mast.BibliographyItem{}
				ref.Anchor = d
				ref.Type = c.Type[i]
				seen[string(d)] = ref
			}
		case *ast.HTMLBlock:
			anchor := anchorFromReference(c.Literal)
			if anchor != nil {
				raw[string(bytes.ToLower(anchor))] = c.Literal
			}
		}
		return ast.GoToNext
	})
	for _, r := range seen {
		// If we have a reference anchor and the raw XML add that here.
		if raw, ok := raw[string(bytes.ToLower(r.Anchor))]; ok {
			var x reference.Reference
			if e := xml.Unmarshal(raw, &x); e != nil {
				log.Printf("Failed to unmarshal reference: %q: %s", r.Anchor, e)
				continue
			}
			r.Reference = &x
		}
		// Suppressed citations are grouped with the normative references.
		switch r.Type {
		case ast.CitationTypeInformative:
			if informative == nil {
				informative = &mast.Bibliography{Type: ast.CitationTypeInformative}
			}
			ast.AppendChild(informative, r)
		case ast.CitationTypeSuppressed:
			fallthrough
		case ast.CitationTypeNormative:
			if normative == nil {
				normative = &mast.Bibliography{Type: ast.CitationTypeNormative}
			}
			ast.AppendChild(normative, r)
		}
	}
	return normative, informative
}
// NodeBackMatter returns the back matter node of doc — the place where
// the bibliography should be injected — or nil when doc has none.
func NodeBackMatter(doc ast.Node) ast.Node {
	var found ast.Node
	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
		mat, ok := node.(*ast.DocumentMatter)
		if ok && mat.Matter == ast.DocumentMatterBack {
			found = mat
			return ast.Terminate
		}
		return ast.GoToNext
	})
	return found
}
// anchorFromReference parses "<reference anchor='CBR03' target=''>" and
// returns the value of the anchor attribute, which is the ID of the
// reference. It returns nil when data is not a reference element or the
// attribute is missing or unterminated.
func anchorFromReference(data []byte) []byte {
	if !bytes.HasPrefix(data, []byte("<reference ")) {
		return nil
	}
	idx := bytes.Index(data, []byte("anchor="))
	if idx < 0 {
		return nil
	}
	start := idx + len("anchor=")
	if start >= len(data) {
		return nil
	}
	// The value is delimited by whatever quote character follows "anchor=".
	quote := data[start]
	end := bytes.IndexByte(data[start+1:], quote)
	if end < 0 {
		// No closing quote: unterminated attribute value.
		return nil
	}
	return data[start+1 : start+1+end]
}
// ReferenceHook is the hook used to parse reference nodes.
// When data starts with a complete <reference> element it returns an
// HTMLBlock holding the (re-indented) XML and the number of bytes consumed.
func ReferenceHook(data []byte) (ast.Node, []byte, int) {
	ref, ok := IsReference(data)
	if !ok {
		return nil, nil, 0
	}
	block := &ast.HTMLBlock{}
	block.Literal = fmtReference(ref)
	// Consume exactly the bytes of the reference element.
	return block, nil, len(ref)
}
// IsReference returns the reference element and whether data starts with
// one. A reference runs from a leading "<reference " up to and including
// the first "</reference>" closing tag, across lines if necessary.
func IsReference(data []byte) ([]byte, bool) {
	if !bytes.HasPrefix(data, []byte("<reference ")) {
		return nil, false
	}
	// Scan for the end-of-reference marker. The previous hand-rolled scan
	// had an unreachable `i > len(data)` check, so an unterminated
	// reference was wrongly accepted as covering the entire input.
	end := bytes.Index(data, []byte("</reference>"))
	if end < 0 {
		// No end-of-reference marker.
		return nil, false
	}
	return data[:end+len("</reference>")], true
}
// fmtReference re-indents the XML of a reference element. When the data
// cannot be parsed or re-serialized it is returned unchanged.
func fmtReference(data []byte) []byte {
	var ref reference.Reference
	if err := xml.Unmarshal(data, &ref); err != nil {
		return data
	}
	pretty, err := xml.MarshalIndent(ref, "", " ")
	if err != nil {
		return data
	}
	return pretty
}
// AddBibliography adds the bibliography to the document. It will be
// added just after the backmatter node. If that node can't be found this
// function returns false and does nothing.
func AddBibliography(doc ast.Node) bool {
where := NodeBackMatter(doc)
if where == nil {
return false
}
norm, inform := CitationToBibliography(doc)
if norm != nil {
ast.AppendChild(where, norm)
}
if inform != nil {
ast.AppendChild(where, inform)
}
return (norm != nil) || (inform != nil)
} | vendor/github.com/mmarkdown/mmark/mparser/bibliography.go | 0.52074 | 0.461199 | bibliography.go | starcoder |
package app
import "github.com/timshannon/townsourced/data"
// Help is a help document entry
type Help struct {
	Key      data.Key `json:"key"`      // identifier used to look the document up
	Title    string   `json:"title"`    // human-readable title of the document
	Document string   `json:"document"` // document body (markdown in the current placeholder)
}
// HelpGet Retrieves a help document
func HelpGet(key data.Key) (*Help, error) {
	// Placeholder implementation: every key currently resolves to the same
	// static FAQ document.
	help := &Help{
		Key:      key,
		Title:    "FAQ",
		Document: helpPlaceholder,
	}
	return help, nil
}
// this is a placeholder until the full help system is put into place.
// Fixed user-facing typos: "of of", "a safety precautions", "you planning".
const helpPlaceholder = `
# Customize your Profile
Welcome to Townsourced! Setting up your profile is an important step to being an active member of your Townsourced Towns.

## Why is it important to set up my profile?
* Change your Name
	* Townsourced allows you to show only the name you want to be seen.
	* If you prefer a nickname or to have your last name abbreviated to just an initial, you can do so at the top of your profile image by clicking the pencil icon.
* Verify your email address
	* Verifying email address allows Townsourced to be sure that when notifications are sent to your email, they are actually going to you.
	* You can verify your email address by clicking “verify email address” underneath your profile picture.
* Profile image
	* You do not have to add a profile image, it is recommended though.
	* Townsourced recommends buyers and sellers to look at users profiles before meeting up with other users as a safety precaution. Read more about that under **Safety Tips**.

# Tips to Help Your Post Get Noticed

## Add a picture!
A picture is worth 1,000 words, so make use of Townsourced’s add a photo feature! Clear photos with uncluttered backgrounds will best represent your post.

### You can add pictures two ways.
1. Click the gray “Select an Image” box and choose a picture from your desktop or phone.
1. You can also drag and drop a picture from your computer.

## Add \#Hashtags so people can easily discover your post!
\#Hashtags are a great way to help people find your post. The more \#Hashtags you add that are relevant to your post, the easier your post will be found. The most common \#hashtags that are used in Townsourced will pop up as you type.

### You can insert \#hashtags in two ways
1. Type “ # “ into the text box
	* Start typing in your hashtag
	* You can also choose from the popup selection of the most used hashtags.
1. Insert a common hashtag image
	1. Click the “smiley face emoji” tab
	1. Select “Tags” icon.
	1. Choose a Tag.
1. If you are not sure what the Hashtags mean, hover over the Hashtag and a tool tip will inform you.

## Try a different Layout!
Not every post works best with the “Standard” layout. Townsourced allows you to customize your post so you can best represent what you are posting about.
After you write your post click on the “Preview” tab next to the “write” tab in the upper left corner of the text box. When you switch to the “Preview” tab you will find four layout options to choose from. You can easily view each option by clicking on a layout button.
* **Standard**
	* Standard Layout puts the text on top and the images at the bottom. This is the format that will be selected for your post if you opt not to customize.
* **Article**
	* Think of an Article like reading a newspaper or magazine. If you have a lot of text to get across but still want to feature one image, Article is the best fit for your post.
* **Gallery**
	* Gallery Layout highlights the photos of your posts and lets people easily toggle through your photos. This is great for photos that show an item from different angles or also showing a wide range of items that will be sold at a Garage Sale.
* **Poster**
	* Tryout the Poster layout if you have one great picture that tells people what you want them to know. Think of the Poster Layout like a billboard or a flyer you would stick to a bulletin board.

# Townsourced Safety Tips
Safety is very important to Townsourced. That is why, with Townsourced, you never have to share a personal email account or phone number and your personal details are private.

## Meet Up Safety Tips
Before meeting up with a new person, Townsourced recommends all their users to read and follow these Safety Tips.
* Check the profile of the user you are planning on meeting.
	* Do they have a personal profile photo?
		* When meeting up with someone new it is good to know who you will be looking for and what to expect.
	* Note how long they have been a member.
		* The longer a user has been a member, the more likely they will be a legitimate user.
	* Post History
		* Check a person’s post history so you know if a sale of this kind is typical or atypical for the person. Have they sold things before? What kinds of things are they posting to Townsourced?
	* Comment History
		* What someone comments on the internet says a lot about a person. Are the user’s comments overall constructive and positive? Are they adding value to the community? Are they respectful? These interactions can give you a good idea of what type of person you will be dealing with.
* Meeting Place
	* Check your “Towns” profile page to see if there are any recommended meet up places in your area.
	* Meet in a well-lit, public place
	* Meet up at places with security cameras whenever possible
* If possible, bring a friend
* Always tell someone your plans
	* Where you will meet
	* What you will be exchanging
	* Who you expect to meet
	* When the meeting should be done
	* Let them know when the meetup is complete
* If possible, bring a cell phone with you

# Categories
To better help filter and find posts, Townsourced has created broad categories.
* **Notices**
	* Examples of Notices
		* Town's Garbage Pickup day will be a day late due to weather
		* Lost and Found
		* General Notices that Moderators release to the “Town”
		* Town Pool is opening for the summer!
* **Buy & Sell**
	* Examples of Buy & Sell
		* Garage Sales
		* Piano for Sale
		* Looking to buy a used snowblower
* **Events**
	* Examples of Events
		* Upcoming community Parade
		* Local Team Fundraiser event
		* Music in the Park
		* Local Band Gigs
* **Jobs**
	* Examples of Jobs
		* Looking to hire a dog sitter
		* House Painter for Hire
		* Teenager looking for babysitter job
* **Volunteer**
	* Examples of Volunteer
		* Volunteer Opportunity for Park Clean Up
		* Looking for Volunteers to help at Food Bank
		* I am a Boy Scout looking to earn my Volunteer Badge
		* High School student looking to give back to community while beefing up College Resume
		* Ride Sharing
* **Housing**
	* Examples of Housing
		* Basement apartment for rent
		* Looking for 3 bedroom home in desirable school district
		* Looking for a roommate to help with housing costs.
`
package state
import (
"fmt"
"go-snake-ai/direction"
"go-snake-ai/tile"
"math/rand"
)
// NewState returns a State that has been initialised with empty tiles,
// a randomly placed single-segment snake, and a randomly placed fruit.
func NewState(tileNumX int, tileNumY int) *State {
	total := tileNumX * tileNumY
	changed := make([]*tile.Vector, 0, total)
	tiles := make([][]tile.Type, tileNumY)
	for y := range tiles {
		row := make([]tile.Type, tileNumX)
		for x := range row {
			row[x] = tile.TypeNone
			// Every tile starts out "changed" so the first render draws the whole field.
			changed = append(changed, tile.NewVector(x, y))
		}
		tiles[y] = row
	}

	s := &State{
		tileNumX:     tileNumX,
		tileNumY:     tileNumY,
		totTiles:     total,
		tiles:        tiles,
		changedTiles: changed,
		score:        0,
		maxScore:     total - 1,
	}
	s.spawnSnake()
	s.spawnFruit()
	return s
}
// State represents a game field with tile states.
type State struct {
	tileNumX     int                 // number of tiles along the x axis
	tileNumY     int                 // number of tiles along the y axis
	totTiles     int                 // tileNumX * tileNumY
	tiles        [][]tile.Type       // tile grid, indexed as tiles[y][x]
	changedTiles []*tile.Vector      // tiles marked as changed (populated in NewState)
	snakeDir     direction.Direction // current movement direction of the snake
	snake        []*tile.Vector      // snake segments, head first
	fruit        *tile.Vector        // position of the current fruit
	score        int                 // number of fruits eaten so far
	maxScore     int                 // highest achievable score: totTiles - 1
}
// ValidPosition reports whether the coordinates fall inside the field bounds.
func (s *State) ValidPosition(x int, y int) bool {
	return x >= 0 && x < s.tileNumX && y >= 0 && y < s.tileNumY
}
// SetTile sets the tile at the given coordinates
func (s *State) SetTile(x int, y int, tileType tile.Type) {
	s.tiles[y][x] = tileType
}

// Tile returns the tile at the given coordinates
func (s *State) Tile(x int, y int) tile.Type {
	return s.tiles[y][x]
}

// Tiles returns the tiles from the state
func (s *State) Tiles() [][]tile.Type {
	return s.tiles
}
// Score returns the current score (number of fruits eaten).
func (s *State) Score() int {
	return s.score
}

// TotalTiles returns the total number of tiles on the field.
func (s *State) TotalTiles() int {
	return s.totTiles
}

// MaxScore returns the highest achievable score (total tiles minus one).
func (s *State) MaxScore() int {
	return s.maxScore
}

// Won reports whether the maximum score has been reached.
func (s *State) Won() bool {
	return s.score == s.maxScore
}
// randomFreeTile picks a uniformly random empty tile, or returns nil when
// the field has no free tiles left.
func (s *State) randomFreeTile() *tile.Vector {
	var free []*tile.Vector
	for y := range s.tiles {
		for x, t := range s.tiles[y] {
			if t == tile.TypeNone {
				free = append(free, tile.NewVector(x, y))
			}
		}
	}
	if len(free) == 0 {
		return nil
	}
	return free[rand.Intn(len(free))]
}
// spawnSnake places a single-segment snake on a random free tile and picks
// a random initial direction pointing at an adjacent free tile.
func (s *State) spawnSnake() {
	snakePos := s.randomFreeTile()
	// NOTE(review): snakePos is assumed non-nil here (fresh board with free
	// tiles); a completely full board would panic below — confirm this is
	// only called during initialisation.
	s.snake = []*tile.Vector{snakePos}
	s.SetTile(snakePos.X, snakePos.Y, tile.TypeHead)
	// Collect the in-bounds, empty neighbours of the spawn tile.
	freeDirections := make([]direction.Direction, 0)
	if snakePos.X > 0 && s.Tile(snakePos.X-1, snakePos.Y) == tile.TypeNone {
		freeDirections = append(freeDirections, direction.Left)
	}
	if snakePos.X < s.tileNumX-1 && s.Tile(snakePos.X+1, snakePos.Y) == tile.TypeNone {
		freeDirections = append(freeDirections, direction.Right)
	}
	if snakePos.Y > 0 && s.Tile(snakePos.X, snakePos.Y-1) == tile.TypeNone {
		freeDirections = append(freeDirections, direction.Up)
	}
	if snakePos.Y < s.tileNumY-1 && s.Tile(snakePos.X, snakePos.Y+1) == tile.TypeNone {
		freeDirections = append(freeDirections, direction.Down)
	}
	// NOTE(review): rand.Intn panics when freeDirections is empty (only
	// possible on a 1x1 field) — confirm field sizes are always larger.
	randomI := rand.Intn(len(freeDirections))
	s.snakeDir = freeDirections[randomI]
}
// spawnFruit places a fruit on a random free tile. It reports false when
// the field is full and no fruit could be placed.
func (s *State) spawnFruit() bool {
	pos := s.randomFreeTile()
	if pos == nil {
		return false
	}
	s.SetTile(pos.X, pos.Y, tile.TypeFruit)
	s.fruit = pos
	return true
}
// Move advances the snake one tile in dir. It returns false when the move
// ends the game (leaving the field, hitting the snake's own body, or being
// unable to respawn a fruit), and a non-nil error only when no direction
// could be resolved.
func (s *State) Move(dir direction.Direction) (bool, error) {
	curVec := s.SnakeHead()
	var nextVec *tile.Vector
	// Ignore "no direction" and direct reversals: keep moving the way we were going.
	if dir == direction.None || direction.IsOpposite(dir, s.snakeDir) {
		dir = s.snakeDir
	}
	s.snakeDir = dir
	if dir == direction.Up {
		nextVec = tile.NewVector(curVec.X, curVec.Y-1)
	} else if dir == direction.Right {
		nextVec = tile.NewVector(curVec.X+1, curVec.Y)
	} else if dir == direction.Down {
		nextVec = tile.NewVector(curVec.X, curVec.Y+1)
	} else if dir == direction.Left {
		nextVec = tile.NewVector(curVec.X-1, curVec.Y)
	} else {
		return false, fmt.Errorf("unable to move")
	}
	// Moving off the field ends the game.
	if !s.ValidPosition(nextVec.X, nextVec.Y) {
		return false, nil
	}
	targetTile := s.Tile(nextVec.X, nextVec.Y)
	if targetTile == tile.TypeFruit {
		// Next tile is fruit - we can eat it and grow!
		s.extendSnake(nextVec, false)
		// Increment score
		s.score++
		// Respawn fruit
		if !s.spawnFruit() {
			// No free tile left for a fruit: the game is over.
			return false, nil
		}
		return true, nil
	} else if targetTile == tile.TypeNone {
		// Next tile is empty, we can move into it
		s.extendSnake(nextVec, true)
		return true, nil
	} else if targetTile == tile.TypeBody && *nextVec == *s.SnakeTail() {
		// Next tile is the tail of the snake. We can move here as the tail will move when we do
		s.extendSnake(nextVec, true)
		return true, nil
	}
	// Any other body tile means a self-collision: the game is over.
	return false, nil
}
// extendSnake moves the snake's head onto next. When removeTail is false
// the snake grows by one segment (fruit eaten); when true the tail segment
// is dropped so the snake keeps its length.
func (s *State) extendSnake(next *tile.Vector, removeTail bool) {
	// Keep track of old head as we need to change it to TypeBody
	oldHead := s.SnakeHead()
	s.SetTile(oldHead.X, oldHead.Y, tile.TypeBody)
	// Place the head on the next tile.
	s.SetTile(next.X, next.Y, tile.TypeHead)
	// Set new head of snake to the next vector
	s.snake = append([]*tile.Vector{next}, s.snake...)
	if removeTail {
		// Remove tail of snake
		tailVec := s.SnakeTail()
		// Head could have potentially taken the place of the tail.
		// Don't hide the head in this case.
		if *tailVec != *s.SnakeHead() {
			// Set tile of tail to None
			s.SetTile(tailVec.X, tailVec.Y, tile.TypeNone)
		}
		// Remove last vector from snake slice
		s.snake = s.snake[:len(s.snake)-1]
	}
}
// Fruit returns the position of the current fruit.
func (s *State) Fruit() *tile.Vector {
	return s.fruit
}

// SnakeDir returns the snake's current movement direction.
func (s *State) SnakeDir() direction.Direction {
	return s.snakeDir
}

// SnakeHead returns the position of the snake's head segment.
func (s *State) SnakeHead() *tile.Vector {
	return s.snake[0]
}
func (s *State) SnakeTail() *tile.Vector {
snakeLen := len(s.snake)
return s.snake[snakeLen-1]
} | state/state.go | 0.628749 | 0.519826 | state.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.